#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100
-/* Napi polling weight */
-#define MVNETA_RX_POLL_WEIGHT 64
-
/* The two-byte Marvell header. Either contains a special value used
* by Marvell switches when a specific hardware mode is enabled (not
* supported by this driver) or is filled automatically by zeroes on
/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD 532
+/* Max number of allowed TCP segments for software TSO */
+#define MVNETA_MAX_TSO_SEGS 100
+
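+/* Worst-case descriptor budget for a single software-TSO skb: one
+ * header descriptor and one data descriptor per segment, plus up to
+ * MAX_SKB_FRAGS more for fragment spill-over. Assuming the common
+ * MAX_SKB_FRAGS value of 17 (4 KiB pages), this evaluates to
+ * 100 * 2 + 17 = 217 descriptors.
+ */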
+#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE 32
ETH_HLEN + ETH_FCS_LEN, \
MVNETA_CPU_D_CACHE_LINE_SIZE)
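+/* TSO headers are carved out of a per-queue DMA-coherent buffer whose
+ * bus address is txq->tso_hdrs_phys. Descriptors pointing into that
+ * region were never mapped with dma_map_single() and must not be
+ * unmapped; this macro identifies them by address range.
+ */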
+#define IS_TSO_HEADER(txq, addr) \
+ ((addr >= txq->tso_hdrs_phys) && \
+ (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
+
#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
struct mvneta_pcpu_stats {
* descriptor ring
*/
int count;
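+ /* Stop the queue once this many descriptors are outstanding
+ * (leaving room for one more worst-case skb), and wake it again
+ * once the count drops back to the lower wake threshold.
+ */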
+ int tx_stop_threshold;
+ int tx_wake_threshold;
/* Array of transmitted skb */
struct sk_buff **tx_skb;
mvneta_txq_inc_get(txq);
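+ /* Descriptors whose buffer lives in the coherent TSO header
+ * region were never individually mapped, so skip unmapping them.
+ */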
+ if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+ dma_unmap_single(pp->dev->dev.parent,
+ tx_desc->buf_phys_addr,
+ tx_desc->data_size, DMA_TO_DEVICE);
if (!skb)
continue;
-
- dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
- tx_desc->data_size, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
}
txq->count -= tx_done;
if (netif_tx_queue_stopped(nq)) {
- if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
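+ /* Waking at half the stop threshold adds hysteresis, so the
+ * queue does not flap between stopped and awake under load.
+ */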
+ if (txq->count <= txq->tx_wake_threshold)
netif_tx_wake_queue(nq);
}
}
*/
for (i = desc_count - 1; i >= 0; i--) {
struct mvneta_tx_desc *tx_desc = txq->descs + i;

- if (!(tx_desc->command & MVNETA_TXD_F_DESC))
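+ /* Identify TSO header descriptors by address range rather than
+ * by descriptor flags; only non-header buffers were mapped.
+ */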
+ if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
dma_unmap_single(pp->dev->dev.parent,
tx_desc->buf_phys_addr,
tx_desc->data_size,
txq->count += frags;
mvneta_txq_pend_desc_add(pp, txq, frags);
- if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
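+ /* Stop while there is still room for one more worst-case skb
+ * (MVNETA_MAX_SKB_DESCS descriptors).
+ */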
+ if (txq->count >= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
u64_stats_update_begin(&stats->syncp);
{
txq->size = pp->tx_ring_size;
+ /* A queue must always have room for at least one skb.
+ * Therefore, stop the queue when the number of free entries
+ * reaches the maximum number of descriptors per skb.
+ */
+ txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
+ txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
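+ /* For example, with the default 532-entry ring (MVNETA_MAX_TXD)
+ * and MVNETA_MAX_SKB_DESCS of 217 (assuming MAX_SKB_FRAGS == 17),
+ * the queue stops at 532 - 217 = 315 outstanding descriptors and
+ * wakes again at 157.
+ */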
+
/* Allocate memory for TX descriptors */
txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
return -EINVAL;
pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
ring->rx_pending : MVNETA_MAX_RXD;
- pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
- ring->tx_pending : MVNETA_MAX_TXD;
+
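+ /* Clamp the requested size so the ring always fits at least two
+ * worst-case skbs; anything smaller would leave tx_stop_threshold
+ * with less than one skb's worth of headroom.
+ */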
+ pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
+ MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
+ if (pp->tx_ring_size != ring->tx_pending)
+ netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+ pp->tx_ring_size, ring->tx_pending);
if (netif_running(dev)) {
mvneta_stop(dev);
if (dram_target_info)
mvneta_conf_mbus_windows(pp, dram_target_info);
- netif_napi_add(dev, &pp->napi, mvneta_poll, MVNETA_RX_POLL_WEIGHT);
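+ /* NAPI_POLL_WEIGHT is the kernel-wide default of 64, matching
+ * the driver-private weight removed above.
+ */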
+ netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
dev->priv_flags |= IFF_UNICAST_FLT;
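+ /* Advertise the software-TSO segment cap so the stack never
+ * submits an skb needing more than MVNETA_MAX_SKB_DESCS descriptors.
+ */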
+ dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
err = register_netdev(dev);
if (err < 0) {