mlx4_en: Consider inline packets on completion
authorYevgeny Petrilin <yevgenyp@mellanox.co.il>
Thu, 8 Jan 2009 18:57:15 +0000 (10:57 -0800)
committerDavid S. Miller <davem@davemloft.net>
Thu, 8 Jan 2009 18:57:15 +0000 (10:57 -0800)
The completion path was trying to unmap work queue entries that carried
inline packets; since inline data is copied directly into the descriptor,
those entries were never DMA-mapped in the first place.

Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/mlx4/en_tx.c
drivers/net/mlx4/mlx4_en.h

index ff4d75205c25bac5c6acb91fe216320c98fc568b..4afd5993e31c4cd01a2d12a452e5164e0fe92dd8 100644 (file)
@@ -203,19 +203,21 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
 
        /* Optimize the common case when there are no wraparounds */
        if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
-               if (tx_info->linear) {
-                       pci_unmap_single(mdev->pdev,
-                                        (dma_addr_t) be64_to_cpu(data->addr),
+               if (!tx_info->inl) {
+                       if (tx_info->linear) {
+                               pci_unmap_single(mdev->pdev,
+                                       (dma_addr_t) be64_to_cpu(data->addr),
                                         be32_to_cpu(data->byte_count),
                                         PCI_DMA_TODEVICE);
-                       ++data;
-               }
+                               ++data;
+                       }
 
-               for (i = 0; i < frags; i++) {
-                       frag = &skb_shinfo(skb)->frags[i];
-                       pci_unmap_page(mdev->pdev,
-                                      (dma_addr_t) be64_to_cpu(data[i].addr),
-                                      frag->size, PCI_DMA_TODEVICE);
+                       for (i = 0; i < frags; i++) {
+                               frag = &skb_shinfo(skb)->frags[i];
+                               pci_unmap_page(mdev->pdev,
+                                       (dma_addr_t) be64_to_cpu(data[i].addr),
+                                       frag->size, PCI_DMA_TODEVICE);
+                       }
                }
                /* Stamp the freed descriptor */
                for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
@@ -224,27 +226,29 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
                }
 
        } else {
-               if ((void *) data >= end) {
-                       data = (struct mlx4_wqe_data_seg *)
-                                       (ring->buf + ((void *) data - end));
-               }
+               if (!tx_info->inl) {
+                       if ((void *) data >= end) {
+                               data = (struct mlx4_wqe_data_seg *)
+                                               (ring->buf + ((void *) data - end));
+                       }
 
-               if (tx_info->linear) {
-                       pci_unmap_single(mdev->pdev,
-                                        (dma_addr_t) be64_to_cpu(data->addr),
+                       if (tx_info->linear) {
+                               pci_unmap_single(mdev->pdev,
+                                       (dma_addr_t) be64_to_cpu(data->addr),
                                         be32_to_cpu(data->byte_count),
                                         PCI_DMA_TODEVICE);
-                       ++data;
-               }
+                               ++data;
+                       }
 
-               for (i = 0; i < frags; i++) {
-                       /* Check for wraparound before unmapping */
-                       if ((void *) data >= end)
-                               data = (struct mlx4_wqe_data_seg *) ring->buf;
-                       frag = &skb_shinfo(skb)->frags[i];
-                       pci_unmap_page(mdev->pdev,
+                       for (i = 0; i < frags; i++) {
+                               /* Check for wraparound before unmapping */
+                               if ((void *) data >= end)
+                                       data = (struct mlx4_wqe_data_seg *) ring->buf;
+                               frag = &skb_shinfo(skb)->frags[i];
+                               pci_unmap_page(mdev->pdev,
                                        (dma_addr_t) be64_to_cpu(data->addr),
                                         frag->size, PCI_DMA_TODEVICE);
+                       }
                }
                /* Stamp the freed descriptor */
                for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
@@ -790,8 +794,11 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                        wmb();
                        data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
                }
-       } else
+               tx_info->inl = 0;
+       } else {
                build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
+               tx_info->inl = 1;
+       }
 
        ring->prod += nr_txbb;
 
index 2e96c7b2180a86a4010c1b945d3bbe16c06c3b2b..08bf321a9e62b36eb145b1ac5c001048f6ccbcff 100644 (file)
@@ -202,6 +202,7 @@ struct mlx4_en_tx_info {
        u32 nr_txbb;
        u8 linear;
        u8 data_offset;
+       u8 inl;
 };