e1000: rename struct e1000_buffer to e1000_tx_buffer
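
Besides the transmit-side rename, the receive paths in this diff switch to a
dedicated struct e1000_rx_buffer and stop tracking a per-buffer length
(adapter->rx_buffer_len, or PAGE_SIZE in the jumbo path, is used instead).
The e1000.h side of the split is not part of this file's diff; the sketch
below is inferred from the field accesses visible in the hunks and from the
old struct e1000_buffer, so treat it as an illustration rather than the exact
header change:

    /* transmit bookkeeping, one entry per tx descriptor */
    struct e1000_tx_buffer {
            struct sk_buff *skb;
            dma_addr_t dma;
            unsigned long time_stamp;
            u16 length;
            u16 next_to_watch;
            bool mapped_as_page;
            unsigned short segs;
            unsigned int bytecount;
    };

    /* receive bookkeeping: no length field, only what the rx paths below
     * actually touch (skb for the legacy path, page for the jumbo path)
     */
    struct e1000_rx_buffer {
            struct sk_buff *skb;
            dma_addr_t dma;
            struct page *page;
    };
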
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index ad3d5d12173faea9992f9e46f74f57e21043b05d..ccb65ad27fc6a26e8b18f0d03cbb5ea34dd6dd2e 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1497,7 +1497,7 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
        struct pci_dev *pdev = adapter->pdev;
        int size;
 
-       size = sizeof(struct e1000_buffer) * txdr->count;
+       size = sizeof(struct e1000_tx_buffer) * txdr->count;
        txdr->buffer_info = vzalloc(size);
        if (!txdr->buffer_info)
                return -ENOMEM;
@@ -1687,7 +1687,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
        struct pci_dev *pdev = adapter->pdev;
        int size, desc_len;
 
-       size = sizeof(struct e1000_buffer) * rxdr->count;
+       size = sizeof(struct e1000_rx_buffer) * rxdr->count;
        rxdr->buffer_info = vzalloc(size);
        if (!rxdr->buffer_info)
                return -ENOMEM;
@@ -1947,8 +1947,9 @@ void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
                e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
 }
 
-static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
-                                            struct e1000_buffer *buffer_info)
+static void
+e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
+                                struct e1000_tx_buffer *buffer_info)
 {
        if (buffer_info->dma) {
                if (buffer_info->mapped_as_page)
@@ -1977,7 +1978,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
                                struct e1000_tx_ring *tx_ring)
 {
        struct e1000_hw *hw = &adapter->hw;
-       struct e1000_buffer *buffer_info;
+       struct e1000_tx_buffer *buffer_info;
        unsigned long size;
        unsigned int i;
 
@@ -1989,7 +1990,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
        }
 
        netdev_reset_queue(adapter->netdev);
-       size = sizeof(struct e1000_buffer) * tx_ring->count;
+       size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);
 
        /* Zero out the descriptor ring */
@@ -2062,7 +2063,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
                                struct e1000_rx_ring *rx_ring)
 {
        struct e1000_hw *hw = &adapter->hw;
-       struct e1000_buffer *buffer_info;
+       struct e1000_rx_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;
@@ -2073,12 +2074,12 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
                if (buffer_info->dma &&
                    adapter->clean_rx == e1000_clean_rx_irq) {
                        dma_unmap_single(&pdev->dev, buffer_info->dma,
-                                        buffer_info->length,
+                                        adapter->rx_buffer_len,
                                         DMA_FROM_DEVICE);
                } else if (buffer_info->dma &&
                           adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
                        dma_unmap_page(&pdev->dev, buffer_info->dma,
-                                      buffer_info->length,
+                                      adapter->rx_buffer_len,
                                       DMA_FROM_DEVICE);
                }
 
@@ -2099,7 +2100,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
                rx_ring->rx_skb_top = NULL;
        }
 
-       size = sizeof(struct e1000_buffer) * rx_ring->count;
+       size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
        memset(rx_ring->buffer_info, 0, size);
 
        /* Zero out the descriptor ring */
@@ -2678,7 +2679,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
                     __be16 protocol)
 {
        struct e1000_context_desc *context_desc;
-       struct e1000_buffer *buffer_info;
+       struct e1000_tx_buffer *buffer_info;
        unsigned int i;
        u32 cmd_length = 0;
        u16 ipcse = 0, tucse, mss;
@@ -2750,7 +2751,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
                          __be16 protocol)
 {
        struct e1000_context_desc *context_desc;
-       struct e1000_buffer *buffer_info;
+       struct e1000_tx_buffer *buffer_info;
        unsigned int i;
        u8 css;
        u32 cmd_len = E1000_TXD_CMD_DEXT;
@@ -2809,7 +2810,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 {
        struct e1000_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
-       struct e1000_buffer *buffer_info;
+       struct e1000_tx_buffer *buffer_info;
        unsigned int len = skb_headlen(skb);
        unsigned int offset = 0, size, count = 0, i;
        unsigned int f, bytecount, segs;
@@ -2955,7 +2956,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
 {
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_tx_desc *tx_desc = NULL;
-       struct e1000_buffer *buffer_info;
+       struct e1000_tx_buffer *buffer_info;
        u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
        unsigned int i;
 
@@ -3373,7 +3374,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
 
        for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
                struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
-               struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
+               struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
                struct my_u { __le64 a; __le64 b; };
                struct my_u *u = (struct my_u *)tx_desc;
                const char *type;
@@ -3415,7 +3416,7 @@ rx_ring_summary:
 
        for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
                struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
-               struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
+               struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
                struct my_u { __le64 a; __le64 b; };
                struct my_u *u = (struct my_u *)rx_desc;
                const char *type;
@@ -3811,7 +3812,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct e1000_tx_desc *tx_desc, *eop_desc;
-       struct e1000_buffer *buffer_info;
+       struct e1000_tx_buffer *buffer_info;
        unsigned int i, eop;
        unsigned int count = 0;
        unsigned int total_tx_bytes=0, total_tx_packets=0;
@@ -3951,7 +3952,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
 /**
  * e1000_consume_page - helper function
  **/
-static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
+static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
                               u16 length)
 {
        bi->page = NULL;
@@ -3980,6 +3981,113 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
        napi_gro_receive(&adapter->napi, skb);
 }
 
+/**
+ * e1000_tbi_adjust_stats - adjust stats counters for frames accepted via TBI
+ * @hw: Struct containing variables accessed by shared code
+ * @stats: Struct containing statistic counters to adjust
+ * @frame_len: The length of the frame in question
+ * @mac_addr: The Ethernet destination address of the frame in question
+ *
+ * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
+ */
+static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
+                                  struct e1000_hw_stats *stats,
+                                  u32 frame_len, const u8 *mac_addr)
+{
+       u64 carry_bit;
+
+       /* First adjust the frame length. */
+       frame_len--;
+       /* We need to adjust the statistics counters, since the hardware
+        * counters overcount this packet as a CRC error and undercount
+        * the packet as a good packet
+        */
+       /* This packet should not be counted as a CRC error. */
+       stats->crcerrs--;
+       /* This packet does count as a Good Packet Received. */
+       stats->gprc++;
+
+       /* Adjust the Good Octets received counters */
+       carry_bit = 0x80000000 & stats->gorcl;
+       stats->gorcl += frame_len;
+       /* If the high bit of Gorcl (the low 32 bits of the Good Octets
+        * Received Count) was one before the addition,
+        * AND it is zero after, then we lost the carry out,
+        * need to add one to Gorch (Good Octets Received Count High).
+        * This could be simplified if all environments supported
+        * 64-bit integers.
+        */
+       if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
+               stats->gorch++;
+       /* Is this a broadcast or multicast?  Check broadcast first,
+        * since the test for a multicast frame will test positive on
+        * a broadcast frame.
+        */
+       if (is_broadcast_ether_addr(mac_addr))
+               stats->bprc++;
+       else if (is_multicast_ether_addr(mac_addr))
+               stats->mprc++;
+
+       if (frame_len == hw->max_frame_size) {
+               /* In this case, the hardware has overcounted the number of
+                * oversize frames.
+                */
+               if (stats->roc > 0)
+                       stats->roc--;
+       }
+
+       /* Adjust the bin counters when the extra byte put the frame in the
+        * wrong bin. Remember that the frame_len was adjusted above.
+        */
+       if (frame_len == 64) {
+               stats->prc64++;
+               stats->prc127--;
+       } else if (frame_len == 127) {
+               stats->prc127++;
+               stats->prc255--;
+       } else if (frame_len == 255) {
+               stats->prc255++;
+               stats->prc511--;
+       } else if (frame_len == 511) {
+               stats->prc511++;
+               stats->prc1023--;
+       } else if (frame_len == 1023) {
+               stats->prc1023++;
+               stats->prc1522--;
+       } else if (frame_len == 1522) {
+               stats->prc1522++;
+       }
+}
+
+static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
+                                   u8 status, u8 errors,
+                                   u32 length, const u8 *data)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u8 last_byte = *(data + length - 1);
+
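+       /* The TBI workaround accepts certain frames that the hardware flagged
+        * as errored; the stats counters then need to be corrected for them.
+        */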
+       if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
+               unsigned long irq_flags;
+
+               spin_lock_irqsave(&adapter->stats_lock, irq_flags);
+               e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
+               spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
+
+               return true;
+       }
+
+       return false;
+}
+
+static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
+                                         unsigned int bufsz)
+{
+       struct sk_buff *skb = netdev_alloc_skb_ip_align(adapter->netdev, bufsz);
+
+       if (unlikely(!skb))
+               adapter->alloc_rx_buff_failed++;
+       return skb;
+}
+
 /**
  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
  * @adapter: board private structure
@@ -3994,12 +4102,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                                     struct e1000_rx_ring *rx_ring,
                                     int *work_done, int work_to_do)
 {
-       struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_rx_desc *rx_desc, *next_rxd;
-       struct e1000_buffer *buffer_info, *next_buffer;
-       unsigned long irq_flags;
+       struct e1000_rx_buffer *buffer_info, *next_buffer;
        u32 length;
        unsigned int i;
        int cleaned_count = 0;
@@ -4032,7 +4138,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                cleaned = true;
                cleaned_count++;
                dma_unmap_page(&pdev->dev, buffer_info->dma,
-                              buffer_info->length, DMA_FROM_DEVICE);
+                              adapter->rx_buffer_len, DMA_FROM_DEVICE);
                buffer_info->dma = 0;
 
                length = le16_to_cpu(rx_desc->length);
@@ -4040,23 +4146,15 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                /* errors is only valid for DD + EOP descriptors */
                if (unlikely((status & E1000_RXD_STAT_EOP) &&
                    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
-                       u8 *mapped;
-                       u8 last_byte;
-
-                       mapped = page_address(buffer_info->page);
-                       last_byte = *(mapped + length - 1);
-                       if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
-                                      last_byte)) {
-                               spin_lock_irqsave(&adapter->stats_lock,
-                                                 irq_flags);
-                               e1000_tbi_adjust_stats(hw, &adapter->stats,
-                                                      length, mapped);
-                               spin_unlock_irqrestore(&adapter->stats_lock,
-                                                      irq_flags);
+                       u8 *mapped = page_address(buffer_info->page);
+
+                       if (e1000_tbi_should_accept(adapter, status,
+                                                   rx_desc->errors,
+                                                   length, mapped)) {
                                length--;
+                       } else if (netdev->features & NETIF_F_RXALL) {
+                               goto process_skb;
                        } else {
-                               if (netdev->features & NETIF_F_RXALL)
-                                       goto process_skb;
                                /* recycle both page and skb */
                                buffer_info->skb = skb;
                                /* an error means any chain goes out the window
@@ -4175,25 +4273,25 @@ next_desc:
 /* this should improve performance for small packets with large amounts
  * of reassembly being done in the stack
  */
-static void e1000_check_copybreak(struct net_device *netdev,
-                                struct e1000_buffer *buffer_info,
-                                u32 length, struct sk_buff **skb)
+static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
+                                      struct e1000_rx_buffer *buffer_info,
+                                      u32 length, const void *data)
 {
-       struct sk_buff *new_skb;
+       struct sk_buff *skb;
 
        if (length > copybreak)
-               return;
+               return NULL;
 
-       new_skb = netdev_alloc_skb_ip_align(netdev, length);
-       if (!new_skb)
-               return;
+       skb = e1000_alloc_rx_skb(adapter, length);
+       if (!skb)
+               return NULL;
+
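+       /* The receive buffer stays mapped for reuse, so give the CPU a
+        * coherent view of it before copying the frame data out.
+        */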
+       dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
+                               length, DMA_FROM_DEVICE);
+
+       memcpy(skb_put(skb, length), data, length);
 
-       skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
-                                      (*skb)->data - NET_IP_ALIGN,
-                                      length + NET_IP_ALIGN);
-       /* save the skb in buffer_info as good */
-       buffer_info->skb = *skb;
-       *skb = new_skb;
+       return skb;
 }
 
 /**
@@ -4207,12 +4305,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                               struct e1000_rx_ring *rx_ring,
                               int *work_done, int work_to_do)
 {
-       struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_rx_desc *rx_desc, *next_rxd;
-       struct e1000_buffer *buffer_info, *next_buffer;
-       unsigned long flags;
+       struct e1000_rx_buffer *buffer_info, *next_buffer;
        u32 length;
        unsigned int i;
        int cleaned_count = 0;
@@ -4233,10 +4329,19 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                rmb(); /* read descriptor and rx_buffer_info after status DD */
 
                status = rx_desc->status;
-               skb = buffer_info->skb;
-               buffer_info->skb = NULL;
+               length = le16_to_cpu(rx_desc->length);
 
-               prefetch(skb->data - NET_IP_ALIGN);
+               prefetch(buffer_info->skb->data - NET_IP_ALIGN);
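+               /* Small frames are copied into a fresh skb so the original
+                * buffer (and its DMA mapping) can be left in place for reuse;
+                * larger frames take over buffer_info->skb and are unmapped.
+                */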
+               skb = e1000_copybreak(adapter, buffer_info, length,
+                                     buffer_info->skb->data);
+               if (!skb) {
+                       skb = buffer_info->skb;
+                       buffer_info->skb = NULL;
+                       dma_unmap_single(&pdev->dev, buffer_info->dma,
+                                        adapter->rx_buffer_len,
+                                        DMA_FROM_DEVICE);
+                       buffer_info->dma = 0;
+               }
 
                if (++i == rx_ring->count) i = 0;
                next_rxd = E1000_RX_DESC(*rx_ring, i);
@@ -4246,11 +4351,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
                cleaned = true;
                cleaned_count++;
-               dma_unmap_single(&pdev->dev, buffer_info->dma,
-                                buffer_info->length, DMA_FROM_DEVICE);
-               buffer_info->dma = 0;
 
-               length = le16_to_cpu(rx_desc->length);
                /* !EOP means multiple descriptors were used to store a single
                 * packet; if that's the case we need to toss it.  In fact, we
                 * need to toss every packet with the EOP bit clear and the next
@@ -4262,29 +4363,22 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
                if (adapter->discarding) {
                        /* All receives must fit into a single buffer */
-                       e_dbg("Receive packet consumed multiple buffers\n");
-                       /* recycle */
-                       buffer_info->skb = skb;
+                       netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
+                       dev_kfree_skb(skb);
                        if (status & E1000_RXD_STAT_EOP)
                                adapter->discarding = false;
                        goto next_desc;
                }
 
                if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
-                       u8 last_byte = *(skb->data + length - 1);
-                       if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
-                                      last_byte)) {
-                               spin_lock_irqsave(&adapter->stats_lock, flags);
-                               e1000_tbi_adjust_stats(hw, &adapter->stats,
-                                                      length, skb->data);
-                               spin_unlock_irqrestore(&adapter->stats_lock,
-                                                      flags);
+                       if (e1000_tbi_should_accept(adapter, status,
+                                                   rx_desc->errors,
+                                                   length, skb->data)) {
                                length--;
+                       } else if (netdev->features & NETIF_F_RXALL) {
+                               goto process_skb;
                        } else {
-                               if (netdev->features & NETIF_F_RXALL)
-                                       goto process_skb;
-                               /* recycle */
-                               buffer_info->skb = skb;
+                               dev_kfree_skb(skb);
                                goto next_desc;
                        }
                }
@@ -4299,9 +4393,10 @@ process_skb:
                         */
                        length -= 4;
 
-               e1000_check_copybreak(netdev, buffer_info, length, &skb);
-
-               skb_put(skb, length);
+               if (buffer_info->skb == NULL)
+                       skb_put(skb, length);
+               else /* copybreak skb */
+                       skb_trim(skb, length);
 
                /* Receive Checksum Offload */
                e1000_rx_checksum(adapter,
@@ -4350,7 +4445,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_rx_desc *rx_desc;
-       struct e1000_buffer *buffer_info;
+       struct e1000_rx_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;
        unsigned int bufsz = 256 - 16 /*for skb_reserve */ ;
@@ -4373,7 +4468,6 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
                }
 
                buffer_info->skb = skb;
-               buffer_info->length = adapter->rx_buffer_len;
 check_page:
                /* allocate a new page if necessary */
                if (!buffer_info->page) {
@@ -4387,7 +4481,7 @@ check_page:
                if (!buffer_info->dma) {
                        buffer_info->dma = dma_map_page(&pdev->dev,
                                                        buffer_info->page, 0,
-                                                       buffer_info->length,
+                                                       PAGE_SIZE,
                                                        DMA_FROM_DEVICE);
                        if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
                                put_page(buffer_info->page);
@@ -4435,7 +4529,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_rx_desc *rx_desc;
-       struct e1000_buffer *buffer_info;
+       struct e1000_rx_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;
        unsigned int bufsz = adapter->rx_buffer_len;
@@ -4447,7 +4541,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                skb = buffer_info->skb;
                if (skb) {
                        skb_trim(skb, 0);
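+                       /* the copybreak path left both the skb and its DMA
+                        * mapping intact, so reuse them and skip the remap
+                        */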
-                       goto map_skb;
+                       goto skip;
                }
 
                skb = netdev_alloc_skb_ip_align(netdev, bufsz);
@@ -4483,11 +4577,9 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                        dev_kfree_skb(oldskb);
                }
                buffer_info->skb = skb;
-               buffer_info->length = adapter->rx_buffer_len;
-map_skb:
                buffer_info->dma = dma_map_single(&pdev->dev,
                                                  skb->data,
-                                                 buffer_info->length,
+                                                 adapter->rx_buffer_len,
                                                  DMA_FROM_DEVICE);
                if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
                        dev_kfree_skb(skb);
@@ -4522,6 +4614,7 @@ map_skb:
                rx_desc = E1000_RX_DESC(*rx_ring, i);
                rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
 
+skip:
                if (unlikely(++i == rx_ring->count))
                        i = 0;
                buffer_info = &rx_ring->buffer_info[i];