/*******************************************************************************

  Intel(R) 82576 Virtual Function Linux driver
  Copyright(c) 2009 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "igbvf.h"

#define DRV_VERSION "2.0.2-k"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
		  "Intel(R) Gigabit Virtual Function Network Driver";
static const char igbvf_copyright[] =
		  "Copyright (c) 2009 - 2012 Intel Corporation.";

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);

static struct igbvf_info igbvf_vf_info = {
	.mac		= e1000_vfadapt,
	.init_ops	= e1000_init_function_pointers_vf,
};

static struct igbvf_info igbvf_i350_vf_info = {
	.mac		= e1000_vfadapt_i350,
	.init_ops	= e1000_init_function_pointers_vf,
};

static const struct igbvf_info *igbvf_info_tbl[] = {
	[board_vf]	= &igbvf_vf_info,
	[board_i350_vf]	= &igbvf_i350_vf_info,
};

/**
 * igbvf_desc_unused - calculate if we have unused descriptors
 **/
static int igbvf_desc_unused(struct igbvf_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}

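/* Example: with count = 256, next_to_clean = 10 and next_to_use = 200,
 * the ring has 256 + 10 - 200 - 1 = 65 unused descriptors.  The "- 1"
 * keeps one slot permanently empty, so next_to_use == next_to_clean can
 * only ever mean "empty", never "full".
 */
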
/**
 * igbvf_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void igbvf_receive_skb(struct igbvf_adapter *adapter,
                              struct net_device *netdev,
                              struct sk_buff *skb,
                              u32 status, u16 vlan)
{
	u16 vid;

	if (status & E1000_RXD_STAT_VP) {
		if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
		    (status & E1000_RXDEXT_STATERR_LB))
			vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
		else
			vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
		if (test_bit(vid, adapter->active_vlans))
			__vlan_hwaccel_put_tag(skb, vid);
	}

	napi_gro_receive(&adapter->rx_ring->napi, skb);
}

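/* Note on the byte swap above: the IGBVF_FLAG_RX_LB_VLAN_BSWAP flag name
 * suggests that VLAN tags of local-loopback receives (descriptors with
 * E1000_RXDEXT_STATERR_LB set) arrive byte-swapped relative to normal
 * receives, hence be16_to_cpu() on that path and le16_to_cpu() otherwise.
 */
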
static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
                                         u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if ((status_err & E1000_RXD_STAT_IXSM) ||
	    (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	adapter->hw_csum_good++;
}

/**
 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: address of ring structure to repopulate
 * @cleaned_count: number of buffers to repopulate
 **/
static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
                                   int cleaned_count)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	if (adapter->rx_ps_hdr_size)
		bufsz = adapter->rx_ps_hdr_size;
	else
		bufsz = adapter->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);

		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				dma_map_page(&pdev->dev, buffer_info->page,
				             buffer_info->page_offset,
				             PAGE_SIZE / 2,
				             DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev,
			                      buffer_info->page_dma)) {
				__free_page(buffer_info->page);
				buffer_info->page = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				break;
			}
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
			buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
			                                  bufsz,
			                                  DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				dev_kfree_skb(buffer_info->skb);
				buffer_info->skb = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				goto no_buffers;
			}
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->rx_ps_hdr_size) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}

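/* The allocator above hands each page to the hardware one half at a time:
 * page_offset toggles between 0 and PAGE_SIZE / 2 when a page is reused, so
 * a single page can back two receive buffers.  Whether a page may be reused
 * again is decided at cleanup time in igbvf_clean_rx_irq() via page_count().
 */
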
/**
 * igbvf_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
                               int *work_done, int work_to_do)
{
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igbvf_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 length, hlen, staterr;

	i = rx_ring->next_to_clean;
	rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		buffer_info = &rx_ring->buffer_info[i];

		/* HW will not DMA in data larger than the given buffer, even
		 * if it parses the (NFS, of course) header to be larger.  In
		 * that case, it fills the header buffer and spills the rest
		 * into the page.
		 */
		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
		        E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
		if (hlen > adapter->rx_ps_hdr_size)
			hlen = adapter->rx_ps_hdr_size;

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;
		if (!adapter->rx_ps_hdr_size) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
			                 adapter->rx_buffer_len,
			                 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			skb_put(skb, length);
			goto send_up;
		}

		if (!skb_shinfo(skb)->nr_frags) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
			                 adapter->rx_ps_hdr_size,
			                 DMA_FROM_DEVICE);
			skb_put(skb, hlen);
		}

		if (length) {
			dma_unmap_page(&pdev->dev, buffer_info->page_dma,
			               PAGE_SIZE / 2,
			               DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			                   buffer_info->page,
			                   buffer_info->page_offset,
			                   length);

			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
			    (page_count(buffer_info->page) != 1))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE / 2;
		}
send_up:
		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		total_bytes += skb->len;
		total_packets++;

		igbvf_rx_checksum_adv(adapter, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);

		igbvf_receive_skb(adapter, netdev, skb, staterr,
		                  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
			igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igbvf_desc_unused(rx_ring);

	if (cleaned_count)
		igbvf_alloc_rx_buffers(rx_ring, cleaned_count);

	adapter->total_rx_packets += total_packets;
	adapter->total_rx_bytes += total_bytes;
	adapter->net_stats.rx_bytes += total_bytes;
	adapter->net_stats.rx_packets += total_packets;

	return cleaned;
}

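/* For frames spanning several descriptors (E1000_RXD_STAT_EOP clear), the
 * loop above hands the partially assembled skb forward through next_buffer,
 * so page fragments from the follow-on descriptors keep accumulating in the
 * same skb until the end-of-packet descriptor arrives.
 */
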
static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
                            struct igbvf_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev,
			               buffer_info->dma,
			               buffer_info->length,
			               DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev,
			                 buffer_info->dma,
			                 buffer_info->length,
			                 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}

/**
 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
                             struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
	                                   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->adapter = adapter;
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;
err:
	vfree(tx_ring->buffer_info);
	dev_err(&adapter->pdev->dev,
	        "Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
                             struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
	                                   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	rx_ring->adapter = adapter;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dev_err(&adapter->pdev->dev,
	        "Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}

/**
 * igbvf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * igbvf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: ring to free resources from
 *
 * Free all transmit software resources
 **/
void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = tx_ring->adapter->pdev;

	igbvf_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
	                  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igbvf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 **/
static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->rx_ps_hdr_size)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
				                 adapter->rx_ps_hdr_size,
				                 DMA_FROM_DEVICE);
			else
				dma_unmap_single(&pdev->dev, buffer_info->dma,
				                 adapter->rx_buffer_len,
				                 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		if (buffer_info->page) {
			if (buffer_info->page_dma)
				dma_unmap_page(&pdev->dev,
				               buffer_info->page_dma,
				               PAGE_SIZE / 2,
				               DMA_FROM_DEVICE);
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_dma = 0;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * igbvf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = rx_ring->adapter->pdev;

	igbvf_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
	                  rx_ring->dma);
	rx_ring->desc = NULL;
}

/**
 * igbvf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
                                           enum latency_range itr_setting,
                                           int packets, int bytes)
{
	enum latency_range retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	default:
		break;
	}

update_itr_done:
	return retval;
}

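/* The ranges produced above are converted by igbvf_range_to_itr():
 * lowest_latency maps to roughly 70000 ints/s, low_latency to ~20000 ints/s
 * and bulk_latency to ~4000 ints/s, so small interactive flows keep latency
 * low while bulk transfers coalesce interrupts aggressively.
 */
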
static int igbvf_range_to_itr(enum latency_range current_range)
{
	int new_itr;

	switch (current_range) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGBVF_70K_ITR;
		break;
	case low_latency:
		new_itr = IGBVF_20K_ITR;
		break;
	case bulk_latency:
		new_itr = IGBVF_4K_ITR;
		break;
	default:
		new_itr = IGBVF_START_ITR;
		break;
	}
	return new_itr;
}

static void igbvf_set_itr(struct igbvf_adapter *adapter)
{
	u32 new_itr;

	adapter->tx_ring->itr_range =
			igbvf_update_itr(adapter,
			                 adapter->tx_ring->itr_val,
			                 adapter->total_tx_packets,
			                 adapter->total_tx_bytes);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->requested_itr == 3 &&
	    adapter->tx_ring->itr_range == lowest_latency)
		adapter->tx_ring->itr_range = low_latency;

	new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);

	if (new_itr != adapter->tx_ring->itr_val) {
		u32 current_itr = adapter->tx_ring->itr_val;
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > current_itr ?
		          min(current_itr + (new_itr >> 2), new_itr) :
		          new_itr;
		adapter->tx_ring->itr_val = new_itr;

		adapter->tx_ring->set_itr = 1;
	}

	adapter->rx_ring->itr_range =
			igbvf_update_itr(adapter, adapter->rx_ring->itr_val,
			                 adapter->total_rx_packets,
			                 adapter->total_rx_bytes);
	if (adapter->requested_itr == 3 &&
	    adapter->rx_ring->itr_range == lowest_latency)
		adapter->rx_ring->itr_range = low_latency;

	new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range);

	if (new_itr != adapter->rx_ring->itr_val) {
		u32 current_itr = adapter->rx_ring->itr_val;
		new_itr = new_itr > current_itr ?
		          min(current_itr + (new_itr >> 2), new_itr) :
		          new_itr;
		adapter->rx_ring->itr_val = new_itr;

		adapter->rx_ring->set_itr = 1;
	}
}

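/* The ramp above is asymmetric: when new_itr is above the current value
 * (i.e. the interrupt rate is being lowered) the driver advances by at most
 * a quarter of the target per update, so it takes several intervals to
 * reach the bulk setting, while switches to a faster interrupt rate are
 * applied in a single step.
 */
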
/**
 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * returns true if ring is completely cleaned
 **/
static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		rmb();	/* read buffer_info after eop_desc status */
		for (cleaned = false; !cleaned; count++) {
			tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
				            skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			igbvf_put_txbuf(adapter, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
	             netif_carrier_ok(netdev) &&
	             igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__IGBVF_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return count < tx_ring->count;
}

static irqreturn_t igbvf_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	adapter->int_counter1++;

	netif_carrier_off(netdev);
	hw->mac.get_link_status = 1;
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies + 1);

	ew32(EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;

	if (tx_ring->set_itr) {
		writel(tx_ring->itr_val,
		       adapter->hw.hw_addr + tx_ring->itr_register);
		adapter->tx_ring->set_itr = 0;
	}

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	/* auto mask will automatically reenable the interrupt when we write
	 * EICS */
	if (!igbvf_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(EICS, tx_ring->eims_value);
	else
		ew32(EIMS, tx_ring->eims_value);

	return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	adapter->int_counter0++;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (adapter->rx_ring->set_itr) {
		writel(adapter->rx_ring->itr_val,
		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
		adapter->rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->rx_ring->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->rx_ring->napi);
	}

	return IRQ_HANDLED;
}

#define IGBVF_NO_QUEUE -1

static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
                                int tx_queue, int msix_vector)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* 82576 uses a table-based method for assigning vectors.
	   Each queue has a single entry in the table to which we write
	   a vector number along with a "valid" bit.  Sadly, the layout
	   of the table is somewhat counterintuitive. */
	if (rx_queue > IGBVF_NO_QUEUE) {
		index = (rx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (rx_queue & 0x1) {
			/* vector goes into third byte of register */
			ivar = ivar & 0xFF00FFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
		} else {
			/* vector goes into low byte of register */
			ivar = ivar & 0xFFFFFF00;
			ivar |= msix_vector | E1000_IVAR_VALID;
		}
		adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
		array_ew32(IVAR0, index, ivar);
	}
	if (tx_queue > IGBVF_NO_QUEUE) {
		index = (tx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (tx_queue & 0x1) {
			/* vector goes into high byte of register */
			ivar = ivar & 0x00FFFFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
		} else {
			/* vector goes into second byte of register */
			ivar = ivar & 0xFFFF00FF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
		}
		adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
		array_ew32(IVAR0, index, ivar);
	}
}

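/* Resulting IVAR0 layout, two queue pairs per 32-bit table entry:
 *   byte 0 - Rx queue (even), byte 1 - Tx queue (even),
 *   byte 2 - Rx queue (odd),  byte 3 - Tx queue (odd),
 * each byte carrying the MSI-X vector number ORed with E1000_IVAR_VALID.
 */
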
/**
 * igbvf_configure_msix - Configure MSI-X hardware
 *
 * igbvf_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igbvf_configure_msix(struct igbvf_adapter *adapter)
{
	u32 tmp;
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	int vector = 0;

	adapter->eims_enable_mask = 0;

	igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
	adapter->eims_enable_mask |= tx_ring->eims_value;
	writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register);
	igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
	adapter->eims_enable_mask |= rx_ring->eims_value;
	writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register);

	/* set vector for other causes, i.e. link changes */
	tmp = (vector++ | E1000_IVAR_VALID);

	ew32(IVAR_MISC, tmp);

	adapter->eims_enable_mask = (1 << (vector)) - 1;
	adapter->eims_other = 1 << (vector - 1);
	e1e_flush();
}

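/* With the assignments above, vector 0 services Tx, vector 1 services Rx,
 * and vector 2 (eims_other) covers the "other" causes such as PF-to-VF
 * mailbox messages and link changes; eims_enable_mask therefore ends up
 * as 0x7.
 */
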
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	}
}

/**
 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
{
	int err = -ENOMEM;
	int i;

	/* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */
	adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
	                                GFP_KERNEL);
	if (adapter->msix_entries) {
		for (i = 0; i < 3; i++)
			adapter->msix_entries[i].entry = i;

		err = pci_enable_msix(adapter->pdev,
		                      adapter->msix_entries, 3);
	}

	if (err) {
		/* MSI-X failed */
		dev_err(&adapter->pdev->dev,
		        "Failed to initialize MSI-X interrupts.\n");
		igbvf_reset_interrupt_capability(adapter);
	}
}

/**
 * igbvf_request_msix - Initialize MSI-X interrupts
 *
 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igbvf_request_msix(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

	if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
		sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
		sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
	} else {
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	}

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
	                  netdev);
	if (err)
		goto out;

	adapter->tx_ring->itr_register = E1000_EITR(vector);
	adapter->tx_ring->itr_val = adapter->current_itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
	                  netdev);
	if (err)
		goto out;

	adapter->rx_ring->itr_register = E1000_EITR(vector);
	adapter->rx_ring->itr_val = adapter->current_itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igbvf_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	igbvf_configure_msix(adapter);
	return 0;
out:
	return err;
}

/**
 * igbvf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 **/
static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);

	return 0;
}

/**
 * igbvf_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igbvf_request_irq(struct igbvf_adapter *adapter)
{
	int err = -1;

	/* igbvf supports msi-x only */
	if (adapter->msix_entries)
		err = igbvf_request_msix(adapter);

	if (!err)
		return err;

	dev_err(&adapter->pdev->dev,
	        "Unable to allocate interrupt, Error: %d\n", err);

	return err;
}

static void igbvf_free_irq(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int vector;

	if (adapter->msix_entries) {
		for (vector = 0; vector < 3; vector++)
			free_irq(adapter->msix_entries[vector].vector, netdev);
	}
}

/**
 * igbvf_irq_disable - Mask off interrupt generation on the NIC
 **/
static void igbvf_irq_disable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIMC, ~0);

	if (adapter->msix_entries)
		ew32(EIAC, 0);
}

/**
 * igbvf_irq_enable - Enable default interrupt generation settings
 **/
static void igbvf_irq_enable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIAC, adapter->eims_enable_mask);
	ew32(EIAM, adapter->eims_enable_mask);
	ew32(EIMS, adapter->eims_enable_mask);
}

/**
 * igbvf_poll - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: amount of packets driver is allowed to process this poll
 **/
static int igbvf_poll(struct napi_struct *napi, int budget)
{
	struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int work_done = 0;

	igbvf_clean_rx_irq(adapter, &work_done, budget);

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);

		if (adapter->requested_itr & 3)
			igbvf_set_itr(adapter);

		if (!test_bit(__IGBVF_DOWN, &adapter->state))
			ew32(EIMS, adapter->rx_ring->eims_value);
	}

	return work_done;
}

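/* Note that only the Rx vector is re-armed here via the EIMS write; the Tx
 * vector re-arms itself in igbvf_intr_msix_tx() and does not take part in
 * the NAPI poll.
 */
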
/**
 * igbvf_set_rlpml - set receive large packet maximum length
 * @adapter: board private structure
 *
 * Configure the maximum size of packets that will be received
 **/
static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
{
	int max_frame_size;
	struct e1000_hw *hw = &adapter->hw;

	max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
	e1000_rlpml_set_vf(hw, max_frame_size);
}

static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac.ops.set_vfta(hw, vid, true)) {
		dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
		return -EINVAL;
	}
	set_bit(vid, adapter->active_vlans);
	return 0;
}

static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac.ops.set_vfta(hw, vid, false)) {
		dev_err(&adapter->pdev->dev,
		        "Failed to remove vlan id %d\n", vid);
		return -EINVAL;
	}
	clear_bit(vid, adapter->active_vlans);
	return 0;
}

static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		igbvf_vlan_rx_add_vid(adapter->netdev, vid);
}

/**
 * igbvf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igbvf_configure_tx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	u64 tdba;
	u32 txdctl, dca_txctrl;

	/* disable transmits */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
	e1e_flush();
	msleep(10);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
	tdba = tx_ring->dma;
	ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
	ew32(TDBAH(0), (tdba >> 32));
	ew32(TDH(0), 0);
	ew32(TDT(0), 0);
	tx_ring->head = E1000_TDH(0);
	tx_ring->tail = E1000_TDT(0);

	/* Turn off Relaxed Ordering on head write-backs.  The writebacks
	 * MUST be delivered in order or it will completely screw up
	 * our bookkeeping.
	 */
	dca_txctrl = er32(DCA_TXCTRL(0));
	dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
	ew32(DCA_TXCTRL(0), dca_txctrl);

	/* enable transmits */
	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	ew32(TXDCTL(0), txdctl);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;

	/* enable Report Status bit */
	adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
}

/**
 * igbvf_setup_srrctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 srrctl = 0;

	srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
	            E1000_SRRCTL_BSIZEHDR_MASK |
	            E1000_SRRCTL_BSIZEPKT_MASK);

	/* Enable queue drop to avoid head of line blocking */
	srrctl |= E1000_SRRCTL_DROP_EN;

	/* Setup buffer sizes */
	srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
	          E1000_SRRCTL_BSIZEPKT_SHIFT;

	if (adapter->rx_buffer_len < 2048) {
		adapter->rx_ps_hdr_size = 0;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	} else {
		adapter->rx_ps_hdr_size = 128;
		srrctl |= adapter->rx_ps_hdr_size <<
		          E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	}

	ew32(SRRCTL(0), srrctl);
}

/**
 * igbvf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igbvf_configure_rx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	u64 rdba;
	u32 rdlen, rxdctl;

	/* disable receives */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
	e1e_flush();
	msleep(10);

	rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	rdba = rx_ring->dma;
	ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
	ew32(RDBAH(0), (rdba >> 32));
	ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
	rx_ring->head = E1000_RDH(0);
	rx_ring->tail = E1000_RDT(0);
	ew32(RDH(0), 0);
	ew32(RDT(0), 0);

	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGBVF_RX_PTHRESH;
	rxdctl |= IGBVF_RX_HTHRESH << 8;
	rxdctl |= IGBVF_RX_WTHRESH << 16;

	igbvf_set_rlpml(adapter);

	/* enable receives */
	ew32(RXDCTL(0), rxdctl);
}

/**
 * igbvf_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igbvf_set_multi(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list = NULL;
	int i;

	if (!netdev_mc_empty(netdev)) {
		mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
		if (!mta_list) {
			dev_err(&adapter->pdev->dev,
			        "failed to allocate multicast filter list\n");
			return;
		}
	}

	/* prepare a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
	kfree(mta_list);
}

/**
 * igbvf_configure - configure the hardware for Rx and Tx
 * @adapter: private board structure
 **/
static void igbvf_configure(struct igbvf_adapter *adapter)
{
	igbvf_set_multi(adapter->netdev);

	igbvf_restore_vlan(adapter);

	igbvf_configure_tx(adapter);
	igbvf_setup_srrctl(adapter);
	igbvf_configure_rx(adapter);
	igbvf_alloc_rx_buffers(adapter->rx_ring,
	                       igbvf_desc_unused(adapter->rx_ring));
}

/* igbvf_reset - bring the hardware into a known good state
 *
 * This function boots the hardware and enables some settings that
 * require a configuration cycle of the hardware - those cannot be
 * set/changed during runtime.  After reset the device needs to be
 * properly configured for Rx, Tx etc.
 */
static void igbvf_reset(struct igbvf_adapter *adapter)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;

	/* Allow time for pending master requests to run */
	if (mac->ops.reset_hw(hw))
		dev_err(&adapter->pdev->dev, "PF still resetting\n");

	mac->ops.init_hw(hw);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}

	adapter->last_reset = jiffies;
}

int igbvf_up(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	igbvf_configure(adapter);

	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);
	if (adapter->msix_entries)
		igbvf_configure_msix(adapter);

	/* Clear any pending interrupts. */
	er32(EICR);
	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;
}

void igbvf_down(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rxdctl, txdctl;

	/*
	 * signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	netif_stop_queue(netdev);

	/* disable transmits in the hardware */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);

	/* flush both disables and wait for them to finish */
	e1e_flush();
	msleep(10);

	napi_disable(&adapter->rx_ring->napi);

	igbvf_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset */
	igbvf_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	igbvf_reset(adapter);
	igbvf_clean_tx_ring(adapter->tx_ring);
	igbvf_clean_rx_ring(adapter->rx_ring);
}

void igbvf_reinit_locked(struct igbvf_adapter *adapter)
{
	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		msleep(1);
	igbvf_down(adapter);
	igbvf_up(adapter);
	clear_bit(__IGBVF_RESETTING, &adapter->state);
}

/**
 * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
 * @adapter: board private structure to initialize
 *
 * igbvf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int igbvf_sw_init(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	s32 rc;

	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	adapter->rx_ps_hdr_size = 0;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->tx_int_delay = 8;
	adapter->tx_abs_int_delay = 32;
	adapter->rx_int_delay = 0;
	adapter->rx_abs_int_delay = 8;
	adapter->requested_itr = 3;
	adapter->current_itr = IGBVF_START_ITR;

	/* Set various function pointers */
	adapter->ei->init_ops(&adapter->hw);

	rc = adapter->hw.mac.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	igbvf_set_interrupt_capability(adapter);

	if (igbvf_alloc_queues(adapter))
		return -ENOMEM;

	spin_lock_init(&adapter->tx_queue_lock);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igbvf_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__IGBVF_DOWN, &adapter->state);
	return 0;
}

static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	adapter->stats.last_gprc = er32(VFGPRC);
	adapter->stats.last_gorc = er32(VFGORC);
	adapter->stats.last_gptc = er32(VFGPTC);
	adapter->stats.last_gotc = er32(VFGOTC);
	adapter->stats.last_mprc = er32(VFMPRC);
	adapter->stats.last_gotlbc = er32(VFGOTLBC);
	adapter->stats.last_gptlbc = er32(VFGPTLBC);
	adapter->stats.last_gorlbc = er32(VFGORLBC);
	adapter->stats.last_gprlbc = er32(VFGPRLBC);

	adapter->stats.base_gprc = er32(VFGPRC);
	adapter->stats.base_gorc = er32(VFGORC);
	adapter->stats.base_gptc = er32(VFGPTC);
	adapter->stats.base_gotc = er32(VFGOTC);
	adapter->stats.base_mprc = er32(VFMPRC);
	adapter->stats.base_gotlbc = er32(VFGOTLBC);
	adapter->stats.base_gptlbc = er32(VFGPTLBC);
	adapter->stats.base_gorlbc = er32(VFGORLBC);
	adapter->stats.base_gprlbc = er32(VFGPRLBC);
}

/**
 * igbvf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igbvf_open(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IGBVF_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;

	/*
	 * before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	igbvf_configure(adapter);

	err = igbvf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igbvf_up() */
	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);

	/* clear any pending interrupts */
	er32(EICR);

	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;

err_req_irq:
	igbvf_free_rx_resources(adapter->rx_ring);
err_setup_rx:
	igbvf_free_tx_resources(adapter->tx_ring);
err_setup_tx:
	igbvf_reset(adapter);

	return err;
}

/**
 * igbvf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igbvf_close(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
	igbvf_down(adapter);

	igbvf_free_irq(adapter);

	igbvf_free_tx_resources(adapter->tx_ring);
	igbvf_free_rx_resources(adapter->rx_ring);

	return 0;
}

/**
 * igbvf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_set_mac(struct net_device *netdev, void *p)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	if (memcmp(addr->sa_data, hw->mac.addr, 6))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	netdev->addr_assign_type &= ~NET_ADDR_RANDOM;

	return 0;
}

#define UPDATE_VF_COUNTER(reg, name)                                    \
{                                                                       \
	u32 current_counter = er32(reg);                                \
	if (current_counter < adapter->stats.last_##name)               \
		adapter->stats.name += 0x100000000LL;                   \
	adapter->stats.last_##name = current_counter;                   \
	adapter->stats.name &= 0xFFFFFFFF00000000LL;                    \
	adapter->stats.name |= current_counter;                         \
}

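/* Example of the rollover handling above: if last_gprc was 0xFFFFFFF0 and
 * the 32-bit VFGPRC register now reads 0x00000010, current_counter is less
 * than the saved value, so 2^32 is added to the 64-bit software counter
 * before its low 32 bits are replaced with the fresh reading.
 */
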
/**
 * igbvf_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igbvf_update_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;

	/*
	 * Prevent stats update while adapter is being reset, link is down
	 * or if the pci connection is down.
	 */
	if (adapter->link_speed == 0)
		return;

	if (test_bit(__IGBVF_RESETTING, &adapter->state))
		return;

	if (pci_channel_offline(pdev))
		return;

	UPDATE_VF_COUNTER(VFGPRC, gprc);
	UPDATE_VF_COUNTER(VFGORC, gorc);
	UPDATE_VF_COUNTER(VFGPTC, gptc);
	UPDATE_VF_COUNTER(VFGOTC, gotc);
	UPDATE_VF_COUNTER(VFMPRC, mprc);
	UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
	UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
	UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
	UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;
}

static void igbvf_print_link_info(struct igbvf_adapter *adapter)
{
	dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
	         adapter->link_speed,
	         adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
}

static bool igbvf_has_link(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = E1000_SUCCESS;
	bool link_active;

	/* If interface is down, stay link down */
	if (test_bit(__IGBVF_DOWN, &adapter->state))
		return false;

	ret_val = hw->mac.ops.check_for_link(hw);
	link_active = !hw->mac.get_link_status;

	/* if check for link returns error we will need to reset */
	if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
		schedule_work(&adapter->reset_task);

	return link_active;
}

/**
 * igbvf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igbvf_watchdog(unsigned long data)
{
	struct igbvf_adapter *adapter = (struct igbvf_adapter *) data;

	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igbvf_watchdog_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter = container_of(work,
	                                             struct igbvf_adapter,
	                                             watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct e1000_hw *hw = &adapter->hw;
	u32 link;
	int tx_pending = 0;

	link = igbvf_has_link(adapter);

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			mac->ops.get_link_up_info(&adapter->hw,
			                          &adapter->link_speed,
			                          &adapter->link_duplex);
			igbvf_print_link_info(adapter);

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			dev_info(&adapter->pdev->dev, "Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}

	if (netif_carrier_ok(netdev)) {
		igbvf_update_stats(adapter);
	} else {
		tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
		              tx_ring->count);
		if (tx_pending) {
			/*
			 * We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
		}
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	ew32(EICS, adapter->rx_ring->eims_value);

	/* Reset the timer */
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
		          round_jiffies(jiffies + (2 * HZ)));
}

#define IGBVF_TX_FLAGS_CSUM		0x00000001
#define IGBVF_TX_FLAGS_VLAN		0x00000002
#define IGBVF_TX_FLAGS_TSO		0x00000004
#define IGBVF_TX_FLAGS_IPV4		0x00000008
#define IGBVF_TX_FLAGS_VLAN_MASK	0xffff0000
#define IGBVF_TX_FLAGS_VLAN_SHIFT	16

static int igbvf_tso(struct igbvf_adapter *adapter,
                     struct igbvf_ring *tx_ring,
                     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;
	*hdr_len = 0;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err) {
			dev_err(&adapter->pdev->dev,
			        "igbvf_tso returning an error\n");
			return err;
		}
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
		                                         iph->daddr, 0,
		                                         IPPROTO_TCP, 0);
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
		                                       &ipv6_hdr(skb)->daddr,
		                                       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= (skb_transport_header(skb) - skb_network_header(skb));
	*hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}

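/* At the end of igbvf_tso(), *hdr_len holds the full header size that the
 * hardware will replicate in front of every TSO segment: the L2 header
 * (skb_network_offset), the IP header (transport offset minus network
 * offset) and the TCP header (tcp_hdrlen).
 */
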
static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
                                 struct igbvf_ring *tx_ring,
                                 struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGBVF_TX_FLAGS_VLAN)
			info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);

		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= (skb_transport_header(skb) -
			         skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		context_desc->mss_l4len_idx = 0;

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* if there are enough descriptors then we don't need to worry */
	if (igbvf_desc_unused(adapter->tx_ring) >= size)
		return 0;

	netif_stop_queue(netdev);

	smp_mb();

	/* We need to check again just in case room has been made available */
	if (igbvf_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	netif_wake_queue(netdev);

	++adapter->restart_queue;
	return 0;
}

#define IGBVF_MAX_TXD_PWR	16
#define IGBVF_MAX_DATA_PER_TXD	(1 << IGBVF_MAX_TXD_PWR)

static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
                                   struct igbvf_ring *tx_ring,
                                   struct sk_buff *skb,
                                   unsigned int first)
{
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->mapped_as_page = false;
	buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
	                                  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, buffer_info->dma))
		goto dma_error;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		const struct skb_frag_struct *frag;

		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->mapped_as_page = true;
		buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
		                                    DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
	}

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return ++count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed buffer_info mapping */
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->next_to_watch = 0;
	buffer_info->mapped_as_page = false;
	if (count)
		count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	return 0;
}

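/* The dma_error unwind above walks backwards from the failed fragment and
 * unmaps every buffer that had already been mapped for this skb; returning
 * 0 tells the caller that it must rewind next_to_use and drop the packet.
 */
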
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
                                      struct igbvf_ring *tx_ring,
                                      int tx_flags, int count, u32 paylen,
                                      u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igbvf_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
	                E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGBVF_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGBVF_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}

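/* Only the last descriptor of the chain has adapter->txd_cmd ORed in, which
 * adds the EOP and Report Status bits; the DD write-back that
 * igbvf_clean_tx_irq() polls for is therefore produced once per packet
 * rather than once per descriptor.
 */
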
2202 static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2203 struct net_device *netdev,
2204 struct igbvf_ring *tx_ring)
2206 struct igbvf_adapter *adapter = netdev_priv(netdev);
2207 unsigned int first, tx_flags = 0;
2212 if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2213 dev_kfree_skb_any(skb);
2214 return NETDEV_TX_OK;
2217 if (skb->len <= 0) {
2218 dev_kfree_skb_any(skb);
2219 return NETDEV_TX_OK;
	/*
	 * need: count + 4 descriptors:
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor;
	 * otherwise try again next time
	 */
	if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}
	if (vlan_tx_tag_present(skb)) {
		tx_flags |= IGBVF_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGBVF_TX_FLAGS_IPV4;
	first = tx_ring->next_to_use;

	tso = skb_is_gso(skb) ?
		igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
	if (unlikely(tso < 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IGBVF_TX_FLAGS_TSO;
	else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGBVF_TX_FLAGS_CSUM;
	/*
	 * count reflects descriptors mapped, if 0 then mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);

	if (count) {
		igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
				   skb->len, hdr_len);
		/* Make sure there is space in the ring for the next send. */
		igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
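
/**
 * igbvf_xmit_frame - ndo_start_xmit entry point
 * @skb: buffer to transmit
 * @netdev: network interface device structure
 *
 * The VF exposes a single transmit queue, so every frame is handed to
 * ring 0 unless the adapter is being taken down.
 **/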
static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct igbvf_ring *tx_ring;

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = &adapter->tx_ring[0];

	return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
}
/**
 * igbvf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igbvf_tx_timeout(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}
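
/**
 * igbvf_reset_task - reset worker scheduled by igbvf_tx_timeout
 * @work: work_struct embedded in the adapter private structure
 *
 * Runs in process context, where the reinit can safely sleep.
 **/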
static void igbvf_reset_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter;

	adapter = container_of(work, struct igbvf_adapter, reset_task);

	igbvf_reinit_locked(adapter);
}
/**
 * igbvf_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}
/**
 * igbvf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

#define MAX_STD_JUMBO_FRAME_SIZE 9234
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}
	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		msleep(1);
	/* igbvf_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;
	if (netif_running(netdev))
		igbvf_down(adapter);
	/*
	 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * However with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs
	 */

	if (max_frame <= 1024)
		adapter->rx_buffer_len = 1024;
	else if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
#if (PAGE_SIZE / 2) > 16384
		adapter->rx_buffer_len = 16384;
#else
		adapter->rx_buffer_len = PAGE_SIZE / 2;
#endif
	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
					 ETH_FCS_LEN;

	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igbvf_up(adapter);
	else
		igbvf_reset(adapter);

	clear_bit(__IGBVF_RESETTING, &adapter->state);

	return 0;
}
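
/* The VF exposes no private ioctls, so every command is rejected. */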
static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	default:
		return -EOPNOTSUPP;
	}
}
static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
		igbvf_down(adapter);
		igbvf_free_irq(adapter);
	}
#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
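/**
 * igbvf_resume - restore a suspended device
 * @pdev: PCI device information struct
 *
 * Restores PCI state, re-enables the device, reclaims the IRQ if the
 * interface was running, resets the VF and reattaches the netdev.
 **/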
static int igbvf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		err = igbvf_request_irq(adapter);
		if (err)
			return err;
	}

	igbvf_reset(adapter);

	if (netif_running(netdev))
		igbvf_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif
static void igbvf_shutdown(struct pci_dev *pdev)
{
	igbvf_suspend(pdev, PMSG_SUSPEND);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igbvf_netpoll(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);

	igbvf_clean_tx_irq(adapter->tx_ring);

	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * igbvf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igbvf_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * igbvf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igbvf_resume routine.
 */
static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	igbvf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * igbvf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igbvf_resume routine.
 */
static void igbvf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igbvf_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}
static void igbvf_print_device_info(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	if (hw->mac.type == e1000_vfadapt_i350)
		dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n");
	else
		dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
	dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
}
static int igbvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (features & NETIF_F_RXCSUM)
		adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
	else
		adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;

	return 0;
}
static const struct net_device_ops igbvf_netdev_ops = {
	.ndo_open                       = igbvf_open,
	.ndo_stop                       = igbvf_close,
	.ndo_start_xmit                 = igbvf_xmit_frame,
	.ndo_get_stats                  = igbvf_get_stats,
	.ndo_set_rx_mode                = igbvf_set_multi,
	.ndo_set_mac_address            = igbvf_set_mac,
	.ndo_change_mtu                 = igbvf_change_mtu,
	.ndo_do_ioctl                   = igbvf_ioctl,
	.ndo_tx_timeout                 = igbvf_tx_timeout,
	.ndo_vlan_rx_add_vid            = igbvf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid           = igbvf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller            = igbvf_netpoll,
#endif
	.ndo_set_features               = igbvf_set_features,
};
/**
 * igbvf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igbvf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igbvf_probe initializes an adapter identified by a pci_dev structure.
 * It performs the OS initialization, configures the adapter private
 * structure, and resets the hardware.
 **/
static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igbvf_adapter *adapter;
	struct e1000_hw *hw;
	const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];

	static int cards_found;
	int err, pci_using_dac;
	err = pci_enable_device_mem(pdev);
	if (err)
		return err;
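
	/*
	 * Prefer a 64-bit DMA mask so buffers may sit anywhere in memory;
	 * fall back to 32-bit masks if the platform cannot honor it, and
	 * abort the probe only when neither configuration is usable.
	 */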
	pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}
	err = pci_request_regions(pdev, igbvf_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->hw.back = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	err = -EIO;
	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
				      pci_resource_len(pdev, 0));

	if (!adapter->hw.hw_addr)
		goto err_ioremap;
	if (ei->get_variants) {
		err = ei->get_variants(adapter);
		if (err)
			goto err_ioremap;
	}

	/* setup adapter struct */
	err = igbvf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* construct the net_device struct */
	netdev->netdev_ops = &igbvf_netdev_ops;

	igbvf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found++;
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;
	/* reset the controller to put the device in a known good state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state, assigning new address."
			 " Is the PF interface up?\n");
		eth_hw_addr_random(netdev);
		memcpy(adapter->hw.mac.addr, netdev->dev_addr,
		       netdev->addr_len);
	} else {
		err = hw->mac.ops.read_mac_addr(hw);
		if (err) {
			dev_err(&pdev->dev, "Error reading MAC address\n");
			goto err_hw_init;
		}
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
			netdev->dev_addr);
		err = -EIO;
		goto err_hw_init;
	}

	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
	setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igbvf_reset_task);
	INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);

	/* ring size defaults */
	adapter->rx_ring->count = 1024;
	adapter->tx_ring->count = 1024;

	/* reset the hardware with the new settings */
	igbvf_reset(adapter);

	/* set hardware-specific flags */
	if (adapter->hw.mac.type == e1000_vfadapt_i350)
		adapter->flags |= IGBVF_FLAG_RX_LB_VLAN_BSWAP;

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_hw_init;

	/* tell the stack to leave us alone until igbvf_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	igbvf_print_device_info(adapter);

	igbvf_initialize_last_counter_stats(adapter);

	return 0;
err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	igbvf_reset_interrupt_capability(adapter);
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * igbvf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igbvf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void igbvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * The watchdog timer may be rescheduled, so explicitly
	 * disable it from being rescheduled.
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	unregister_netdev(netdev);

	igbvf_reset_interrupt_capability(adapter);

	/*
	 * it is important to delete the napi struct prior to freeing the
	 * rx ring so that you do not end up with null pointer refs
	 */
	netif_napi_del(&adapter->rx_ring->napi);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers igbvf_err_handler = {
	.error_detected = igbvf_io_error_detected,
	.slot_reset = igbvf_io_slot_reset,
	.resume = igbvf_io_resume,
};
static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
	{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
	.name     = igbvf_driver_name,
	.id_table = igbvf_pci_tbl,
	.probe    = igbvf_probe,
	.remove   = igbvf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igbvf_suspend,
	.resume   = igbvf_resume,
#endif
	.shutdown = igbvf_shutdown,
	.err_handler = &igbvf_err_handler
};
/**
 * igbvf_init_module - Driver Registration Routine
 *
 * igbvf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igbvf_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
	pr_info("%s\n", igbvf_copyright);

	ret = pci_register_driver(&igbvf_driver);

	return ret;
}
module_init(igbvf_init_module);
/**
 * igbvf_exit_module - Driver Exit Cleanup Routine
 *
 * igbvf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igbvf_exit_module(void)
{
	pci_unregister_driver(&igbvf_driver);
}
module_exit(igbvf_exit_module);
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);