/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
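/* Exporting the ID table above with MODULE_DEVICE_TABLE() lets userspace
 * module loaders (udev/modprobe) autoload this driver when a matching PCI
 * device is discovered.
 */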
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);
static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif
179 #ifdef CONFIG_NET_POLL_CONTROLLER
180 /* for netdump / net console */
181 static void e1000_netpoll (struct net_device *netdev);
#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = e1000_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = e1000_suspend,
	.resume   = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
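/* debug is a NETIF_MSG_* bit mask; the default of -1 makes netif_msg_init()
 * fall back to DEFAULT_MSG_ENABLE (driver, probe and link messages).
 */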
/**
 * e1000_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return adapter->netdev;
}
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);

	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for "
				"packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
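	/* these adapters use a legacy (possibly shared) interrupt line, so
	 * the handler is registered IRQF_SHARED with netdev as the cookie
	 * that identifies our device on that line
	 */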
	int err;

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err) {
		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
	}

	return err;
}
static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}
static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}
static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}
/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}
int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}
/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle
		 */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active
	 */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}
static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);

	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);
}
void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/* Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt.  Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}
static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	mutex_lock(&adapter->mutex);
	e1000_down(adapter);
	e1000_up(adapter);
	mutex_unlock(&adapter->mutex);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	/* if rtnl_lock is not held the call path is bogus */
	ASSERT_RTNL();
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}
void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
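			/* illustrative numbers: with pba = E1000_PBA_30K (30)
			 * the 82547 keeps (40 - 30) << E1000_PBA_BYTES_SHIFT
			 * = 10240 bytes of on-chip FIFO for transmit
			 */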
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx fifo also stores 16 bytes of information about the Tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
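		/* illustrative: a 9000-byte MTU gives max_frame_size = 9018,
		 * so min_tx_space = ALIGN((9018 + 16 - 4) * 2, 1024) >> 10
		 * = 18 (KB) and min_rx_space = ALIGN(9018, 1024) >> 10 = 9
		 */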
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment or use Early Receive if available
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/* flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;
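	/* illustrative: pba = 48 (KB) and a 1518-byte max frame give
	 * hwm = min(49152 * 9 / 10, 49152 - 1518) = 44236; masked to 8-byte
	 * granularity that is fc_high_water = 44232 and fc_low_water = 44224
	 */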
	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload
		 */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}
/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data)
		return;

	ops->get_eeprom(netdev, &eeprom, data);
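	/* the EEPROM stores a checksum word at EEPROM_CHECKSUM_REG chosen so
	 * that the 16-bit little-endian words 0..EEPROM_CHECKSUM_REG sum to
	 * EEPROM_SUM; recompute it here to show the user both values
	 */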
	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}
/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}
static netdev_features_t e1000_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}
static int e1000_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_RX)
		e1000_vlan_mode(netdev, features);

	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	return 0;
}
static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};
/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;

	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i <= BAR_5; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/* there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware.  There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		/* according to DMA-API-HOWTO, coherent calls will always
		 * succeed if the set call did
		 */
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
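	/* the 64 above is the NAPI weight: the most Rx packets a single
	 * e1000_clean() poll may process before the softirq moves on
	 */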
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
					ioremap(pci_resource_start(pdev, BAR_1),
						pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				      NETIF_F_HW_CSUM |
				      NETIF_F_HW_VLAN_RX;
		netdev->features = NETIF_F_HW_VLAN_TX |
				   NETIF_F_HW_VLAN_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= (NETIF_F_RXCSUM |
				NETIF_F_RXALL |
				NETIF_F_RXFCS);

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= (NETIF_F_TSO |
				  NETIF_F_HW_CSUM |
				  NETIF_F_SG);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state
	 */

	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/* set MAC address to all zeroes to invalidate and temporarily
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr))
		e_err(probe, "Invalid MAC Address\n");
	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
			if (tmp == 0 || tmp == 0xFF) {
				if (i == 31)
					goto err_eeprom;
				continue;
			} else
				break;
		}
	}
	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_filter_on_off(adapter, false);

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	pci_disable_device(pdev);
	return err;
}
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. That could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function
 **/
static int e1000_sw_init(struct e1000_adapter *adapter)
{
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);
	mutex_init(&adapter->mutex);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}
/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	return E1000_SUCCESS;
}
/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/
static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it)
	 */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}

	return 0;
}
/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
				  unsigned long len)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len;
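	/* e.g. (illustrative) begin = 0x1ff00 with len = 0x200 gives
	 * end - 1 = 0x200ff, so begin ^ (end - 1) = 0x3ffff; bits at or
	 * above bit 16 survive the shift below, flagging a crossing
	 */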
	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23
	 */
	if (hw->mac_type == e1000_82545 ||
	    hw->mac_type == e1000_ce4100 ||
	    hw->mac_type == e1000_82546) {
		return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
	}

	return true;
}
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info)
		return -ENOMEM;

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);
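	/* illustrative sizing: the default 256 descriptors at 16 bytes per
	 * struct e1000_tx_desc come to exactly 4096 bytes, one 4K page
	 */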
	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		e_err(probe, "Unable to allocate memory for the Tx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory "
			      "for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}
/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
			for (i-- ; i >= 0; i--)
				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */

	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */

	ew32(TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path.
	 */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = true;

	ew32(TCTL, tctl);
}
/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info)
		return -ENOMEM;

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
	if (!rxdr->desc) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate memory for the Rx "
			      "descriptor ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}
/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
			for (i-- ; i >= 0; i--)
				e1000_free_rx_resources(adapter,
							&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}
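	/* with E1000_RCTL_BSEX set the BSIZE encoding is scaled up by 16,
	 * which is how the 4096/8192/16384 buffer sizes above are reached;
	 * the 2048 case clears BSEX and uses the base encoding
	 */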
	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000_set_rx_mode
		 */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	ew32(RCTL, rctl);
}
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	u64 rdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 rdlen, rctl, rxcsum;

	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	if (hw->mac_type >= e1000_82540) {
		ew32(RADV, adapter->rx_abs_int_delay);
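		/* ITR counts in 256 ns increments, so e.g. an itr of 8000
		 * interrupts/sec programs 1000000000 / (8000 * 256) = 488
		 * (illustrative numbers)
		 */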
		if (adapter->itr_setting != 0)
			ew32(ITR, 1000000000 / (adapter->itr * 256));
	}

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	switch (adapter->num_rx_queues) {
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		ew32(RDLEN, rdlen);
		ew32(RDBAH, (rdba >> 32));
		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
		ew32(RDT, 0);
		ew32(RDH, 0);
		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDH : E1000_82542_RDH);
		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDT : E1000_82542_RDT);
		break;
	}

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = er32(RXCSUM);
		if (adapter->rx_csum)
			rxcsum |= E1000_RXCSUM_TUOFL;
		else
			/* don't need to clear IPPCSE as it defaults to 0 */
			rxcsum &= ~E1000_RXCSUM_TUOFL;
		ew32(RXCSUM, rxcsum);
	}

	/* Enable Receives */
	ew32(RCTL, rctl | E1000_RCTL_EN);
}
/**
 * e1000_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}
static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
					     struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
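		/* frags are DMA-mapped page-wise in the transmit path while
		 * the skb head is mapped with dma_map_single(), so the unmap
		 * call must match (see mapped_as_page)
		 */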
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length,
					 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* buffer_info must be completely set up in the transmit path */
}
/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	netdev_reset_queue(adapter->netdev);
	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->last_tx_tso = false;

	writel(0, hw->hw_addr + tx_ring->tdh);
	writel(0, hw->hw_addr + tx_ring->tdt);
}
/**
 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
/**
 * e1000_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
}
2068 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2069 * @adapter: board private structure
2070 * @rx_ring: ring to free buffers from
2072 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2073 struct e1000_rx_ring *rx_ring)
2075 struct e1000_hw *hw = &adapter->hw;
2076 struct e1000_buffer *buffer_info;
2077 struct pci_dev *pdev = adapter->pdev;
2081 /* Free all the Rx ring sk_buffs */
2082 for (i = 0; i < rx_ring->count; i++) {
2083 buffer_info = &rx_ring->buffer_info[i];
2084 if (buffer_info->dma &&
2085 adapter->clean_rx == e1000_clean_rx_irq) {
2086 dma_unmap_single(&pdev->dev, buffer_info->dma,
2087 buffer_info->length,
2089 } else if (buffer_info->dma &&
2090 adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2091 dma_unmap_page(&pdev->dev, buffer_info->dma,
2092 buffer_info->length,
2096 buffer_info->dma = 0;
2097 if (buffer_info->page) {
2098 put_page(buffer_info->page);
2099 buffer_info->page = NULL;
2100 }
2101 if (buffer_info->skb) {
2102 dev_kfree_skb(buffer_info->skb);
2103 buffer_info->skb = NULL;
2104 }
2105 }
2107 /* there also may be some cached data from a chained receive */
2108 if (rx_ring->rx_skb_top) {
2109 dev_kfree_skb(rx_ring->rx_skb_top);
2110 rx_ring->rx_skb_top = NULL;
2111 }
2113 size = sizeof(struct e1000_buffer) * rx_ring->count;
2114 memset(rx_ring->buffer_info, 0, size);
2116 /* Zero out the descriptor ring */
2117 memset(rx_ring->desc, 0, rx_ring->size);
2119 rx_ring->next_to_clean = 0;
2120 rx_ring->next_to_use = 0;
2122 writel(0, hw->hw_addr + rx_ring->rdh);
2123 writel(0, hw->hw_addr + rx_ring->rdt);
2124 }
2126 /**
2127  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2128  * @adapter: board private structure
2129  */
2130 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2131 {
2132 int i;
2134 for (i = 0; i < adapter->num_rx_queues; i++)
2135 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2136 }
2138 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2139  * and memory write and invalidate disabled for certain operations
2140  */
2141 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2142 {
2143 struct e1000_hw *hw = &adapter->hw;
2144 struct net_device *netdev = adapter->netdev;
2145 u32 rctl;
2147 e1000_pci_clear_mwi(hw);
2149 rctl = er32(RCTL);
2150 rctl |= E1000_RCTL_RST;
2151 ew32(RCTL, rctl);
2152 E1000_WRITE_FLUSH();
2153 mdelay(5);
2155 if (netif_running(netdev))
2156 e1000_clean_all_rx_rings(adapter);
2157 }
2159 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2160 {
2161 struct e1000_hw *hw = &adapter->hw;
2162 struct net_device *netdev = adapter->netdev;
2163 u32 rctl;
2165 rctl = er32(RCTL);
2166 rctl &= ~E1000_RCTL_RST;
2167 ew32(RCTL, rctl);
2168 E1000_WRITE_FLUSH();
2169 mdelay(5);
2171 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2172 e1000_pci_set_mwi(hw);
2174 if (netif_running(netdev)) {
2175 /* No need to loop, because 82542 supports only 1 queue */
2176 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2177 e1000_configure_rx(adapter);
2178 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2179 }
2180 }
2182 /**
2183  * e1000_set_mac - Change the Ethernet Address of the NIC
2184  * @netdev: network interface device structure
2185  * @p: pointer to an address structure
2186  *
2187  * Returns 0 on success, negative on failure
2188  */
2189 static int e1000_set_mac(struct net_device *netdev, void *p)
2190 {
2191 struct e1000_adapter *adapter = netdev_priv(netdev);
2192 struct e1000_hw *hw = &adapter->hw;
2193 struct sockaddr *addr = p;
2195 if (!is_valid_ether_addr(addr->sa_data))
2196 return -EADDRNOTAVAIL;
2198 /* 82542 2.0 needs to be in reset to write receive address registers */
2200 if (hw->mac_type == e1000_82542_rev2_0)
2201 e1000_enter_82542_rst(adapter);
2203 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2204 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2206 e1000_rar_set(hw, hw->mac_addr, 0);
2208 if (hw->mac_type == e1000_82542_rev2_0)
2209 e1000_leave_82542_rst(adapter);
2211 return 0;
2212 }
2214 /**
2215  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2216 * @netdev: network interface device structure
2218 * The set_rx_mode entry point is called whenever the unicast or multicast
2219 * address lists or the network interface flags are updated. This routine is
2220 * responsible for configuring the hardware for proper unicast, multicast,
2221  * promiscuous mode, and all-multi behavior.
2222  */
2223 static void e1000_set_rx_mode(struct net_device *netdev)
2224 {
2225 struct e1000_adapter *adapter = netdev_priv(netdev);
2226 struct e1000_hw *hw = &adapter->hw;
2227 struct netdev_hw_addr *ha;
2228 bool use_uc = false;
2229 u32 rctl;
2230 u32 hash_value;
2231 int i, rar_entries = E1000_RAR_ENTRIES;
2232 int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2233 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2235 if (!mcarray)
2236 return;
2238 /* Check for Promiscuous and All Multicast modes */
2240 rctl = er32(RCTL);
2242 if (netdev->flags & IFF_PROMISC) {
2243 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2244 rctl &= ~E1000_RCTL_VFE;
2245 } else {
2246 if (netdev->flags & IFF_ALLMULTI)
2247 rctl |= E1000_RCTL_MPE;
2248 else
2249 rctl &= ~E1000_RCTL_MPE;
2250 /* Enable VLAN filter if there is a VLAN */
2251 if (e1000_vlan_used(adapter))
2252 rctl |= E1000_RCTL_VFE;
2253 }
2255 if (netdev_uc_count(netdev) > rar_entries - 1) {
2256 rctl |= E1000_RCTL_UPE;
2257 } else if (!(netdev->flags & IFF_PROMISC)) {
2258 rctl &= ~E1000_RCTL_UPE;
2259 use_uc = true;
2260 }
2262 ew32(RCTL, rctl);
2264 /* 82542 2.0 needs to be in reset to write receive address registers */
2266 if (hw->mac_type == e1000_82542_rev2_0)
2267 e1000_enter_82542_rst(adapter);
2269 /* load the first 14 addresses into the exact filters 1-14. Unicast
2270  * addresses take precedence to avoid disabling unicast filtering
2271  * when possible.
2272  *
2273  * RAR 0 is used for the station MAC address
2274  * if there are not 14 addresses, go ahead and clear the filters
2275  */
2276 i = 1;
2277 if (use_uc)
2278 netdev_for_each_uc_addr(ha, netdev) {
2279 if (i == rar_entries)
2280 break;
2281 e1000_rar_set(hw, ha->addr, i++);
2282 }
2284 netdev_for_each_mc_addr(ha, netdev) {
2285 if (i == rar_entries) {
2286 /* load any remaining addresses into the hash table */
2287 u32 hash_reg, hash_bit, mta;
2288 hash_value = e1000_hash_mc_addr(hw, ha->addr);
2289 hash_reg = (hash_value >> 5) & 0x7F;
2290 hash_bit = hash_value & 0x1F;
2291 mta = (1 << hash_bit);
2292 mcarray[hash_reg] |= mta;
2293 } else {
2294 e1000_rar_set(hw, ha->addr, i++);
2295 }
2296 }
2298 for (; i < rar_entries; i++) {
2299 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2300 E1000_WRITE_FLUSH();
2301 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2302 E1000_WRITE_FLUSH();
2303 }
2305 /* write the hash table completely, write from bottom to avoid
2306 * both stupid write combining chipsets, and flushing each write
2308 for (i = mta_reg_count - 1; i >= 0 ; i--) {
2309 /* The 82544 has an erratum where writing odd
2310  * offsets overwrites the previous even offset, but writing
2311  * backwards over the range works around the issue by always
2312  * writing the odd offset first
2313  */
2314 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2315 }
2316 E1000_WRITE_FLUSH();
2318 if (hw->mac_type == e1000_82542_rev2_0)
2319 e1000_leave_82542_rst(adapter);
2321 kfree(mcarray);
2322 }
2324 /**
2325  * e1000_update_phy_info_task - get phy info
2326  * @work: work struct contained inside adapter struct
2327  *
2328  * Need to wait a few seconds after link up to get diagnostic information from
2329  * the phy
2330  */
2331 static void e1000_update_phy_info_task(struct work_struct *work)
2332 {
2333 struct e1000_adapter *adapter = container_of(work,
2334 struct e1000_adapter,
2335 phy_info_task.work);
2336 if (test_bit(__E1000_DOWN, &adapter->flags))
2337 return;
2338 mutex_lock(&adapter->mutex);
2339 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2340 mutex_unlock(&adapter->mutex);
2341 }
2343 /**
2344  * e1000_82547_tx_fifo_stall_task - task to complete work
2345  * @work: work struct contained inside adapter struct
2346  */
2347 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2348 {
2349 struct e1000_adapter *adapter = container_of(work,
2350 struct e1000_adapter,
2351 fifo_stall_task.work);
2352 struct e1000_hw *hw = &adapter->hw;
2353 struct net_device *netdev = adapter->netdev;
2354 u32 tctl;
2356 if (test_bit(__E1000_DOWN, &adapter->flags))
2357 return;
2358 mutex_lock(&adapter->mutex);
2359 if (atomic_read(&adapter->tx_fifo_stall)) {
2360 if ((er32(TDT) == er32(TDH)) &&
2361 (er32(TDFT) == er32(TDFH)) &&
2362 (er32(TDFTS) == er32(TDFHS))) {
2363 tctl = er32(TCTL);
2364 ew32(TCTL, tctl & ~E1000_TCTL_EN);
2365 ew32(TDFT, adapter->tx_head_addr);
2366 ew32(TDFH, adapter->tx_head_addr);
2367 ew32(TDFTS, adapter->tx_head_addr);
2368 ew32(TDFHS, adapter->tx_head_addr);
2369 ew32(TCTL, tctl);
2370 E1000_WRITE_FLUSH();
2372 adapter->tx_fifo_head = 0;
2373 atomic_set(&adapter->tx_fifo_stall, 0);
2374 netif_wake_queue(netdev);
2375 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2376 schedule_delayed_work(&adapter->fifo_stall_task, 1);
2377 }
2378 }
2379 mutex_unlock(&adapter->mutex);
2380 }
2382 bool e1000_has_link(struct e1000_adapter *adapter)
2383 {
2384 struct e1000_hw *hw = &adapter->hw;
2385 bool link_active = false;
2387 /* get_link_status is set on LSC (link status) interrupt or rx
2388 * sequence error interrupt (except on intel ce4100).
2389 * get_link_status will stay false until the
2390 * e1000_check_for_link establishes link for copper adapters
2391 * ONLY
2392 */
2393 switch (hw->media_type) {
2394 case e1000_media_type_copper:
2395 if (hw->mac_type == e1000_ce4100)
2396 hw->get_link_status = 1;
2397 if (hw->get_link_status) {
2398 e1000_check_for_link(hw);
2399 link_active = !hw->get_link_status;
2400 } else {
2401 link_active = true;
2402 }
2403 break;
2404 case e1000_media_type_fiber:
2405 e1000_check_for_link(hw);
2406 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2407 break;
2408 case e1000_media_type_internal_serdes:
2409 e1000_check_for_link(hw);
2410 link_active = hw->serdes_has_link;
2411 break;
2412 default:
2413 break;
2414 }
2416 return link_active;
2417 }
2419 /**
2420  * e1000_watchdog - work function
2421  * @work: work struct contained inside adapter struct
2422  */
2423 static void e1000_watchdog(struct work_struct *work)
2424 {
2425 struct e1000_adapter *adapter = container_of(work,
2426 struct e1000_adapter,
2427 watchdog_task.work);
2428 struct e1000_hw *hw = &adapter->hw;
2429 struct net_device *netdev = adapter->netdev;
2430 struct e1000_tx_ring *txdr = adapter->tx_ring;
2431 u32 link, tctl;
2433 if (test_bit(__E1000_DOWN, &adapter->flags))
2434 return;
2436 mutex_lock(&adapter->mutex);
2437 link = e1000_has_link(adapter);
2438 if ((netif_carrier_ok(netdev)) && link)
2439 goto link_up;
2441 if (link) {
2442 if (!netif_carrier_ok(netdev)) {
2443 u32 ctrl;
2444 bool txb2b = true;
2445 /* update snapshot of PHY registers on LSC */
2446 e1000_get_speed_and_duplex(hw,
2447 &adapter->link_speed,
2448 &adapter->link_duplex);
2450 ctrl = er32(CTRL);
2451 pr_info("%s NIC Link is Up %d Mbps %s, "
2452 "Flow Control: %s\n",
2454 adapter->link_speed,
2455 adapter->link_duplex == FULL_DUPLEX ?
2456 "Full Duplex" : "Half Duplex",
2457 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2458 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2459 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2460 E1000_CTRL_TFCE) ? "TX" : "None")));
2462 /* adjust timeout factor according to speed/duplex */
2463 adapter->tx_timeout_factor = 1;
2464 switch (adapter->link_speed) {
2465 case SPEED_10:
2466 txb2b = false;
2467 adapter->tx_timeout_factor = 16;
2468 break;
2469 case SPEED_100:
2470 txb2b = false;
2471 /* maybe add some timeout factor ? */
2472 break;
2473 }
2475 /* enable transmits in the hardware */
2476 tctl = er32(TCTL);
2477 tctl |= E1000_TCTL_EN;
2478 ew32(TCTL, tctl);
2480 netif_carrier_on(netdev);
2481 if (!test_bit(__E1000_DOWN, &adapter->flags))
2482 schedule_delayed_work(&adapter->phy_info_task,
2483 2 * HZ);
2484 adapter->smartspeed = 0;
2485 }
2486 } else {
2487 if (netif_carrier_ok(netdev)) {
2488 adapter->link_speed = 0;
2489 adapter->link_duplex = 0;
2490 pr_info("%s NIC Link is Down\n",
2492 netif_carrier_off(netdev);
2494 if (!test_bit(__E1000_DOWN, &adapter->flags))
2495 schedule_delayed_work(&adapter->phy_info_task,
2499 e1000_smartspeed(adapter);
2503 e1000_update_stats(adapter);
2505 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2506 adapter->tpt_old = adapter->stats.tpt;
2507 hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2508 adapter->colc_old = adapter->stats.colc;
2510 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2511 adapter->gorcl_old = adapter->stats.gorcl;
2512 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2513 adapter->gotcl_old = adapter->stats.gotcl;
2515 e1000_update_adaptive(hw);
2517 if (!netif_carrier_ok(netdev)) {
2518 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2519 /* We've lost link, so the controller stops DMA,
2520 * but we've got queued Tx work that's never going
2521 * to get done, so reset controller to flush Tx.
2522 * (Do the reset outside of interrupt context).
2523 */
2524 adapter->tx_timeout_count++;
2525 schedule_work(&adapter->reset_task);
2526 /* exit immediately since reset is imminent */
2527 goto unlock;
2528 }
2529 }
2531 /* Simple mode for Interrupt Throttle Rate (ITR) */
2532 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2533 /* Symmetric Tx/Rx gets a reduced ITR=2000;
2534 * Total asymmetrical Tx or Rx gets ITR=8000;
2535 * everyone else is between 2000-8000.
2536 */
2537 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2538 u32 dif = (adapter->gotcl > adapter->gorcl ?
2539 adapter->gotcl - adapter->gorcl :
2540 adapter->gorcl - adapter->gotcl) / 10000;
2541 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2543 ew32(ITR, 1000000000 / (itr * 256));
2544 }
2546 /* Cause software interrupt to ensure rx ring is cleaned */
2547 ew32(ICS, E1000_ICS_RXDMT0);
2549 /* Force detection of hung controller every watchdog period */
2550 adapter->detect_tx_hung = true;
2552 /* Reschedule the task */
2553 if (!test_bit(__E1000_DOWN, &adapter->flags))
2554 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2556 unlock:
2557 mutex_unlock(&adapter->mutex);
2558 }
2560 enum latency_range {
2561 lowest_latency = 0,
2562 low_latency = 1,
2563 bulk_latency = 2,
2564 latency_invalid = 255
2565 };
2567 /**
2568  * e1000_update_itr - update the dynamic ITR value based on statistics
2569 * @adapter: pointer to adapter
2570 * @itr_setting: current adapter->itr
2571 * @packets: the number of packets during this measurement interval
2572 * @bytes: the number of bytes during this measurement interval
2574 * Stores a new ITR value based on packets and byte
2575 * counts during the last interrupt. The advantage of per interrupt
2576 * computation is faster updates and more accurate ITR for the current
2577 * traffic pattern. Constants in this function were computed
2578 * based on theoretical maximum wire speed and thresholds were set based
2579 * on testing data as well as attempting to minimize response time
2580 * while increasing bulk throughput.
2581  * This functionality is controlled by the InterruptThrottleRate module
2582  * parameter (see e1000_param.c)
2583  */
2584 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2585 u16 itr_setting, int packets, int bytes)
2586 {
2587 unsigned int retval = itr_setting;
2588 struct e1000_hw *hw = &adapter->hw;
2590 if (unlikely(hw->mac_type < e1000_82540))
2591 goto update_itr_done;
2593 if (packets == 0)
2594 goto update_itr_done;
2596 switch (itr_setting) {
2597 case lowest_latency:
2598 /* jumbo frames get bulk treatment*/
2599 if (bytes/packets > 8000)
2600 retval = bulk_latency;
2601 else if ((packets < 5) && (bytes > 512))
2602 retval = low_latency;
2603 break;
2604 case low_latency: /* 50 usec aka 20000 ints/s */
2605 if (bytes > 10000) {
2606 /* jumbo frames need bulk latency setting */
2607 if (bytes/packets > 8000)
2608 retval = bulk_latency;
2609 else if ((packets < 10) || ((bytes/packets) > 1200))
2610 retval = bulk_latency;
2611 else if ((packets > 35))
2612 retval = lowest_latency;
2613 } else if (bytes/packets > 2000)
2614 retval = bulk_latency;
2615 else if (packets <= 2 && bytes < 512)
2616 retval = lowest_latency;
2617 break;
2618 case bulk_latency: /* 250 usec aka 4000 ints/s */
2619 if (bytes > 25000) {
2620 if (packets > 35)
2621 retval = low_latency;
2622 } else if (bytes < 6000) {
2623 retval = low_latency;
2624 }
2625 break;
2626 }
2628 update_itr_done:
2629 return retval;
2630 }
2632 static void e1000_set_itr(struct e1000_adapter *adapter)
2633 {
2634 struct e1000_hw *hw = &adapter->hw;
2635 u16 current_itr;
2636 u32 new_itr = adapter->itr;
2638 if (unlikely(hw->mac_type < e1000_82540))
2639 return;
2641 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2642 if (unlikely(adapter->link_speed != SPEED_1000)) {
2643 current_itr = 0;
2644 new_itr = 4000;
2645 goto set_itr_now;
2646 }
2648 adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2649 adapter->total_tx_packets,
2650 adapter->total_tx_bytes);
2651 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2652 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2653 adapter->tx_itr = low_latency;
2655 adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2656 adapter->total_rx_packets,
2657 adapter->total_rx_bytes);
2658 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2659 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2660 adapter->rx_itr = low_latency;
2662 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2664 switch (current_itr) {
2665 /* counts and packets in update_itr are dependent on these numbers */
2666 case lowest_latency:
2667 new_itr = 70000;
2668 break;
2669 case low_latency:
2670 new_itr = 20000; /* aka hwitr = ~200 */
2671 break;
2672 case bulk_latency:
2673 new_itr = 4000;
2674 break;
2675 default:
2676 break;
2677 }
2679 set_itr_now:
2680 if (new_itr != adapter->itr) {
2681 /* this attempts to bias the interrupt rate towards Bulk
2682 * by adding intermediate steps when interrupt rate is
2683 * increasing
2684 */
2685 new_itr = new_itr > adapter->itr ?
2686 min(adapter->itr + (new_itr >> 2), new_itr) :
2687 new_itr;
2688 adapter->itr = new_itr;
2689 ew32(ITR, 1000000000 / (new_itr * 256));
2690 }
2691 }
2693 #define E1000_TX_FLAGS_CSUM 0x00000001
2694 #define E1000_TX_FLAGS_VLAN 0x00000002
2695 #define E1000_TX_FLAGS_TSO 0x00000004
2696 #define E1000_TX_FLAGS_IPV4 0x00000008
2697 #define E1000_TX_FLAGS_NO_FCS 0x00000010
2698 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2699 #define E1000_TX_FLAGS_VLAN_SHIFT 16
2701 static int e1000_tso(struct e1000_adapter *adapter,
2702 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2703 {
2704 struct e1000_context_desc *context_desc;
2705 struct e1000_buffer *buffer_info;
2706 unsigned int i;
2707 u32 cmd_length = 0;
2708 u16 ipcse = 0, tucse, mss;
2709 u8 ipcss, ipcso, tucss, tucso, hdr_len;
2710 int err;
2712 if (skb_is_gso(skb)) {
2713 if (skb_header_cloned(skb)) {
2714 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2715 if (err)
2716 return err;
2717 }
2719 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2720 mss = skb_shinfo(skb)->gso_size;
2721 if (skb->protocol == htons(ETH_P_IP)) {
2722 struct iphdr *iph = ip_hdr(skb);
2723 iph->tot_len = 0;
2724 iph->check = 0;
2725 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2726 iph->daddr, 0,
2727 IPPROTO_TCP,
2728 0);
2729 cmd_length = E1000_TXD_CMD_IP;
2730 ipcse = skb_transport_offset(skb) - 1;
2731 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2732 ipv6_hdr(skb)->payload_len = 0;
2733 tcp_hdr(skb)->check =
2734 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2735 &ipv6_hdr(skb)->daddr,
2736 0, IPPROTO_TCP, 0);
2737 ipcse = 0;
2738 }
2739 ipcss = skb_network_offset(skb);
2740 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2741 tucss = skb_transport_offset(skb);
2742 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2743 tucse = 0;
2745 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2746 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2748 i = tx_ring->next_to_use;
2749 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2750 buffer_info = &tx_ring->buffer_info[i];
2752 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2753 context_desc->lower_setup.ip_fields.ipcso = ipcso;
2754 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2755 context_desc->upper_setup.tcp_fields.tucss = tucss;
2756 context_desc->upper_setup.tcp_fields.tucso = tucso;
2757 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2758 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2759 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2760 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2762 buffer_info->time_stamp = jiffies;
2763 buffer_info->next_to_watch = i;
2765 if (++i == tx_ring->count) i = 0;
2766 tx_ring->next_to_use = i;
2768 return true;
2769 }
2770 return false;
2771 }
2773 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2774 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2775 {
2776 struct e1000_context_desc *context_desc;
2777 struct e1000_buffer *buffer_info;
2778 unsigned int i;
2779 u8 css;
2780 u32 cmd_len = E1000_TXD_CMD_DEXT;
2782 if (skb->ip_summed != CHECKSUM_PARTIAL)
2783 return false;
2785 switch (skb->protocol) {
2786 case cpu_to_be16(ETH_P_IP):
2787 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2788 cmd_len |= E1000_TXD_CMD_TCP;
2789 break;
2790 case cpu_to_be16(ETH_P_IPV6):
2791 /* XXX not handling all IPV6 headers */
2792 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2793 cmd_len |= E1000_TXD_CMD_TCP;
2794 break;
2795 default:
2796 if (unlikely(net_ratelimit()))
2797 e_warn(drv, "checksum_partial proto=%x!\n",
2798 skb->protocol);
2799 break;
2800 }
2802 css = skb_checksum_start_offset(skb);
2804 i = tx_ring->next_to_use;
2805 buffer_info = &tx_ring->buffer_info[i];
2806 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2808 context_desc->lower_setup.ip_config = 0;
2809 context_desc->upper_setup.tcp_fields.tucss = css;
2810 context_desc->upper_setup.tcp_fields.tucso =
2811 css + skb->csum_offset;
2812 context_desc->upper_setup.tcp_fields.tucse = 0;
2813 context_desc->tcp_seg_setup.data = 0;
2814 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2816 buffer_info->time_stamp = jiffies;
2817 buffer_info->next_to_watch = i;
2819 if (unlikely(++i == tx_ring->count)) i = 0;
2820 tx_ring->next_to_use = i;
2822 return true;
2823 }
2825 #define E1000_MAX_TXD_PWR 12
2826 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
2828 static int e1000_tx_map(struct e1000_adapter *adapter,
2829 struct e1000_tx_ring *tx_ring,
2830 struct sk_buff *skb, unsigned int first,
2831 unsigned int max_per_txd, unsigned int nr_frags,
2832 unsigned int mss)
2833 {
2834 struct e1000_hw *hw = &adapter->hw;
2835 struct pci_dev *pdev = adapter->pdev;
2836 struct e1000_buffer *buffer_info;
2837 unsigned int len = skb_headlen(skb);
2838 unsigned int offset = 0, size, count = 0, i;
2839 unsigned int f, bytecount, segs;
2841 i = tx_ring->next_to_use;
2843 while (len) {
2844 buffer_info = &tx_ring->buffer_info[i];
2845 size = min(len, max_per_txd);
2846 /* Workaround for Controller erratum --
2847 * descriptor for non-tso packet in a linear SKB that follows a
2848 * tso gets written back prematurely before the data is fully
2849 * DMA'd to the controller
2850 */
2851 if (!skb->data_len && tx_ring->last_tx_tso &&
2852 !skb_is_gso(skb)) {
2853 tx_ring->last_tx_tso = false;
2854 size -= 4;
2855 }
2857 /* Workaround for premature desc write-backs
2858 * in TSO mode. Append 4-byte sentinel desc
2859 */
2860 if (unlikely(mss && !nr_frags && size == len && size > 8))
2861 size -= 4;
2862 /* work-around for errata 10 and it applies
2863 * to all controllers in PCI-X mode
2864 * The fix is to make sure that the first descriptor of a
2865 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2866 */
2867 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2868 (size > 2015) && count == 0))
2869 size = 2015;
2871 /* Workaround for potential 82544 hang in PCI-X. Avoid
2872 * terminating buffers within evenly-aligned dwords.
2873 */
2874 if (unlikely(adapter->pcix_82544 &&
2875 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2876 size > 4))
2877 size -= 4;
2879 buffer_info->length = size;
2880 /* set time_stamp *before* dma to help avoid a possible race */
2881 buffer_info->time_stamp = jiffies;
2882 buffer_info->mapped_as_page = false;
2883 buffer_info->dma = dma_map_single(&pdev->dev,
2884 skb->data + offset,
2885 size, DMA_TO_DEVICE);
2886 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2887 goto dma_error;
2888 buffer_info->next_to_watch = i;
2890 len -= size;
2891 offset += size;
2892 count++;
2893 if (len) {
2894 i++;
2895 if (unlikely(i == tx_ring->count))
2896 i = 0;
2897 }
2898 }
2900 for (f = 0; f < nr_frags; f++) {
2901 const struct skb_frag_struct *frag;
2903 frag = &skb_shinfo(skb)->frags[f];
2904 len = skb_frag_size(frag);
2905 offset = 0;
2907 while (len) {
2908 unsigned long bufend;
2909 i++;
2910 if (unlikely(i == tx_ring->count))
2911 i = 0;
2913 buffer_info = &tx_ring->buffer_info[i];
2914 size = min(len, max_per_txd);
2915 /* Workaround for premature desc write-backs
2916 * in TSO mode. Append 4-byte sentinel desc
2917 */
2918 if (unlikely(mss && f == (nr_frags-1) &&
2919 size == len && size > 8))
2920 size -= 4;
2921 /* Workaround for potential 82544 hang in PCI-X.
2922 * Avoid terminating buffers within evenly-aligned
2923 * dwords.
2924 */
2925 bufend = (unsigned long)
2926 page_to_phys(skb_frag_page(frag));
2927 bufend += offset + size - 1;
2928 if (unlikely(adapter->pcix_82544 &&
2929 !(bufend & 4) &&
2930 size > 4))
2931 size -= 4;
2933 buffer_info->length = size;
2934 buffer_info->time_stamp = jiffies;
2935 buffer_info->mapped_as_page = true;
2936 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2937 offset, size, DMA_TO_DEVICE);
2938 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2939 goto dma_error;
2940 buffer_info->next_to_watch = i;
2942 len -= size;
2943 offset += size;
2944 count++;
2945 }
2946 }
2948 segs = skb_shinfo(skb)->gso_segs ?: 1;
2949 /* multiply data chunks by size of headers */
2950 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2952 tx_ring->buffer_info[i].skb = skb;
2953 tx_ring->buffer_info[i].segs = segs;
2954 tx_ring->buffer_info[i].bytecount = bytecount;
2955 tx_ring->buffer_info[first].next_to_watch = i;
2957 return count;
2959 dma_error:
2960 dev_err(&pdev->dev, "TX DMA map failed\n");
2961 buffer_info->dma = 0;
2962 if (count)
2963 count--;
2965 while (count--) {
2966 if (i == 0)
2967 i += tx_ring->count;
2968 i--;
2969 buffer_info = &tx_ring->buffer_info[i];
2970 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2971 }
2973 return 0;
2974 }
2976 static void e1000_tx_queue(struct e1000_adapter *adapter,
2977 struct e1000_tx_ring *tx_ring, int tx_flags,
2978 int count)
2979 {
2980 struct e1000_hw *hw = &adapter->hw;
2981 struct e1000_tx_desc *tx_desc = NULL;
2982 struct e1000_buffer *buffer_info;
2983 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2984 unsigned int i;
2986 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2987 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2988 E1000_TXD_CMD_TSE;
2989 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2991 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2992 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2993 }
2995 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2996 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2997 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2998 }
3000 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3001 txd_lower |= E1000_TXD_CMD_VLE;
3002 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3003 }
3005 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3006 txd_lower &= ~(E1000_TXD_CMD_IFCS);
3008 i = tx_ring->next_to_use;
3010 while (count--) {
3011 buffer_info = &tx_ring->buffer_info[i];
3012 tx_desc = E1000_TX_DESC(*tx_ring, i);
3013 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3014 tx_desc->lower.data =
3015 cpu_to_le32(txd_lower | buffer_info->length);
3016 tx_desc->upper.data = cpu_to_le32(txd_upper);
3017 if (unlikely(++i == tx_ring->count)) i = 0;
3018 }
3020 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3022 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3023 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3024 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3026 /* Force memory writes to complete before letting h/w
3027 * know there are new descriptors to fetch. (Only
3028 * applicable for weak-ordered memory model archs,
3029 * such as IA-64).
3030 */
3031 wmb();
3033 tx_ring->next_to_use = i;
3034 writel(i, hw->hw_addr + tx_ring->tdt);
3035 /* we need this if more than one processor can write to our tail
3036 * at a time, it synchronizes IO on IA64/Altix systems
3037 */
3038 mmiowb();
3039 }
3041 /* 82547 workaround to avoid controller hang in half-duplex environment.
3042 * The workaround is to avoid queuing a large packet that would span
3043 * the internal Tx FIFO ring boundary by notifying the stack to resend
3044 * the packet at a later time. This gives the Tx FIFO an opportunity to
3045 * flush all packets. When that occurs, we reset the Tx FIFO pointers
3046 * to the beginning of the Tx FIFO.
3047 */
3049 #define E1000_FIFO_HDR 0x10
3050 #define E1000_82547_PAD_LEN 0x3E0
3052 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3053 struct sk_buff *skb)
3054 {
3055 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3056 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3058 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3060 if (adapter->link_duplex != HALF_DUPLEX)
3061 goto no_fifo_stall_required;
3063 if (atomic_read(&adapter->tx_fifo_stall))
3064 return 1;
3066 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3067 atomic_set(&adapter->tx_fifo_stall, 1);
3068 return 1;
3069 }
3071 no_fifo_stall_required:
3072 adapter->tx_fifo_head += skb_fifo_len;
3073 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3074 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3075 return 0;
3076 }
3078 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3079 {
3080 struct e1000_adapter *adapter = netdev_priv(netdev);
3081 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3083 netif_stop_queue(netdev);
3084 /* Herbert's original patch had:
3085 * smp_mb__after_netif_stop_queue();
3086 * but since that doesn't exist yet, just open code it.
3087 */
3088 smp_mb();
3090 /* We need to check again in case another CPU has just
3091 * made room available.
3092 */
3093 if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3094 return -EBUSY;
3096 /* A reprieve! */
3097 netif_start_queue(netdev);
3098 ++adapter->restart_queue;
3099 return 0;
3100 }
3102 static int e1000_maybe_stop_tx(struct net_device *netdev,
3103 struct e1000_tx_ring *tx_ring, int size)
3104 {
3105 if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3106 return 0;
3107 return __e1000_maybe_stop_tx(netdev, size);
3108 }
3110 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
3111 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3112 struct net_device *netdev)
3113 {
3114 struct e1000_adapter *adapter = netdev_priv(netdev);
3115 struct e1000_hw *hw = &adapter->hw;
3116 struct e1000_tx_ring *tx_ring;
3117 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3118 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3119 unsigned int tx_flags = 0;
3120 unsigned int len = skb_headlen(skb);
3121 unsigned int nr_frags;
3122 unsigned int mss;
3123 int count = 0;
3124 int tso;
3125 unsigned int f;
3127 /* This goes back to the question of how to logically map a Tx queue
3128 * to a flow. Right now, performance is impacted slightly negatively
3129 * if using multiple Tx queues. If the stack breaks away from a
3130 * single qdisc implementation, we can look at this again.
3131 */
3132 tx_ring = adapter->tx_ring;
3134 if (unlikely(skb->len <= 0)) {
3135 dev_kfree_skb_any(skb);
3136 return NETDEV_TX_OK;
3137 }
3139 /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3140 * packets may get corrupted during padding by HW.
3141 * To WA this issue, pad all small packets manually.
3142 */
3143 if (skb->len < ETH_ZLEN) {
3144 if (skb_pad(skb, ETH_ZLEN - skb->len))
3145 return NETDEV_TX_OK;
3146 skb->len = ETH_ZLEN;
3147 skb_set_tail_pointer(skb, ETH_ZLEN);
3148 }
3150 mss = skb_shinfo(skb)->gso_size;
3151 /* The controller does a simple calculation to
3152 * make sure there is enough room in the FIFO before
3153 * initiating the DMA for each buffer. The calc is:
3154 * 4 = ceil(buffer len/mss). To make sure we don't
3155 * overrun the FIFO, adjust the max buffer len if mss
3156 * drops.
3157 */
3158 if (mss) {
3159 u8 hdr_len;
3160 max_per_txd = min(mss << 2, max_per_txd);
3161 max_txd_pwr = fls(max_per_txd) - 1;
3163 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3164 if (skb->data_len && hdr_len == len) {
3165 switch (hw->mac_type) {
3166 unsigned int pull_size;
3167 case e1000_82544:
3168 /* Make sure we have room to chop off 4 bytes,
3169 * and that the end alignment will work out to
3170 * this hardware's requirements
3171 * NOTE: this is a TSO only workaround
3172 * if end byte alignment not correct move us
3173 * into the next dword
3174 */
3175 if ((unsigned long)(skb_tail_pointer(skb) - 1)
3176 & 4)
3177 break;
3179 pull_size = min((unsigned int)4, skb->data_len);
3180 if (!__pskb_pull_tail(skb, pull_size)) {
3181 e_err(drv, "__pskb_pull_tail failed.\n");
3183 dev_kfree_skb_any(skb);
3184 return NETDEV_TX_OK;
3185 }
3186 len = skb_headlen(skb);
3187 break;
3188 default:
3189 /* do nothing */
3190 break;
3191 }
3192 }
3193 }
3195 /* reserve a descriptor for the offload context */
3196 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3197 count++;
3198 count++;
3200 /* Controller Erratum workaround */
3201 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3202 count++;
3204 count += TXD_USE_COUNT(len, max_txd_pwr);
3206 if (adapter->pcix_82544)
3207 count++;
3209 /* work-around for errata 10 and it applies to all controllers
3210 * in PCI-X mode, so add one more descriptor to the count
3211 */
3212 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3213 (len > 2015)))
3214 count++;
3216 nr_frags = skb_shinfo(skb)->nr_frags;
3217 for (f = 0; f < nr_frags; f++)
3218 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3219 max_txd_pwr);
3220 if (adapter->pcix_82544)
3221 count += nr_frags;
3223 /* need: count + 2 desc gap to keep tail from touching
3224 * head, otherwise try next time
3225 */
3226 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3227 return NETDEV_TX_BUSY;
3229 if (unlikely((hw->mac_type == e1000_82547) &&
3230 (e1000_82547_fifo_workaround(adapter, skb)))) {
3231 netif_stop_queue(netdev);
3232 if (!test_bit(__E1000_DOWN, &adapter->flags))
3233 schedule_delayed_work(&adapter->fifo_stall_task, 1);
3234 return NETDEV_TX_BUSY;
3235 }
3237 if (vlan_tx_tag_present(skb)) {
3238 tx_flags |= E1000_TX_FLAGS_VLAN;
3239 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3240 }
3242 first = tx_ring->next_to_use;
3244 tso = e1000_tso(adapter, tx_ring, skb);
3245 if (tso < 0) {
3246 dev_kfree_skb_any(skb);
3247 return NETDEV_TX_OK;
3248 }
3250 if (likely(tso)) {
3251 if (likely(hw->mac_type != e1000_82544))
3252 tx_ring->last_tx_tso = true;
3253 tx_flags |= E1000_TX_FLAGS_TSO;
3254 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3255 tx_flags |= E1000_TX_FLAGS_CSUM;
3257 if (likely(skb->protocol == htons(ETH_P_IP)))
3258 tx_flags |= E1000_TX_FLAGS_IPV4;
3260 if (unlikely(skb->no_fcs))
3261 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3263 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3264 nr_frags, mss);
3266 if (count) {
3267 netdev_sent_queue(netdev, skb->len);
3268 skb_tx_timestamp(skb);
3270 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3271 /* Make sure there is space in the ring for the next send. */
3272 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3274 } else {
3275 dev_kfree_skb_any(skb);
3276 tx_ring->buffer_info[first].time_stamp = 0;
3277 tx_ring->next_to_use = first;
3278 }
3280 return NETDEV_TX_OK;
3281 }
3283 #define NUM_REGS 38 /* 1 based count */
3284 static void e1000_regdump(struct e1000_adapter *adapter)
3285 {
3286 struct e1000_hw *hw = &adapter->hw;
3287 u32 regs[NUM_REGS];
3288 u32 *regs_buff = regs;
3289 int i = 0;
3291 static const char * const reg_name[] = {
3293 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3294 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3295 "TIDV", "TXDCTL", "TADV", "TARC0",
3296 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3298 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3299 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3300 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3303 regs_buff[0] = er32(CTRL);
3304 regs_buff[1] = er32(STATUS);
3306 regs_buff[2] = er32(RCTL);
3307 regs_buff[3] = er32(RDLEN);
3308 regs_buff[4] = er32(RDH);
3309 regs_buff[5] = er32(RDT);
3310 regs_buff[6] = er32(RDTR);
3312 regs_buff[7] = er32(TCTL);
3313 regs_buff[8] = er32(TDBAL);
3314 regs_buff[9] = er32(TDBAH);
3315 regs_buff[10] = er32(TDLEN);
3316 regs_buff[11] = er32(TDH);
3317 regs_buff[12] = er32(TDT);
3318 regs_buff[13] = er32(TIDV);
3319 regs_buff[14] = er32(TXDCTL);
3320 regs_buff[15] = er32(TADV);
3321 regs_buff[16] = er32(TARC0);
3323 regs_buff[17] = er32(TDBAL1);
3324 regs_buff[18] = er32(TDBAH1);
3325 regs_buff[19] = er32(TDLEN1);
3326 regs_buff[20] = er32(TDH1);
3327 regs_buff[21] = er32(TDT1);
3328 regs_buff[22] = er32(TXDCTL1);
3329 regs_buff[23] = er32(TARC1);
3330 regs_buff[24] = er32(CTRL_EXT);
3331 regs_buff[25] = er32(ERT);
3332 regs_buff[26] = er32(RDBAL0);
3333 regs_buff[27] = er32(RDBAH0);
3334 regs_buff[28] = er32(TDFH);
3335 regs_buff[29] = er32(TDFT);
3336 regs_buff[30] = er32(TDFHS);
3337 regs_buff[31] = er32(TDFTS);
3338 regs_buff[32] = er32(TDFPC);
3339 regs_buff[33] = er32(RDFH);
3340 regs_buff[34] = er32(RDFT);
3341 regs_buff[35] = er32(RDFHS);
3342 regs_buff[36] = er32(RDFTS);
3343 regs_buff[37] = er32(RDFPC);
3345 pr_info("Register dump\n");
3346 for (i = 0; i < NUM_REGS; i++)
3347 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
3351 * e1000_dump: Print registers, tx ring and rx ring
3353 static void e1000_dump(struct e1000_adapter *adapter)
3355 /* this code doesn't handle multiple rings */
3356 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3357 struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3358 int i;
3360 if (!netif_msg_hw(adapter))
3361 return;
3363 /* Print Registers */
3364 e1000_regdump(adapter);
3367 pr_info("TX Desc ring0 dump\n");
3369 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3371 * Legacy Transmit Descriptor
3372 * +--------------------------------------------------------------+
3373 * 0 | Buffer Address [63:0] (Reserved on Write Back) |
3374 * +--------------------------------------------------------------+
3375 * 8 | Special | CSS | Status | CMD | CSO | Length |
3376 * +--------------------------------------------------------------+
3377 * 63 48 47 36 35 32 31 24 23 16 15 0
3379 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3380 * 63 48 47 40 39 32 31 16 15 8 7 0
3381 * +----------------------------------------------------------------+
3382 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
3383 * +----------------------------------------------------------------+
3384 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
3385 * +----------------------------------------------------------------+
3386 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3388 * Extended Data Descriptor (DTYP=0x1)
3389 * +----------------------------------------------------------------+
3390 * 0 | Buffer Address [63:0] |
3391 * +----------------------------------------------------------------+
3392 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
3393 * +----------------------------------------------------------------+
3394 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3396 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n");
3397 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n");
3399 if (!netif_msg_tx_done(adapter))
3400 goto rx_ring_summary;
3402 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3403 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3404 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
3405 struct my_u { __le64 a; __le64 b; };
3406 struct my_u *u = (struct my_u *)tx_desc;
3407 const char *type;
3409 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3410 type = "NTC/U";
3411 else if (i == tx_ring->next_to_use)
3412 type = "NTU";
3413 else if (i == tx_ring->next_to_clean)
3414 type = "NTC";
3415 else
3416 type = "";
3418 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n",
3419 ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3420 le64_to_cpu(u->a), le64_to_cpu(u->b),
3421 (u64)buffer_info->dma, buffer_info->length,
3422 buffer_info->next_to_watch,
3423 (u64)buffer_info->time_stamp, buffer_info->skb, type);
3424 }
3426 rx_ring_summary:
3428 pr_info("\nRX Desc ring dump\n");
3430 /* Legacy Receive Descriptor Format
3432 * +-----------------------------------------------------+
3433 * | Buffer Address [63:0] |
3434 * +-----------------------------------------------------+
3435 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3436 * +-----------------------------------------------------+
3437 * 63 48 47 40 39 32 31 16 15 0
3439 pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n");
3441 if (!netif_msg_rx_status(adapter))
3442 goto exit;
3444 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3445 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3446 struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
3447 struct my_u { __le64 a; __le64 b; };
3448 struct my_u *u = (struct my_u *)rx_desc;
3449 const char *type;
3451 if (i == rx_ring->next_to_use)
3452 type = "NTU";
3453 else if (i == rx_ring->next_to_clean)
3454 type = "NTC";
3455 else
3456 type = "";
3458 pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
3459 i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3460 (u64)buffer_info->dma, buffer_info->skb, type);
3461 } /* for */
3463 /* dump the descriptor caches */
3464 /* rx */
3465 pr_info("Rx descriptor cache in 64bit format\n");
3466 for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3467 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3469 readl(adapter->hw.hw_addr + i+4),
3470 readl(adapter->hw.hw_addr + i),
3471 readl(adapter->hw.hw_addr + i+12),
3472 readl(adapter->hw.hw_addr + i+8));
3473 }
3474 /* tx */
3475 pr_info("Tx descriptor cache in 64bit format\n");
3476 for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3477 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3479 readl(adapter->hw.hw_addr + i+4),
3480 readl(adapter->hw.hw_addr + i),
3481 readl(adapter->hw.hw_addr + i+12),
3482 readl(adapter->hw.hw_addr + i+8));
3483 }
3485 exit:
3486 return;
3487 }
3488 /**
3489  * e1000_tx_timeout - Respond to a Tx Hang
3490  * @netdev: network interface device structure
3491  */
3492 static void e1000_tx_timeout(struct net_device *netdev)
3493 {
3494 struct e1000_adapter *adapter = netdev_priv(netdev);
3496 /* Do the reset outside of interrupt context */
3497 adapter->tx_timeout_count++;
3498 schedule_work(&adapter->reset_task);
3499 }
3501 static void e1000_reset_task(struct work_struct *work)
3502 {
3503 struct e1000_adapter *adapter =
3504 container_of(work, struct e1000_adapter, reset_task);
3506 if (test_bit(__E1000_DOWN, &adapter->flags))
3507 return;
3508 e_err(drv, "Reset adapter\n");
3509 e1000_reinit_safe(adapter);
3510 }
3512 /**
3513  * e1000_get_stats - Get System Network Statistics
3514  * @netdev: network interface device structure
3515  *
3516  * Returns the address of the device statistics structure.
3517  * The statistics are actually updated from the watchdog.
3518  */
3519 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3520 {
3521 /* only return the current stats */
3522 return &netdev->stats;
3523 }
3525 /**
3526  * e1000_change_mtu - Change the Maximum Transfer Unit
3527  * @netdev: network interface device structure
3528  * @new_mtu: new value for maximum frame size
3529  *
3530  * Returns 0 on success, negative on failure
3531  */
3532 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3533 {
3534 struct e1000_adapter *adapter = netdev_priv(netdev);
3535 struct e1000_hw *hw = &adapter->hw;
3536 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3538 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3539 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3540 e_err(probe, "Invalid MTU setting\n");
3544 /* Adapter-specific max frame size limits. */
3545 switch (hw->mac_type) {
3546 case e1000_undefined ... e1000_82542_rev2_1:
3547 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3548 e_err(probe, "Jumbo Frames not supported.\n");
3553 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3557 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3558 msleep(1);
3559 /* e1000_down has a dependency on max_frame_size */
3560 hw->max_frame_size = max_frame;
3561 if (netif_running(netdev))
3562 e1000_down(adapter);
3564 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3565 * means we reserve 2 more, this pushes us to allocate from the next
3566 * larger slab size.
3567 * i.e. RXBUFFER_2048 --> size-4096 slab
3568 * however with the new *_jumbo_rx* routines, jumbo receives will use
3569 * fragmented skbs
3570 */
3573 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3574 else
3575 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3576 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3577 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3578 adapter->rx_buffer_len = PAGE_SIZE;
3579 #endif
3581 /* adjust allocation if LPE protects us, and we aren't using SBP */
3582 if (!hw->tbi_compatibility_on &&
3583 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3584 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3585 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3587 pr_info("%s changing MTU from %d to %d\n",
3588 netdev->name, netdev->mtu, new_mtu);
3589 netdev->mtu = new_mtu;
3591 if (netif_running(netdev))
3592 e1000_up(adapter);
3593 else
3594 e1000_reset(adapter);
3596 clear_bit(__E1000_RESETTING, &adapter->flags);
3598 return 0;
3599 }
3601 /**
3602  * e1000_update_stats - Update the board statistics counters
3603  * @adapter: board private structure
3604  */
3605 void e1000_update_stats(struct e1000_adapter *adapter)
3606 {
3607 struct net_device *netdev = adapter->netdev;
3608 struct e1000_hw *hw = &adapter->hw;
3609 struct pci_dev *pdev = adapter->pdev;
3610 unsigned long flags;
3611 u16 phy_tmp;
3613 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3615 /* Prevent stats update while adapter is being reset, or if the pci
3616 * connection is down.
3617 */
3618 if (adapter->link_speed == 0)
3619 return;
3620 if (pci_channel_offline(pdev))
3621 return;
3623 spin_lock_irqsave(&adapter->stats_lock, flags);
3625 /* these counters are modified from e1000_tbi_adjust_stats,
3626 * called from the interrupt context, so they must only
3627 * be written while holding adapter->stats_lock
3628 */
3630 adapter->stats.crcerrs += er32(CRCERRS);
3631 adapter->stats.gprc += er32(GPRC);
3632 adapter->stats.gorcl += er32(GORCL);
3633 adapter->stats.gorch += er32(GORCH);
3634 adapter->stats.bprc += er32(BPRC);
3635 adapter->stats.mprc += er32(MPRC);
3636 adapter->stats.roc += er32(ROC);
3638 adapter->stats.prc64 += er32(PRC64);
3639 adapter->stats.prc127 += er32(PRC127);
3640 adapter->stats.prc255 += er32(PRC255);
3641 adapter->stats.prc511 += er32(PRC511);
3642 adapter->stats.prc1023 += er32(PRC1023);
3643 adapter->stats.prc1522 += er32(PRC1522);
3645 adapter->stats.symerrs += er32(SYMERRS);
3646 adapter->stats.mpc += er32(MPC);
3647 adapter->stats.scc += er32(SCC);
3648 adapter->stats.ecol += er32(ECOL);
3649 adapter->stats.mcc += er32(MCC);
3650 adapter->stats.latecol += er32(LATECOL);
3651 adapter->stats.dc += er32(DC);
3652 adapter->stats.sec += er32(SEC);
3653 adapter->stats.rlec += er32(RLEC);
3654 adapter->stats.xonrxc += er32(XONRXC);
3655 adapter->stats.xontxc += er32(XONTXC);
3656 adapter->stats.xoffrxc += er32(XOFFRXC);
3657 adapter->stats.xofftxc += er32(XOFFTXC);
3658 adapter->stats.fcruc += er32(FCRUC);
3659 adapter->stats.gptc += er32(GPTC);
3660 adapter->stats.gotcl += er32(GOTCL);
3661 adapter->stats.gotch += er32(GOTCH);
3662 adapter->stats.rnbc += er32(RNBC);
3663 adapter->stats.ruc += er32(RUC);
3664 adapter->stats.rfc += er32(RFC);
3665 adapter->stats.rjc += er32(RJC);
3666 adapter->stats.torl += er32(TORL);
3667 adapter->stats.torh += er32(TORH);
3668 adapter->stats.totl += er32(TOTL);
3669 adapter->stats.toth += er32(TOTH);
3670 adapter->stats.tpr += er32(TPR);
3672 adapter->stats.ptc64 += er32(PTC64);
3673 adapter->stats.ptc127 += er32(PTC127);
3674 adapter->stats.ptc255 += er32(PTC255);
3675 adapter->stats.ptc511 += er32(PTC511);
3676 adapter->stats.ptc1023 += er32(PTC1023);
3677 adapter->stats.ptc1522 += er32(PTC1522);
3679 adapter->stats.mptc += er32(MPTC);
3680 adapter->stats.bptc += er32(BPTC);
3682 /* used for adaptive IFS */
3684 hw->tx_packet_delta = er32(TPT);
3685 adapter->stats.tpt += hw->tx_packet_delta;
3686 hw->collision_delta = er32(COLC);
3687 adapter->stats.colc += hw->collision_delta;
3689 if (hw->mac_type >= e1000_82543) {
3690 adapter->stats.algnerrc += er32(ALGNERRC);
3691 adapter->stats.rxerrc += er32(RXERRC);
3692 adapter->stats.tncrs += er32(TNCRS);
3693 adapter->stats.cexterr += er32(CEXTERR);
3694 adapter->stats.tsctc += er32(TSCTC);
3695 adapter->stats.tsctfc += er32(TSCTFC);
3696 }
3698 /* Fill out the OS statistics structure */
3699 netdev->stats.multicast = adapter->stats.mprc;
3700 netdev->stats.collisions = adapter->stats.colc;
3702 /* Rx Errors */
3704 /* RLEC on some newer hardware can be incorrect so build
3705 * our own version based on RUC and ROC
3706 */
3707 netdev->stats.rx_errors = adapter->stats.rxerrc +
3708 adapter->stats.crcerrs + adapter->stats.algnerrc +
3709 adapter->stats.ruc + adapter->stats.roc +
3710 adapter->stats.cexterr;
3711 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3712 netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3713 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3714 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3715 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3717 /* Tx Errors */
3718 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3719 netdev->stats.tx_errors = adapter->stats.txerrc;
3720 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3721 netdev->stats.tx_window_errors = adapter->stats.latecol;
3722 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3723 if (hw->bad_tx_carr_stats_fd &&
3724 adapter->link_duplex == FULL_DUPLEX) {
3725 netdev->stats.tx_carrier_errors = 0;
3726 adapter->stats.tncrs = 0;
3727 }
3729 /* Tx Dropped needs to be maintained elsewhere */
3731 /* Phy Stats */
3732 if (hw->media_type == e1000_media_type_copper) {
3733 if ((adapter->link_speed == SPEED_1000) &&
3734 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3735 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3736 adapter->phy_stats.idle_errors += phy_tmp;
3737 }
3739 if ((hw->mac_type <= e1000_82546) &&
3740 (hw->phy_type == e1000_phy_m88) &&
3741 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3742 adapter->phy_stats.receive_errors += phy_tmp;
3743 }
3745 /* Management Stats */
3746 if (hw->has_smbus) {
3747 adapter->stats.mgptc += er32(MGTPTC);
3748 adapter->stats.mgprc += er32(MGTPRC);
3749 adapter->stats.mgpdc += er32(MGTPDC);
3750 }
3752 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3753 }
3755 /**
3756  * e1000_intr - Interrupt Handler
3757 * @irq: interrupt number
3758  * @data: pointer to a network interface device structure
3759  */
3760 static irqreturn_t e1000_intr(int irq, void *data)
3761 {
3762 struct net_device *netdev = data;
3763 struct e1000_adapter *adapter = netdev_priv(netdev);
3764 struct e1000_hw *hw = &adapter->hw;
3765 u32 icr = er32(ICR);
3767 if (unlikely((!icr)))
3768 return IRQ_NONE; /* Not our interrupt */
3770 /* we might have caused the interrupt, but the above
3771 * read cleared it, and just in case the driver is
3772 * down there is nothing to do so return handled
3773 */
3774 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3775 return IRQ_HANDLED;
3777 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3778 hw->get_link_status = 1;
3779 /* guard against interrupt when we're going down */
3780 if (!test_bit(__E1000_DOWN, &adapter->flags))
3781 schedule_delayed_work(&adapter->watchdog_task, 1);
3782 }
3784 /* disable interrupts, without the synchronize_irq bit */
3785 ew32(IMC, ~0);
3786 E1000_WRITE_FLUSH();
3788 if (likely(napi_schedule_prep(&adapter->napi))) {
3789 adapter->total_tx_bytes = 0;
3790 adapter->total_tx_packets = 0;
3791 adapter->total_rx_bytes = 0;
3792 adapter->total_rx_packets = 0;
3793 __napi_schedule(&adapter->napi);
3794 } else {
3795 /* this really should not happen! if it does it is basically a
3796 * bug, but not a hard error, so enable ints and continue
3797 */
3798 if (!test_bit(__E1000_DOWN, &adapter->flags))
3799 e1000_irq_enable(adapter);
3800 }
3802 return IRQ_HANDLED;
3803 }
3805 /**
3806  * e1000_clean - NAPI Rx polling callback
3807  * @adapter: board private structure
3808  */
3809 static int e1000_clean(struct napi_struct *napi, int budget)
3810 {
3811 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3812 napi);
3813 int tx_clean_complete = 0, work_done = 0;
3815 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3817 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3819 if (!tx_clean_complete)
3820 work_done = budget;
3822 /* If budget not fully consumed, exit the polling mode */
3823 if (work_done < budget) {
3824 if (likely(adapter->itr_setting & 3))
3825 e1000_set_itr(adapter);
3826 napi_complete(napi);
3827 if (!test_bit(__E1000_DOWN, &adapter->flags))
3828 e1000_irq_enable(adapter);
3829 }
3831 return work_done;
3832 }
3834 /**
3835  * e1000_clean_tx_irq - Reclaim resources after transmit completes
3836  * @adapter: board private structure
3837  */
3838 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3839 struct e1000_tx_ring *tx_ring)
3840 {
3841 struct e1000_hw *hw = &adapter->hw;
3842 struct net_device *netdev = adapter->netdev;
3843 struct e1000_tx_desc *tx_desc, *eop_desc;
3844 struct e1000_buffer *buffer_info;
3845 unsigned int i, eop;
3846 unsigned int count = 0;
3847 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3848 unsigned int bytes_compl = 0, pkts_compl = 0;
3850 i = tx_ring->next_to_clean;
3851 eop = tx_ring->buffer_info[i].next_to_watch;
3852 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3854 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3855 (count < tx_ring->count)) {
3856 bool cleaned = false;
3857 rmb(); /* read buffer_info after eop_desc */
3858 for ( ; !cleaned; count++) {
3859 tx_desc = E1000_TX_DESC(*tx_ring, i);
3860 buffer_info = &tx_ring->buffer_info[i];
3861 cleaned = (i == eop);
3863 if (cleaned) {
3864 total_tx_packets += buffer_info->segs;
3865 total_tx_bytes += buffer_info->bytecount;
3866 if (buffer_info->skb) {
3867 bytes_compl += buffer_info->skb->len;
3868 pkts_compl++;
3869 }
3871 }
3872 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3873 tx_desc->upper.data = 0;
3875 if (unlikely(++i == tx_ring->count)) i = 0;
3876 }
3878 eop = tx_ring->buffer_info[i].next_to_watch;
3879 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3880 }
3882 tx_ring->next_to_clean = i;
3884 netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3886 #define TX_WAKE_THRESHOLD 32
3887 if (unlikely(count && netif_carrier_ok(netdev) &&
3888 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3889 /* Make sure that anybody stopping the queue after this
3890 * sees the new next_to_clean.
3891 */
3892 smp_mb();
3894 if (netif_queue_stopped(netdev) &&
3895 !(test_bit(__E1000_DOWN, &adapter->flags))) {
3896 netif_wake_queue(netdev);
3897 ++adapter->restart_queue;
3898 }
3899 }
3901 if (adapter->detect_tx_hung) {
3902 /* Detect a transmit hang in hardware, this serializes the
3903 * check with the clearing of time_stamp and movement of i
3904 */
3905 adapter->detect_tx_hung = false;
3906 if (tx_ring->buffer_info[eop].time_stamp &&
3907 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3908 (adapter->tx_timeout_factor * HZ)) &&
3909 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3911 /* detected Tx unit hang */
3912 e_err(drv, "Detected Tx Unit Hang\n"
3916 " next_to_use <%x>\n"
3917 " next_to_clean <%x>\n"
3918 "buffer_info[next_to_clean]\n"
3919 " time_stamp <%lx>\n"
3920 " next_to_watch <%x>\n"
3922 " next_to_watch.status <%x>\n",
3923 (unsigned long)((tx_ring - adapter->tx_ring) /
3924 sizeof(struct e1000_tx_ring)),
3925 readl(hw->hw_addr + tx_ring->tdh),
3926 readl(hw->hw_addr + tx_ring->tdt),
3927 tx_ring->next_to_use,
3928 tx_ring->next_to_clean,
3929 tx_ring->buffer_info[eop].time_stamp,
3930 eop,
3931 jiffies,
3932 eop_desc->upper.fields.status);
3933 e1000_dump(adapter);
3934 netif_stop_queue(netdev);
3935 }
3936 }
3937 adapter->total_tx_bytes += total_tx_bytes;
3938 adapter->total_tx_packets += total_tx_packets;
3939 netdev->stats.tx_bytes += total_tx_bytes;
3940 netdev->stats.tx_packets += total_tx_packets;
3941 return count < tx_ring->count;
3942 }
3944 /**
3945  * e1000_rx_checksum - Receive Checksum Offload for 82543
3946 * @adapter: board private structure
3947 * @status_err: receive descriptor status and error fields
3948 * @csum: receive descriptor csum field
3949  * @sk_buff: socket buffer with received data
3950  */
3951 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3952 u32 csum, struct sk_buff *skb)
3953 {
3954 struct e1000_hw *hw = &adapter->hw;
3955 u16 status = (u16)status_err;
3956 u8 errors = (u8)(status_err >> 24);
3958 skb_checksum_none_assert(skb);
3960 /* 82543 or newer only */
3961 if (unlikely(hw->mac_type < e1000_82543)) return;
3962 /* Ignore Checksum bit is set */
3963 if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
3964 /* TCP/UDP checksum error bit is set */
3965 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3966 /* let the stack verify checksum errors */
3967 adapter->hw_csum_err++;
3968 return;
3969 }
3970 /* TCP/UDP Checksum has not been calculated */
3971 if (!(status & E1000_RXD_STAT_TCPCS))
3972 return;
3974 /* It must be a TCP or UDP packet with a valid checksum */
3975 if (likely(status & E1000_RXD_STAT_TCPCS)) {
3976 /* TCP checksum is good */
3977 skb->ip_summed = CHECKSUM_UNNECESSARY;
3978 }
3979 adapter->hw_csum_good++;
3980 }
3982 /**
3983  * e1000_consume_page - helper function
3984  **/
3985 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
3986 u16 length)
3987 {
3988 bi->page = NULL;
3989 skb->len += length;
3990 skb->data_len += length;
3991 skb->truesize += PAGE_SIZE;
3992 }
3994 /**
3995  * e1000_receive_skb - helper function to handle rx indications
3996 * @adapter: board private structure
3997 * @status: descriptor status field as written by hardware
3998 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3999  * @skb: pointer to sk_buff to be indicated to stack
4000  */
4001 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4002 __le16 vlan, struct sk_buff *skb)
4003 {
4004 skb->protocol = eth_type_trans(skb, adapter->netdev);
4006 if (status & E1000_RXD_STAT_VP) {
4007 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4009 __vlan_hwaccel_put_tag(skb, vid);
4010 }
4011 napi_gro_receive(&adapter->napi, skb);
4012 }
4014 /**
4015  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4016 * @adapter: board private structure
4017 * @rx_ring: ring to clean
4018 * @work_done: amount of napi work completed this call
4019 * @work_to_do: max amount of work allowed for this call to do
4021 * the return value indicates whether actual cleaning was done, there
4022  * is no guarantee that everything was cleaned
4023  */
4024 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4025 struct e1000_rx_ring *rx_ring,
4026 int *work_done, int work_to_do)
4027 {
4028 struct e1000_hw *hw = &adapter->hw;
4029 struct net_device *netdev = adapter->netdev;
4030 struct pci_dev *pdev = adapter->pdev;
4031 struct e1000_rx_desc *rx_desc, *next_rxd;
4032 struct e1000_buffer *buffer_info, *next_buffer;
4033 unsigned long irq_flags;
4034 u32 length;
4035 unsigned int i;
4036 int cleaned_count = 0;
4037 bool cleaned = false;
4038 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4040 i = rx_ring->next_to_clean;
4041 rx_desc = E1000_RX_DESC(*rx_ring, i);
4042 buffer_info = &rx_ring->buffer_info[i];
4044 while (rx_desc->status & E1000_RXD_STAT_DD) {
4045 struct sk_buff *skb;
4046 u8 status;
4048 if (*work_done >= work_to_do)
4049 break;
4050 (*work_done)++;
4051 rmb(); /* read descriptor and rx_buffer_info after status DD */
4053 status = rx_desc->status;
4054 skb = buffer_info->skb;
4055 buffer_info->skb = NULL;
4057 if (++i == rx_ring->count) i = 0;
4058 next_rxd = E1000_RX_DESC(*rx_ring, i);
4059 prefetch(next_rxd);
4061 next_buffer = &rx_ring->buffer_info[i];
4063 cleaned = true;
4064 cleaned_count++;
4065 dma_unmap_page(&pdev->dev, buffer_info->dma,
4066 buffer_info->length, DMA_FROM_DEVICE);
4067 buffer_info->dma = 0;
4069 length = le16_to_cpu(rx_desc->length);
4071 /* errors is only valid for DD + EOP descriptors */
4072 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4073 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4074 u8 *mapped;
4075 u8 last_byte;
4077 mapped = page_address(buffer_info->page);
4078 last_byte = *(mapped + length - 1);
4079 if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4080 last_byte)) {
4081 spin_lock_irqsave(&adapter->stats_lock,
4082 irq_flags);
4083 e1000_tbi_adjust_stats(hw, &adapter->stats,
4084 length, mapped);
4085 spin_unlock_irqrestore(&adapter->stats_lock,
4086 irq_flags);
4087 length--;
4088 } else {
4089 if (netdev->features & NETIF_F_RXALL)
4090 goto process_skb;
4091 /* recycle both page and skb */
4092 buffer_info->skb = skb;
4093 /* an error means any chain goes out the window
4094 * too
4095 */
4096 if (rx_ring->rx_skb_top)
4097 dev_kfree_skb(rx_ring->rx_skb_top);
4098 rx_ring->rx_skb_top = NULL;
4099 goto next_desc;
4100 }
4101 }
4103 #define rxtop rx_ring->rx_skb_top
4104 process_skb:
4105 if (!(status & E1000_RXD_STAT_EOP)) {
4106 /* this descriptor is only the beginning (or middle) */
4107 if (!rxtop) {
4108 /* this is the beginning of a chain */
4109 rxtop = skb;
4110 skb_fill_page_desc(rxtop, 0, buffer_info->page,
4111 0, length);
4112 } else {
4113 /* this is the middle of a chain */
4114 skb_fill_page_desc(rxtop,
4115 skb_shinfo(rxtop)->nr_frags,
4116 buffer_info->page, 0, length);
4117 /* re-use the skb, only consumed the page */
4118 buffer_info->skb = skb;
4119 }
4120 e1000_consume_page(buffer_info, rxtop, length);
4121 goto next_desc;
4122 } else {
4123 if (rxtop) {
4124 /* end of the chain */
4125 skb_fill_page_desc(rxtop,
4126 skb_shinfo(rxtop)->nr_frags,
4127 buffer_info->page, 0, length);
4128 /* re-use the current skb, we only consumed the
4129 * page
4130 */
4131 buffer_info->skb = skb;
4132 skb = rxtop;
4133 rxtop = NULL;
4134 e1000_consume_page(buffer_info, skb, length);
4135 } else {
4136 /* no chain, got EOP, this buf is the packet
4137 * copybreak to save the put_page/alloc_page
4138 */
4139 if (length <= copybreak &&
4140 skb_tailroom(skb) >= length) {
4141 u8 *vaddr;
4142 vaddr = kmap_atomic(buffer_info->page);
4143 memcpy(skb_tail_pointer(skb), vaddr,
4145 kunmap_atomic(vaddr);
4146 /* re-use the page, so don't erase
4149 skb_put(skb, length);
4151 skb_fill_page_desc(skb, 0,
4152 buffer_info->page, 0,
4154 e1000_consume_page(buffer_info, skb,
		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter, (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		total_rx_bytes += (skb->len - 4); /* don't count FCS */
		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			pskb_trim(skb, skb->len - 4);
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err(drv, "pskb_may_pull failed.\n");
			dev_kfree_skb(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, status, rx_desc->special, skb);
next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
/* this should improve performance for small packets with large amounts
 * of reassembly being done in the stack
 */
static void e1000_check_copybreak(struct net_device *netdev,
				  struct e1000_buffer *buffer_info,
				  u32 length, struct sk_buff **skb)
{
	struct sk_buff *new_skb;

	if (length > copybreak)
		return;

	new_skb = netdev_alloc_skb_ip_align(netdev, length);
	if (!new_skb)
		return;

	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
				       (*skb)->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
	/* save the skb in buffer_info as good */
	buffer_info->skb = *skb;
	*skb = new_skb;
}
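/* copybreak is a module parameter (in bytes); frames no larger than it
 * are copied into a fresh, right-sized skb so the original full-sized
 * receive buffer can be recycled in place. The caller's pattern in
 * e1000_clean_rx_irq() below is:
 *
 *	e1000_check_copybreak(netdev, buffer_info, length, &skb);
 *	skb_put(skb, length);	(skb now points at whichever buffer won)
 */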
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 */
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];
	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);
		/* !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it. In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->discarding = true;

		if (adapter->discarding) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (status & E1000_RXD_STAT_EOP)
				adapter->discarding = false;
			goto next_desc;
		}
		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			u8 last_byte = *(skb->data + length - 1);

			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock, flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       flags);
				length--;
			} else {
				if (netdev->features & NETIF_F_RXALL)
					goto process_skb;
				/* recycle */
				buffer_info->skb = skb;
				goto next_desc;
			}
		}

process_skb:
		total_rx_bytes += (length - 4); /* don't count FCS */
		total_rx_packets++;

		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			/* adjust length to remove Ethernet CRC, this must be
			 * done after the TBI_ACCEPT workaround above
			 */
			length -= 4;

		e1000_check_copybreak(netdev, buffer_info, length, &skb);
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter, (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);
		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
			     struct e1000_rx_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16; /* for skb_reserve */

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];
	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							buffer_info->length,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				put_page(buffer_info->page);
				dev_kfree_skb(skb);
				buffer_info->page = NULL;
				buffer_info->skb = NULL;
				buffer_info->dma = 0;
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}
		}
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
	}
}
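/* Note the i-- before the writel() above: the tail register (RDT) is
 * programmed with the index of the last initialized descriptor rather
 * than next_to_use itself, keeping a one-descriptor gap between the
 * software and hardware positions in the ring.
 */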
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];
	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct sk_buff *oldskb = skb;

			e_err(rx_err, "skb align check failed: %u bytes at %p\n",
			      bufsz, skb->data);
			/* Try again, without freeing the previous */
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			/* Failed allocation, critical failure */
			if (!skb) {
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}

			/* Use new allocation */
			dev_kfree_skb(oldskb);
		}
		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}

		/* XXX if it was allocated cleanly it will never map to a
		 * boundary crossing
		 */

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					   (void *)(unsigned long)buffer_info->dma,
					   adapter->rx_buffer_len)) {
			e_err(rx_err, "dma align check failed: %u bytes at %p\n",
			      adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;

			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;

			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}
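/* The errata 23 workaround above checks alignment twice: once on the
 * skb's virtual address before mapping (cheap to retry with a fresh
 * allocation) and once on the resulting bus address, since the DMA
 * mapping could in principle still place the buffer across a 64 kB
 * boundary that the controller cannot DMA over.
 */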
/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: board private structure
 **/
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	    !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back
		 */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
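/* SmartSpeed is a small state machine driven from the watchdog while link
 * is down: the first pass clears the PHY's manual Master/Slave enable
 * after back-to-back config faults, a later pass (at
 * E1000_SMARTSPEED_DOWNSHIFT) re-enables it on the theory that the cable
 * only has two or three good pairs, and the counter wraps once it
 * reaches E1000_SMARTSPEED_MAX so the process can restart.
 */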
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	unsigned long flags;

	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num, mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		if (hw->media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					hw->autoneg_advertised = 0x2F;
				} else {
					u32 speed;

					if (mii_reg & 0x40)
						speed = SPEED_1000;
					else if (mii_reg & 0x2000)
						speed = SPEED_100;
					else
						speed = SPEED_10;
					retval = e1000_set_spd_dplx(
						adapter, speed,
						(mii_reg & 0x100)
						? DUPLEX_FULL : DUPLEX_HALF);
					if (retval)
						return retval;
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}
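/* Userspace can exercise this path with the standard MII ioctls; a
 * sketch (error handling omitted, assumes fd is an AF_INET socket and
 * "eth0" is the interface bound to this driver):
 *
 *	struct ifreq ifr = { };
 *	struct mii_ioctl_data *mii =
 *		(struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	(fills mii->phy_id)
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);	(basic status into mii->val_out)
 */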
void e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		e_err(probe, "Error in setting MWI\n");
}

void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	pci_clear_mwi(adapter->pdev);
}

int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return pcix_get_mmrbc(adapter->pdev);
}

void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
{
	struct e1000_adapter *adapter = hw->back;
	pcix_set_mmrbc(adapter->pdev, mmrbc);
}

void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
	outl(value, port);
}
static bool e1000_vlan_used(struct e1000_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		return true;
	return false;
}
static void __e1000_vlan_mode(struct e1000_adapter *adapter,
			      netdev_features_t features)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl;

	ctrl = er32(CTRL);
	if (features & NETIF_F_HW_VLAN_RX) {
		/* enable VLAN tag insert/strip */
		ctrl |= E1000_CTRL_VME;
	} else {
		/* disable VLAN tag insert/strip */
		ctrl &= ~E1000_CTRL_VME;
	}
	ew32(CTRL, ctrl);
}
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, adapter->netdev->features);
	if (filter_on) {
		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		if (!(adapter->netdev->flags & IFF_PROMISC))
			rctl |= E1000_RCTL_VFE;
		ew32(RCTL, rctl);
		e1000_update_mng_vlan(adapter);
	} else {
		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_VFE;
		ew32(RCTL, rctl);
	}

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, features);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, true);

	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	set_bit(vid, adapter->active_vlans);

	return 0;
}
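/* The VLAN filter table (VFTA) is a 4096-bit bitmap spread over 128
 * 32-bit registers: bits 11:5 of the VID select the register and bits
 * 4:0 select the bit within it. For example, VID 100 lands in register
 * index 3 (100 >> 5), bit 4 (100 & 0x1F).
 */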
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	clear_bit(vid, adapter->active_vlans);

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, false);

	return 0;
}
static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	if (!e1000_vlan_used(adapter))
		return;
	e1000_vlan_filter_on_off(adapter, true);
	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, vid);
}
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps Full duplex */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    spd != SPEED_1000 &&
	    dplx != DUPLEX_FULL)
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}

	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
	hw->mdix = AUTO_ALL_MODES;

	return 0;

err_inval:
	e_err(probe, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}
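/* The switch() above works because the SPEED_* values are even multiples
 * of ten while DUPLEX_HALF/DUPLEX_FULL are 0/1, so spd + dplx is unique:
 * e.g. SPEED_100 + DUPLEX_FULL = 101, which no other valid combination
 * can produce; the (spd & 1) || (dplx & ~1) guard enforces exactly that.
 */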
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
	int retval = 0;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		rctl = er32(RCTL);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC)
			rctl |= E1000_RCTL_MPE;

		/* enable receives in the hardware */
		ew32(RCTL, rctl | E1000_RCTL_EN);

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	pci_disable_device(pdev);

	return 0;
}
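/* The caller decides what to do with *enable_wake: the suspend and
 * shutdown paths below use it to choose whether the device is armed for
 * PME wake before being placed in D3.
 */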
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__e1000_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			pr_info("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}