Linux 3.9-rc8
[firefly-linux-kernel-4.4.55.git] drivers/net/ethernet/intel/e1000/e1000_main.c
1 /*******************************************************************************
2
3   Intel PRO/1000 Linux driver
4   Copyright(c) 1999 - 2006 Intel Corporation.
5
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21
22   Contact Information:
23   Linux NICS <linux.nics@intel.com>
24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 #include "e1000.h"
30 #include <net/ip6_checksum.h>
31 #include <linux/io.h>
32 #include <linux/prefetch.h>
33 #include <linux/bitops.h>
34 #include <linux/if_vlan.h>
35
36 char e1000_driver_name[] = "e1000";
37 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
38 #define DRV_VERSION "7.3.21-k8-NAPI"
39 const char e1000_driver_version[] = DRV_VERSION;
40 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
41
42 /* e1000_pci_tbl - PCI Device ID Table
43  *
44  * Last entry must be all 0s
45  *
46  * Macro expands to...
47  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
48  */
49 static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
50         INTEL_E1000_ETHERNET_DEVICE(0x1000),
51         INTEL_E1000_ETHERNET_DEVICE(0x1001),
52         INTEL_E1000_ETHERNET_DEVICE(0x1004),
53         INTEL_E1000_ETHERNET_DEVICE(0x1008),
54         INTEL_E1000_ETHERNET_DEVICE(0x1009),
55         INTEL_E1000_ETHERNET_DEVICE(0x100C),
56         INTEL_E1000_ETHERNET_DEVICE(0x100D),
57         INTEL_E1000_ETHERNET_DEVICE(0x100E),
58         INTEL_E1000_ETHERNET_DEVICE(0x100F),
59         INTEL_E1000_ETHERNET_DEVICE(0x1010),
60         INTEL_E1000_ETHERNET_DEVICE(0x1011),
61         INTEL_E1000_ETHERNET_DEVICE(0x1012),
62         INTEL_E1000_ETHERNET_DEVICE(0x1013),
63         INTEL_E1000_ETHERNET_DEVICE(0x1014),
64         INTEL_E1000_ETHERNET_DEVICE(0x1015),
65         INTEL_E1000_ETHERNET_DEVICE(0x1016),
66         INTEL_E1000_ETHERNET_DEVICE(0x1017),
67         INTEL_E1000_ETHERNET_DEVICE(0x1018),
68         INTEL_E1000_ETHERNET_DEVICE(0x1019),
69         INTEL_E1000_ETHERNET_DEVICE(0x101A),
70         INTEL_E1000_ETHERNET_DEVICE(0x101D),
71         INTEL_E1000_ETHERNET_DEVICE(0x101E),
72         INTEL_E1000_ETHERNET_DEVICE(0x1026),
73         INTEL_E1000_ETHERNET_DEVICE(0x1027),
74         INTEL_E1000_ETHERNET_DEVICE(0x1028),
75         INTEL_E1000_ETHERNET_DEVICE(0x1075),
76         INTEL_E1000_ETHERNET_DEVICE(0x1076),
77         INTEL_E1000_ETHERNET_DEVICE(0x1077),
78         INTEL_E1000_ETHERNET_DEVICE(0x1078),
79         INTEL_E1000_ETHERNET_DEVICE(0x1079),
80         INTEL_E1000_ETHERNET_DEVICE(0x107A),
81         INTEL_E1000_ETHERNET_DEVICE(0x107B),
82         INTEL_E1000_ETHERNET_DEVICE(0x107C),
83         INTEL_E1000_ETHERNET_DEVICE(0x108A),
84         INTEL_E1000_ETHERNET_DEVICE(0x1099),
85         INTEL_E1000_ETHERNET_DEVICE(0x10B5),
86         INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
87         /* required last entry */
88         {0,}
89 };
90
91 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
92
93 int e1000_up(struct e1000_adapter *adapter);
94 void e1000_down(struct e1000_adapter *adapter);
95 void e1000_reinit_locked(struct e1000_adapter *adapter);
96 void e1000_reset(struct e1000_adapter *adapter);
97 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
98 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
99 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
100 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
101 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
102                              struct e1000_tx_ring *txdr);
103 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
104                              struct e1000_rx_ring *rxdr);
105 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
106                              struct e1000_tx_ring *tx_ring);
107 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
108                              struct e1000_rx_ring *rx_ring);
109 void e1000_update_stats(struct e1000_adapter *adapter);
110
111 static int e1000_init_module(void);
112 static void e1000_exit_module(void);
113 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
114 static void e1000_remove(struct pci_dev *pdev);
115 static int e1000_alloc_queues(struct e1000_adapter *adapter);
116 static int e1000_sw_init(struct e1000_adapter *adapter);
117 static int e1000_open(struct net_device *netdev);
118 static int e1000_close(struct net_device *netdev);
119 static void e1000_configure_tx(struct e1000_adapter *adapter);
120 static void e1000_configure_rx(struct e1000_adapter *adapter);
121 static void e1000_setup_rctl(struct e1000_adapter *adapter);
122 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
123 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
124 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
125                                 struct e1000_tx_ring *tx_ring);
126 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
127                                 struct e1000_rx_ring *rx_ring);
128 static void e1000_set_rx_mode(struct net_device *netdev);
129 static void e1000_update_phy_info_task(struct work_struct *work);
130 static void e1000_watchdog(struct work_struct *work);
131 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
132 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
133                                     struct net_device *netdev);
134 static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
135 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
136 static int e1000_set_mac(struct net_device *netdev, void *p);
137 static irqreturn_t e1000_intr(int irq, void *data);
138 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
139                                struct e1000_tx_ring *tx_ring);
140 static int e1000_clean(struct napi_struct *napi, int budget);
141 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
142                                struct e1000_rx_ring *rx_ring,
143                                int *work_done, int work_to_do);
144 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
145                                      struct e1000_rx_ring *rx_ring,
146                                      int *work_done, int work_to_do);
147 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
148                                    struct e1000_rx_ring *rx_ring,
149                                    int cleaned_count);
150 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
151                                          struct e1000_rx_ring *rx_ring,
152                                          int cleaned_count);
153 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
154 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
155                            int cmd);
156 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
157 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
158 static void e1000_tx_timeout(struct net_device *dev);
159 static void e1000_reset_task(struct work_struct *work);
160 static void e1000_smartspeed(struct e1000_adapter *adapter);
161 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
162                                        struct sk_buff *skb);
163
164 static bool e1000_vlan_used(struct e1000_adapter *adapter);
165 static void e1000_vlan_mode(struct net_device *netdev,
166                             netdev_features_t features);
167 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
168                                      bool filter_on);
169 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
170 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
171 static void e1000_restore_vlan(struct e1000_adapter *adapter);
172
173 #ifdef CONFIG_PM
174 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
175 static int e1000_resume(struct pci_dev *pdev);
176 #endif
177 static void e1000_shutdown(struct pci_dev *pdev);
178
179 #ifdef CONFIG_NET_POLL_CONTROLLER
180 /* for netdump / net console */
181 static void e1000_netpoll(struct net_device *netdev);
182 #endif
183
184 #define COPYBREAK_DEFAULT 256
185 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
186 module_param(copybreak, uint, 0644);
187 MODULE_PARM_DESC(copybreak,
188         "Maximum size of packet that is copied to a new buffer on receive");
189
190 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
191                      pci_channel_state_t state);
192 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
193 static void e1000_io_resume(struct pci_dev *pdev);
194
195 static const struct pci_error_handlers e1000_err_handler = {
196         .error_detected = e1000_io_error_detected,
197         .slot_reset = e1000_io_slot_reset,
198         .resume = e1000_io_resume,
199 };
200
201 static struct pci_driver e1000_driver = {
202         .name     = e1000_driver_name,
203         .id_table = e1000_pci_tbl,
204         .probe    = e1000_probe,
205         .remove   = e1000_remove,
206 #ifdef CONFIG_PM
207         /* Power Management Hooks */
208         .suspend  = e1000_suspend,
209         .resume   = e1000_resume,
210 #endif
211         .shutdown = e1000_shutdown,
212         .err_handler = &e1000_err_handler
213 };
214
215 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
216 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_VERSION);
219
220 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
221 static int debug = -1;
222 module_param(debug, int, 0);
223 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
224
225 /**
226  * e1000_get_hw_dev - return the net_device backing a hw struct
227  * @hw: pointer to the HW struct
228  * Used by the hardware layer to print debugging information.
229  **/
230 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
231 {
232         struct e1000_adapter *adapter = hw->back;
233         return adapter->netdev;
234 }
235
236 /**
237  * e1000_init_module - Driver Registration Routine
238  *
239  * e1000_init_module is the first routine called when the driver is
240  * loaded. All it does is register with the PCI subsystem.
241  **/
242 static int __init e1000_init_module(void)
243 {
244         int ret;
245         pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
246
247         pr_info("%s\n", e1000_copyright);
248
249         ret = pci_register_driver(&e1000_driver);
250         if (copybreak != COPYBREAK_DEFAULT) {
251                 if (copybreak == 0)
252                         pr_info("copybreak disabled\n");
253                 else
254                         pr_info("copybreak enabled for packets <= %u bytes\n",
255                                 copybreak);
256         }
257         return ret;
258 }
259
260 module_init(e1000_init_module);
261
262 /**
263  * e1000_exit_module - Driver Exit Cleanup Routine
264  *
265  * e1000_exit_module is called just before the driver is removed
266  * from memory.
267  **/
268 static void __exit e1000_exit_module(void)
269 {
270         pci_unregister_driver(&e1000_driver);
271 }
272
273 module_exit(e1000_exit_module);
274
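/**
 * e1000_request_irq - register the adapter's interrupt handler
 * @adapter: board private structure
 *
 * Requests the legacy (INTx) interrupt on the shared PCI line.
 * Returns 0 on success, or the error from request_irq() on failure.
 **/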
275 static int e1000_request_irq(struct e1000_adapter *adapter)
276 {
277         struct net_device *netdev = adapter->netdev;
278         irq_handler_t handler = e1000_intr;
279         int irq_flags = IRQF_SHARED;
280         int err;
281
282         err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
283                           netdev);
284         if (err) {
285                 e_err(probe, "Unable to allocate interrupt, Error: %d\n", err);
286         }
287
288         return err;
289 }
290
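/**
 * e1000_free_irq - release the interrupt line used by the adapter
 * @adapter: board private structure
 **/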
291 static void e1000_free_irq(struct e1000_adapter *adapter)
292 {
293         struct net_device *netdev = adapter->netdev;
294
295         free_irq(adapter->pdev->irq, netdev);
296 }
297
298 /**
299  * e1000_irq_disable - Mask off interrupt generation on the NIC
300  * @adapter: board private structure
301  **/
302 static void e1000_irq_disable(struct e1000_adapter *adapter)
303 {
304         struct e1000_hw *hw = &adapter->hw;
305
306         ew32(IMC, ~0);
307         E1000_WRITE_FLUSH();
308         synchronize_irq(adapter->pdev->irq);
309 }
310
311 /**
312  * e1000_irq_enable - Enable default interrupt generation settings
313  * @adapter: board private structure
314  **/
315 static void e1000_irq_enable(struct e1000_adapter *adapter)
316 {
317         struct e1000_hw *hw = &adapter->hw;
318
319         ew32(IMS, IMS_ENABLE_MASK);
320         E1000_WRITE_FLUSH();
321 }
322
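/**
 * e1000_update_mng_vlan - track the manageability VLAN id
 * @adapter: board private structure
 *
 * Keeps the VLAN id from the manageability (DHCP) cookie registered in the
 * VLAN filter while VLANs are in use, and drops the previously tracked id
 * once it is no longer needed by either the firmware or the stack.
 **/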
323 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
324 {
325         struct e1000_hw *hw = &adapter->hw;
326         struct net_device *netdev = adapter->netdev;
327         u16 vid = hw->mng_cookie.vlan_id;
328         u16 old_vid = adapter->mng_vlan_id;
329
330         if (!e1000_vlan_used(adapter))
331                 return;
332
333         if (!test_bit(vid, adapter->active_vlans)) {
334                 if (hw->mng_cookie.status &
335                     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
336                         e1000_vlan_rx_add_vid(netdev, vid);
337                         adapter->mng_vlan_id = vid;
338                 } else {
339                         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
340                 }
341                 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
342                     (vid != old_vid) &&
343                     !test_bit(old_vid, adapter->active_vlans))
344                         e1000_vlan_rx_kill_vid(netdev, old_vid);
345         } else {
346                 adapter->mng_vlan_id = vid;
347         }
348 }
349
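/**
 * e1000_init_manageability - stop hardware interception of ARP
 * @adapter: board private structure
 *
 * If management pass-through is enabled, clear MANC.ARP_EN so ARP packets
 * reach the host stack instead of being intercepted by the hardware.
 **/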
350 static void e1000_init_manageability(struct e1000_adapter *adapter)
351 {
352         struct e1000_hw *hw = &adapter->hw;
353
354         if (adapter->en_mng_pt) {
355                 u32 manc = er32(MANC);
356
357                 /* disable hardware interception of ARP */
358                 manc &= ~(E1000_MANC_ARP_EN);
359
360                 ew32(MANC, manc);
361         }
362 }
363
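/**
 * e1000_release_manageability - hand ARP interception back to the firmware
 * @adapter: board private structure
 *
 * Counterpart of e1000_init_manageability(); restores MANC.ARP_EN when the
 * driver releases or resets the device.
 **/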
364 static void e1000_release_manageability(struct e1000_adapter *adapter)
365 {
366         struct e1000_hw *hw = &adapter->hw;
367
368         if (adapter->en_mng_pt) {
369                 u32 manc = er32(MANC);
370
371                 /* re-enable hardware interception of ARP */
372                 manc |= E1000_MANC_ARP_EN;
373
374                 ew32(MANC, manc);
375         }
376 }
377
378 /**
379  * e1000_configure - configure the hardware for RX and TX
380  * @adapter: board private structure
381  **/
382 static void e1000_configure(struct e1000_adapter *adapter)
383 {
384         struct net_device *netdev = adapter->netdev;
385         int i;
386
387         e1000_set_rx_mode(netdev);
388
389         e1000_restore_vlan(adapter);
390         e1000_init_manageability(adapter);
391
392         e1000_configure_tx(adapter);
393         e1000_setup_rctl(adapter);
394         e1000_configure_rx(adapter);
395         /* call E1000_DESC_UNUSED which always leaves
396          * at least 1 descriptor unused to make sure
397          * next_to_use != next_to_clean
398          */
399         for (i = 0; i < adapter->num_rx_queues; i++) {
400                 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
401                 adapter->alloc_rx_buf(adapter, ring,
402                                       E1000_DESC_UNUSED(ring));
403         }
404 }
405
406 int e1000_up(struct e1000_adapter *adapter)
407 {
408         struct e1000_hw *hw = &adapter->hw;
409
410         /* hardware has been reset, we need to reload some things */
411         e1000_configure(adapter);
412
413         clear_bit(__E1000_DOWN, &adapter->flags);
414
415         napi_enable(&adapter->napi);
416
417         e1000_irq_enable(adapter);
418
419         netif_wake_queue(adapter->netdev);
420
421         /* fire a link change interrupt to start the watchdog */
422         ew32(ICS, E1000_ICS_LSC);
423         return 0;
424 }
425
426 /**
427  * e1000_power_up_phy - restore link in case the phy was powered down
428  * @adapter: address of board private structure
429  *
430  * The phy may be powered down to save power and turn off link when the
431  * driver is unloaded and wake on lan is not enabled (among others)
432  * *** this routine MUST be followed by a call to e1000_reset ***
433  **/
434 void e1000_power_up_phy(struct e1000_adapter *adapter)
435 {
436         struct e1000_hw *hw = &adapter->hw;
437         u16 mii_reg = 0;
438
439         /* Just clear the power down bit to wake the phy back up */
440         if (hw->media_type == e1000_media_type_copper) {
441                 /* according to the manual, the phy will retain its
442                  * settings across a power-down/up cycle
443                  */
444                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
445                 mii_reg &= ~MII_CR_POWER_DOWN;
446                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
447         }
448 }
449
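/**
 * e1000_power_down_phy - power down the PHY when the interface goes down
 * @adapter: board private structure
 *
 * The PHY is left powered when WoL is enabled, on pre-82540 or non-copper
 * parts, or while the management interface (MANC.SMBUS_EN) still needs the
 * link.
 **/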
450 static void e1000_power_down_phy(struct e1000_adapter *adapter)
451 {
452         struct e1000_hw *hw = &adapter->hw;
453
454         /* Power down the PHY so no link is implied when the interface is down.
455          * The PHY cannot be powered down if any of the following is true:
456          * (a) WoL is enabled
457          * (b) AMT is active
458          * (c) SoL/IDER session is active
459          */
460         if (!adapter->wol && hw->mac_type >= e1000_82540 &&
461            hw->media_type == e1000_media_type_copper) {
462                 u16 mii_reg = 0;
463
464                 switch (hw->mac_type) {
465                 case e1000_82540:
466                 case e1000_82545:
467                 case e1000_82545_rev_3:
468                 case e1000_82546:
469                 case e1000_ce4100:
470                 case e1000_82546_rev_3:
471                 case e1000_82541:
472                 case e1000_82541_rev_2:
473                 case e1000_82547:
474                 case e1000_82547_rev_2:
475                         if (er32(MANC) & E1000_MANC_SMBUS_EN)
476                                 goto out;
477                         break;
478                 default:
479                         goto out;
480                 }
481                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
482                 mii_reg |= MII_CR_POWER_DOWN;
483                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
484                 msleep(1);
485         }
486 out:
487         return;
488 }
489
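/**
 * e1000_down_and_stop - mark the adapter down and cancel deferred work
 * @adapter: board private structure
 *
 * Sets __E1000_DOWN and synchronously cancels the watchdog, PHY info and
 * FIFO stall work; the reset task is only cancelled when the adapter is
 * not currently resetting.
 **/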
490 static void e1000_down_and_stop(struct e1000_adapter *adapter)
491 {
492         set_bit(__E1000_DOWN, &adapter->flags);
493
494         /* Only kill reset task if adapter is not resetting */
495         if (!test_bit(__E1000_RESETTING, &adapter->flags))
496                 cancel_work_sync(&adapter->reset_task);
497
498         cancel_delayed_work_sync(&adapter->watchdog_task);
499         cancel_delayed_work_sync(&adapter->phy_info_task);
500         cancel_delayed_work_sync(&adapter->fifo_stall_task);
501 }
502
503 void e1000_down(struct e1000_adapter *adapter)
504 {
505         struct e1000_hw *hw = &adapter->hw;
506         struct net_device *netdev = adapter->netdev;
507         u32 rctl, tctl;
508
509
510         /* disable receives in the hardware */
511         rctl = er32(RCTL);
512         ew32(RCTL, rctl & ~E1000_RCTL_EN);
513         /* flush and sleep below */
514
515         netif_tx_disable(netdev);
516
517         /* disable transmits in the hardware */
518         tctl = er32(TCTL);
519         tctl &= ~E1000_TCTL_EN;
520         ew32(TCTL, tctl);
521         /* flush both disables and wait for them to finish */
522         E1000_WRITE_FLUSH();
523         msleep(10);
524
525         napi_disable(&adapter->napi);
526
527         e1000_irq_disable(adapter);
528
529         /* Setting DOWN must be after irq_disable to prevent
530          * a screaming interrupt.  Setting DOWN also prevents
531          * tasks from rescheduling.
532          */
533         e1000_down_and_stop(adapter);
534
535         adapter->link_speed = 0;
536         adapter->link_duplex = 0;
537         netif_carrier_off(netdev);
538
539         e1000_reset(adapter);
540         e1000_clean_all_tx_rings(adapter);
541         e1000_clean_all_rx_rings(adapter);
542 }
543
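/**
 * e1000_reinit_safe - restart the interface under the adapter mutex
 * @adapter: board private structure
 *
 * Waits for any reset in progress, then brings the interface down and back
 * up while holding adapter->mutex rather than relying on the RTNL lock.
 **/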
544 static void e1000_reinit_safe(struct e1000_adapter *adapter)
545 {
546         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
547                 msleep(1);
548         mutex_lock(&adapter->mutex);
549         e1000_down(adapter);
550         e1000_up(adapter);
551         mutex_unlock(&adapter->mutex);
552         clear_bit(__E1000_RESETTING, &adapter->flags);
553 }
554
555 void e1000_reinit_locked(struct e1000_adapter *adapter)
556 {
557         /* if rtnl_lock is not held the call path is bogus */
558         ASSERT_RTNL();
559         WARN_ON(in_interrupt());
560         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
561                 msleep(1);
562         e1000_down(adapter);
563         e1000_up(adapter);
564         clear_bit(__E1000_RESETTING, &adapter->flags);
565 }
566
567 void e1000_reset(struct e1000_adapter *adapter)
568 {
569         struct e1000_hw *hw = &adapter->hw;
570         u32 pba = 0, tx_space, min_tx_space, min_rx_space;
571         bool legacy_pba_adjust = false;
572         u16 hwm;
573
574         /* Repartition PBA for MTUs greater than 9k.
575          * CTRL.RST is required for the change to take effect.
576          */
577
578         switch (hw->mac_type) {
579         case e1000_82542_rev2_0:
580         case e1000_82542_rev2_1:
581         case e1000_82543:
582         case e1000_82544:
583         case e1000_82540:
584         case e1000_82541:
585         case e1000_82541_rev_2:
586                 legacy_pba_adjust = true;
587                 pba = E1000_PBA_48K;
588                 break;
589         case e1000_82545:
590         case e1000_82545_rev_3:
591         case e1000_82546:
592         case e1000_ce4100:
593         case e1000_82546_rev_3:
594                 pba = E1000_PBA_48K;
595                 break;
596         case e1000_82547:
597         case e1000_82547_rev_2:
598                 legacy_pba_adjust = true;
599                 pba = E1000_PBA_30K;
600                 break;
601         case e1000_undefined:
602         case e1000_num_macs:
603                 break;
604         }
605
606         if (legacy_pba_adjust) {
607                 if (hw->max_frame_size > E1000_RXBUFFER_8192)
608                         pba -= 8; /* allocate more FIFO for Tx */
609
610                 if (hw->mac_type == e1000_82547) {
611                         adapter->tx_fifo_head = 0;
612                         adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
613                         adapter->tx_fifo_size =
614                                 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
615                         atomic_set(&adapter->tx_fifo_stall, 0);
616                 }
617         } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
618                 /* adjust PBA for jumbo frames */
619                 ew32(PBA, pba);
620
621                 /* To maintain wire speed transmits, the Tx FIFO should be
622                  * large enough to accommodate two full transmit packets,
623                  * rounded up to the next 1KB and expressed in KB.  Likewise,
624                  * the Rx FIFO should be large enough to accommodate at least
625                  * one full receive packet and is similarly rounded up and
626                  * expressed in KB.
627                  */
628                 pba = er32(PBA);
629                 /* upper 16 bits has Tx packet buffer allocation size in KB */
630                 tx_space = pba >> 16;
631                 /* lower 16 bits has Rx packet buffer allocation size in KB */
632                 pba &= 0xffff;
633                 /* the Tx fifo also stores 16 bytes of information about the Tx
634                  * but don't include ethernet FCS because hardware appends it
635                  */
636                 min_tx_space = (hw->max_frame_size +
637                                 sizeof(struct e1000_tx_desc) -
638                                 ETH_FCS_LEN) * 2;
639                 min_tx_space = ALIGN(min_tx_space, 1024);
640                 min_tx_space >>= 10;
641                 /* software strips receive CRC, so leave room for it */
642                 min_rx_space = hw->max_frame_size;
643                 min_rx_space = ALIGN(min_rx_space, 1024);
644                 min_rx_space >>= 10;
645
646                 /* If current Tx allocation is less than the min Tx FIFO size,
647                  * and the min Tx FIFO size is less than the current Rx FIFO
648                  * allocation, take space away from current Rx allocation
649                  */
650                 if (tx_space < min_tx_space &&
651                     ((min_tx_space - tx_space) < pba)) {
652                         pba = pba - (min_tx_space - tx_space);
653
654                         /* PCI/PCIx hardware has PBA alignment constraints */
655                         switch (hw->mac_type) {
656                         case e1000_82545 ... e1000_82546_rev_3:
657                                 pba &= ~(E1000_PBA_8K - 1);
658                                 break;
659                         default:
660                                 break;
661                         }
662
663                         /* if short on Rx space, Rx wins and must trump Tx
664                          * adjustment or use Early Receive if available
665                          */
666                         if (pba < min_rx_space)
667                                 pba = min_rx_space;
668                 }
669         }
670
671         ew32(PBA, pba);
672
673         /* flow control settings:
674          * The high water mark must be low enough to fit one full frame
675          * (or the size used for early receive) above it in the Rx FIFO.
676          * Set it to the lower of:
677          * - 90% of the Rx FIFO size, and
678          * - the full Rx FIFO size minus the early receive size (for parts
679          *   with ERT support assuming ERT set to E1000_ERT_2048), or
680          * - the full Rx FIFO size minus one full frame
681          */
682         hwm = min(((pba << 10) * 9 / 10),
683                   ((pba << 10) - hw->max_frame_size));
684
685         hw->fc_high_water = hwm & 0xFFF8;       /* 8-byte granularity */
686         hw->fc_low_water = hw->fc_high_water - 8;
687         hw->fc_pause_time = E1000_FC_PAUSE_TIME;
688         hw->fc_send_xon = 1;
689         hw->fc = hw->original_fc;
690
691         /* Allow time for pending master requests to run */
692         e1000_reset_hw(hw);
693         if (hw->mac_type >= e1000_82544)
694                 ew32(WUC, 0);
695
696         if (e1000_init_hw(hw))
697                 e_dev_err("Hardware Error\n");
698         e1000_update_mng_vlan(adapter);
699
700         /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
701         if (hw->mac_type >= e1000_82544 &&
702             hw->autoneg == 1 &&
703             hw->autoneg_advertised == ADVERTISE_1000_FULL) {
704                 u32 ctrl = er32(CTRL);
705                 /* clear phy power management bit if we are in gig only mode,
706                  * which if enabled will attempt negotiation to 100Mb, which
707                  * can cause a loss of link at power off or driver unload
708                  */
709                 ctrl &= ~E1000_CTRL_SWDPIN3;
710                 ew32(CTRL, ctrl);
711         }
712
713         /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
714         ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
715
716         e1000_reset_adaptive(hw);
717         e1000_phy_get_info(hw, &adapter->phy_info);
718
719         e1000_release_manageability(adapter);
720 }
721
722 /* Dump the eeprom for users having checksum issues */
723 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
724 {
725         struct net_device *netdev = adapter->netdev;
726         struct ethtool_eeprom eeprom;
727         const struct ethtool_ops *ops = netdev->ethtool_ops;
728         u8 *data;
729         int i;
730         u16 csum_old, csum_new = 0;
731
732         eeprom.len = ops->get_eeprom_len(netdev);
733         eeprom.offset = 0;
734
735         data = kmalloc(eeprom.len, GFP_KERNEL);
736         if (!data)
737                 return;
738
739         ops->get_eeprom(netdev, &eeprom, data);
740
741         csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
742                    (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
743         for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
744                 csum_new += data[i] + (data[i + 1] << 8);
745         csum_new = EEPROM_SUM - csum_new;
746
747         pr_err("/*********************/\n");
748         pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
749         pr_err("Calculated              : 0x%04x\n", csum_new);
750
751         pr_err("Offset    Values\n");
752         pr_err("========  ======\n");
753         print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
754
755         pr_err("Include this output when contacting your support provider.\n");
756         pr_err("This is not a software error! Something bad happened to\n");
757         pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
758         pr_err("result in further problems, possibly loss of data,\n");
759         pr_err("corruption or system hangs!\n");
760         pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
761         pr_err("which is invalid and requires you to set the proper MAC\n");
762         pr_err("address manually before continuing to enable this network\n");
763         pr_err("device. Please inspect the EEPROM dump and report the\n");
764         pr_err("issue to your hardware vendor or Intel Customer Support.\n");
765         pr_err("/*********************/\n");
766
767         kfree(data);
768 }
769
770 /**
771  * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
772  * @pdev: PCI device information struct
773  *
774  * Return true if an adapter needs ioport resources
775  **/
776 static int e1000_is_need_ioport(struct pci_dev *pdev)
777 {
778         switch (pdev->device) {
779         case E1000_DEV_ID_82540EM:
780         case E1000_DEV_ID_82540EM_LOM:
781         case E1000_DEV_ID_82540EP:
782         case E1000_DEV_ID_82540EP_LOM:
783         case E1000_DEV_ID_82540EP_LP:
784         case E1000_DEV_ID_82541EI:
785         case E1000_DEV_ID_82541EI_MOBILE:
786         case E1000_DEV_ID_82541ER:
787         case E1000_DEV_ID_82541ER_LOM:
788         case E1000_DEV_ID_82541GI:
789         case E1000_DEV_ID_82541GI_LF:
790         case E1000_DEV_ID_82541GI_MOBILE:
791         case E1000_DEV_ID_82544EI_COPPER:
792         case E1000_DEV_ID_82544EI_FIBER:
793         case E1000_DEV_ID_82544GC_COPPER:
794         case E1000_DEV_ID_82544GC_LOM:
795         case E1000_DEV_ID_82545EM_COPPER:
796         case E1000_DEV_ID_82545EM_FIBER:
797         case E1000_DEV_ID_82546EB_COPPER:
798         case E1000_DEV_ID_82546EB_FIBER:
799         case E1000_DEV_ID_82546EB_QUAD_COPPER:
800                 return true;
801         default:
802                 return false;
803         }
804 }
805
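/**
 * e1000_fix_features - keep Rx and Tx VLAN acceleration in sync
 * @netdev: network interface device structure
 * @features: requested feature set
 *
 * The hardware cannot toggle VLAN tag stripping and insertion separately,
 * so force the Tx flag to follow the Rx flag.
 **/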
806 static netdev_features_t e1000_fix_features(struct net_device *netdev,
807         netdev_features_t features)
808 {
809         /* Since there is no support for separate Rx/Tx vlan accel
810          * enable/disable make sure Tx flag is always in same state as Rx.
811          */
812         if (features & NETIF_F_HW_VLAN_RX)
813                 features |= NETIF_F_HW_VLAN_TX;
814         else
815                 features &= ~NETIF_F_HW_VLAN_TX;
816
817         return features;
818 }
819
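/**
 * e1000_set_features - apply a changed feature set from the stack
 * @netdev: network interface device structure
 * @features: features to enable
 *
 * VLAN offload changes take effect immediately; RXCSUM/RXALL changes
 * require a reinit (or a reset while the interface is down).
 **/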
820 static int e1000_set_features(struct net_device *netdev,
821         netdev_features_t features)
822 {
823         struct e1000_adapter *adapter = netdev_priv(netdev);
824         netdev_features_t changed = features ^ netdev->features;
825
826         if (changed & NETIF_F_HW_VLAN_RX)
827                 e1000_vlan_mode(netdev, features);
828
829         if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
830                 return 0;
831
832         netdev->features = features;
833         adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
834
835         if (netif_running(netdev))
836                 e1000_reinit_locked(adapter);
837         else
838                 e1000_reset(adapter);
839
840         return 0;
841 }
842
843 static const struct net_device_ops e1000_netdev_ops = {
844         .ndo_open               = e1000_open,
845         .ndo_stop               = e1000_close,
846         .ndo_start_xmit         = e1000_xmit_frame,
847         .ndo_get_stats          = e1000_get_stats,
848         .ndo_set_rx_mode        = e1000_set_rx_mode,
849         .ndo_set_mac_address    = e1000_set_mac,
850         .ndo_tx_timeout         = e1000_tx_timeout,
851         .ndo_change_mtu         = e1000_change_mtu,
852         .ndo_do_ioctl           = e1000_ioctl,
853         .ndo_validate_addr      = eth_validate_addr,
854         .ndo_vlan_rx_add_vid    = e1000_vlan_rx_add_vid,
855         .ndo_vlan_rx_kill_vid   = e1000_vlan_rx_kill_vid,
856 #ifdef CONFIG_NET_POLL_CONTROLLER
857         .ndo_poll_controller    = e1000_netpoll,
858 #endif
859         .ndo_fix_features       = e1000_fix_features,
860         .ndo_set_features       = e1000_set_features,
861 };
862
863 /**
864  * e1000_init_hw_struct - initialize members of hw struct
865  * @adapter: board private struct
866  * @hw: structure used by e1000_hw.c
867  *
868  * Factors out initialization of the e1000_hw struct to its own function
869  * that can be called very early at init (just after struct allocation).
870  * Fields are initialized based on PCI device information and
871  * OS network device settings (MTU size).
872  * Returns negative error codes if MAC type setup fails.
873  */
874 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
875                                 struct e1000_hw *hw)
876 {
877         struct pci_dev *pdev = adapter->pdev;
878
879         /* PCI config space info */
880         hw->vendor_id = pdev->vendor;
881         hw->device_id = pdev->device;
882         hw->subsystem_vendor_id = pdev->subsystem_vendor;
883         hw->subsystem_id = pdev->subsystem_device;
884         hw->revision_id = pdev->revision;
885
886         pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
887
888         hw->max_frame_size = adapter->netdev->mtu +
889                              ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
890         hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
891
892         /* identify the MAC */
893         if (e1000_set_mac_type(hw)) {
894                 e_err(probe, "Unknown MAC Type\n");
895                 return -EIO;
896         }
897
898         switch (hw->mac_type) {
899         default:
900                 break;
901         case e1000_82541:
902         case e1000_82547:
903         case e1000_82541_rev_2:
904         case e1000_82547_rev_2:
905                 hw->phy_init_script = 1;
906                 break;
907         }
908
909         e1000_set_media_type(hw);
910         e1000_get_bus_info(hw);
911
912         hw->wait_autoneg_complete = false;
913         hw->tbi_compatibility_en = true;
914         hw->adaptive_ifs = true;
915
916         /* Copper options */
917
918         if (hw->media_type == e1000_media_type_copper) {
919                 hw->mdix = AUTO_ALL_MODES;
920                 hw->disable_polarity_correction = false;
921                 hw->master_slave = E1000_MASTER_SLAVE;
922         }
923
924         return 0;
925 }
926
927 /**
928  * e1000_probe - Device Initialization Routine
929  * @pdev: PCI device information struct
930  * @ent: entry in e1000_pci_tbl
931  *
932  * Returns 0 on success, negative on failure
933  *
934  * e1000_probe initializes an adapter identified by a pci_dev structure.
935  * The OS initialization, configuring of the adapter private structure,
936  * and a hardware reset occur.
937  **/
938 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
939 {
940         struct net_device *netdev;
941         struct e1000_adapter *adapter;
942         struct e1000_hw *hw;
943
944         static int cards_found;
945         static int global_quad_port_a; /* global ksp3 port a indication */
946         int i, err, pci_using_dac;
947         u16 eeprom_data = 0;
948         u16 tmp = 0;
949         u16 eeprom_apme_mask = E1000_EEPROM_APME;
950         int bars, need_ioport;
951
952         /* do not allocate ioport bars when not needed */
953         need_ioport = e1000_is_need_ioport(pdev);
954         if (need_ioport) {
955                 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
956                 err = pci_enable_device(pdev);
957         } else {
958                 bars = pci_select_bars(pdev, IORESOURCE_MEM);
959                 err = pci_enable_device_mem(pdev);
960         }
961         if (err)
962                 return err;
963
964         err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
965         if (err)
966                 goto err_pci_reg;
967
968         pci_set_master(pdev);
969         err = pci_save_state(pdev);
970         if (err)
971                 goto err_alloc_etherdev;
972
973         err = -ENOMEM;
974         netdev = alloc_etherdev(sizeof(struct e1000_adapter));
975         if (!netdev)
976                 goto err_alloc_etherdev;
977
978         SET_NETDEV_DEV(netdev, &pdev->dev);
979
980         pci_set_drvdata(pdev, netdev);
981         adapter = netdev_priv(netdev);
982         adapter->netdev = netdev;
983         adapter->pdev = pdev;
984         adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
985         adapter->bars = bars;
986         adapter->need_ioport = need_ioport;
987
988         hw = &adapter->hw;
989         hw->back = adapter;
990
991         err = -EIO;
992         hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
993         if (!hw->hw_addr)
994                 goto err_ioremap;
995
996         if (adapter->need_ioport) {
997                 for (i = BAR_1; i <= BAR_5; i++) {
998                         if (pci_resource_len(pdev, i) == 0)
999                                 continue;
1000                         if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1001                                 hw->io_base = pci_resource_start(pdev, i);
1002                                 break;
1003                         }
1004                 }
1005         }
1006
1007         /* make ready for any if (hw->...) below */
1008         err = e1000_init_hw_struct(adapter, hw);
1009         if (err)
1010                 goto err_sw_init;
1011
1012         /* there is a workaround being applied below that limits
1013          * 64-bit DMA addresses to 64-bit hardware.  There are some
1014          * 32-bit adapters that Tx hang when given 64-bit DMA addresses
1015          */
1016         pci_using_dac = 0;
1017         if ((hw->bus_type == e1000_bus_type_pcix) &&
1018             !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
1019                 /* according to DMA-API-HOWTO, coherent calls will always
1020                  * succeed if the set call did
1021                  */
1022                 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1023                 pci_using_dac = 1;
1024         } else {
1025                 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1026                 if (err) {
1027                         pr_err("No usable DMA config, aborting\n");
1028                         goto err_dma;
1029                 }
1030                 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1031         }
1032
1033         netdev->netdev_ops = &e1000_netdev_ops;
1034         e1000_set_ethtool_ops(netdev);
1035         netdev->watchdog_timeo = 5 * HZ;
1036         netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1037
1038         strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1039
1040         adapter->bd_number = cards_found;
1041
1042         /* setup the private structure */
1043
1044         err = e1000_sw_init(adapter);
1045         if (err)
1046                 goto err_sw_init;
1047
1048         err = -EIO;
1049         if (hw->mac_type == e1000_ce4100) {
1050                 hw->ce4100_gbe_mdio_base_virt =
1051                                         ioremap(pci_resource_start(pdev, BAR_1),
1052                                                 pci_resource_len(pdev, BAR_1));
1053
1054                 if (!hw->ce4100_gbe_mdio_base_virt)
1055                         goto err_mdio_ioremap;
1056         }
1057
1058         if (hw->mac_type >= e1000_82543) {
1059                 netdev->hw_features = NETIF_F_SG |
1060                                    NETIF_F_HW_CSUM |
1061                                    NETIF_F_HW_VLAN_RX;
1062                 netdev->features = NETIF_F_HW_VLAN_TX |
1063                                    NETIF_F_HW_VLAN_FILTER;
1064         }
1065
1066         if ((hw->mac_type >= e1000_82544) &&
1067            (hw->mac_type != e1000_82547))
1068                 netdev->hw_features |= NETIF_F_TSO;
1069
1070         netdev->priv_flags |= IFF_SUPP_NOFCS;
1071
1072         netdev->features |= netdev->hw_features;
1073         netdev->hw_features |= (NETIF_F_RXCSUM |
1074                                 NETIF_F_RXALL |
1075                                 NETIF_F_RXFCS);
1076
1077         if (pci_using_dac) {
1078                 netdev->features |= NETIF_F_HIGHDMA;
1079                 netdev->vlan_features |= NETIF_F_HIGHDMA;
1080         }
1081
1082         netdev->vlan_features |= (NETIF_F_TSO |
1083                                   NETIF_F_HW_CSUM |
1084                                   NETIF_F_SG);
1085
1086         netdev->priv_flags |= IFF_UNICAST_FLT;
1087
1088         adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1089
1090         /* initialize eeprom parameters */
1091         if (e1000_init_eeprom_params(hw)) {
1092                 e_err(probe, "EEPROM initialization failed\n");
1093                 goto err_eeprom;
1094         }
1095
1096         /* before reading the EEPROM, reset the controller to
1097          * put the device in a known good starting state
1098          */
1099
1100         e1000_reset_hw(hw);
1101
1102         /* make sure the EEPROM is good */
1103         if (e1000_validate_eeprom_checksum(hw) < 0) {
1104                 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1105                 e1000_dump_eeprom(adapter);
1106                 /* set MAC address to all zeroes to invalidate and temporarily
1107                  * disable this device for the user. This blocks regular
1108                  * traffic while still permitting ethtool ioctls from reaching
1109                  * the hardware as well as allowing the user to run the
1110                  * interface after manually setting a hw addr using
1111                  * `ip link set address`
1112                  */
1113                 memset(hw->mac_addr, 0, netdev->addr_len);
1114         } else {
1115                 /* copy the MAC address out of the EEPROM */
1116                 if (e1000_read_mac_addr(hw))
1117                         e_err(probe, "EEPROM Read Error\n");
1118         }
1119         /* don't block initialization here due to bad MAC address */
1120         memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1121
1122         if (!is_valid_ether_addr(netdev->dev_addr))
1123                 e_err(probe, "Invalid MAC Address\n");
1124
1125
1126         INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1127         INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1128                           e1000_82547_tx_fifo_stall_task);
1129         INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1130         INIT_WORK(&adapter->reset_task, e1000_reset_task);
1131
1132         e1000_check_options(adapter);
1133
1134         /* Initial Wake on LAN setting
1135          * If APM wake is enabled in the EEPROM,
1136          * enable the ACPI Magic Packet filter
1137          */
1138
1139         switch (hw->mac_type) {
1140         case e1000_82542_rev2_0:
1141         case e1000_82542_rev2_1:
1142         case e1000_82543:
1143                 break;
1144         case e1000_82544:
1145                 e1000_read_eeprom(hw,
1146                         EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1147                 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1148                 break;
1149         case e1000_82546:
1150         case e1000_82546_rev_3:
1151                 if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1152                         e1000_read_eeprom(hw,
1153                                 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1154                         break;
1155                 }
1156                 /* Fall Through */
1157         default:
1158                 e1000_read_eeprom(hw,
1159                         EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1160                 break;
1161         }
1162         if (eeprom_data & eeprom_apme_mask)
1163                 adapter->eeprom_wol |= E1000_WUFC_MAG;
1164
1165         /* now that we have the eeprom settings, apply the special cases
1166          * where the eeprom may be wrong or the board simply won't support
1167          * wake on lan on a particular port
1168          */
1169         switch (pdev->device) {
1170         case E1000_DEV_ID_82546GB_PCIE:
1171                 adapter->eeprom_wol = 0;
1172                 break;
1173         case E1000_DEV_ID_82546EB_FIBER:
1174         case E1000_DEV_ID_82546GB_FIBER:
1175                 /* Wake events only supported on port A for dual fiber
1176                  * regardless of eeprom setting
1177                  */
1178                 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1179                         adapter->eeprom_wol = 0;
1180                 break;
1181         case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1182                 /* if quad port adapter, disable WoL on all but port A */
1183                 if (global_quad_port_a != 0)
1184                         adapter->eeprom_wol = 0;
1185                 else
1186                         adapter->quad_port_a = true;
1187                 /* Reset for multiple quad port adapters */
1188                 if (++global_quad_port_a == 4)
1189                         global_quad_port_a = 0;
1190                 break;
1191         }
1192
1193         /* initialize the wol settings based on the eeprom settings */
1194         adapter->wol = adapter->eeprom_wol;
1195         device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1196
1197         /* Auto detect PHY address */
1198         if (hw->mac_type == e1000_ce4100) {
1199                 for (i = 0; i < 32; i++) {
1200                         hw->phy_addr = i;
1201                         e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1202                         if (tmp == 0 || tmp == 0xFF) {
1203                                 if (i == 31)
1204                                         goto err_eeprom;
1205                                 continue;
1206                         } else
1207                                 break;
1208                 }
1209         }
1210
1211         /* reset the hardware with the new settings */
1212         e1000_reset(adapter);
1213
1214         strcpy(netdev->name, "eth%d");
1215         err = register_netdev(netdev);
1216         if (err)
1217                 goto err_register;
1218
1219         e1000_vlan_filter_on_off(adapter, false);
1220
1221         /* print bus type/speed/width info */
1222         e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1223                ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1224                ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1225                 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1226                 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1227                 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1228                ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1229                netdev->dev_addr);
1230
1231         /* carrier off reporting is important to ethtool even BEFORE open */
1232         netif_carrier_off(netdev);
1233
1234         e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1235
1236         cards_found++;
1237         return 0;
1238
1239 err_register:
1240 err_eeprom:
1241         e1000_phy_hw_reset(hw);
1242
1243         if (hw->flash_address)
1244                 iounmap(hw->flash_address);
1245         kfree(adapter->tx_ring);
1246         kfree(adapter->rx_ring);
1247 err_dma:
1248 err_sw_init:
1249 err_mdio_ioremap:
1250         iounmap(hw->ce4100_gbe_mdio_base_virt);
1251         iounmap(hw->hw_addr);
1252 err_ioremap:
1253         free_netdev(netdev);
1254 err_alloc_etherdev:
1255         pci_release_selected_regions(pdev, bars);
1256 err_pci_reg:
1257         pci_disable_device(pdev);
1258         return err;
1259 }
1260
1261 /**
1262  * e1000_remove - Device Removal Routine
1263  * @pdev: PCI device information struct
1264  *
1265  * e1000_remove is called by the PCI subsystem to alert the driver
1266  * that it should release a PCI device.  This could be caused by a
1267  * Hot-Plug event, or because the driver is going to be removed from
1268  * memory.
1269  **/
1270 static void e1000_remove(struct pci_dev *pdev)
1271 {
1272         struct net_device *netdev = pci_get_drvdata(pdev);
1273         struct e1000_adapter *adapter = netdev_priv(netdev);
1274         struct e1000_hw *hw = &adapter->hw;
1275
1276         e1000_down_and_stop(adapter);
1277         e1000_release_manageability(adapter);
1278
1279         unregister_netdev(netdev);
1280
1281         e1000_phy_hw_reset(hw);
1282
1283         kfree(adapter->tx_ring);
1284         kfree(adapter->rx_ring);
1285
1286         if (hw->mac_type == e1000_ce4100)
1287                 iounmap(hw->ce4100_gbe_mdio_base_virt);
1288         iounmap(hw->hw_addr);
1289         if (hw->flash_address)
1290                 iounmap(hw->flash_address);
1291         pci_release_selected_regions(pdev, adapter->bars);
1292
1293         free_netdev(netdev);
1294
1295         pci_disable_device(pdev);
1296 }
1297
1298 /**
1299  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1300  * @adapter: board private structure to initialize
1301  *
1302  * e1000_sw_init initializes the Adapter private data structure.
1303  * e1000_init_hw_struct MUST be called before this function
1304  **/
1305 static int e1000_sw_init(struct e1000_adapter *adapter)
1306 {
1307         adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1308
1309         adapter->num_tx_queues = 1;
1310         adapter->num_rx_queues = 1;
1311
1312         if (e1000_alloc_queues(adapter)) {
1313                 e_err(probe, "Unable to allocate memory for queues\n");
1314                 return -ENOMEM;
1315         }
1316
1317         /* Explicitly disable IRQ since the NIC can be in any state. */
1318         e1000_irq_disable(adapter);
1319
1320         spin_lock_init(&adapter->stats_lock);
1321         mutex_init(&adapter->mutex);
1322
1323         set_bit(__E1000_DOWN, &adapter->flags);
1324
1325         return 0;
1326 }
1327
1328 /**
1329  * e1000_alloc_queues - Allocate memory for all rings
1330  * @adapter: board private structure to initialize
1331  *
1332  * We allocate one ring per queue at run-time since we don't know the
1333  * number of queues at compile-time.
1334  **/
1335 static int e1000_alloc_queues(struct e1000_adapter *adapter)
1336 {
1337         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1338                                    sizeof(struct e1000_tx_ring), GFP_KERNEL);
1339         if (!adapter->tx_ring)
1340                 return -ENOMEM;
1341
1342         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1343                                    sizeof(struct e1000_rx_ring), GFP_KERNEL);
1344         if (!adapter->rx_ring) {
1345                 kfree(adapter->tx_ring);
1346                 return -ENOMEM;
1347         }
1348
1349         return E1000_SUCCESS;
1350 }
1351
1352 /**
1353  * e1000_open - Called when a network interface is made active
1354  * @netdev: network interface device structure
1355  *
1356  * Returns 0 on success, negative value on failure
1357  *
1358  * The open entry point is called when a network interface is made
1359  * active by the system (IFF_UP).  At this point all resources needed
1360  * for transmit and receive operations are allocated, the interrupt
1361  * handler is registered with the OS, the watchdog task is started,
1362  * and the stack is notified that the interface is ready.
1363  **/
1364 static int e1000_open(struct net_device *netdev)
1365 {
1366         struct e1000_adapter *adapter = netdev_priv(netdev);
1367         struct e1000_hw *hw = &adapter->hw;
1368         int err;
1369
1370         /* disallow open during test */
1371         if (test_bit(__E1000_TESTING, &adapter->flags))
1372                 return -EBUSY;
1373
1374         netif_carrier_off(netdev);
1375
1376         /* allocate transmit descriptors */
1377         err = e1000_setup_all_tx_resources(adapter);
1378         if (err)
1379                 goto err_setup_tx;
1380
1381         /* allocate receive descriptors */
1382         err = e1000_setup_all_rx_resources(adapter);
1383         if (err)
1384                 goto err_setup_rx;
1385
1386         e1000_power_up_phy(adapter);
1387
1388         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1389         if ((hw->mng_cookie.status &
1390                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1391                 e1000_update_mng_vlan(adapter);
1392         }
1393
1394         /* before we allocate an interrupt, we must be ready to handle it.
1395          * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1396          * as soon as we call pci_request_irq, so we have to setup our
1397          * clean_rx handler before we do so.
1398          */
1399         e1000_configure(adapter);
1400
1401         err = e1000_request_irq(adapter);
1402         if (err)
1403                 goto err_req_irq;
1404
1405         /* From here on the code is the same as e1000_up() */
1406         clear_bit(__E1000_DOWN, &adapter->flags);
1407
1408         napi_enable(&adapter->napi);
1409
1410         e1000_irq_enable(adapter);
1411
1412         netif_start_queue(netdev);
1413
1414         /* fire a link status change interrupt to start the watchdog */
1415         ew32(ICS, E1000_ICS_LSC);
1416
1417         return E1000_SUCCESS;
1418
1419 err_req_irq:
1420         e1000_power_down_phy(adapter);
1421         e1000_free_all_rx_resources(adapter);
1422 err_setup_rx:
1423         e1000_free_all_tx_resources(adapter);
1424 err_setup_tx:
1425         e1000_reset(adapter);
1426
1427         return err;
1428 }
1429
1430 /**
1431  * e1000_close - Disables a network interface
1432  * @netdev: network interface device structure
1433  *
1434  * Returns 0, this is not allowed to fail
1435  *
1436  * The close entry point is called when an interface is de-activated
1437  * by the OS.  The hardware is still under the drivers control, but
1438  * needs to be disabled.  A global MAC reset is issued to stop the
1439  * hardware, and all transmit and receive resources are freed.
1440  **/
1441 static int e1000_close(struct net_device *netdev)
1442 {
1443         struct e1000_adapter *adapter = netdev_priv(netdev);
1444         struct e1000_hw *hw = &adapter->hw;
1445
1446         WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1447         e1000_down(adapter);
1448         e1000_power_down_phy(adapter);
1449         e1000_free_irq(adapter);
1450
1451         e1000_free_all_tx_resources(adapter);
1452         e1000_free_all_rx_resources(adapter);
1453
1454         /* kill manageability vlan ID if supported, but not if a vlan with
1455          * the same ID is registered on the host OS (let 8021q kill it)
1456          */
1457         if ((hw->mng_cookie.status &
1458              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1459             !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1460                 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1461         }
1462
1463         return 0;
1464 }
1465
1466 /**
1467  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1468  * @adapter: address of board private structure
1469  * @start: address of beginning of memory
1470  * @len: length of memory
1471  **/
1472 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1473                                   unsigned long len)
1474 {
1475         struct e1000_hw *hw = &adapter->hw;
1476         unsigned long begin = (unsigned long)start;
1477         unsigned long end = begin + len;
1478
1479         /* Due to errata 23, first-rev 82545 and 82546 parts must not
1480          * allow any memory write location to cross a 64k boundary
1481          */
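             /* begin ^ (end - 1) has a bit set at or above bit 16 exactly
              * when the first and last byte of the buffer fall in different
              * 64 KB regions, e.g. a 32-byte buffer at 0xFFF0 spans
              * 0xFFF0-0x1000F and fails the check below.
              */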
1482         if (hw->mac_type == e1000_82545 ||
1483             hw->mac_type == e1000_ce4100 ||
1484             hw->mac_type == e1000_82546) {
1485                 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
1486         }
1487
1488         return true;
1489 }
1490
1491 /**
1492  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1493  * @adapter: board private structure
1494  * @txdr:    tx descriptor ring (for a specific queue) to setup
1495  *
1496  * Return 0 on success, negative on failure
1497  **/
1498 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1499                                     struct e1000_tx_ring *txdr)
1500 {
1501         struct pci_dev *pdev = adapter->pdev;
1502         int size;
1503
1504         size = sizeof(struct e1000_buffer) * txdr->count;
1505         txdr->buffer_info = vzalloc(size);
1506         if (!txdr->buffer_info)
1507                 return -ENOMEM;
1508
1509         /* round up to nearest 4K */
1510
1511         txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1512         txdr->size = ALIGN(txdr->size, 4096);
1513
1514         txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1515                                         GFP_KERNEL);
1516         if (!txdr->desc) {
1517 setup_tx_desc_die:
1518                 vfree(txdr->buffer_info);
1519                 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1520                       "ring\n");
1521                 return -ENOMEM;
1522         }
1523
1524         /* Fix for errata 23, can't cross 64kB boundary */
1525         if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1526                 void *olddesc = txdr->desc;
1527                 dma_addr_t olddma = txdr->dma;
1528                 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1529                       txdr->size, txdr->desc);
1530                 /* Try again, without freeing the previous */
1531                 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1532                                                 &txdr->dma, GFP_KERNEL);
1533                 /* Failed allocation, critical failure */
1534                 if (!txdr->desc) {
1535                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1536                                           olddma);
1537                         goto setup_tx_desc_die;
1538                 }
1539
1540                 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1541                         /* give up */
1542                         dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1543                                           txdr->dma);
1544                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1545                                           olddma);
1546                         e_err(probe, "Unable to allocate aligned memory "
1547                               "for the transmit descriptor ring\n");
1548                         vfree(txdr->buffer_info);
1549                         return -ENOMEM;
1550                 } else {
1551                         /* Free old allocation, new allocation was successful */
1552                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1553                                           olddma);
1554                 }
1555         }
1556         memset(txdr->desc, 0, txdr->size);
1557
1558         txdr->next_to_use = 0;
1559         txdr->next_to_clean = 0;
1560
1561         return 0;
1562 }
1563
1564 /**
1565  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1566  *                                (Descriptors) for all queues
1567  * @adapter: board private structure
1568  *
1569  * Return 0 on success, negative on failure
1570  **/
1571 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1572 {
1573         int i, err = 0;
1574
1575         for (i = 0; i < adapter->num_tx_queues; i++) {
1576                 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1577                 if (err) {
1578                         e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1579                         for (i--; i >= 0; i--)
1580                                 e1000_free_tx_resources(adapter,
1581                                                         &adapter->tx_ring[i]);
1582                         break;
1583                 }
1584         }
1585
1586         return err;
1587 }
1588
1589 /**
1590  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1591  * @adapter: board private structure
1592  *
1593  * Configure the Tx unit of the MAC after a reset.
1594  **/
1595 static void e1000_configure_tx(struct e1000_adapter *adapter)
1596 {
1597         u64 tdba;
1598         struct e1000_hw *hw = &adapter->hw;
1599         u32 tdlen, tctl, tipg;
1600         u32 ipgr1, ipgr2;
1601
1602         /* Setup the HW Tx Head and Tail descriptor pointers */
1603
1604         switch (adapter->num_tx_queues) {
1605         case 1:
1606         default:
1607                 tdba = adapter->tx_ring[0].dma;
1608                 tdlen = adapter->tx_ring[0].count *
1609                         sizeof(struct e1000_tx_desc);
1610                 ew32(TDLEN, tdlen);
1611                 ew32(TDBAH, (tdba >> 32));
1612                 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1613                 ew32(TDT, 0);
1614                 ew32(TDH, 0);
1615                 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1616                                            E1000_TDH : E1000_82542_TDH);
1617                 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1618                                            E1000_TDT : E1000_82542_TDT);
1619                 break;
1620         }
1621
1622         /* Set the default values for the Tx Inter Packet Gap timer */
1623         if ((hw->media_type == e1000_media_type_fiber ||
1624              hw->media_type == e1000_media_type_internal_serdes))
1625                 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1626         else
1627                 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1628
1629         switch (hw->mac_type) {
1630         case e1000_82542_rev2_0:
1631         case e1000_82542_rev2_1:
1632                 tipg = DEFAULT_82542_TIPG_IPGT;
1633                 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1634                 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1635                 break;
1636         default:
1637                 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1638                 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1639                 break;
1640         }
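             /* TIPG packs three inter-packet gap fields into one register:
              * IPGT in the low bits, with IPGR1 and IPGR2 placed above it
              * at the E1000_TIPG_IPGR1/2_SHIFT bit positions.
              */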
1641         tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1642         tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1643         ew32(TIPG, tipg);
1644
1645         /* Set the Tx Interrupt Delay register */
1646
1647         ew32(TIDV, adapter->tx_int_delay);
1648         if (hw->mac_type >= e1000_82540)
1649                 ew32(TADV, adapter->tx_abs_int_delay);
1650
1651         /* Program the Transmit Control Register */
1652
1653         tctl = er32(TCTL);
1654         tctl &= ~E1000_TCTL_CT;
1655         tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1656                 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1657
1658         e1000_config_collision_dist(hw);
1659
1660         /* Setup Transmit Descriptor Settings for eop descriptor */
1661         adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1662
1663         /* only set IDE if we are delaying interrupts using the timers */
1664         if (adapter->tx_int_delay)
1665                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1666
1667         if (hw->mac_type < e1000_82543)
1668                 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1669         else
1670                 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1671
1672         /* Cache if we're 82544 running in PCI-X because we'll
1673          * need this to apply a workaround later in the send path.
1674          */
1675         if (hw->mac_type == e1000_82544 &&
1676             hw->bus_type == e1000_bus_type_pcix)
1677                 adapter->pcix_82544 = true;
1678
1679         ew32(TCTL, tctl);
1680
1681 }
1682
1683 /**
1684  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1685  * @adapter: board private structure
1686  * @rxdr:    rx descriptor ring (for a specific queue) to setup
1687  *
1688  * Returns 0 on success, negative on failure
1689  **/
1690 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1691                                     struct e1000_rx_ring *rxdr)
1692 {
1693         struct pci_dev *pdev = adapter->pdev;
1694         int size, desc_len;
1695
1696         size = sizeof(struct e1000_buffer) * rxdr->count;
1697         rxdr->buffer_info = vzalloc(size);
1698         if (!rxdr->buffer_info)
1699                 return -ENOMEM;
1700
1701         desc_len = sizeof(struct e1000_rx_desc);
1702
1703         /* Round up to nearest 4K */
1704
1705         rxdr->size = rxdr->count * desc_len;
1706         rxdr->size = ALIGN(rxdr->size, 4096);
1707
1708         rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1709                                         GFP_KERNEL);
1710
1711         if (!rxdr->desc) {
1712                 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1713                       "ring\n");
1714 setup_rx_desc_die:
1715                 vfree(rxdr->buffer_info);
1716                 return -ENOMEM;
1717         }
1718
1719         /* Fix for errata 23, can't cross 64kB boundary */
1720         if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1721                 void *olddesc = rxdr->desc;
1722                 dma_addr_t olddma = rxdr->dma;
1723                 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1724                       rxdr->size, rxdr->desc);
1725                 /* Try again, without freeing the previous */
1726                 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1727                                                 &rxdr->dma, GFP_KERNEL);
1728                 /* Failed allocation, critical failure */
1729                 if (!rxdr->desc) {
1730                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1731                                           olddma);
1732                         e_err(probe, "Unable to allocate memory for the Rx "
1733                               "descriptor ring\n");
1734                         goto setup_rx_desc_die;
1735                 }
1736
1737                 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1738                         /* give up */
1739                         dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1740                                           rxdr->dma);
1741                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1742                                           olddma);
1743                         e_err(probe, "Unable to allocate aligned memory for "
1744                               "the Rx descriptor ring\n");
1745                         goto setup_rx_desc_die;
1746                 } else {
1747                         /* Free old allocation, new allocation was successful */
1748                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1749                                           olddma);
1750                 }
1751         }
1752         memset(rxdr->desc, 0, rxdr->size);
1753
1754         rxdr->next_to_clean = 0;
1755         rxdr->next_to_use = 0;
1756         rxdr->rx_skb_top = NULL;
1757
1758         return 0;
1759 }
1760
1761 /**
1762  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1763  *                                (Descriptors) for all queues
1764  * @adapter: board private structure
1765  *
1766  * Return 0 on success, negative on failure
1767  **/
1768 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1769 {
1770         int i, err = 0;
1771
1772         for (i = 0; i < adapter->num_rx_queues; i++) {
1773                 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1774                 if (err) {
1775                         e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1776                         for (i--; i >= 0; i--)
1777                                 e1000_free_rx_resources(adapter,
1778                                                         &adapter->rx_ring[i]);
1779                         break;
1780                 }
1781         }
1782
1783         return err;
1784 }
1785
1786 /**
1787  * e1000_setup_rctl - configure the receive control registers
1788  * @adapter: Board private structure
1789  **/
1790 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1791 {
1792         struct e1000_hw *hw = &adapter->hw;
1793         u32 rctl;
1794
1795         rctl = er32(RCTL);
1796
1797         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1798
1799         rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1800                 E1000_RCTL_RDMTS_HALF |
1801                 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1802
1803         if (hw->tbi_compatibility_on == 1)
1804                 rctl |= E1000_RCTL_SBP;
1805         else
1806                 rctl &= ~E1000_RCTL_SBP;
1807
1808         if (adapter->netdev->mtu <= ETH_DATA_LEN)
1809                 rctl &= ~E1000_RCTL_LPE;
1810         else
1811                 rctl |= E1000_RCTL_LPE;
1812
1813         /* Setup buffer sizes */
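             /* BSEX (buffer size extension) selects the larger RCTL buffer
              * size encodings (4 KB and up); the default 2 KB case below
              * clears it again to use the base encoding.
              */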
1814         rctl &= ~E1000_RCTL_SZ_4096;
1815         rctl |= E1000_RCTL_BSEX;
1816         switch (adapter->rx_buffer_len) {
1817                 case E1000_RXBUFFER_2048:
1818                 default:
1819                         rctl |= E1000_RCTL_SZ_2048;
1820                         rctl &= ~E1000_RCTL_BSEX;
1821                         break;
1822                 case E1000_RXBUFFER_4096:
1823                         rctl |= E1000_RCTL_SZ_4096;
1824                         break;
1825                 case E1000_RXBUFFER_8192:
1826                         rctl |= E1000_RCTL_SZ_8192;
1827                         break;
1828                 case E1000_RXBUFFER_16384:
1829                         rctl |= E1000_RCTL_SZ_16384;
1830                         break;
1831         }
1832
1833         /* This is useful for sniffing bad packets. */
1834         if (adapter->netdev->features & NETIF_F_RXALL) {
1835                 /* UPE and MPE will be handled by normal PROMISC logic
1836                  * in e1000_set_rx_mode
1837                  */
1838                 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1839                          E1000_RCTL_BAM | /* RX All Bcast Pkts */
1840                          E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1841
1842                 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1843                           E1000_RCTL_DPF | /* Allow filtered pause */
1844                           E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1845                 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1846                  * and that breaks VLANs.
1847                  */
1848         }
1849
1850         ew32(RCTL, rctl);
1851 }
1852
1853 /**
1854  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1855  * @adapter: board private structure
1856  *
1857  * Configure the Rx unit of the MAC after a reset.
1858  **/
1859 static void e1000_configure_rx(struct e1000_adapter *adapter)
1860 {
1861         u64 rdba;
1862         struct e1000_hw *hw = &adapter->hw;
1863         u32 rdlen, rctl, rxcsum;
1864
1865         if (adapter->netdev->mtu > ETH_DATA_LEN) {
1866                 rdlen = adapter->rx_ring[0].count *
1867                         sizeof(struct e1000_rx_desc);
1868                 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1869                 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1870         } else {
1871                 rdlen = adapter->rx_ring[0].count *
1872                         sizeof(struct e1000_rx_desc);
1873                 adapter->clean_rx = e1000_clean_rx_irq;
1874                 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1875         }
1876
1877         /* disable receives while setting up the descriptors */
1878         rctl = er32(RCTL);
1879         ew32(RCTL, rctl & ~E1000_RCTL_EN);
1880
1881         /* set the Receive Delay Timer Register */
1882         ew32(RDTR, adapter->rx_int_delay);
1883
1884         if (hw->mac_type >= e1000_82540) {
1885                 ew32(RADV, adapter->rx_abs_int_delay);
1886                 if (adapter->itr_setting != 0)
1887                         ew32(ITR, 1000000000 / (adapter->itr * 256));
1888         }
1889
1890         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1891          * the Base and Length of the Rx Descriptor Ring
1892          */
1893         switch (adapter->num_rx_queues) {
1894         case 1:
1895         default:
1896                 rdba = adapter->rx_ring[0].dma;
1897                 ew32(RDLEN, rdlen);
1898                 ew32(RDBAH, (rdba >> 32));
1899                 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1900                 ew32(RDT, 0);
1901                 ew32(RDH, 0);
1902                 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1903                                            E1000_RDH : E1000_82542_RDH);
1904                 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1905                                            E1000_RDT : E1000_82542_RDT);
1906                 break;
1907         }
1908
1909         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1910         if (hw->mac_type >= e1000_82543) {
1911                 rxcsum = er32(RXCSUM);
1912                 if (adapter->rx_csum)
1913                         rxcsum |= E1000_RXCSUM_TUOFL;
1914                 else
1915                         /* don't need to clear IPPCSE as it defaults to 0 */
1916                         rxcsum &= ~E1000_RXCSUM_TUOFL;
1917                 ew32(RXCSUM, rxcsum);
1918         }
1919
1920         /* Enable Receives */
1921         ew32(RCTL, rctl | E1000_RCTL_EN);
1922 }
1923
1924 /**
1925  * e1000_free_tx_resources - Free Tx Resources per Queue
1926  * @adapter: board private structure
1927  * @tx_ring: Tx descriptor ring for a specific queue
1928  *
1929  * Free all transmit software resources
1930  **/
1931 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1932                                     struct e1000_tx_ring *tx_ring)
1933 {
1934         struct pci_dev *pdev = adapter->pdev;
1935
1936         e1000_clean_tx_ring(adapter, tx_ring);
1937
1938         vfree(tx_ring->buffer_info);
1939         tx_ring->buffer_info = NULL;
1940
1941         dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1942                           tx_ring->dma);
1943
1944         tx_ring->desc = NULL;
1945 }
1946
1947 /**
1948  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1949  * @adapter: board private structure
1950  *
1951  * Free all transmit software resources
1952  **/
1953 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1954 {
1955         int i;
1956
1957         for (i = 0; i < adapter->num_tx_queues; i++)
1958                 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1959 }
1960
1961 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1962                                              struct e1000_buffer *buffer_info)
1963 {
1964         if (buffer_info->dma) {
1965                 if (buffer_info->mapped_as_page)
1966                         dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1967                                        buffer_info->length, DMA_TO_DEVICE);
1968                 else
1969                         dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1970                                          buffer_info->length,
1971                                          DMA_TO_DEVICE);
1972                 buffer_info->dma = 0;
1973         }
1974         if (buffer_info->skb) {
1975                 dev_kfree_skb_any(buffer_info->skb);
1976                 buffer_info->skb = NULL;
1977         }
1978         buffer_info->time_stamp = 0;
1979         /* buffer_info must be completely set up in the transmit path */
1980 }
1981
1982 /**
1983  * e1000_clean_tx_ring - Free Tx Buffers
1984  * @adapter: board private structure
1985  * @tx_ring: ring to be cleaned
1986  **/
1987 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1988                                 struct e1000_tx_ring *tx_ring)
1989 {
1990         struct e1000_hw *hw = &adapter->hw;
1991         struct e1000_buffer *buffer_info;
1992         unsigned long size;
1993         unsigned int i;
1994
1995         /* Free all the Tx ring sk_buffs */
1996
1997         for (i = 0; i < tx_ring->count; i++) {
1998                 buffer_info = &tx_ring->buffer_info[i];
1999                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2000         }
2001
2002         netdev_reset_queue(adapter->netdev);
2003         size = sizeof(struct e1000_buffer) * tx_ring->count;
2004         memset(tx_ring->buffer_info, 0, size);
2005
2006         /* Zero out the descriptor ring */
2007
2008         memset(tx_ring->desc, 0, tx_ring->size);
2009
2010         tx_ring->next_to_use = 0;
2011         tx_ring->next_to_clean = 0;
2012         tx_ring->last_tx_tso = false;
2013
2014         writel(0, hw->hw_addr + tx_ring->tdh);
2015         writel(0, hw->hw_addr + tx_ring->tdt);
2016 }
2017
2018 /**
2019  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2020  * @adapter: board private structure
2021  **/
2022 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2023 {
2024         int i;
2025
2026         for (i = 0; i < adapter->num_tx_queues; i++)
2027                 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2028 }
2029
2030 /**
2031  * e1000_free_rx_resources - Free Rx Resources
2032  * @adapter: board private structure
2033  * @rx_ring: ring to clean the resources from
2034  *
2035  * Free all receive software resources
2036  **/
2037 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2038                                     struct e1000_rx_ring *rx_ring)
2039 {
2040         struct pci_dev *pdev = adapter->pdev;
2041
2042         e1000_clean_rx_ring(adapter, rx_ring);
2043
2044         vfree(rx_ring->buffer_info);
2045         rx_ring->buffer_info = NULL;
2046
2047         dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2048                           rx_ring->dma);
2049
2050         rx_ring->desc = NULL;
2051 }
2052
2053 /**
2054  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2055  * @adapter: board private structure
2056  *
2057  * Free all receive software resources
2058  **/
2059 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2060 {
2061         int i;
2062
2063         for (i = 0; i < adapter->num_rx_queues; i++)
2064                 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2065 }
2066
2067 /**
2068  * e1000_clean_rx_ring - Free Rx Buffers per Queue
2069  * @adapter: board private structure
2070  * @rx_ring: ring to free buffers from
2071  **/
2072 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2073                                 struct e1000_rx_ring *rx_ring)
2074 {
2075         struct e1000_hw *hw = &adapter->hw;
2076         struct e1000_buffer *buffer_info;
2077         struct pci_dev *pdev = adapter->pdev;
2078         unsigned long size;
2079         unsigned int i;
2080
2081         /* Free all the Rx ring sk_buffs */
2082         for (i = 0; i < rx_ring->count; i++) {
2083                 buffer_info = &rx_ring->buffer_info[i];
2084                 if (buffer_info->dma &&
2085                     adapter->clean_rx == e1000_clean_rx_irq) {
2086                         dma_unmap_single(&pdev->dev, buffer_info->dma,
2087                                          buffer_info->length,
2088                                          DMA_FROM_DEVICE);
2089                 } else if (buffer_info->dma &&
2090                            adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2091                         dma_unmap_page(&pdev->dev, buffer_info->dma,
2092                                        buffer_info->length,
2093                                        DMA_FROM_DEVICE);
2094                 }
2095
2096                 buffer_info->dma = 0;
2097                 if (buffer_info->page) {
2098                         put_page(buffer_info->page);
2099                         buffer_info->page = NULL;
2100                 }
2101                 if (buffer_info->skb) {
2102                         dev_kfree_skb(buffer_info->skb);
2103                         buffer_info->skb = NULL;
2104                 }
2105         }
2106
2107         /* there may also be some cached data from a chained receive */
2108         if (rx_ring->rx_skb_top) {
2109                 dev_kfree_skb(rx_ring->rx_skb_top);
2110                 rx_ring->rx_skb_top = NULL;
2111         }
2112
2113         size = sizeof(struct e1000_buffer) * rx_ring->count;
2114         memset(rx_ring->buffer_info, 0, size);
2115
2116         /* Zero out the descriptor ring */
2117         memset(rx_ring->desc, 0, rx_ring->size);
2118
2119         rx_ring->next_to_clean = 0;
2120         rx_ring->next_to_use = 0;
2121
2122         writel(0, hw->hw_addr + rx_ring->rdh);
2123         writel(0, hw->hw_addr + rx_ring->rdt);
2124 }
2125
2126 /**
2127  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2128  * @adapter: board private structure
2129  **/
2130 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2131 {
2132         int i;
2133
2134         for (i = 0; i < adapter->num_rx_queues; i++)
2135                 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2136 }
2137
2138 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2139  * and memory write and invalidate disabled for certain operations
2140  */
2141 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2142 {
2143         struct e1000_hw *hw = &adapter->hw;
2144         struct net_device *netdev = adapter->netdev;
2145         u32 rctl;
2146
2147         e1000_pci_clear_mwi(hw);
2148
2149         rctl = er32(RCTL);
2150         rctl |= E1000_RCTL_RST;
2151         ew32(RCTL, rctl);
2152         E1000_WRITE_FLUSH();
2153         mdelay(5);
2154
2155         if (netif_running(netdev))
2156                 e1000_clean_all_rx_rings(adapter);
2157 }
2158
2159 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2160 {
2161         struct e1000_hw *hw = &adapter->hw;
2162         struct net_device *netdev = adapter->netdev;
2163         u32 rctl;
2164
2165         rctl = er32(RCTL);
2166         rctl &= ~E1000_RCTL_RST;
2167         ew32(RCTL, rctl);
2168         E1000_WRITE_FLUSH();
2169         mdelay(5);
2170
2171         if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2172                 e1000_pci_set_mwi(hw);
2173
2174         if (netif_running(netdev)) {
2175                 /* No need to loop, because 82542 supports only 1 queue */
2176                 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2177                 e1000_configure_rx(adapter);
2178                 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2179         }
2180 }
2181
2182 /**
2183  * e1000_set_mac - Change the Ethernet Address of the NIC
2184  * @netdev: network interface device structure
2185  * @p: pointer to an address structure
2186  *
2187  * Returns 0 on success, negative on failure
2188  **/
2189 static int e1000_set_mac(struct net_device *netdev, void *p)
2190 {
2191         struct e1000_adapter *adapter = netdev_priv(netdev);
2192         struct e1000_hw *hw = &adapter->hw;
2193         struct sockaddr *addr = p;
2194
2195         if (!is_valid_ether_addr(addr->sa_data))
2196                 return -EADDRNOTAVAIL;
2197
2198         /* 82542 2.0 needs to be in reset to write receive address registers */
2199
2200         if (hw->mac_type == e1000_82542_rev2_0)
2201                 e1000_enter_82542_rst(adapter);
2202
2203         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2204         memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2205
2206         e1000_rar_set(hw, hw->mac_addr, 0);
2207
2208         if (hw->mac_type == e1000_82542_rev2_0)
2209                 e1000_leave_82542_rst(adapter);
2210
2211         return 0;
2212 }
2213
2214 /**
2215  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2216  * @netdev: network interface device structure
2217  *
2218  * The set_rx_mode entry point is called whenever the unicast or multicast
2219  * address lists or the network interface flags are updated. This routine is
2220  * responsible for configuring the hardware for proper unicast, multicast,
2221  * promiscuous mode, and all-multi behavior.
2222  **/
2223 static void e1000_set_rx_mode(struct net_device *netdev)
2224 {
2225         struct e1000_adapter *adapter = netdev_priv(netdev);
2226         struct e1000_hw *hw = &adapter->hw;
2227         struct netdev_hw_addr *ha;
2228         bool use_uc = false;
2229         u32 rctl;
2230         u32 hash_value;
2231         int i, rar_entries = E1000_RAR_ENTRIES;
2232         int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2233         u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2234
2235         if (!mcarray)
2236                 return;
2237
2238         /* Check for Promiscuous and All Multicast modes */
2239
2240         rctl = er32(RCTL);
2241
2242         if (netdev->flags & IFF_PROMISC) {
2243                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2244                 rctl &= ~E1000_RCTL_VFE;
2245         } else {
2246                 if (netdev->flags & IFF_ALLMULTI)
2247                         rctl |= E1000_RCTL_MPE;
2248                 else
2249                         rctl &= ~E1000_RCTL_MPE;
2250                 /* Enable VLAN filter if there is a VLAN */
2251                 if (e1000_vlan_used(adapter))
2252                         rctl |= E1000_RCTL_VFE;
2253         }
2254
2255         if (netdev_uc_count(netdev) > rar_entries - 1) {
2256                 rctl |= E1000_RCTL_UPE;
2257         } else if (!(netdev->flags & IFF_PROMISC)) {
2258                 rctl &= ~E1000_RCTL_UPE;
2259                 use_uc = true;
2260         }
2261
2262         ew32(RCTL, rctl);
2263
2264         /* 82542 2.0 needs to be in reset to write receive address registers */
2265
2266         if (hw->mac_type == e1000_82542_rev2_0)
2267                 e1000_enter_82542_rst(adapter);
2268
2269         /* load the first 14 addresses into the exact filters 1-14. Unicast
2270          * addresses take precedence to avoid disabling unicast filtering
2271          * when possible.
2272          *
2273          * RAR 0 is used for the station MAC address.
2274          * If there are not 14 addresses, go ahead and clear the filters.
2275          */
2276         i = 1;
2277         if (use_uc)
2278                 netdev_for_each_uc_addr(ha, netdev) {
2279                         if (i == rar_entries)
2280                                 break;
2281                         e1000_rar_set(hw, ha->addr, i++);
2282                 }
2283
2284         netdev_for_each_mc_addr(ha, netdev) {
2285                 if (i == rar_entries) {
2286                         /* load any remaining addresses into the hash table */
2287                         u32 hash_reg, hash_bit, mta;
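                             /* The upper bits of the hash select one of the
                              * mta_reg_count 32-bit MTA registers; the low
                              * 5 bits select the bit within it.
                              */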
2288                         hash_value = e1000_hash_mc_addr(hw, ha->addr);
2289                         hash_reg = (hash_value >> 5) & 0x7F;
2290                         hash_bit = hash_value & 0x1F;
2291                         mta = (1 << hash_bit);
2292                         mcarray[hash_reg] |= mta;
2293                 } else {
2294                         e1000_rar_set(hw, ha->addr, i++);
2295                 }
2296         }
2297
2298         for (; i < rar_entries; i++) {
2299                 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2300                 E1000_WRITE_FLUSH();
2301                 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2302                 E1000_WRITE_FLUSH();
2303         }
2304
2305         /* write the hash table completely; writing it in this order avoids
2306          * both stupid write-combining chipsets and flushing each write
2307          */
2308         for (i = mta_reg_count - 1; i >= 0; i--) {
2309                 /* The 82544 has an errata where writing odd offsets
2310                  * overwrites the previous even offset, but writing
2311                  * backwards over the range solves the issue by always
2312                  * writing the odd offset first
2313                  */
2314                 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2315         }
2316         E1000_WRITE_FLUSH();
2317
2318         if (hw->mac_type == e1000_82542_rev2_0)
2319                 e1000_leave_82542_rst(adapter);
2320
2321         kfree(mcarray);
2322 }
2323
2324 /**
2325  * e1000_update_phy_info_task - get phy info
2326  * @work: work struct contained inside adapter struct
2327  *
2328  * Need to wait a few seconds after link up to get diagnostic information from
2329  * the phy
2330  */
2331 static void e1000_update_phy_info_task(struct work_struct *work)
2332 {
2333         struct e1000_adapter *adapter = container_of(work,
2334                                                      struct e1000_adapter,
2335                                                      phy_info_task.work);
2336         if (test_bit(__E1000_DOWN, &adapter->flags))
2337                 return;
2338         mutex_lock(&adapter->mutex);
2339         e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2340         mutex_unlock(&adapter->mutex);
2341 }
2342
2343 /**
2344  * e1000_82547_tx_fifo_stall_task - task to complete work
2345  * @work: work struct contained inside adapter struct
2346  **/
2347 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2348 {
2349         struct e1000_adapter *adapter = container_of(work,
2350                                                      struct e1000_adapter,
2351                                                      fifo_stall_task.work);
2352         struct e1000_hw *hw = &adapter->hw;
2353         struct net_device *netdev = adapter->netdev;
2354         u32 tctl;
2355
2356         if (test_bit(__E1000_DOWN, &adapter->flags))
2357                 return;
2358         mutex_lock(&adapter->mutex);
2359         if (atomic_read(&adapter->tx_fifo_stall)) {
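                     /* Only restore the FIFO pointers once both the
                      * descriptor ring (TDH == TDT) and the on-chip Tx data
                      * FIFO head/tail registers (and their saved copies)
                      * show that the FIFO has fully drained.
                      */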
2360                 if ((er32(TDT) == er32(TDH)) &&
2361                    (er32(TDFT) == er32(TDFH)) &&
2362                    (er32(TDFTS) == er32(TDFHS))) {
2363                         tctl = er32(TCTL);
2364                         ew32(TCTL, tctl & ~E1000_TCTL_EN);
2365                         ew32(TDFT, adapter->tx_head_addr);
2366                         ew32(TDFH, adapter->tx_head_addr);
2367                         ew32(TDFTS, adapter->tx_head_addr);
2368                         ew32(TDFHS, adapter->tx_head_addr);
2369                         ew32(TCTL, tctl);
2370                         E1000_WRITE_FLUSH();
2371
2372                         adapter->tx_fifo_head = 0;
2373                         atomic_set(&adapter->tx_fifo_stall, 0);
2374                         netif_wake_queue(netdev);
2375                 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2376                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
2377                 }
2378         }
2379         mutex_unlock(&adapter->mutex);
2380 }
2381
2382 bool e1000_has_link(struct e1000_adapter *adapter)
2383 {
2384         struct e1000_hw *hw = &adapter->hw;
2385         bool link_active = false;
2386
2387         /* get_link_status is set on LSC (link status) interrupt or rx
2388          * sequence error interrupt (except on intel ce4100).
2389          * get_link_status will stay false until
2390          * e1000_check_for_link establishes link for copper adapters
2391          * ONLY
2392          */
2393         switch (hw->media_type) {
2394         case e1000_media_type_copper:
2395                 if (hw->mac_type == e1000_ce4100)
2396                         hw->get_link_status = 1;
2397                 if (hw->get_link_status) {
2398                         e1000_check_for_link(hw);
2399                         link_active = !hw->get_link_status;
2400                 } else {
2401                         link_active = true;
2402                 }
2403                 break;
2404         case e1000_media_type_fiber:
2405                 e1000_check_for_link(hw);
2406                 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2407                 break;
2408         case e1000_media_type_internal_serdes:
2409                 e1000_check_for_link(hw);
2410                 link_active = hw->serdes_has_link;
2411                 break;
2412         default:
2413                 break;
2414         }
2415
2416         return link_active;
2417 }
2418
2419 /**
2420  * e1000_watchdog - work function
2421  * @work: work struct contained inside adapter struct
2422  **/
2423 static void e1000_watchdog(struct work_struct *work)
2424 {
2425         struct e1000_adapter *adapter = container_of(work,
2426                                                      struct e1000_adapter,
2427                                                      watchdog_task.work);
2428         struct e1000_hw *hw = &adapter->hw;
2429         struct net_device *netdev = adapter->netdev;
2430         struct e1000_tx_ring *txdr = adapter->tx_ring;
2431         u32 link, tctl;
2432
2433         if (test_bit(__E1000_DOWN, &adapter->flags))
2434                 return;
2435
2436         mutex_lock(&adapter->mutex);
2437         link = e1000_has_link(adapter);
2438         if ((netif_carrier_ok(netdev)) && link)
2439                 goto link_up;
2440
2441         if (link) {
2442                 if (!netif_carrier_ok(netdev)) {
2443                         u32 ctrl;
2444                         bool txb2b = true;
2445                         /* update snapshot of PHY registers on LSC */
2446                         e1000_get_speed_and_duplex(hw,
2447                                                    &adapter->link_speed,
2448                                                    &adapter->link_duplex);
2449
2450                         ctrl = er32(CTRL);
2451                         pr_info("%s NIC Link is Up %d Mbps %s, "
2452                                 "Flow Control: %s\n",
2453                                 netdev->name,
2454                                 adapter->link_speed,
2455                                 adapter->link_duplex == FULL_DUPLEX ?
2456                                 "Full Duplex" : "Half Duplex",
2457                                 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2458                                 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2459                                 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2460                                 E1000_CTRL_TFCE) ? "TX" : "None")));
2461
2462                         /* adjust timeout factor according to speed/duplex */
2463                         adapter->tx_timeout_factor = 1;
2464                         switch (adapter->link_speed) {
2465                         case SPEED_10:
2466                                 txb2b = false;
2467                                 adapter->tx_timeout_factor = 16;
2468                                 break;
2469                         case SPEED_100:
2470                                 txb2b = false;
2471                                 /* maybe add some timeout factor ? */
2472                                 break;
2473                         }
2474
2475                         /* enable transmits in the hardware */
2476                         tctl = er32(TCTL);
2477                         tctl |= E1000_TCTL_EN;
2478                         ew32(TCTL, tctl);
2479
2480                         netif_carrier_on(netdev);
2481                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2482                                 schedule_delayed_work(&adapter->phy_info_task,
2483                                                       2 * HZ);
2484                         adapter->smartspeed = 0;
2485                 }
2486         } else {
2487                 if (netif_carrier_ok(netdev)) {
2488                         adapter->link_speed = 0;
2489                         adapter->link_duplex = 0;
2490                         pr_info("%s NIC Link is Down\n",
2491                                 netdev->name);
2492                         netif_carrier_off(netdev);
2493
2494                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2495                                 schedule_delayed_work(&adapter->phy_info_task,
2496                                                       2 * HZ);
2497                 }
2498
2499                 e1000_smartspeed(adapter);
2500         }
2501
2502 link_up:
2503         e1000_update_stats(adapter);
2504
2505         hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2506         adapter->tpt_old = adapter->stats.tpt;
2507         hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2508         adapter->colc_old = adapter->stats.colc;
2509
2510         adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2511         adapter->gorcl_old = adapter->stats.gorcl;
2512         adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2513         adapter->gotcl_old = adapter->stats.gotcl;
2514
2515         e1000_update_adaptive(hw);
2516
2517         if (!netif_carrier_ok(netdev)) {
2518                 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2519                         /* We've lost link, so the controller stops DMA,
2520                          * but we've got queued Tx work that's never going
2521                          * to get done, so reset controller to flush Tx.
2522                          * (Do the reset outside of interrupt context).
2523                          */
2524                         adapter->tx_timeout_count++;
2525                         schedule_work(&adapter->reset_task);
2526                         /* exit immediately since reset is imminent */
2527                         goto unlock;
2528                 }
2529         }
2530
2531         /* Simple mode for Interrupt Throttle Rate (ITR) */
2532         if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2533                 /* Symmetric Tx/Rx gets a reduced ITR=2000;
2534                  * Total asymmetrical Tx or Rx gets ITR=8000;
2535                  * everyone else is between 2000-8000.
2536                  */
2537                 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2538                 u32 dif = (adapter->gotcl > adapter->gorcl ?
2539                             adapter->gotcl - adapter->gorcl :
2540                             adapter->gorcl - adapter->gotcl) / 10000;
2541                 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2542
2543                 ew32(ITR, 1000000000 / (itr * 256));
2544         }
2545
2546         /* Cause software interrupt to ensure rx ring is cleaned */
2547         ew32(ICS, E1000_ICS_RXDMT0);
2548
2549         /* Force detection of hung controller every watchdog period */
2550         adapter->detect_tx_hung = true;
2551
2552         /* Reschedule the task */
2553         if (!test_bit(__E1000_DOWN, &adapter->flags))
2554                 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2555
2556 unlock:
2557         mutex_unlock(&adapter->mutex);
2558 }
2559
2560 enum latency_range {
2561         lowest_latency = 0,
2562         low_latency = 1,
2563         bulk_latency = 2,
2564         latency_invalid = 255
2565 };
2566
2567 /**
2568  * e1000_update_itr - update the dynamic ITR value based on statistics
2569  * @adapter: pointer to adapter
2570  * @itr_setting: current adapter->itr
2571  * @packets: the number of packets during this measurement interval
2572  * @bytes: the number of bytes during this measurement interval
2573  *
2574  *      Stores a new ITR value based on packets and byte
2575  *      counts during the last interrupt.  The advantage of per interrupt
2576  *      computation is faster updates and more accurate ITR for the current
2577  *      traffic pattern.  Constants in this function were computed
2578  *      based on theoretical maximum wire speed and thresholds were set based
2579  *      on testing data as well as attempting to minimize response time
2580  *      while increasing bulk throughput.
2581  *      This functionality is controlled by the InterruptThrottleRate module
2582  *      parameter (see e1000_param.c)
2583  **/
2584 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2585                                      u16 itr_setting, int packets, int bytes)
2586 {
2587         unsigned int retval = itr_setting;
2588         struct e1000_hw *hw = &adapter->hw;
2589
2590         if (unlikely(hw->mac_type < e1000_82540))
2591                 goto update_itr_done;
2592
2593         if (packets == 0)
2594                 goto update_itr_done;
2595
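             /* Example, starting from low_latency: 40 packets / 44 kB in
              * the last interval (~1.1 kB per packet) steps up to
              * lowest_latency, while 3 jumbo frames totalling 27 kB
              * (9 kB per packet) drops down to bulk_latency.
              */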
2596         switch (itr_setting) {
2597         case lowest_latency:
2598                 /* jumbo frames get bulk treatment */
2599                 if (bytes/packets > 8000)
2600                         retval = bulk_latency;
2601                 else if ((packets < 5) && (bytes > 512))
2602                         retval = low_latency;
2603                 break;
2604         case low_latency:  /* 50 usec aka 20000 ints/s */
2605                 if (bytes > 10000) {
2606                         /* jumbo frames need bulk latency setting */
2607                         if (bytes/packets > 8000)
2608                                 retval = bulk_latency;
2609                         else if ((packets < 10) || ((bytes/packets) > 1200))
2610                                 retval = bulk_latency;
2611                         else if ((packets > 35))
2612                                 retval = lowest_latency;
2613                 } else if (bytes/packets > 2000)
2614                         retval = bulk_latency;
2615                 else if (packets <= 2 && bytes < 512)
2616                         retval = lowest_latency;
2617                 break;
2618         case bulk_latency: /* 250 usec aka 4000 ints/s */
2619                 if (bytes > 25000) {
2620                         if (packets > 35)
2621                                 retval = low_latency;
2622                 } else if (bytes < 6000) {
2623                         retval = low_latency;
2624                 }
2625                 break;
2626         }
2627
2628 update_itr_done:
2629         return retval;
2630 }
2631
2632 static void e1000_set_itr(struct e1000_adapter *adapter)
2633 {
2634         struct e1000_hw *hw = &adapter->hw;
2635         u16 current_itr;
2636         u32 new_itr = adapter->itr;
2637
2638         if (unlikely(hw->mac_type < e1000_82540))
2639                 return;
2640
2641         /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2642         if (unlikely(adapter->link_speed != SPEED_1000)) {
2643                 current_itr = 0;
2644                 new_itr = 4000;
2645                 goto set_itr_now;
2646         }
2647
2648         adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2649                                            adapter->total_tx_packets,
2650                                            adapter->total_tx_bytes);
2651         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2652         if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2653                 adapter->tx_itr = low_latency;
2654
2655         adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2656                                            adapter->total_rx_packets,
2657                                            adapter->total_rx_bytes);
2658         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2659         if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2660                 adapter->rx_itr = low_latency;
2661
2662         current_itr = max(adapter->rx_itr, adapter->tx_itr);
2663
2664         switch (current_itr) {
2665         /* counts and packets in update_itr are dependent on these numbers */
2666         case lowest_latency:
2667                 new_itr = 70000;
2668                 break;
2669         case low_latency:
2670                 new_itr = 20000; /* aka hwitr = ~200 */
2671                 break;
2672         case bulk_latency:
2673                 new_itr = 4000;
2674                 break;
2675         default:
2676                 break;
2677         }
2678
2679 set_itr_now:
2680         if (new_itr != adapter->itr) {
2681                 /* this attempts to bias the interrupt rate towards Bulk
2682                  * by adding intermediate steps when interrupt rate is
2683                  * increasing
2684                  */
2685                 new_itr = new_itr > adapter->itr ?
2686                           min(adapter->itr + (new_itr >> 2), new_itr) :
2687                           new_itr;
2688                 adapter->itr = new_itr;
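                     /* ITR is programmed in 256 ns increments, so
                      * 1000000000 / (new_itr * 256) converts an
                      * interrupts/sec target into that interval
                      * (e.g. 20000 ints/s -> ~195).
                      */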
2689                 ew32(ITR, 1000000000 / (new_itr * 256));
2690         }
2691 }
2692
2693 #define E1000_TX_FLAGS_CSUM             0x00000001
2694 #define E1000_TX_FLAGS_VLAN             0x00000002
2695 #define E1000_TX_FLAGS_TSO              0x00000004
2696 #define E1000_TX_FLAGS_IPV4             0x00000008
2697 #define E1000_TX_FLAGS_NO_FCS           0x00000010
2698 #define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
2699 #define E1000_TX_FLAGS_VLAN_SHIFT       16
2700
2701 static int e1000_tso(struct e1000_adapter *adapter,
2702                      struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2703 {
2704         struct e1000_context_desc *context_desc;
2705         struct e1000_buffer *buffer_info;
2706         unsigned int i;
2707         u32 cmd_length = 0;
2708         u16 ipcse = 0, tucse, mss;
2709         u8 ipcss, ipcso, tucss, tucso, hdr_len;
2710         int err;
2711
2712         if (skb_is_gso(skb)) {
2713                 if (skb_header_cloned(skb)) {
2714                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2715                         if (err)
2716                                 return err;
2717                 }
2718
2719                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2720                 mss = skb_shinfo(skb)->gso_size;
2721                 if (skb->protocol == htons(ETH_P_IP)) {
2722                         struct iphdr *iph = ip_hdr(skb);
2723                         iph->tot_len = 0;
2724                         iph->check = 0;
2725                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2726                                                                  iph->daddr, 0,
2727                                                                  IPPROTO_TCP,
2728                                                                  0);
2729                         cmd_length = E1000_TXD_CMD_IP;
2730                         ipcse = skb_transport_offset(skb) - 1;
2731                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2732                         ipv6_hdr(skb)->payload_len = 0;
2733                         tcp_hdr(skb)->check =
2734                                 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2735                                                  &ipv6_hdr(skb)->daddr,
2736                                                  0, IPPROTO_TCP, 0);
2737                         ipcse = 0;
2738                 }
2739                 ipcss = skb_network_offset(skb);
2740                 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2741                 tucss = skb_transport_offset(skb);
2742                 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2743                 tucse = 0;
2744
2745                 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2746                                E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2747
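                     /* The TSO context descriptor tells the hardware where
                      * the IP and TCP checksum fields live (ipcss/ipcso/ipcse
                      * and tucss/tucso/tucse) plus the MSS and header length,
                      * so it can segment the payload and fix up the headers
                      * of each resulting frame.
                      */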
2748                 i = tx_ring->next_to_use;
2749                 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2750                 buffer_info = &tx_ring->buffer_info[i];
2751
2752                 context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2753                 context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2754                 context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2755                 context_desc->upper_setup.tcp_fields.tucss = tucss;
2756                 context_desc->upper_setup.tcp_fields.tucso = tucso;
2757                 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2758                 context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2759                 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2760                 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2761
2762                 buffer_info->time_stamp = jiffies;
2763                 buffer_info->next_to_watch = i;
2764
2765                 if (++i == tx_ring->count) i = 0;
2766                 tx_ring->next_to_use = i;
2767
2768                 return true;
2769         }
2770         return false;
2771 }
2772
2773 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2774                           struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2775 {
2776         struct e1000_context_desc *context_desc;
2777         struct e1000_buffer *buffer_info;
2778         unsigned int i;
2779         u8 css;
2780         u32 cmd_len = E1000_TXD_CMD_DEXT;
2781
2782         if (skb->ip_summed != CHECKSUM_PARTIAL)
2783                 return false;
2784
2785         switch (skb->protocol) {
2786         case cpu_to_be16(ETH_P_IP):
2787                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2788                         cmd_len |= E1000_TXD_CMD_TCP;
2789                 break;
2790         case cpu_to_be16(ETH_P_IPV6):
2791                 /* XXX not handling all IPV6 headers */
2792                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2793                         cmd_len |= E1000_TXD_CMD_TCP;
2794                 break;
2795         default:
2796                 if (unlikely(net_ratelimit()))
2797                         e_warn(drv, "checksum_partial proto=%x!\n",
2798                                skb->protocol);
2799                 break;
2800         }
2801
2802         css = skb_checksum_start_offset(skb);
2803
2804         i = tx_ring->next_to_use;
2805         buffer_info = &tx_ring->buffer_info[i];
2806         context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2807
2808         context_desc->lower_setup.ip_config = 0;
2809         context_desc->upper_setup.tcp_fields.tucss = css;
2810         context_desc->upper_setup.tcp_fields.tucso =
2811                 css + skb->csum_offset;
2812         context_desc->upper_setup.tcp_fields.tucse = 0;
2813         context_desc->tcp_seg_setup.data = 0;
2814         context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2815
2816         buffer_info->time_stamp = jiffies;
2817         buffer_info->next_to_watch = i;
2818
2819         if (unlikely(++i == tx_ring->count)) i = 0;
2820         tx_ring->next_to_use = i;
2821
2822         return true;
2823 }
2824
2825 #define E1000_MAX_TXD_PWR       12
2826 #define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
2827
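/**
 * e1000_tx_map - map skb data and frags for DMA and fill in buffer_info
 * @adapter: board private structure
 * @tx_ring: ring the buffers belong to
 * @skb: packet being mapped
 * @first: ring index of the first buffer_info used for this packet
 * @max_per_txd: maximum number of bytes per descriptor
 * @nr_frags: number of paged fragments in @skb
 * @mss: MSS if this is a TSO packet, 0 otherwise
 *
 * Returns the number of descriptors used, or 0 on DMA mapping error.
 **/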
2828 static int e1000_tx_map(struct e1000_adapter *adapter,
2829                         struct e1000_tx_ring *tx_ring,
2830                         struct sk_buff *skb, unsigned int first,
2831                         unsigned int max_per_txd, unsigned int nr_frags,
2832                         unsigned int mss)
2833 {
2834         struct e1000_hw *hw = &adapter->hw;
2835         struct pci_dev *pdev = adapter->pdev;
2836         struct e1000_buffer *buffer_info;
2837         unsigned int len = skb_headlen(skb);
2838         unsigned int offset = 0, size, count = 0, i;
2839         unsigned int f, bytecount, segs;
2840
2841         i = tx_ring->next_to_use;
2842
2843         while (len) {
2844                 buffer_info = &tx_ring->buffer_info[i];
2845                 size = min(len, max_per_txd);
2846                 /* Workaround for controller erratum --
2847                  * the descriptor for a non-TSO packet in a linear skb that
2848                  * follows a TSO packet gets written back prematurely, before
2849                  * the data is fully DMA'd to the controller.
2850                  */
2851                 if (!skb->data_len && tx_ring->last_tx_tso &&
2852                     !skb_is_gso(skb)) {
2853                         tx_ring->last_tx_tso = false;
2854                         size -= 4;
2855                 }
2856
2857                 /* Workaround for premature desc write-backs
2858                  * in TSO mode.  Append 4-byte sentinel desc
2859                  */
2860                 if (unlikely(mss && !nr_frags && size == len && size > 8))
2861                         size -= 4;
2862                 /* Work-around for erratum 10, which applies to all
2863                  * controllers in PCI-X mode.  The fix is to make sure that
2864                  * the first descriptor of a packet is smaller than
2865                  * 2048 - 16 - 16 (i.e. 2016) bytes.
2866                  */
2867                 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2868                                 (size > 2015) && count == 0))
2869                         size = 2015;
2870
2871                 /* Workaround for potential 82544 hang in PCI-X.  Avoid
2872                  * terminating buffers within evenly-aligned dwords.
2873                  */
2874                 if (unlikely(adapter->pcix_82544 &&
2875                    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2876                    size > 4))
2877                         size -= 4;
2878
2879                 buffer_info->length = size;
2880                 /* set time_stamp *before* dma to help avoid a possible race */
2881                 buffer_info->time_stamp = jiffies;
2882                 buffer_info->mapped_as_page = false;
2883                 buffer_info->dma = dma_map_single(&pdev->dev,
2884                                                   skb->data + offset,
2885                                                   size, DMA_TO_DEVICE);
2886                 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2887                         goto dma_error;
2888                 buffer_info->next_to_watch = i;
2889
2890                 len -= size;
2891                 offset += size;
2892                 count++;
2893                 if (len) {
2894                         i++;
2895                         if (unlikely(i == tx_ring->count))
2896                                 i = 0;
2897                 }
2898         }
2899
2900         for (f = 0; f < nr_frags; f++) {
2901                 const struct skb_frag_struct *frag;
2902
2903                 frag = &skb_shinfo(skb)->frags[f];
2904                 len = skb_frag_size(frag);
2905                 offset = 0;
2906
2907                 while (len) {
2908                         unsigned long bufend;
2909                         i++;
2910                         if (unlikely(i == tx_ring->count))
2911                                 i = 0;
2912
2913                         buffer_info = &tx_ring->buffer_info[i];
2914                         size = min(len, max_per_txd);
2915                         /* Workaround for premature desc write-backs
2916                          * in TSO mode.  Append 4-byte sentinel desc
2917                          */
2918                         if (unlikely(mss && f == (nr_frags-1) &&
2919                             size == len && size > 8))
2920                                 size -= 4;
2921                         /* Workaround for potential 82544 hang in PCI-X.
2922                          * Avoid terminating buffers within evenly-aligned
2923                          * dwords.
2924                          */
2925                         bufend = (unsigned long)
2926                                 page_to_phys(skb_frag_page(frag));
2927                         bufend += offset + size - 1;
2928                         if (unlikely(adapter->pcix_82544 &&
2929                                      !(bufend & 4) &&
2930                                      size > 4))
2931                                 size -= 4;
2932
2933                         buffer_info->length = size;
2934                         buffer_info->time_stamp = jiffies;
2935                         buffer_info->mapped_as_page = true;
2936                         buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2937                                                 offset, size, DMA_TO_DEVICE);
2938                         if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2939                                 goto dma_error;
2940                         buffer_info->next_to_watch = i;
2941
2942                         len -= size;
2943                         offset += size;
2944                         count++;
2945                 }
2946         }
2947
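        /* gso_segs is 0 for non-TSO packets; the GCC "?:" extension
         * falls back to a single segment in that case
         */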
2948         segs = skb_shinfo(skb)->gso_segs ?: 1;
2949         /* each additional segment re-sends the headers, so count them again */
2950         bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2951
2952         tx_ring->buffer_info[i].skb = skb;
2953         tx_ring->buffer_info[i].segs = segs;
2954         tx_ring->buffer_info[i].bytecount = bytecount;
2955         tx_ring->buffer_info[first].next_to_watch = i;
2956
2957         return count;
2958
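/* DMA mapping failed: unwind by unmapping everything mapped so far and
 * return 0 so the caller drops the skb and rewinds next_to_use.
 */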
2959 dma_error:
2960         dev_err(&pdev->dev, "TX DMA map failed\n");
2961         buffer_info->dma = 0;
2962         if (count)
2963                 count--;
2964
2965         while (count--) {
2966                 if (i == 0)
2967                         i += tx_ring->count;
2968                 i--;
2969                 buffer_info = &tx_ring->buffer_info[i];
2970                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2971         }
2972
2973         return 0;
2974 }
2975
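/**
 * e1000_tx_queue - write descriptors to the hardware ring and bump the tail
 * @adapter: board private structure
 * @tx_ring: ring to write descriptors on
 * @tx_flags: E1000_TX_FLAGS_* describing the offloads requested
 * @count: number of buffer_info entries (descriptors) to queue
 **/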
2976 static void e1000_tx_queue(struct e1000_adapter *adapter,
2977                            struct e1000_tx_ring *tx_ring, int tx_flags,
2978                            int count)
2979 {
2980         struct e1000_hw *hw = &adapter->hw;
2981         struct e1000_tx_desc *tx_desc = NULL;
2982         struct e1000_buffer *buffer_info;
2983         u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2984         unsigned int i;
2985
2986         if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2987                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2988                              E1000_TXD_CMD_TSE;
2989                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2990
2991                 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2992                         txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2993         }
2994
2995         if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2996                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2997                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2998         }
2999
3000         if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3001                 txd_lower |= E1000_TXD_CMD_VLE;
3002                 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3003         }
3004
3005         if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3006                 txd_lower &= ~(E1000_TXD_CMD_IFCS);
3007
3008         i = tx_ring->next_to_use;
3009
3010         while (count--) {
3011                 buffer_info = &tx_ring->buffer_info[i];
3012                 tx_desc = E1000_TX_DESC(*tx_ring, i);
3013                 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3014                 tx_desc->lower.data =
3015                         cpu_to_le32(txd_lower | buffer_info->length);
3016                 tx_desc->upper.data = cpu_to_le32(txd_upper);
3017                 if (unlikely(++i == tx_ring->count)) i = 0;
3018         }
3019
3020         tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3021
3022         /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3023         if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3024                 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3025
3026         /* Force memory writes to complete before letting h/w
3027          * know there are new descriptors to fetch.  (Only
3028          * applicable for weak-ordered memory model archs,
3029          * such as IA-64).
3030          */
3031         wmb();
3032
3033         tx_ring->next_to_use = i;
3034         writel(i, hw->hw_addr + tx_ring->tdt);
3035         /* we need this if more than one processor can write to our tail
3036          * at a time; it synchronizes IO on IA64/Altix systems
3037          */
3038         mmiowb();
3039 }
3040
3041 /* 82547 workaround to avoid controller hang in half-duplex environment.
3042  * The workaround is to avoid queuing a large packet that would span
3043  * the internal Tx FIFO ring boundary by notifying the stack to resend
3044  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3045  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3046  * to the beginning of the Tx FIFO.
3047  */
3048
3049 #define E1000_FIFO_HDR                  0x10
3050 #define E1000_82547_PAD_LEN             0x3E0
3051
3052 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3053                                        struct sk_buff *skb)
3054 {
3055         u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3056         u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3057
3058         skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3059
3060         if (adapter->link_duplex != HALF_DUPLEX)
3061                 goto no_fifo_stall_required;
3062
3063         if (atomic_read(&adapter->tx_fifo_stall))
3064                 return 1;
3065
3066         if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3067                 atomic_set(&adapter->tx_fifo_stall, 1);
3068                 return 1;
3069         }
3070
3071 no_fifo_stall_required:
3072         adapter->tx_fifo_head += skb_fifo_len;
3073         if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3074                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3075         return 0;
3076 }
3077
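/* Slow path of e1000_maybe_stop_tx: stop the queue, then re-check free
 * descriptors under a memory barrier in case another CPU just made room.
 */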
3078 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3079 {
3080         struct e1000_adapter *adapter = netdev_priv(netdev);
3081         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3082
3083         netif_stop_queue(netdev);
3084         /* Herbert's original patch had:
3085          *  smp_mb__after_netif_stop_queue();
3086          * but since that doesn't exist yet, just open code it.
3087          */
3088         smp_mb();
3089
3090         /* We need to check again in case another CPU has just
3091          * made room available.
3092          */
3093         if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3094                 return -EBUSY;
3095
3096         /* A reprieve! */
3097         netif_start_queue(netdev);
3098         ++adapter->restart_queue;
3099         return 0;
3100 }
3101
3102 static int e1000_maybe_stop_tx(struct net_device *netdev,
3103                                struct e1000_tx_ring *tx_ring, int size)
3104 {
3105         if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3106                 return 0;
3107         return __e1000_maybe_stop_tx(netdev, size);
3108 }
3109
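/* Worst-case number of descriptors needed to carry S bytes when each
 * descriptor holds at most 2^X bytes
 */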
3110 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
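/**
 * e1000_xmit_frame - transmit entry point called by the network stack
 * @skb: packet to send
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if the ring does not have
 * enough free descriptors for the packet.
 **/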
3111 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3112                                     struct net_device *netdev)
3113 {
3114         struct e1000_adapter *adapter = netdev_priv(netdev);
3115         struct e1000_hw *hw = &adapter->hw;
3116         struct e1000_tx_ring *tx_ring;
3117         unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3118         unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3119         unsigned int tx_flags = 0;
3120         unsigned int len = skb_headlen(skb);
3121         unsigned int nr_frags;
3122         unsigned int mss;
3123         int count = 0;
3124         int tso;
3125         unsigned int f;
3126
3127         /* This goes back to the question of how to logically map a Tx queue
3128          * to a flow.  Right now, performance is slightly degraded when
3129          * using multiple Tx queues.  If the stack breaks away from a
3130          * single qdisc implementation, we can look at this again.
3131          */
3132         tx_ring = adapter->tx_ring;
3133
3134         if (unlikely(skb->len <= 0)) {
3135                 dev_kfree_skb_any(skb);
3136                 return NETDEV_TX_OK;
3137         }
3138
3139         /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3140          * packets may get corrupted during padding by HW.
3141          * To work around this issue, pad all small packets manually.
3142          */
3143         if (skb->len < ETH_ZLEN) {
3144                 if (skb_pad(skb, ETH_ZLEN - skb->len))
3145                         return NETDEV_TX_OK;
3146                 skb->len = ETH_ZLEN;
3147                 skb_set_tail_pointer(skb, ETH_ZLEN);
3148         }
3149
3150         mss = skb_shinfo(skb)->gso_size;
3151         /* The controller does a simple calculation to
3152          * make sure there is enough room in the FIFO before
3153          * initiating the DMA for each buffer.  It requires
3154          * ceil(buffer len / mss) <= 4, so to make sure we don't
3155          * overrun the FIFO, clamp the max buffer len to 4 * mss
3156          * when mss drops.
3157          */
3158         if (mss) {
3159                 u8 hdr_len;
3160                 max_per_txd = min(mss << 2, max_per_txd);
3161                 max_txd_pwr = fls(max_per_txd) - 1;
3162
3163                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3164                 if (skb->data_len && hdr_len == len) {
3165                         switch (hw->mac_type) {
3166                                 unsigned int pull_size;
3167                         case e1000_82544:
3168                                 /* Make sure we have room to chop off 4 bytes,
3169                                  * and that the end alignment will work out to
3170                                  * this hardware's requirements.
3171                                  * NOTE: this is a TSO-only workaround;
3172                                  * if the end byte alignment is not correct,
3173                                  * move us into the next dword
3174                                  */
3175                                 if ((unsigned long)(skb_tail_pointer(skb) - 1)
3176                                     & 4)
3177                                         break;
3178                                 /* pull 4 bytes of paged data into the linear area */
3179                                 pull_size = min((unsigned int)4, skb->data_len);
3180                                 if (!__pskb_pull_tail(skb, pull_size)) {
3181                                         e_err(drv, "__pskb_pull_tail failed.\n");
3183                                         dev_kfree_skb_any(skb);
3184                                         return NETDEV_TX_OK;
3185                                 }
3186                                 len = skb_headlen(skb);
3187                                 break;
3188                         default:
3189                                 /* do nothing */
3190                                 break;
3191                         }
3192                 }
3193         }
3194
3195         /* reserve a descriptor for the offload context */
3196         if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3197                 count++;
3198         count++;
3199
3200         /* Controller Erratum workaround */
3201         if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3202                 count++;
3203
3204         count += TXD_USE_COUNT(len, max_txd_pwr);
3205
3206         if (adapter->pcix_82544)
3207                 count++;
3208
3209         /* work-around for errata 10 and it applies to all controllers
3210          * in PCI-X mode, so add one more descriptor to the count
3211          */
3212         if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3213                         (len > 2015)))
3214                 count++;
3215
3216         nr_frags = skb_shinfo(skb)->nr_frags;
3217         for (f = 0; f < nr_frags; f++)
3218                 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3219                                        max_txd_pwr);
3220         if (adapter->pcix_82544)
3221                 count += nr_frags;
3222
3223         /* need: count + 2 desc gap to keep tail from touching
3224          * head, otherwise try next time
3225          */
3226         if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3227                 return NETDEV_TX_BUSY;
3228
3229         if (unlikely((hw->mac_type == e1000_82547) &&
3230                      (e1000_82547_fifo_workaround(adapter, skb)))) {
3231                 netif_stop_queue(netdev);
3232                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3233                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
3234                 return NETDEV_TX_BUSY;
3235         }
3236
3237         if (vlan_tx_tag_present(skb)) {
3238                 tx_flags |= E1000_TX_FLAGS_VLAN;
3239                 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3240         }
3241
3242         first = tx_ring->next_to_use;
3243
3244         tso = e1000_tso(adapter, tx_ring, skb);
3245         if (tso < 0) {
3246                 dev_kfree_skb_any(skb);
3247                 return NETDEV_TX_OK;
3248         }
3249
3250         if (likely(tso)) {
3251                 if (likely(hw->mac_type != e1000_82544))
3252                         tx_ring->last_tx_tso = true;
3253                 tx_flags |= E1000_TX_FLAGS_TSO;
3254         } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3255                 tx_flags |= E1000_TX_FLAGS_CSUM;
3256
3257         if (likely(skb->protocol == htons(ETH_P_IP)))
3258                 tx_flags |= E1000_TX_FLAGS_IPV4;
3259
3260         if (unlikely(skb->no_fcs))
3261                 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3262
3263         count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3264                              nr_frags, mss);
3265
3266         if (count) {
3267                 netdev_sent_queue(netdev, skb->len);
3268                 skb_tx_timestamp(skb);
3269
3270                 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3271                 /* Make sure there is space in the ring for the next send. */
3272                 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3273
3274         } else {
3275                 dev_kfree_skb_any(skb);
3276                 tx_ring->buffer_info[first].time_stamp = 0;
3277                 tx_ring->next_to_use = first;
3278         }
3279
3280         return NETDEV_TX_OK;
3281 }
3282
3283 #define NUM_REGS 38 /* 1 based count */
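/**
 * e1000_regdump - dump a snapshot of the registers in reg_name[] to the log
 * @adapter: board private structure
 **/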
3284 static void e1000_regdump(struct e1000_adapter *adapter)
3285 {
3286         struct e1000_hw *hw = &adapter->hw;
3287         u32 regs[NUM_REGS];
3288         u32 *regs_buff = regs;
3289         int i = 0;
3290
3291         static const char * const reg_name[] = {
3292                 "CTRL",  "STATUS",
3293                 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3294                 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3295                 "TIDV", "TXDCTL", "TADV", "TARC0",
3296                 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3297                 "TXDCTL1", "TARC1",
3298                 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3299                 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3300                 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3301         };
3302
3303         regs_buff[0]  = er32(CTRL);
3304         regs_buff[1]  = er32(STATUS);
3305
3306         regs_buff[2]  = er32(RCTL);
3307         regs_buff[3]  = er32(RDLEN);
3308         regs_buff[4]  = er32(RDH);
3309         regs_buff[5]  = er32(RDT);
3310         regs_buff[6]  = er32(RDTR);
3311
3312         regs_buff[7]  = er32(TCTL);
3313         regs_buff[8]  = er32(TDBAL);
3314         regs_buff[9]  = er32(TDBAH);
3315         regs_buff[10] = er32(TDLEN);
3316         regs_buff[11] = er32(TDH);
3317         regs_buff[12] = er32(TDT);
3318         regs_buff[13] = er32(TIDV);
3319         regs_buff[14] = er32(TXDCTL);
3320         regs_buff[15] = er32(TADV);
3321         regs_buff[16] = er32(TARC0);
3322
3323         regs_buff[17] = er32(TDBAL1);
3324         regs_buff[18] = er32(TDBAH1);
3325         regs_buff[19] = er32(TDLEN1);
3326         regs_buff[20] = er32(TDH1);
3327         regs_buff[21] = er32(TDT1);
3328         regs_buff[22] = er32(TXDCTL1);
3329         regs_buff[23] = er32(TARC1);
3330         regs_buff[24] = er32(CTRL_EXT);
3331         regs_buff[25] = er32(ERT);
3332         regs_buff[26] = er32(RDBAL0);
3333         regs_buff[27] = er32(RDBAH0);
3334         regs_buff[28] = er32(TDFH);
3335         regs_buff[29] = er32(TDFT);
3336         regs_buff[30] = er32(TDFHS);
3337         regs_buff[31] = er32(TDFTS);
3338         regs_buff[32] = er32(TDFPC);
3339         regs_buff[33] = er32(RDFH);
3340         regs_buff[34] = er32(RDFT);
3341         regs_buff[35] = er32(RDFHS);
3342         regs_buff[36] = er32(RDFTS);
3343         regs_buff[37] = er32(RDFPC);
3344
3345         pr_info("Register dump\n");
3346         for (i = 0; i < NUM_REGS; i++)
3347                 pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3348 }
3349
3350 /**
3351  * e1000_dump - print registers, Tx ring and Rx ring
3352  **/
3353 static void e1000_dump(struct e1000_adapter *adapter)
3354 {
3355         /* this code doesn't handle multiple rings */
3356         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3357         struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3358         int i;
3359
3360         if (!netif_msg_hw(adapter))
3361                 return;
3362
3363         /* Print Registers */
3364         e1000_regdump(adapter);
3365
3366         /* transmit dump */
3367         pr_info("TX Desc ring0 dump\n");
3368
3369         /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3370          *
3371          * Legacy Transmit Descriptor
3372          *   +--------------------------------------------------------------+
3373          * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3374          *   +--------------------------------------------------------------+
3375          * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3376          *   +--------------------------------------------------------------+
3377          *   63       48 47        36 35    32 31     24 23    16 15        0
3378          *
3379          * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3380          *   63      48 47    40 39       32 31             16 15    8 7      0
3381          *   +----------------------------------------------------------------+
3382          * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3383          *   +----------------------------------------------------------------+
3384          * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3385          *   +----------------------------------------------------------------+
3386          *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3387          *
3388          * Extended Data Descriptor (DTYP=0x1)
3389          *   +----------------------------------------------------------------+
3390          * 0 |                     Buffer Address [63:0]                      |
3391          *   +----------------------------------------------------------------+
3392          * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3393          *   +----------------------------------------------------------------+
3394          *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3395          */
3396         pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3397         pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3398
3399         if (!netif_msg_tx_done(adapter))
3400                 goto rx_ring_summary;
3401
3402         for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3403                 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3404                 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
3405                 struct my_u { __le64 a; __le64 b; };
3406                 struct my_u *u = (struct my_u *)tx_desc;
3407                 const char *type;
3408
3409                 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3410                         type = "NTC/U";
3411                 else if (i == tx_ring->next_to_use)
3412                         type = "NTU";
3413                 else if (i == tx_ring->next_to_clean)
3414                         type = "NTC";
3415                 else
3416                         type = "";
3417
3418                 pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3419                         ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3420                         le64_to_cpu(u->a), le64_to_cpu(u->b),
3421                         (u64)buffer_info->dma, buffer_info->length,
3422                         buffer_info->next_to_watch,
3423                         (u64)buffer_info->time_stamp, buffer_info->skb, type);
3424         }
3425
3426 rx_ring_summary:
3427         /* receive dump */
3428         pr_info("\nRX Desc ring dump\n");
3429
3430         /* Legacy Receive Descriptor Format
3431          *
3432          * +-----------------------------------------------------+
3433          * |                Buffer Address [63:0]                |
3434          * +-----------------------------------------------------+
3435          * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3436          * +-----------------------------------------------------+
3437          * 63       48 47    40 39      32 31         16 15      0
3438          */
3439         pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3440
3441         if (!netif_msg_rx_status(adapter))
3442                 goto exit;
3443
3444         for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3445                 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3446                 struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
3447                 struct my_u { __le64 a; __le64 b; };
3448                 struct my_u *u = (struct my_u *)rx_desc;
3449                 const char *type;
3450
3451                 if (i == rx_ring->next_to_use)
3452                         type = "NTU";
3453                 else if (i == rx_ring->next_to_clean)
3454                         type = "NTC";
3455                 else
3456                         type = "";
3457
3458                 pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3459                         i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3460                         (u64)buffer_info->dma, buffer_info->skb, type);
3461         } /* for */
3462
3463         /* dump the descriptor caches */
3464         /* rx */
3465         pr_info("Rx descriptor cache in 64bit format\n");
3466         for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3467                 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3468                         i,
3469                         readl(adapter->hw.hw_addr + i+4),
3470                         readl(adapter->hw.hw_addr + i),
3471                         readl(adapter->hw.hw_addr + i+12),
3472                         readl(adapter->hw.hw_addr + i+8));
3473         }
3474         /* tx */
3475         pr_info("Tx descriptor cache in 64bit format\n");
3476         for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3477                 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3478                         i,
3479                         readl(adapter->hw.hw_addr + i+4),
3480                         readl(adapter->hw.hw_addr + i),
3481                         readl(adapter->hw.hw_addr + i+12),
3482                         readl(adapter->hw.hw_addr + i+8));
3483         }
3484 exit:
3485         return;
3486 }
3487
3488 /**
3489  * e1000_tx_timeout - Respond to a Tx Hang
3490  * @netdev: network interface device structure
3491  **/
3492 static void e1000_tx_timeout(struct net_device *netdev)
3493 {
3494         struct e1000_adapter *adapter = netdev_priv(netdev);
3495
3496         /* Do the reset outside of interrupt context */
3497         adapter->tx_timeout_count++;
3498         schedule_work(&adapter->reset_task);
3499 }
3500
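/**
 * e1000_reset_task - reset the adapter outside of interrupt context
 * @work: work struct contained inside the adapter structure
 **/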
3501 static void e1000_reset_task(struct work_struct *work)
3502 {
3503         struct e1000_adapter *adapter =
3504                 container_of(work, struct e1000_adapter, reset_task);
3505
3506         if (test_bit(__E1000_DOWN, &adapter->flags))
3507                 return;
3508         e_err(drv, "Reset adapter\n");
3509         e1000_reinit_safe(adapter);
3510 }
3511
3512 /**
3513  * e1000_get_stats - Get System Network Statistics
3514  * @netdev: network interface device structure
3515  *
3516  * Returns the address of the device statistics structure.
3517  * The statistics are actually updated from the watchdog.
3518  **/
3519 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3520 {
3521         /* only return the current stats */
3522         return &netdev->stats;
3523 }
3524
3525 /**
3526  * e1000_change_mtu - Change the Maximum Transfer Unit
3527  * @netdev: network interface device structure
3528  * @new_mtu: new value for maximum frame size
3529  *
3530  * Returns 0 on success, negative on failure
3531  **/
3532 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3533 {
3534         struct e1000_adapter *adapter = netdev_priv(netdev);
3535         struct e1000_hw *hw = &adapter->hw;
3536         int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3537
3538         if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3539             (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3540                 e_err(probe, "Invalid MTU setting\n");
3541                 return -EINVAL;
3542         }
3543
3544         /* Adapter-specific max frame size limits. */
3545         switch (hw->mac_type) {
3546         case e1000_undefined ... e1000_82542_rev2_1:
3547                 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3548                         e_err(probe, "Jumbo Frames not supported.\n");
3549                         return -EINVAL;
3550                 }
3551                 break;
3552         default:
3553                 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3554                 break;
3555         }
3556
3557         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3558                 msleep(1);
3559         /* e1000_down has a dependency on max_frame_size */
3560         hw->max_frame_size = max_frame;
3561         if (netif_running(netdev))
3562                 e1000_down(adapter);
3563
3564         /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3565          * means we reserve 2 more; this pushes us to allocate from the next
3566          * larger slab size.
3567          * i.e. RXBUFFER_2048 --> size-4096 slab
3568          * However, with the new *_jumbo_rx* routines, jumbo receives will use
3569          * fragmented skbs.
3570          */
3571
3572         if (max_frame <= E1000_RXBUFFER_2048)
3573                 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3574         else
3575 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3576                 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3577 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3578                 adapter->rx_buffer_len = PAGE_SIZE;
3579 #endif
3580
3581         /* adjust allocation if LPE protects us, and we aren't using SBP */
3582         if (!hw->tbi_compatibility_on &&
3583             ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3584              (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3585                 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3586
3587         pr_info("%s changing MTU from %d to %d\n",
3588                 netdev->name, netdev->mtu, new_mtu);
3589         netdev->mtu = new_mtu;
3590
3591         if (netif_running(netdev))
3592                 e1000_up(adapter);
3593         else
3594                 e1000_reset(adapter);
3595
3596         clear_bit(__E1000_RESETTING, &adapter->flags);
3597
3598         return 0;
3599 }
3600
3601 /**
3602  * e1000_update_stats - Update the board statistics counters
3603  * @adapter: board private structure
3604  **/
3605 void e1000_update_stats(struct e1000_adapter *adapter)
3606 {
3607         struct net_device *netdev = adapter->netdev;
3608         struct e1000_hw *hw = &adapter->hw;
3609         struct pci_dev *pdev = adapter->pdev;
3610         unsigned long flags;
3611         u16 phy_tmp;
3612
3613 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3614
3615         /* Prevent stats update while adapter is being reset, or if the pci
3616          * connection is down.
3617          */
3618         if (adapter->link_speed == 0)
3619                 return;
3620         if (pci_channel_offline(pdev))
3621                 return;
3622
3623         spin_lock_irqsave(&adapter->stats_lock, flags);
3624
3625         /* these counters are modified from e1000_tbi_adjust_stats,
3626          * called from the interrupt context, so they must only
3627          * be written while holding adapter->stats_lock
3628          */
3629
3630         adapter->stats.crcerrs += er32(CRCERRS);
3631         adapter->stats.gprc += er32(GPRC);
3632         adapter->stats.gorcl += er32(GORCL);
3633         adapter->stats.gorch += er32(GORCH);
3634         adapter->stats.bprc += er32(BPRC);
3635         adapter->stats.mprc += er32(MPRC);
3636         adapter->stats.roc += er32(ROC);
3637
3638         adapter->stats.prc64 += er32(PRC64);
3639         adapter->stats.prc127 += er32(PRC127);
3640         adapter->stats.prc255 += er32(PRC255);
3641         adapter->stats.prc511 += er32(PRC511);
3642         adapter->stats.prc1023 += er32(PRC1023);
3643         adapter->stats.prc1522 += er32(PRC1522);
3644
3645         adapter->stats.symerrs += er32(SYMERRS);
3646         adapter->stats.mpc += er32(MPC);
3647         adapter->stats.scc += er32(SCC);
3648         adapter->stats.ecol += er32(ECOL);
3649         adapter->stats.mcc += er32(MCC);
3650         adapter->stats.latecol += er32(LATECOL);
3651         adapter->stats.dc += er32(DC);
3652         adapter->stats.sec += er32(SEC);
3653         adapter->stats.rlec += er32(RLEC);
3654         adapter->stats.xonrxc += er32(XONRXC);
3655         adapter->stats.xontxc += er32(XONTXC);
3656         adapter->stats.xoffrxc += er32(XOFFRXC);
3657         adapter->stats.xofftxc += er32(XOFFTXC);
3658         adapter->stats.fcruc += er32(FCRUC);
3659         adapter->stats.gptc += er32(GPTC);
3660         adapter->stats.gotcl += er32(GOTCL);
3661         adapter->stats.gotch += er32(GOTCH);
3662         adapter->stats.rnbc += er32(RNBC);
3663         adapter->stats.ruc += er32(RUC);
3664         adapter->stats.rfc += er32(RFC);
3665         adapter->stats.rjc += er32(RJC);
3666         adapter->stats.torl += er32(TORL);
3667         adapter->stats.torh += er32(TORH);
3668         adapter->stats.totl += er32(TOTL);
3669         adapter->stats.toth += er32(TOTH);
3670         adapter->stats.tpr += er32(TPR);
3671
3672         adapter->stats.ptc64 += er32(PTC64);
3673         adapter->stats.ptc127 += er32(PTC127);
3674         adapter->stats.ptc255 += er32(PTC255);
3675         adapter->stats.ptc511 += er32(PTC511);
3676         adapter->stats.ptc1023 += er32(PTC1023);
3677         adapter->stats.ptc1522 += er32(PTC1522);
3678
3679         adapter->stats.mptc += er32(MPTC);
3680         adapter->stats.bptc += er32(BPTC);
3681
3682         /* used for adaptive IFS */
3683
3684         hw->tx_packet_delta = er32(TPT);
3685         adapter->stats.tpt += hw->tx_packet_delta;
3686         hw->collision_delta = er32(COLC);
3687         adapter->stats.colc += hw->collision_delta;
3688
3689         if (hw->mac_type >= e1000_82543) {
3690                 adapter->stats.algnerrc += er32(ALGNERRC);
3691                 adapter->stats.rxerrc += er32(RXERRC);
3692                 adapter->stats.tncrs += er32(TNCRS);
3693                 adapter->stats.cexterr += er32(CEXTERR);
3694                 adapter->stats.tsctc += er32(TSCTC);
3695                 adapter->stats.tsctfc += er32(TSCTFC);
3696         }
3697
3698         /* Fill out the OS statistics structure */
3699         netdev->stats.multicast = adapter->stats.mprc;
3700         netdev->stats.collisions = adapter->stats.colc;
3701
3702         /* Rx Errors */
3703
3704         /* RLEC on some newer hardware can be incorrect so build
3705          * our own version based on RUC and ROC
3706          */
3707         netdev->stats.rx_errors = adapter->stats.rxerrc +
3708                 adapter->stats.crcerrs + adapter->stats.algnerrc +
3709                 adapter->stats.ruc + adapter->stats.roc +
3710                 adapter->stats.cexterr;
3711         adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3712         netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3713         netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3714         netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3715         netdev->stats.rx_missed_errors = adapter->stats.mpc;
3716
3717         /* Tx Errors */
3718         adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3719         netdev->stats.tx_errors = adapter->stats.txerrc;
3720         netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3721         netdev->stats.tx_window_errors = adapter->stats.latecol;
3722         netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3723         if (hw->bad_tx_carr_stats_fd &&
3724             adapter->link_duplex == FULL_DUPLEX) {
3725                 netdev->stats.tx_carrier_errors = 0;
3726                 adapter->stats.tncrs = 0;
3727         }
3728
3729         /* Tx Dropped needs to be maintained elsewhere */
3730
3731         /* Phy Stats */
3732         if (hw->media_type == e1000_media_type_copper) {
3733                 if ((adapter->link_speed == SPEED_1000) &&
3734                    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3735                         phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3736                         adapter->phy_stats.idle_errors += phy_tmp;
3737                 }
3738
3739                 if ((hw->mac_type <= e1000_82546) &&
3740                    (hw->phy_type == e1000_phy_m88) &&
3741                    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3742                         adapter->phy_stats.receive_errors += phy_tmp;
3743         }
3744
3745         /* Management Stats */
3746         if (hw->has_smbus) {
3747                 adapter->stats.mgptc += er32(MGTPTC);
3748                 adapter->stats.mgprc += er32(MGTPRC);
3749                 adapter->stats.mgpdc += er32(MGTPDC);
3750         }
3751
3752         spin_unlock_irqrestore(&adapter->stats_lock, flags);
3753 }
3754
3755 /**
3756  * e1000_intr - Interrupt Handler
3757  * @irq: interrupt number
3758  * @data: pointer to a network interface device structure
3759  **/
3760 static irqreturn_t e1000_intr(int irq, void *data)
3761 {
3762         struct net_device *netdev = data;
3763         struct e1000_adapter *adapter = netdev_priv(netdev);
3764         struct e1000_hw *hw = &adapter->hw;
3765         u32 icr = er32(ICR);
3766
3767         if (unlikely((!icr)))
3768                 return IRQ_NONE;  /* Not our interrupt */
3769
3770         /* We might have caused the interrupt, but the above read cleared
3771          * it.  Also, in case the driver is down, there is nothing to do,
3772          * so return handled.
3773          */
3774         if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3775                 return IRQ_HANDLED;
3776
3777         if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3778                 hw->get_link_status = 1;
3779                 /* guard against interrupt when we're going down */
3780                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3781                         schedule_delayed_work(&adapter->watchdog_task, 1);
3782         }
3783
3784         /* disable interrupts, without the synchronize_irq bit */
3785         ew32(IMC, ~0);
3786         E1000_WRITE_FLUSH();
3787
3788         if (likely(napi_schedule_prep(&adapter->napi))) {
3789                 adapter->total_tx_bytes = 0;
3790                 adapter->total_tx_packets = 0;
3791                 adapter->total_rx_bytes = 0;
3792                 adapter->total_rx_packets = 0;
3793                 __napi_schedule(&adapter->napi);
3794         } else {
3795                 /* This really should not happen!  If it does, it is basically
3796                  * a bug, but not a hard error, so enable ints and continue.
3797                  */
3798                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3799                         e1000_irq_enable(adapter);
3800         }
3801
3802         return IRQ_HANDLED;
3803 }
3804
3805 /**
3806  * e1000_clean - NAPI Rx polling callback
3807  * @napi: napi struct containing our adapter
 * @budget: amount of work we are allowed to do this pass
3808  **/
3809 static int e1000_clean(struct napi_struct *napi, int budget)
3810 {
3811         struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3812                                                      napi);
3813         int tx_clean_complete = 0, work_done = 0;
3814
3815         tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3816
3817         adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3818
3819         if (!tx_clean_complete)
3820                 work_done = budget;
3821
3822         /* If budget not fully consumed, exit the polling mode */
3823         if (work_done < budget) {
3824                 if (likely(adapter->itr_setting & 3))
3825                         e1000_set_itr(adapter);
3826                 napi_complete(napi);
3827                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3828                         e1000_irq_enable(adapter);
3829         }
3830
3831         return work_done;
3832 }
3833
3834 /**
3835  * e1000_clean_tx_irq - Reclaim resources after transmit completes
3836  * @adapter: board private structure
 * @tx_ring: ring to clean
3837  **/
3838 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3839                                struct e1000_tx_ring *tx_ring)
3840 {
3841         struct e1000_hw *hw = &adapter->hw;
3842         struct net_device *netdev = adapter->netdev;
3843         struct e1000_tx_desc *tx_desc, *eop_desc;
3844         struct e1000_buffer *buffer_info;
3845         unsigned int i, eop;
3846         unsigned int count = 0;
3847         unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3848         unsigned int bytes_compl = 0, pkts_compl = 0;
3849
3850         i = tx_ring->next_to_clean;
3851         eop = tx_ring->buffer_info[i].next_to_watch;
3852         eop_desc = E1000_TX_DESC(*tx_ring, eop);
3853
3854         while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3855                (count < tx_ring->count)) {
3856                 bool cleaned = false;
3857                 rmb();  /* read buffer_info after eop_desc */
3858                 for ( ; !cleaned; count++) {
3859                         tx_desc = E1000_TX_DESC(*tx_ring, i);
3860                         buffer_info = &tx_ring->buffer_info[i];
3861                         cleaned = (i == eop);
3862
3863                         if (cleaned) {
3864                                 total_tx_packets += buffer_info->segs;
3865                                 total_tx_bytes += buffer_info->bytecount;
3866                                 if (buffer_info->skb) {
3867                                         bytes_compl += buffer_info->skb->len;
3868                                         pkts_compl++;
3869                                 }
3870
3871                         }
3872                         e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3873                         tx_desc->upper.data = 0;
3874
3875                         if (unlikely(++i == tx_ring->count)) i = 0;
3876                 }
3877
3878                 eop = tx_ring->buffer_info[i].next_to_watch;
3879                 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3880         }
3881
3882         tx_ring->next_to_clean = i;
3883
3884         netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3885
3886 #define TX_WAKE_THRESHOLD 32
3887         if (unlikely(count && netif_carrier_ok(netdev) &&
3888                      E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3889                 /* Make sure that anybody stopping the queue after this
3890                  * sees the new next_to_clean.
3891                  */
3892                 smp_mb();
3893
3894                 if (netif_queue_stopped(netdev) &&
3895                     !(test_bit(__E1000_DOWN, &adapter->flags))) {
3896                         netif_wake_queue(netdev);
3897                         ++adapter->restart_queue;
3898                 }
3899         }
3900
3901         if (adapter->detect_tx_hung) {
3902                 /* Detect a transmit hang in hardware; this serializes the
3903                  * check with the clearing of time_stamp and movement of i
3904                  */
3905                 adapter->detect_tx_hung = false;
3906                 if (tx_ring->buffer_info[eop].time_stamp &&
3907                     time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3908                                (adapter->tx_timeout_factor * HZ)) &&
3909                     !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3910
3911                         /* detected Tx unit hang */
3912                         e_err(drv, "Detected Tx Unit Hang\n"
3913                               "  Tx Queue             <%lu>\n"
3914                               "  TDH                  <%x>\n"
3915                               "  TDT                  <%x>\n"
3916                               "  next_to_use          <%x>\n"
3917                               "  next_to_clean        <%x>\n"
3918                               "buffer_info[next_to_clean]\n"
3919                               "  time_stamp           <%lx>\n"
3920                               "  next_to_watch        <%x>\n"
3921                               "  jiffies              <%lx>\n"
3922                               "  next_to_watch.status <%x>\n",
3923                                 (unsigned long)(tx_ring -
3924                                         adapter->tx_ring),
3925                                 readl(hw->hw_addr + tx_ring->tdh),
3926                                 readl(hw->hw_addr + tx_ring->tdt),
3927                                 tx_ring->next_to_use,
3928                                 tx_ring->next_to_clean,
3929                                 tx_ring->buffer_info[eop].time_stamp,
3930                                 eop,
3931                                 jiffies,
3932                                 eop_desc->upper.fields.status);
3933                         e1000_dump(adapter);
3934                         netif_stop_queue(netdev);
3935                 }
3936         }
3937         adapter->total_tx_bytes += total_tx_bytes;
3938         adapter->total_tx_packets += total_tx_packets;
3939         netdev->stats.tx_bytes += total_tx_bytes;
3940         netdev->stats.tx_packets += total_tx_packets;
3941         return count < tx_ring->count;
3942 }
3943
3944 /**
3945  * e1000_rx_checksum - Receive Checksum Offload for 82543
3946  * @adapter:     board private structure
3947  * @status_err:  receive descriptor status and error fields
3948  * @csum:        receive descriptor csum field
3949  * @skb:         socket buffer with received data
3950  **/
3951 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3952                               u32 csum, struct sk_buff *skb)
3953 {
3954         struct e1000_hw *hw = &adapter->hw;
3955         u16 status = (u16)status_err;
3956         u8 errors = (u8)(status_err >> 24);
3957
3958         skb_checksum_none_assert(skb);
3959
3960         /* 82543 or newer only */
3961         if (unlikely(hw->mac_type < e1000_82543)) return;
3962         /* Ignore Checksum bit is set */
3963         if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
3964         /* TCP/UDP checksum error bit is set */
3965         if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3966                 /* let the stack verify checksum errors */
3967                 adapter->hw_csum_err++;
3968                 return;
3969         }
3970         /* TCP/UDP Checksum has not been calculated */
3971         if (!(status & E1000_RXD_STAT_TCPCS))
3972                 return;
3973
3974         /* It must be a TCP or UDP packet with a valid checksum */
3975         if (likely(status & E1000_RXD_STAT_TCPCS)) {
3976                 /* TCP checksum is good */
3977                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3978         }
3979         adapter->hw_csum_good++;
3980 }
3981
3982 /**
3983  * e1000_consume_page - account a received page-sized buffer to an skb
3984  **/
3985 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
3986                                u16 length)
3987 {
3988         bi->page = NULL;
3989         skb->len += length;
3990         skb->data_len += length;
3991         skb->truesize += PAGE_SIZE;
3992 }
3993
3994 /**
3995  * e1000_receive_skb - helper function to handle rx indications
3996  * @adapter: board private structure
3997  * @status: descriptor status field as written by hardware
3998  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3999  * @skb: pointer to sk_buff to be indicated to stack
4000  */
4001 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4002                               __le16 vlan, struct sk_buff *skb)
4003 {
4004         skb->protocol = eth_type_trans(skb, adapter->netdev);
4005
4006         if (status & E1000_RXD_STAT_VP) {
4007                 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4008
4009                 __vlan_hwaccel_put_tag(skb, vid);
4010         }
4011         napi_gro_receive(&adapter->napi, skb);
4012 }
4013
4014 /**
4015  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4016  * @adapter: board private structure
4017  * @rx_ring: ring to clean
4018  * @work_done: amount of napi work completed this call
4019  * @work_to_do: max amount of work allowed for this call to do
4020  *
4021  * the return value indicates whether actual cleaning was done; there
4022  * is no guarantee that everything was cleaned
4023  */
4024 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4025                                      struct e1000_rx_ring *rx_ring,
4026                                      int *work_done, int work_to_do)
4027 {
4028         struct e1000_hw *hw = &adapter->hw;
4029         struct net_device *netdev = adapter->netdev;
4030         struct pci_dev *pdev = adapter->pdev;
4031         struct e1000_rx_desc *rx_desc, *next_rxd;
4032         struct e1000_buffer *buffer_info, *next_buffer;
4033         unsigned long irq_flags;
4034         u32 length;
4035         unsigned int i;
4036         int cleaned_count = 0;
4037         bool cleaned = false;
4038         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4039
4040         i = rx_ring->next_to_clean;
4041         rx_desc = E1000_RX_DESC(*rx_ring, i);
4042         buffer_info = &rx_ring->buffer_info[i];
4043
4044         while (rx_desc->status & E1000_RXD_STAT_DD) {
4045                 struct sk_buff *skb;
4046                 u8 status;
4047
4048                 if (*work_done >= work_to_do)
4049                         break;
4050                 (*work_done)++;
4051                 rmb(); /* read descriptor and rx_buffer_info after status DD */
4052
4053                 status = rx_desc->status;
4054                 skb = buffer_info->skb;
4055                 buffer_info->skb = NULL;
4056
4057                 if (++i == rx_ring->count) i = 0;
4058                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4059                 prefetch(next_rxd);
4060
4061                 next_buffer = &rx_ring->buffer_info[i];
4062
4063                 cleaned = true;
4064                 cleaned_count++;
4065                 dma_unmap_page(&pdev->dev, buffer_info->dma,
4066                                buffer_info->length, DMA_FROM_DEVICE);
4067                 buffer_info->dma = 0;
4068
4069                 length = le16_to_cpu(rx_desc->length);
4070
4071                 /* errors is only valid for DD + EOP descriptors */
4072                 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4073                     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4074                         u8 *mapped;
4075                         u8 last_byte;
4076
4077                         mapped = page_address(buffer_info->page);
4078                         last_byte = *(mapped + length - 1);
4079                         if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4080                                        last_byte)) {
4081                                 spin_lock_irqsave(&adapter->stats_lock,
4082                                                   irq_flags);
4083                                 e1000_tbi_adjust_stats(hw, &adapter->stats,
4084                                                        length, mapped);
4085                                 spin_unlock_irqrestore(&adapter->stats_lock,
4086                                                        irq_flags);
4087                                 length--;
4088                         } else {
4089                                 if (netdev->features & NETIF_F_RXALL)
4090                                         goto process_skb;
4091                                 /* recycle both page and skb */
4092                                 buffer_info->skb = skb;
4093                                 /* an error means any chain goes out the window
4094                                  * too
4095                                  */
4096                                 if (rx_ring->rx_skb_top)
4097                                         dev_kfree_skb(rx_ring->rx_skb_top);
4098                                 rx_ring->rx_skb_top = NULL;
4099                                 goto next_desc;
4100                         }
4101                 }
4102
4103 #define rxtop rx_ring->rx_skb_top
4104 process_skb:
4105                 if (!(status & E1000_RXD_STAT_EOP)) {
4106                         /* this descriptor is only the beginning (or middle) */
4107                         if (!rxtop) {
4108                                 /* this is the beginning of a chain */
4109                                 rxtop = skb;
4110                                 skb_fill_page_desc(rxtop, 0, buffer_info->page,
4111                                                    0, length);
4112                         } else {
4113                                 /* this is the middle of a chain */
4114                                 skb_fill_page_desc(rxtop,
4115                                     skb_shinfo(rxtop)->nr_frags,
4116                                     buffer_info->page, 0, length);
4117                                 /* re-use the skb, only consumed the page */
4118                                 buffer_info->skb = skb;
4119                         }
4120                         e1000_consume_page(buffer_info, rxtop, length);
4121                         goto next_desc;
4122                 } else {
4123                         if (rxtop) {
4124                                 /* end of the chain */
4125                                 skb_fill_page_desc(rxtop,
4126                                     skb_shinfo(rxtop)->nr_frags,
4127                                     buffer_info->page, 0, length);
4128                                 /* re-use the current skb, we only consumed the
4129                                  * page
4130                                  */
4131                                 buffer_info->skb = skb;
4132                                 skb = rxtop;
4133                                 rxtop = NULL;
4134                                 e1000_consume_page(buffer_info, skb, length);
4135                         } else {
4136                                 /* no chain, got EOP, this buf is the packet
4137                                  * copybreak to save the put_page/alloc_page
4138                                  */
4139                                 if (length <= copybreak &&
4140                                     skb_tailroom(skb) >= length) {
4141                                         u8 *vaddr;
4142                                         vaddr = kmap_atomic(buffer_info->page);
4143                                         memcpy(skb_tail_pointer(skb), vaddr,
4144                                                length);
4145                                         kunmap_atomic(vaddr);
4146                                         /* re-use the page, so don't erase
4147                                          * buffer_info->page
4148                                          */
4149                                         skb_put(skb, length);
4150                                 } else {
4151                                         skb_fill_page_desc(skb, 0,
4152                                                            buffer_info->page, 0,
4153                                                            length);
4154                                         e1000_consume_page(buffer_info, skb,
4155                                                            length);
4156                                 }
4157                         }
4158                 }
4159
4160                 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4161                 e1000_rx_checksum(adapter,
4162                                   (u32)(status) |
4163                                   ((u32)(rx_desc->errors) << 24),
4164                                   le16_to_cpu(rx_desc->csum), skb);
4165
4166                 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4167                 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4168                         pskb_trim(skb, skb->len - 4);
4169                 total_rx_packets++;
4170
4171                 /* eth type trans needs skb->data to point to something */
4172                 if (!pskb_may_pull(skb, ETH_HLEN)) {
4173                         e_err(drv, "pskb_may_pull failed.\n");
4174                         dev_kfree_skb(skb);
4175                         goto next_desc;
4176                 }
4177
4178                 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4179
4180 next_desc:
4181                 rx_desc->status = 0;
4182
4183                 /* return some buffers to hardware, one at a time is too slow */
4184                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4185                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4186                         cleaned_count = 0;
4187                 }
4188
4189                 /* use prefetched values */
4190                 rx_desc = next_rxd;
4191                 buffer_info = next_buffer;
4192         }
4193         rx_ring->next_to_clean = i;
4194
4195         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4196         if (cleaned_count)
4197                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4198
4199         adapter->total_rx_packets += total_rx_packets;
4200         adapter->total_rx_bytes += total_rx_bytes;
4201         netdev->stats.rx_bytes += total_rx_bytes;
4202         netdev->stats.rx_packets += total_rx_packets;
4203         return cleaned;
4204 }
4205
4206 /* this should improve performance for small packets with large amounts
4207  * of reassembly being done in the stack
4208  */
4209 static void e1000_check_copybreak(struct net_device *netdev,
4210                                  struct e1000_buffer *buffer_info,
4211                                  u32 length, struct sk_buff **skb)
4212 {
4213         struct sk_buff *new_skb;
4214
4215         if (length > copybreak)
4216                 return;
4217
4218         new_skb = netdev_alloc_skb_ip_align(netdev, length);
4219         if (!new_skb)
4220                 return;
4221
4222         skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
4223                                        (*skb)->data - NET_IP_ALIGN,
4224                                        length + NET_IP_ALIGN);
4225         /* save the skb in buffer_info as good */
4226         buffer_info->skb = *skb;
4227         *skb = new_skb;
4228 }
4229
4230 /**
4231  * e1000_clean_rx_irq - Send received data up the network stack; legacy
4232  * @adapter: board private structure
4233  * @rx_ring: ring to clean
4234  * @work_done: amount of napi work completed this call
4235  * @work_to_do: max amount of work allowed for this call to do
4236  */
4237 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4238                                struct e1000_rx_ring *rx_ring,
4239                                int *work_done, int work_to_do)
4240 {
4241         struct e1000_hw *hw = &adapter->hw;
4242         struct net_device *netdev = adapter->netdev;
4243         struct pci_dev *pdev = adapter->pdev;
4244         struct e1000_rx_desc *rx_desc, *next_rxd;
4245         struct e1000_buffer *buffer_info, *next_buffer;
4246         unsigned long flags;
4247         u32 length;
4248         unsigned int i;
4249         int cleaned_count = 0;
4250         bool cleaned = false;
4251         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4252
4253         i = rx_ring->next_to_clean;
4254         rx_desc = E1000_RX_DESC(*rx_ring, i);
4255         buffer_info = &rx_ring->buffer_info[i];
4256
4257         while (rx_desc->status & E1000_RXD_STAT_DD) {
4258                 struct sk_buff *skb;
4259                 u8 status;
4260
4261                 if (*work_done >= work_to_do)
4262                         break;
4263                 (*work_done)++;
4264                 rmb(); /* read descriptor and rx_buffer_info after status DD */
4265
4266                 status = rx_desc->status;
4267                 skb = buffer_info->skb;
4268                 buffer_info->skb = NULL;
4269
4270                 prefetch(skb->data - NET_IP_ALIGN);
4271
4272                 if (++i == rx_ring->count) i = 0;
4273                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4274                 prefetch(next_rxd);
4275
4276                 next_buffer = &rx_ring->buffer_info[i];
4277
4278                 cleaned = true;
4279                 cleaned_count++;
4280                 dma_unmap_single(&pdev->dev, buffer_info->dma,
4281                                  buffer_info->length, DMA_FROM_DEVICE);
4282                 buffer_info->dma = 0;
4283
4284                 length = le16_to_cpu(rx_desc->length);
4285                 /* !EOP means multiple descriptors were used to store a single
4286                  * packet; if that's the case we need to toss it.  In fact, we
4287                  * need to toss every packet with the EOP bit clear and the next
4288                  * frame that _does_ have the EOP bit set, as it is by
4289                  * definition only a frame fragment
4290                  */
4291                 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4292                         adapter->discarding = true;
4293
4294                 if (adapter->discarding) {
4295                         /* All receives must fit into a single buffer */
4296                         e_dbg("Receive packet consumed multiple buffers\n");
4297                         /* recycle */
4298                         buffer_info->skb = skb;
4299                         if (status & E1000_RXD_STAT_EOP)
4300                                 adapter->discarding = false;
4301                         goto next_desc;
4302                 }
4303
4304                 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4305                         u8 last_byte = *(skb->data + length - 1);
4306                         if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4307                                        last_byte)) {
4308                                 spin_lock_irqsave(&adapter->stats_lock, flags);
4309                                 e1000_tbi_adjust_stats(hw, &adapter->stats,
4310                                                        length, skb->data);
4311                                 spin_unlock_irqrestore(&adapter->stats_lock,
4312                                                        flags);
4313                                 length--;
4314                         } else {
4315                                 if (netdev->features & NETIF_F_RXALL)
4316                                         goto process_skb;
4317                                 /* recycle */
4318                                 buffer_info->skb = skb;
4319                                 goto next_desc;
4320                         }
4321                 }
4322
4323 process_skb:
4324                 total_rx_bytes += (length - 4); /* don't count FCS */
4325                 total_rx_packets++;
4326
4327                 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4328                         /* adjust length to remove Ethernet CRC, this must be
4329                          * done after the TBI_ACCEPT workaround above
4330                          */
4331                         length -= 4;
4332
4333                 e1000_check_copybreak(netdev, buffer_info, length, &skb);
4334
4335                 skb_put(skb, length);
4336
4337                 /* Receive Checksum Offload */
4338                 e1000_rx_checksum(adapter,
4339                                   (u32)(status) |
4340                                   ((u32)(rx_desc->errors) << 24),
4341                                   le16_to_cpu(rx_desc->csum), skb);
4342
4343                 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4344
4345 next_desc:
4346                 rx_desc->status = 0;
4347
4348                 /* return some buffers to hardware, one at a time is too slow */
4349                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4350                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4351                         cleaned_count = 0;
4352                 }
4353
4354                 /* use prefetched values */
4355                 rx_desc = next_rxd;
4356                 buffer_info = next_buffer;
4357         }
4358         rx_ring->next_to_clean = i;
4359
4360         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4361         if (cleaned_count)
4362                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4363
4364         adapter->total_rx_packets += total_rx_packets;
4365         adapter->total_rx_bytes += total_rx_bytes;
4366         netdev->stats.rx_bytes += total_rx_bytes;
4367         netdev->stats.rx_packets += total_rx_packets;
4368         return cleaned;
4369 }
4370
4371 /**
4372  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4373  * @adapter: address of board private structure
4374  * @rx_ring: pointer to receive ring structure
4375  * @cleaned_count: number of buffers to allocate this pass
4376  **/
4377 static void
4378 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4379                              struct e1000_rx_ring *rx_ring, int cleaned_count)
4380 {
4381         struct net_device *netdev = adapter->netdev;
4382         struct pci_dev *pdev = adapter->pdev;
4383         struct e1000_rx_desc *rx_desc;
4384         struct e1000_buffer *buffer_info;
4385         struct sk_buff *skb;
4386         unsigned int i;
4387         unsigned int bufsz = 256 - 16; /* for skb_reserve */
4388
4389         i = rx_ring->next_to_use;
4390         buffer_info = &rx_ring->buffer_info[i];
4391
4392         while (cleaned_count--) {
4393                 skb = buffer_info->skb;
4394                 if (skb) {
4395                         skb_trim(skb, 0);
4396                         goto check_page;
4397                 }
4398
4399                 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4400                 if (unlikely(!skb)) {
4401                         /* Better luck next round */
4402                         adapter->alloc_rx_buff_failed++;
4403                         break;
4404                 }
4405
4406                 buffer_info->skb = skb;
4407                 buffer_info->length = adapter->rx_buffer_len;
4408 check_page:
4409                 /* allocate a new page if necessary */
4410                 if (!buffer_info->page) {
4411                         buffer_info->page = alloc_page(GFP_ATOMIC);
4412                         if (unlikely(!buffer_info->page)) {
4413                                 adapter->alloc_rx_buff_failed++;
4414                                 break;
4415                         }
4416                 }
4417
4418                 if (!buffer_info->dma) {
4419                         buffer_info->dma = dma_map_page(&pdev->dev,
4420                                                         buffer_info->page, 0,
4421                                                         buffer_info->length,
4422                                                         DMA_FROM_DEVICE);
4423                         if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4424                                 put_page(buffer_info->page);
4425                                 dev_kfree_skb(skb);
4426                                 buffer_info->page = NULL;
4427                                 buffer_info->skb = NULL;
4428                                 buffer_info->dma = 0;
4429                                 adapter->alloc_rx_buff_failed++;
4430                                 break; /* while !buffer_info->skb */
4431                         }
4432                 }
4433
4434                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4435                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4436
4437                 if (unlikely(++i == rx_ring->count))
4438                         i = 0;
4439                 buffer_info = &rx_ring->buffer_info[i];
4440         }
4441
4442         if (likely(rx_ring->next_to_use != i)) {
4443                 rx_ring->next_to_use = i;
4444                 if (unlikely(i-- == 0))
4445                         i = (rx_ring->count - 1);
4446
4447                 /* Force memory writes to complete before letting h/w
4448                  * know there are new descriptors to fetch.  (Only
4449                  * applicable for weak-ordered memory model archs,
4450                  * such as IA-64).
4451                  */
4452                 wmb();
4453                 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4454         }
4455 }
4456
4457 /**
4458  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4459  * @adapter: address of board private structure
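 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass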
4460  **/
4461 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4462                                    struct e1000_rx_ring *rx_ring,
4463                                    int cleaned_count)
4464 {
4465         struct e1000_hw *hw = &adapter->hw;
4466         struct net_device *netdev = adapter->netdev;
4467         struct pci_dev *pdev = adapter->pdev;
4468         struct e1000_rx_desc *rx_desc;
4469         struct e1000_buffer *buffer_info;
4470         struct sk_buff *skb;
4471         unsigned int i;
4472         unsigned int bufsz = adapter->rx_buffer_len;
4473
4474         i = rx_ring->next_to_use;
4475         buffer_info = &rx_ring->buffer_info[i];
4476
4477         while (cleaned_count--) {
4478                 skb = buffer_info->skb;
4479                 if (skb) {
4480                         skb_trim(skb, 0);
4481                         goto map_skb;
4482                 }
4483
4484                 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4485                 if (unlikely(!skb)) {
4486                         /* Better luck next round */
4487                         adapter->alloc_rx_buff_failed++;
4488                         break;
4489                 }
4490
4491                 /* Fix for errata 23, can't cross 64kB boundary */
4492                 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4493                         struct sk_buff *oldskb = skb;
4494                         e_err(rx_err, "skb align check failed: %u bytes at "
4495                               "%p\n", bufsz, skb->data);
4496                         /* Try again, without freeing the previous */
4497                         skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4498                         /* Failed allocation, critical failure */
4499                         if (!skb) {
4500                                 dev_kfree_skb(oldskb);
4501                                 adapter->alloc_rx_buff_failed++;
4502                                 break;
4503                         }
4504
4505                         if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4506                                 /* give up */
4507                                 dev_kfree_skb(skb);
4508                                 dev_kfree_skb(oldskb);
4509                                 adapter->alloc_rx_buff_failed++;
4510                                 break; /* while !buffer_info->skb */
4511                         }
4512
4513                         /* Use new allocation */
4514                         dev_kfree_skb(oldskb);
4515                 }
4516                 buffer_info->skb = skb;
4517                 buffer_info->length = adapter->rx_buffer_len;
4518 map_skb:
4519                 buffer_info->dma = dma_map_single(&pdev->dev,
4520                                                   skb->data,
4521                                                   buffer_info->length,
4522                                                   DMA_FROM_DEVICE);
4523                 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4524                         dev_kfree_skb(skb);
4525                         buffer_info->skb = NULL;
4526                         buffer_info->dma = 0;
4527                         adapter->alloc_rx_buff_failed++;
4528                         break; /* while !buffer_info->skb */
4529                 }
4530
4531                 /* XXX if it was allocated cleanly it will never map to a
4532                  * boundary crossing
4533                  */
4534
4535                 /* Fix for errata 23, can't cross 64kB boundary */
4536                 if (!e1000_check_64k_bound(adapter,
4537                                         (void *)(unsigned long)buffer_info->dma,
4538                                         adapter->rx_buffer_len)) {
4539                         e_err(rx_err, "dma align check failed: %u bytes at "
4540                               "%p\n", adapter->rx_buffer_len,
4541                               (void *)(unsigned long)buffer_info->dma);
4542                         dev_kfree_skb(skb);
4543                         buffer_info->skb = NULL;
4544
4545                         dma_unmap_single(&pdev->dev, buffer_info->dma,
4546                                          adapter->rx_buffer_len,
4547                                          DMA_FROM_DEVICE);
4548                         buffer_info->dma = 0;
4549
4550                         adapter->alloc_rx_buff_failed++;
4551                         break; /* while !buffer_info->skb */
4552                 }
4553                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4554                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4555
4556                 if (unlikely(++i == rx_ring->count))
4557                         i = 0;
4558                 buffer_info = &rx_ring->buffer_info[i];
4559         }
4560
4561         if (likely(rx_ring->next_to_use != i)) {
4562                 rx_ring->next_to_use = i;
4563                 if (unlikely(i-- == 0))
4564                         i = (rx_ring->count - 1);
4565
4566                 /* Force memory writes to complete before letting h/w
4567                  * know there are new descriptors to fetch.  (Only
4568                  * applicable for weak-ordered memory model archs,
4569                  * such as IA-64).
4570                  */
4571                 wmb();
4572                 writel(i, hw->hw_addr + rx_ring->rdt);
4573         }
4574 }
4575
4576 /**
4577  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4578  * @adapter: board private structure
4579  **/
4580 static void e1000_smartspeed(struct e1000_adapter *adapter)
4581 {
4582         struct e1000_hw *hw = &adapter->hw;
4583         u16 phy_status;
4584         u16 phy_ctrl;
4585
4586         if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4587            !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4588                 return;
4589
4590         if (adapter->smartspeed == 0) {
4591                 /* If Master/Slave config fault is asserted twice,
4592                  * we assume back-to-back
4593                  */
4594                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4595                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4596                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4597                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4598                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4599                 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4600                         phy_ctrl &= ~CR_1000T_MS_ENABLE;
4601                         e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4602                                             phy_ctrl);
4603                         adapter->smartspeed++;
4604                         if (!e1000_phy_setup_autoneg(hw) &&
4605                            !e1000_read_phy_reg(hw, PHY_CTRL,
4606                                                &phy_ctrl)) {
4607                                 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4608                                              MII_CR_RESTART_AUTO_NEG);
4609                                 e1000_write_phy_reg(hw, PHY_CTRL,
4610                                                     phy_ctrl);
4611                         }
4612                 }
4613                 return;
4614         } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4615                 /* If still no link, perhaps using 2/3 pair cable */
4616                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4617                 phy_ctrl |= CR_1000T_MS_ENABLE;
4618                 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4619                 if (!e1000_phy_setup_autoneg(hw) &&
4620                    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4621                         phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4622                                      MII_CR_RESTART_AUTO_NEG);
4623                         e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4624                 }
4625         }
4626         /* Restart process after E1000_SMARTSPEED_MAX iterations */
4627         if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4628                 adapter->smartspeed = 0;
4629 }
4630
4631 /**
4632  * e1000_ioctl - handle ioctl requests for the interface
4633  * @netdev: network interface device structure
4634  * @ifr: interface request structure
4635  * @cmd: ioctl command to execute
4636  **/
4637 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4638 {
4639         switch (cmd) {
4640         case SIOCGMIIPHY:
4641         case SIOCGMIIREG:
4642         case SIOCSMIIREG:
4643                 return e1000_mii_ioctl(netdev, ifr, cmd);
4644         default:
4645                 return -EOPNOTSUPP;
4646         }
4647 }
4648
4649 /**
4650  * e1000_mii_ioctl - handle MII register ioctl requests
4651  * @netdev: network interface device structure
4652  * @ifr: interface request structure containing the MII register data
4653  * @cmd: one of SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
4654  **/
4655 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4656                            int cmd)
4657 {
4658         struct e1000_adapter *adapter = netdev_priv(netdev);
4659         struct e1000_hw *hw = &adapter->hw;
4660         struct mii_ioctl_data *data = if_mii(ifr);
4661         int retval;
4662         u16 mii_reg;
4663         unsigned long flags;
4664
4665         if (hw->media_type != e1000_media_type_copper)
4666                 return -EOPNOTSUPP;
4667
4668         switch (cmd) {
4669         case SIOCGMIIPHY:
4670                 data->phy_id = hw->phy_addr;
4671                 break;
4672         case SIOCGMIIREG:
4673                 spin_lock_irqsave(&adapter->stats_lock, flags);
4674                 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4675                                    &data->val_out)) {
4676                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4677                         return -EIO;
4678                 }
4679                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4680                 break;
4681         case SIOCSMIIREG:
4682                 if (data->reg_num & ~(0x1F))
4683                         return -EFAULT;
4684                 mii_reg = data->val_in;
4685                 spin_lock_irqsave(&adapter->stats_lock, flags);
4686                 if (e1000_write_phy_reg(hw, data->reg_num,
4687                                         mii_reg)) {
4688                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4689                         return -EIO;
4690                 }
4691                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4692                 if (hw->media_type == e1000_media_type_copper) {
4693                         switch (data->reg_num) {
4694                         case PHY_CTRL:
4695                                 if (mii_reg & MII_CR_POWER_DOWN)
4696                                         break;
4697                                 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4698                                         hw->autoneg = 1;
4699                                         hw->autoneg_advertised = 0x2F;
4700                                 } else {
4701                                         u32 speed;
4702                                         if (mii_reg & 0x40)
4703                                                 speed = SPEED_1000;
4704                                         else if (mii_reg & 0x2000)
4705                                                 speed = SPEED_100;
4706                                         else
4707                                                 speed = SPEED_10;
4708                                         retval = e1000_set_spd_dplx(
4709                                                 adapter, speed,
4710                                                 ((mii_reg & 0x100)
4711                                                  ? DUPLEX_FULL :
4712                                                  DUPLEX_HALF));
4713                                         if (retval)
4714                                                 return retval;
4715                                 }
4716                                 if (netif_running(adapter->netdev))
4717                                         e1000_reinit_locked(adapter);
4718                                 else
4719                                         e1000_reset(adapter);
4720                                 break;
4721                         case M88E1000_PHY_SPEC_CTRL:
4722                         case M88E1000_EXT_PHY_SPEC_CTRL:
4723                                 if (e1000_phy_reset(hw))
4724                                         return -EIO;
4725                                 break;
4726                         }
4727                 } else {
4728                         switch (data->reg_num) {
4729                         case PHY_CTRL:
4730                                 if (mii_reg & MII_CR_POWER_DOWN)
4731                                         break;
4732                                 if (netif_running(adapter->netdev))
4733                                         e1000_reinit_locked(adapter);
4734                                 else
4735                                         e1000_reset(adapter);
4736                                 break;
4737                         }
4738                 }
4739                 break;
4740         default:
4741                 return -EOPNOTSUPP;
4742         }
4743         return E1000_SUCCESS;
4744 }
4745
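/* The helpers below are callbacks exported to the shared e1000_hw code: each
 * recovers the owning adapter from hw->back and forwards the request to the
 * PCI core (MWI enable/disable, PCI-X MMRBC accessors) or to raw I/O port
 * writes.
 */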
4746 void e1000_pci_set_mwi(struct e1000_hw *hw)
4747 {
4748         struct e1000_adapter *adapter = hw->back;
4749         int ret_val = pci_set_mwi(adapter->pdev);
4750
4751         if (ret_val)
4752                 e_err(probe, "Error in setting MWI\n");
4753 }
4754
4755 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4756 {
4757         struct e1000_adapter *adapter = hw->back;
4758
4759         pci_clear_mwi(adapter->pdev);
4760 }
4761
4762 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4763 {
4764         struct e1000_adapter *adapter = hw->back;
4765         return pcix_get_mmrbc(adapter->pdev);
4766 }
4767
4768 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4769 {
4770         struct e1000_adapter *adapter = hw->back;
4771         pcix_set_mmrbc(adapter->pdev, mmrbc);
4772 }
4773
4774 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4775 {
4776         outl(value, port);
4777 }
4778
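/* e1000_vlan_used - report whether any VLAN ID is currently registered in
 * the adapter's active_vlans bitmap
 */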
4779 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4780 {
4781         u16 vid;
4782
4783         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4784                 return true;
4785         return false;
4786 }
4787
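/* __e1000_vlan_mode - program CTRL.VME so hardware VLAN tag insertion and
 * stripping match the requested netdev features; callers handle IRQ masking
 */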
4788 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4789                               netdev_features_t features)
4790 {
4791         struct e1000_hw *hw = &adapter->hw;
4792         u32 ctrl;
4793
4794         ctrl = er32(CTRL);
4795         if (features & NETIF_F_HW_VLAN_RX) {
4796                 /* enable VLAN tag insert/strip */
4797                 ctrl |= E1000_CTRL_VME;
4798         } else {
4799                 /* disable VLAN tag insert/strip */
4800                 ctrl &= ~E1000_CTRL_VME;
4801         }
4802         ew32(CTRL, ctrl);
4803 }
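
/* e1000_vlan_filter_on_off - enable or disable VLAN receive filtering
 * (RCTL.VFE) with interrupts masked; also refreshes the stripping mode and,
 * when enabling, the management VLAN
 */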
4804 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4805                                      bool filter_on)
4806 {
4807         struct e1000_hw *hw = &adapter->hw;
4808         u32 rctl;
4809
4810         if (!test_bit(__E1000_DOWN, &adapter->flags))
4811                 e1000_irq_disable(adapter);
4812
4813         __e1000_vlan_mode(adapter, adapter->netdev->features);
4814         if (filter_on) {
4815                 /* enable VLAN receive filtering */
4816                 rctl = er32(RCTL);
4817                 rctl &= ~E1000_RCTL_CFIEN;
4818                 if (!(adapter->netdev->flags & IFF_PROMISC))
4819                         rctl |= E1000_RCTL_VFE;
4820                 ew32(RCTL, rctl);
4821                 e1000_update_mng_vlan(adapter);
4822         } else {
4823                 /* disable VLAN receive filtering */
4824                 rctl = er32(RCTL);
4825                 rctl &= ~E1000_RCTL_VFE;
4826                 ew32(RCTL, rctl);
4827         }
4828
4829         if (!test_bit(__E1000_DOWN, &adapter->flags))
4830                 e1000_irq_enable(adapter);
4831 }
4832
4833 static void e1000_vlan_mode(struct net_device *netdev,
4834                             netdev_features_t features)
4835 {
4836         struct e1000_adapter *adapter = netdev_priv(netdev);
4837
4838         if (!test_bit(__E1000_DOWN, &adapter->flags))
4839                 e1000_irq_disable(adapter);
4840
4841         __e1000_vlan_mode(adapter, features);
4842
4843         if (!test_bit(__E1000_DOWN, &adapter->flags))
4844                 e1000_irq_enable(adapter);
4845 }
4846
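/* e1000_vlan_rx_add_vid - add a VLAN ID to the hardware VLAN filter table
 * (VFTA) and remember it in active_vlans; turns filtering on for the first
 * VLAN registered
 */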
4847 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
4848 {
4849         struct e1000_adapter *adapter = netdev_priv(netdev);
4850         struct e1000_hw *hw = &adapter->hw;
4851         u32 vfta, index;
4852
4853         if ((hw->mng_cookie.status &
4854              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4855             (vid == adapter->mng_vlan_id))
4856                 return 0;
4857
4858         if (!e1000_vlan_used(adapter))
4859                 e1000_vlan_filter_on_off(adapter, true);
4860
4861         /* add VID to filter table */
4862         index = (vid >> 5) & 0x7F;
4863         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4864         vfta |= (1 << (vid & 0x1F));
4865         e1000_write_vfta(hw, index, vfta);
4866
4867         set_bit(vid, adapter->active_vlans);
4868
4869         return 0;
4870 }
4871
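/* e1000_vlan_rx_kill_vid - clear a VLAN ID from the hardware VLAN filter
 * table (VFTA) and from active_vlans; turns filtering off again once no
 * VLANs remain
 */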
4872 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4873 {
4874         struct e1000_adapter *adapter = netdev_priv(netdev);
4875         struct e1000_hw *hw = &adapter->hw;
4876         u32 vfta, index;
4877
4878         if (!test_bit(__E1000_DOWN, &adapter->flags))
4879                 e1000_irq_disable(adapter);
4880         if (!test_bit(__E1000_DOWN, &adapter->flags))
4881                 e1000_irq_enable(adapter);
4882
4883         /* remove VID from filter table */
4884         index = (vid >> 5) & 0x7F;
4885         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4886         vfta &= ~(1 << (vid & 0x1F));
4887         e1000_write_vfta(hw, index, vfta);
4888
4889         clear_bit(vid, adapter->active_vlans);
4890
4891         if (!e1000_vlan_used(adapter))
4892                 e1000_vlan_filter_on_off(adapter, false);
4893
4894         return 0;
4895 }
4896
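/* e1000_restore_vlan - re-program the VLAN filter table from active_vlans,
 * e.g. after a reset has cleared the hardware state
 */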
4897 static void e1000_restore_vlan(struct e1000_adapter *adapter)
4898 {
4899         u16 vid;
4900
4901         if (!e1000_vlan_used(adapter))
4902                 return;
4903
4904         e1000_vlan_filter_on_off(adapter, true);
4905         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4906                 e1000_vlan_rx_add_vid(adapter->netdev, vid);
4907 }
4908
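/* e1000_set_spd_dplx - force a specific speed/duplex setting (autoneg is
 * used for 1000/Full); returns 0 on success or -EINVAL for unsupported
 * combinations
 */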
4909 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
4910 {
4911         struct e1000_hw *hw = &adapter->hw;
4912
4913         hw->autoneg = 0;
4914
4915         /* Make sure dplx is at most 1 bit and lsb of speed is not set
4916          * for the switch() below to work
4917          */
4918         if ((spd & 1) || (dplx & ~1))
4919                 goto err_inval;
4920
4921         /* Fiber NICs only allow 1000 Mbps Full duplex */
4922         if ((hw->media_type == e1000_media_type_fiber) &&
4923             spd != SPEED_1000 &&
4924             dplx != DUPLEX_FULL)
4925                 goto err_inval;
4926
4927         switch (spd + dplx) {
4928         case SPEED_10 + DUPLEX_HALF:
4929                 hw->forced_speed_duplex = e1000_10_half;
4930                 break;
4931         case SPEED_10 + DUPLEX_FULL:
4932                 hw->forced_speed_duplex = e1000_10_full;
4933                 break;
4934         case SPEED_100 + DUPLEX_HALF:
4935                 hw->forced_speed_duplex = e1000_100_half;
4936                 break;
4937         case SPEED_100 + DUPLEX_FULL:
4938                 hw->forced_speed_duplex = e1000_100_full;
4939                 break;
4940         case SPEED_1000 + DUPLEX_FULL:
4941                 hw->autoneg = 1;
4942                 hw->autoneg_advertised = ADVERTISE_1000_FULL;
4943                 break;
4944         case SPEED_1000 + DUPLEX_HALF: /* not supported */
4945         default:
4946                 goto err_inval;
4947         }
4948
4949         /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
4950         hw->mdix = AUTO_ALL_MODES;
4951
4952         return 0;
4953
4954 err_inval:
4955         e_err(probe, "Unsupported Speed/Duplex configuration\n");
4956         return -EINVAL;
4957 }
4958
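/* __e1000_shutdown - common suspend/shutdown path: stops the interface,
 * programs the wake-up filters (WUC/WUFC) when Wake-on-LAN is configured and
 * reports through *enable_wake whether PME should be armed
 */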
4959 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4960 {
4961         struct net_device *netdev = pci_get_drvdata(pdev);
4962         struct e1000_adapter *adapter = netdev_priv(netdev);
4963         struct e1000_hw *hw = &adapter->hw;
4964         u32 ctrl, ctrl_ext, rctl, status;
4965         u32 wufc = adapter->wol;
4966 #ifdef CONFIG_PM
4967         int retval = 0;
4968 #endif
4969
4970         netif_device_detach(netdev);
4971
4972         if (netif_running(netdev)) {
4973                 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
4974                 e1000_down(adapter);
4975         }
4976
4977 #ifdef CONFIG_PM
4978         retval = pci_save_state(pdev);
4979         if (retval)
4980                 return retval;
4981 #endif
4982
4983         status = er32(STATUS);
4984         if (status & E1000_STATUS_LU)
4985                 wufc &= ~E1000_WUFC_LNKC;
4986
4987         if (wufc) {
4988                 e1000_setup_rctl(adapter);
4989                 e1000_set_rx_mode(netdev);
4990
4991                 rctl = er32(RCTL);
4992
4993                 /* turn on all-multi mode if wake on multicast is enabled */
4994                 if (wufc & E1000_WUFC_MC)
4995                         rctl |= E1000_RCTL_MPE;
4996
4997                 /* enable receives in the hardware */
4998                 ew32(RCTL, rctl | E1000_RCTL_EN);
4999
5000                 if (hw->mac_type >= e1000_82540) {
5001                         ctrl = er32(CTRL);
5002                         /* advertise wake from D3Cold */
5003                         #define E1000_CTRL_ADVD3WUC 0x00100000
5004                         /* phy power management enable */
5005                         #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5006                         ctrl |= E1000_CTRL_ADVD3WUC |
5007                                 E1000_CTRL_EN_PHY_PWR_MGMT;
5008                         ew32(CTRL, ctrl);
5009                 }
5010
5011                 if (hw->media_type == e1000_media_type_fiber ||
5012                     hw->media_type == e1000_media_type_internal_serdes) {
5013                         /* keep the laser running in D3 */
5014                         ctrl_ext = er32(CTRL_EXT);
5015                         ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5016                         ew32(CTRL_EXT, ctrl_ext);
5017                 }
5018
5019                 ew32(WUC, E1000_WUC_PME_EN);
5020                 ew32(WUFC, wufc);
5021         } else {
5022                 ew32(WUC, 0);
5023                 ew32(WUFC, 0);
5024         }
5025
5026         e1000_release_manageability(adapter);
5027
5028         *enable_wake = !!wufc;
5029
5030         /* make sure adapter isn't asleep if manageability is enabled */
5031         if (adapter->en_mng_pt)
5032                 *enable_wake = true;
5033
5034         if (netif_running(netdev))
5035                 e1000_free_irq(adapter);
5036
5037         pci_disable_device(pdev);
5038
5039         return 0;
5040 }
5041
5042 #ifdef CONFIG_PM
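/* e1000_suspend - PCI suspend callback; runs the common shutdown path and
 * then places the device in the appropriate low-power state depending on
 * whether wake-up was requested
 */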
5043 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5044 {
5045         int retval;
5046         bool wake;
5047
5048         retval = __e1000_shutdown(pdev, &wake);
5049         if (retval)
5050                 return retval;
5051
5052         if (wake) {
5053                 pci_prepare_to_sleep(pdev);
5054         } else {
5055                 pci_wake_from_d3(pdev, false);
5056                 pci_set_power_state(pdev, PCI_D3hot);
5057         }
5058
5059         return 0;
5060 }
5061
5062 static int e1000_resume(struct pci_dev *pdev)
5063 {
5064         struct net_device *netdev = pci_get_drvdata(pdev);
5065         struct e1000_adapter *adapter = netdev_priv(netdev);
5066         struct e1000_hw *hw = &adapter->hw;
5067         int err;
5068
5069         pci_set_power_state(pdev, PCI_D0);
5070         pci_restore_state(pdev);
5071         pci_save_state(pdev);
5072
5073         if (adapter->need_ioport)
5074                 err = pci_enable_device(pdev);
5075         else
5076                 err = pci_enable_device_mem(pdev);
5077         if (err) {
5078                 pr_err("Cannot enable PCI device from suspend\n");
5079                 return err;
5080         }
5081         pci_set_master(pdev);
5082
5083         pci_enable_wake(pdev, PCI_D3hot, 0);
5084         pci_enable_wake(pdev, PCI_D3cold, 0);
5085
5086         if (netif_running(netdev)) {
5087                 err = e1000_request_irq(adapter);
5088                 if (err)
5089                         return err;
5090         }
5091
5092         e1000_power_up_phy(adapter);
5093         e1000_reset(adapter);
5094         ew32(WUS, ~0);
5095
5096         e1000_init_manageability(adapter);
5097
5098         if (netif_running(netdev))
5099                 e1000_up(adapter);
5100
5101         netif_device_attach(netdev);
5102
5103         return 0;
5104 }
5105 #endif
5106
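/* e1000_shutdown - PCI shutdown hook; reuses the common shutdown path and
 * arms wake-from-D3 only when the system is actually powering off
 */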
5107 static void e1000_shutdown(struct pci_dev *pdev)
5108 {
5109         bool wake;
5110
5111         __e1000_shutdown(pdev, &wake);
5112
5113         if (system_state == SYSTEM_POWER_OFF) {
5114                 pci_wake_from_d3(pdev, wake);
5115                 pci_set_power_state(pdev, PCI_D3hot);
5116         }
5117 }
5118
5119 #ifdef CONFIG_NET_POLL_CONTROLLER
5120 /* Polling 'interrupt' - used by things like netconsole to send skbs
5121  * without having to re-enable interrupts. It's not called while
5122  * the interrupt routine is executing.
5123  */
5124 static void e1000_netpoll(struct net_device *netdev)
5125 {
5126         struct e1000_adapter *adapter = netdev_priv(netdev);
5127
5128         disable_irq(adapter->pdev->irq);
5129         e1000_intr(adapter->pdev->irq, netdev);
5130         enable_irq(adapter->pdev->irq);
5131 }
5132 #endif
5133
5134 /**
5135  * e1000_io_error_detected - called when PCI error is detected
5136  * @pdev: Pointer to PCI device
5137  * @state: The current pci connection state
5138  *
5139  * This function is called after a PCI bus error affecting
5140  * this device has been detected.
5141  */
5142 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5143                                                 pci_channel_state_t state)
5144 {
5145         struct net_device *netdev = pci_get_drvdata(pdev);
5146         struct e1000_adapter *adapter = netdev_priv(netdev);
5147
5148         netif_device_detach(netdev);
5149
5150         if (state == pci_channel_io_perm_failure)
5151                 return PCI_ERS_RESULT_DISCONNECT;
5152
5153         if (netif_running(netdev))
5154                 e1000_down(adapter);
5155         pci_disable_device(pdev);
5156
5157         /* Request a slot reset. */
5158         return PCI_ERS_RESULT_NEED_RESET;
5159 }
5160
5161 /**
5162  * e1000_io_slot_reset - called after the pci bus has been reset.
5163  * @pdev: Pointer to PCI device
5164  *
5165  * Restart the card from scratch, as if from a cold-boot. Implementation
5166  * resembles the first-half of the e1000_resume routine.
5167  */
5168 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5169 {
5170         struct net_device *netdev = pci_get_drvdata(pdev);
5171         struct e1000_adapter *adapter = netdev_priv(netdev);
5172         struct e1000_hw *hw = &adapter->hw;
5173         int err;
5174
5175         if (adapter->need_ioport)
5176                 err = pci_enable_device(pdev);
5177         else
5178                 err = pci_enable_device_mem(pdev);
5179         if (err) {
5180                 pr_err("Cannot re-enable PCI device after reset.\n");
5181                 return PCI_ERS_RESULT_DISCONNECT;
5182         }
5183         pci_set_master(pdev);
5184
5185         pci_enable_wake(pdev, PCI_D3hot, 0);
5186         pci_enable_wake(pdev, PCI_D3cold, 0);
5187
5188         e1000_reset(adapter);
5189         ew32(WUS, ~0);
5190
5191         return PCI_ERS_RESULT_RECOVERED;
5192 }
5193
5194 /**
5195  * e1000_io_resume - called when traffic can start flowing again.
5196  * @pdev: Pointer to PCI device
5197  *
5198  * This callback is called when the error recovery driver tells us that
5199  * its OK to resume normal operation. Implementation resembles the
5200  * second-half of the e1000_resume routine.
5201  */
5202 static void e1000_io_resume(struct pci_dev *pdev)
5203 {
5204         struct net_device *netdev = pci_get_drvdata(pdev);
5205         struct e1000_adapter *adapter = netdev_priv(netdev);
5206
5207         e1000_init_manageability(adapter);
5208
5209         if (netif_running(netdev)) {
5210                 if (e1000_up(adapter)) {
5211                         pr_info("can't bring device back up after reset\n");
5212                         return;
5213                 }
5214         }
5215
5216         netif_device_attach(netdev);
5217 }
5218
5219 /* e1000_main.c */