drivers/net/ethernet/intel/igb/e1000_82575.c
1 /*******************************************************************************
2
3   Intel(R) Gigabit Ethernet Linux driver
4   Copyright(c) 2007-2013 Intel Corporation.
5
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21
22   Contact Information:
23   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26 *******************************************************************************/
27
28 /* e1000_82575
29  * e1000_82576
30  */
31
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34 #include <linux/types.h>
35 #include <linux/if_ether.h>
36 #include <linux/i2c.h>
37
38 #include "e1000_mac.h"
39 #include "e1000_82575.h"
40 #include "e1000_i210.h"
41
42 static s32  igb_get_invariants_82575(struct e1000_hw *);
43 static s32  igb_acquire_phy_82575(struct e1000_hw *);
44 static void igb_release_phy_82575(struct e1000_hw *);
45 static s32  igb_acquire_nvm_82575(struct e1000_hw *);
46 static void igb_release_nvm_82575(struct e1000_hw *);
47 static s32  igb_check_for_link_82575(struct e1000_hw *);
48 static s32  igb_get_cfg_done_82575(struct e1000_hw *);
49 static s32  igb_init_hw_82575(struct e1000_hw *);
50 static s32  igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
51 static s32  igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
52 static s32  igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
53 static s32  igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
54 static s32  igb_reset_hw_82575(struct e1000_hw *);
55 static s32  igb_reset_hw_82580(struct e1000_hw *);
56 static s32  igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
57 static s32  igb_set_d0_lplu_state_82580(struct e1000_hw *, bool);
58 static s32  igb_set_d3_lplu_state_82580(struct e1000_hw *, bool);
59 static s32  igb_setup_copper_link_82575(struct e1000_hw *);
60 static s32  igb_setup_serdes_link_82575(struct e1000_hw *);
61 static s32  igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
62 static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
63 static s32  igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
64 static s32  igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
65                                                  u16 *);
66 static s32  igb_get_phy_id_82575(struct e1000_hw *);
67 static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
68 static bool igb_sgmii_active_82575(struct e1000_hw *);
69 static s32  igb_reset_init_script_82575(struct e1000_hw *);
70 static s32  igb_read_mac_addr_82575(struct e1000_hw *);
71 static s32  igb_set_pcie_completion_timeout(struct e1000_hw *hw);
72 static s32  igb_reset_mdicnfg_82580(struct e1000_hw *hw);
73 static s32  igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
74 static s32  igb_update_nvm_checksum_82580(struct e1000_hw *hw);
75 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
76 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
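/* Rx packet buffer size lookup table for 82580-family parts, indexed by the
 * RXPBS size field (entries presumably in KB).
 */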
77 static const u16 e1000_82580_rxpbs_table[] =
78         { 36, 72, 144, 1, 2, 4, 8, 16,
79           35, 70, 140 };
80 #define E1000_82580_RXPBS_TABLE_SIZE \
81         (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
82
83 /**
84  *  igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
85  *  @hw: pointer to the HW structure
86  *
87  *  Called to determine if the I2C pins are being used for I2C or as an
88  *  external MDIO interface since the two options are mutually exclusive.
89  **/
90 static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
91 {
92         u32 reg = 0;
93         bool ext_mdio = false;
94
95         switch (hw->mac.type) {
96         case e1000_82575:
97         case e1000_82576:
98                 reg = rd32(E1000_MDIC);
99                 ext_mdio = !!(reg & E1000_MDIC_DEST);
100                 break;
101         case e1000_82580:
102         case e1000_i350:
103         case e1000_i354:
104         case e1000_i210:
105         case e1000_i211:
106                 reg = rd32(E1000_MDICNFG);
107                 ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
108                 break;
109         default:
110                 break;
111         }
112         return ext_mdio;
113 }
114
115 /**
116  *  igb_init_phy_params_82575 - Init PHY func ptrs.
117  *  @hw: pointer to the HW structure
118  **/
119 static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
120 {
121         struct e1000_phy_info *phy = &hw->phy;
122         s32 ret_val = 0;
123         u32 ctrl_ext;
124
125         if (hw->phy.media_type != e1000_media_type_copper) {
126                 phy->type = e1000_phy_none;
127                 goto out;
128         }
129
130         phy->autoneg_mask       = AUTONEG_ADVERTISE_SPEED_DEFAULT;
131         phy->reset_delay_us     = 100;
132
133         ctrl_ext = rd32(E1000_CTRL_EXT);
134
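        /* In SGMII designs the external PHY or SFP module is reached over the
         * I2C pins, so the I2C block is enabled only when SGMII is active
         * (see igb_read_phy_reg_sgmii_82575(), which uses the I2C interface).
         */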
135         if (igb_sgmii_active_82575(hw)) {
136                 phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
137                 ctrl_ext |= E1000_CTRL_I2C_ENA;
138         } else {
139                 phy->ops.reset = igb_phy_hw_reset;
140                 ctrl_ext &= ~E1000_CTRL_I2C_ENA;
141         }
142
143         wr32(E1000_CTRL_EXT, ctrl_ext);
144         igb_reset_mdicnfg_82580(hw);
145
146         if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
147                 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
148                 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
149         } else {
150                 switch (hw->mac.type) {
151                 case e1000_82580:
152                 case e1000_i350:
153                 case e1000_i354:
154                         phy->ops.read_reg = igb_read_phy_reg_82580;
155                         phy->ops.write_reg = igb_write_phy_reg_82580;
156                         break;
157                 case e1000_i210:
158                 case e1000_i211:
159                         phy->ops.read_reg = igb_read_phy_reg_gs40g;
160                         phy->ops.write_reg = igb_write_phy_reg_gs40g;
161                         break;
162                 default:
163                         phy->ops.read_reg = igb_read_phy_reg_igp;
164                         phy->ops.write_reg = igb_write_phy_reg_igp;
165                 }
166         }
167
168         /* set lan id */
169         hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
170                         E1000_STATUS_FUNC_SHIFT;
171
172         /* Set phy->phy_addr and phy->id. */
173         ret_val = igb_get_phy_id_82575(hw);
174         if (ret_val)
175                 return ret_val;
176
177         /* Verify phy id and set remaining function pointers */
178         switch (phy->id) {
179         case M88E1545_E_PHY_ID:
180         case I347AT4_E_PHY_ID:
181         case M88E1112_E_PHY_ID:
182         case M88E1111_I_PHY_ID:
183                 phy->type               = e1000_phy_m88;
184                 phy->ops.check_polarity = igb_check_polarity_m88;
185                 phy->ops.get_phy_info   = igb_get_phy_info_m88;
186                 if (phy->id != M88E1111_I_PHY_ID)
187                         phy->ops.get_cable_length =
188                                          igb_get_cable_length_m88_gen2;
189                 else
190                         phy->ops.get_cable_length = igb_get_cable_length_m88;
191                 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
192                 break;
193         case IGP03E1000_E_PHY_ID:
194                 phy->type = e1000_phy_igp_3;
195                 phy->ops.get_phy_info = igb_get_phy_info_igp;
196                 phy->ops.get_cable_length = igb_get_cable_length_igp_2;
197                 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
198                 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
199                 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
200                 break;
201         case I82580_I_PHY_ID:
202         case I350_I_PHY_ID:
203                 phy->type = e1000_phy_82580;
204                 phy->ops.force_speed_duplex =
205                                          igb_phy_force_speed_duplex_82580;
206                 phy->ops.get_cable_length = igb_get_cable_length_82580;
207                 phy->ops.get_phy_info = igb_get_phy_info_82580;
208                 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
209                 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
210                 break;
211         case I210_I_PHY_ID:
212                 phy->type               = e1000_phy_i210;
213                 phy->ops.check_polarity = igb_check_polarity_m88;
214                 phy->ops.get_phy_info   = igb_get_phy_info_m88;
215                 phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
216                 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
217                 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
218                 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
219                 break;
220         default:
221                 ret_val = -E1000_ERR_PHY;
222                 goto out;
223         }
224
225 out:
226         return ret_val;
227 }
228
229 /**
230  *  igb_init_nvm_params_82575 - Init NVM func ptrs.
231  *  @hw: pointer to the HW structure
232  **/
233 static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
234 {
235         struct e1000_nvm_info *nvm = &hw->nvm;
236         u32 eecd = rd32(E1000_EECD);
237         u16 size;
238
239         size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
240                      E1000_EECD_SIZE_EX_SHIFT);
241         /* Added to a constant, "size" becomes the left-shift value
242          * for setting word_size.
243          */
244         size += NVM_WORD_SIZE_BASE_SHIFT;
245
246         /* Just in case size is out of range, cap it to the largest
247          * EEPROM size supported
248          */
249         if (size > 15)
250                 size = 15;
251
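        /* word_size is in 16-bit NVM words, so capping size at 15 limits the
         * EEPROM to 32k words (64 KB).
         */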
252         nvm->word_size = 1 << size;
253         if (hw->mac.type < e1000_i210) {
254                 nvm->opcode_bits = 8;
255                 nvm->delay_usec = 1;
256
257                 switch (nvm->override) {
258                 case e1000_nvm_override_spi_large:
259                         nvm->page_size = 32;
260                         nvm->address_bits = 16;
261                         break;
262                 case e1000_nvm_override_spi_small:
263                         nvm->page_size = 8;
264                         nvm->address_bits = 8;
265                         break;
266                 default:
267                         nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
268                         nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
269                                             16 : 8;
270                         break;
271                 }
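                /* a 32k-word part always uses a 128-byte page, overriding the
                 * value derived from EECD above
                 */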
272                 if (nvm->word_size == (1 << 15))
273                         nvm->page_size = 128;
274
275                 nvm->type = e1000_nvm_eeprom_spi;
276         } else {
277                 nvm->type = e1000_nvm_flash_hw;
278         }
279
280         /* NVM Function Pointers */
281         switch (hw->mac.type) {
282         case e1000_82580:
283                 nvm->ops.validate = igb_validate_nvm_checksum_82580;
284                 nvm->ops.update = igb_update_nvm_checksum_82580;
285                 nvm->ops.acquire = igb_acquire_nvm_82575;
286                 nvm->ops.release = igb_release_nvm_82575;
287                 if (nvm->word_size < (1 << 15))
288                         nvm->ops.read = igb_read_nvm_eerd;
289                 else
290                         nvm->ops.read = igb_read_nvm_spi;
291                 nvm->ops.write = igb_write_nvm_spi;
292                 break;
293         case e1000_i354:
294         case e1000_i350:
295                 nvm->ops.validate = igb_validate_nvm_checksum_i350;
296                 nvm->ops.update = igb_update_nvm_checksum_i350;
297                 nvm->ops.acquire = igb_acquire_nvm_82575;
298                 nvm->ops.release = igb_release_nvm_82575;
299                 if (nvm->word_size < (1 << 15))
300                         nvm->ops.read = igb_read_nvm_eerd;
301                 else
302                         nvm->ops.read = igb_read_nvm_spi;
303                 nvm->ops.write = igb_write_nvm_spi;
304                 break;
305         case e1000_i210:
306                 nvm->ops.validate = igb_validate_nvm_checksum_i210;
307                 nvm->ops.update   = igb_update_nvm_checksum_i210;
308                 nvm->ops.acquire = igb_acquire_nvm_i210;
309                 nvm->ops.release = igb_release_nvm_i210;
310                 nvm->ops.read    = igb_read_nvm_srrd_i210;
311                 nvm->ops.write   = igb_write_nvm_srwr_i210;
312                 nvm->ops.valid_led_default = igb_valid_led_default_i210;
313                 break;
314         case e1000_i211:
315                 nvm->ops.acquire  = igb_acquire_nvm_i210;
316                 nvm->ops.release  = igb_release_nvm_i210;
317                 nvm->ops.read     = igb_read_nvm_i211;
318                 nvm->ops.valid_led_default = igb_valid_led_default_i210;
319                 nvm->ops.validate = NULL;
320                 nvm->ops.update   = NULL;
321                 nvm->ops.write    = NULL;
322                 break;
323         default:
324                 nvm->ops.validate = igb_validate_nvm_checksum;
325                 nvm->ops.update = igb_update_nvm_checksum;
326                 nvm->ops.acquire = igb_acquire_nvm_82575;
327                 nvm->ops.release = igb_release_nvm_82575;
328                 if (nvm->word_size < (1 << 15))
329                         nvm->ops.read = igb_read_nvm_eerd;
330                 else
331                         nvm->ops.read = igb_read_nvm_spi;
332                 nvm->ops.write = igb_write_nvm_spi;
333                 break;
334         }
335
336         return 0;
337 }
338
339 /**
340  *  igb_init_mac_params_82575 - Init MAC func ptrs.
341  *  @hw: pointer to the HW structure
342  **/
343 static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
344 {
345         struct e1000_mac_info *mac = &hw->mac;
346         struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
347
348         /* Set mta register count */
349         mac->mta_reg_count = 128;
350         /* Set rar entry count */
351         switch (mac->type) {
352         case e1000_82576:
353                 mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
354                 break;
355         case e1000_82580:
356                 mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
357                 break;
358         case e1000_i350:
359         case e1000_i354:
360                 mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
361                 break;
362         default:
363                 mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
364                 break;
365         }
366         /* reset */
367         if (mac->type >= e1000_82580)
368                 mac->ops.reset_hw = igb_reset_hw_82580;
369         else
370                 mac->ops.reset_hw = igb_reset_hw_82575;
371
372         if (mac->type >= e1000_i210) {
373                 mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
374                 mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
375
376         } else {
377                 mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
378                 mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
379         }
380
381         /* Set if part includes ASF firmware */
382         mac->asf_firmware_present = true;
383         /* Set if manageability features are enabled. */
384         mac->arc_subsystem_valid =
385                 (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
386                         ? true : false;
387         /* enable EEE on i350 parts and later parts */
388         if (mac->type >= e1000_i350)
389                 dev_spec->eee_disable = false;
390         else
391                 dev_spec->eee_disable = true;
392         /* physical interface link setup */
393         mac->ops.setup_physical_interface =
394                 (hw->phy.media_type == e1000_media_type_copper)
395                         ? igb_setup_copper_link_82575
396                         : igb_setup_serdes_link_82575;
397
398         return 0;
399 }
400
401 static s32 igb_get_invariants_82575(struct e1000_hw *hw)
402 {
403         struct e1000_mac_info *mac = &hw->mac;
404         struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
405         s32 ret_val;
406         u32 ctrl_ext = 0;
407
408         switch (hw->device_id) {
409         case E1000_DEV_ID_82575EB_COPPER:
410         case E1000_DEV_ID_82575EB_FIBER_SERDES:
411         case E1000_DEV_ID_82575GB_QUAD_COPPER:
412                 mac->type = e1000_82575;
413                 break;
414         case E1000_DEV_ID_82576:
415         case E1000_DEV_ID_82576_NS:
416         case E1000_DEV_ID_82576_NS_SERDES:
417         case E1000_DEV_ID_82576_FIBER:
418         case E1000_DEV_ID_82576_SERDES:
419         case E1000_DEV_ID_82576_QUAD_COPPER:
420         case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
421         case E1000_DEV_ID_82576_SERDES_QUAD:
422                 mac->type = e1000_82576;
423                 break;
424         case E1000_DEV_ID_82580_COPPER:
425         case E1000_DEV_ID_82580_FIBER:
426         case E1000_DEV_ID_82580_QUAD_FIBER:
427         case E1000_DEV_ID_82580_SERDES:
428         case E1000_DEV_ID_82580_SGMII:
429         case E1000_DEV_ID_82580_COPPER_DUAL:
430         case E1000_DEV_ID_DH89XXCC_SGMII:
431         case E1000_DEV_ID_DH89XXCC_SERDES:
432         case E1000_DEV_ID_DH89XXCC_BACKPLANE:
433         case E1000_DEV_ID_DH89XXCC_SFP:
434                 mac->type = e1000_82580;
435                 break;
436         case E1000_DEV_ID_I350_COPPER:
437         case E1000_DEV_ID_I350_FIBER:
438         case E1000_DEV_ID_I350_SERDES:
439         case E1000_DEV_ID_I350_SGMII:
440                 mac->type = e1000_i350;
441                 break;
442         case E1000_DEV_ID_I210_COPPER:
443         case E1000_DEV_ID_I210_COPPER_OEM1:
444         case E1000_DEV_ID_I210_COPPER_IT:
445         case E1000_DEV_ID_I210_FIBER:
446         case E1000_DEV_ID_I210_SERDES:
447         case E1000_DEV_ID_I210_SGMII:
448                 mac->type = e1000_i210;
449                 break;
450         case E1000_DEV_ID_I211_COPPER:
451                 mac->type = e1000_i211;
452                 break;
453         case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
454         case E1000_DEV_ID_I354_SGMII:
455         case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
456                 mac->type = e1000_i354;
457                 break;
458         default:
459                 return -E1000_ERR_MAC_INIT;
460                 break;
461         }
462
463         /* Set media type */
464         /* The 82575 uses bits 22:23 for link mode. The mode can be changed
465          * based on the EEPROM. We cannot rely upon device ID. There
466          * is no distinguishable difference between fiber and internal
467          * SerDes mode on the 82575. There can be an external PHY attached
468          * on the SGMII interface. For this, we'll set sgmii_active to true.
469          */
470         hw->phy.media_type = e1000_media_type_copper;
471         dev_spec->sgmii_active = false;
472
473         ctrl_ext = rd32(E1000_CTRL_EXT);
474         switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
475         case E1000_CTRL_EXT_LINK_MODE_SGMII:
476                 dev_spec->sgmii_active = true;
477                 break;
478         case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
479         case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
480                 hw->phy.media_type = e1000_media_type_internal_serdes;
481                 break;
482         default:
483                 break;
484         }
485
486         /* mac initialization and operations */
487         ret_val = igb_init_mac_params_82575(hw);
488         if (ret_val)
489                 goto out;
490
491         /* NVM initialization */
492         ret_val = igb_init_nvm_params_82575(hw);
493         if (ret_val)
494                 goto out;
495
496         /* if part supports SR-IOV then initialize mailbox parameters */
497         switch (mac->type) {
498         case e1000_82576:
499         case e1000_i350:
500                 igb_init_mbx_params_pf(hw);
501                 break;
502         default:
503                 break;
504         }
505
506         /* setup PHY parameters */
507         ret_val = igb_init_phy_params_82575(hw);
508
509 out:
510         return ret_val;
511 }
512
513 /**
514  *  igb_acquire_phy_82575 - Acquire rights to access PHY
515  *  @hw: pointer to the HW structure
516  *
517  *  Acquire access rights to the correct PHY.  This is a
518  *  function pointer entry point called by the api module.
519  **/
520 static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
521 {
522         u16 mask = E1000_SWFW_PHY0_SM;
523
524         if (hw->bus.func == E1000_FUNC_1)
525                 mask = E1000_SWFW_PHY1_SM;
526         else if (hw->bus.func == E1000_FUNC_2)
527                 mask = E1000_SWFW_PHY2_SM;
528         else if (hw->bus.func == E1000_FUNC_3)
529                 mask = E1000_SWFW_PHY3_SM;
530
531         return hw->mac.ops.acquire_swfw_sync(hw, mask);
532 }
533
534 /**
535  *  igb_release_phy_82575 - Release rights to access PHY
536  *  @hw: pointer to the HW structure
537  *
538  *  A wrapper to release access rights to the correct PHY.  This is a
539  *  function pointer entry point called by the api module.
540  **/
541 static void igb_release_phy_82575(struct e1000_hw *hw)
542 {
543         u16 mask = E1000_SWFW_PHY0_SM;
544
545         if (hw->bus.func == E1000_FUNC_1)
546                 mask = E1000_SWFW_PHY1_SM;
547         else if (hw->bus.func == E1000_FUNC_2)
548                 mask = E1000_SWFW_PHY2_SM;
549         else if (hw->bus.func == E1000_FUNC_3)
550                 mask = E1000_SWFW_PHY3_SM;
551
552         hw->mac.ops.release_swfw_sync(hw, mask);
553 }
554
555 /**
556  *  igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
557  *  @hw: pointer to the HW structure
558  *  @offset: register offset to be read
559  *  @data: pointer to the read data
560  *
561  *  Reads the PHY register at offset using the serial gigabit media independent
562  *  interface and stores the retrieved information in data.
563  **/
564 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
565                                           u16 *data)
566 {
567         s32 ret_val = -E1000_ERR_PARAM;
568
569         if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
570                 hw_dbg("PHY Address %u is out of range\n", offset);
571                 goto out;
572         }
573
574         ret_val = hw->phy.ops.acquire(hw);
575         if (ret_val)
576                 goto out;
577
578         ret_val = igb_read_phy_reg_i2c(hw, offset, data);
579
580         hw->phy.ops.release(hw);
581
582 out:
583         return ret_val;
584 }
585
586 /**
587  *  igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
588  *  @hw: pointer to the HW structure
589  *  @offset: register offset to write to
590  *  @data: data to write at register offset
591  *
592  *  Writes the data to PHY register at the offset using the serial gigabit
593  *  media independent interface.
594  **/
595 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
596                                            u16 data)
597 {
598         s32 ret_val = -E1000_ERR_PARAM;
599
600
601         if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
602                 hw_dbg("PHY Address %u is out of range\n", offset);
603                 goto out;
604         }
605
606         ret_val = hw->phy.ops.acquire(hw);
607         if (ret_val)
608                 goto out;
609
610         ret_val = igb_write_phy_reg_i2c(hw, offset, data);
611
612         hw->phy.ops.release(hw);
613
614 out:
615         return ret_val;
616 }
617
618 /**
619  *  igb_get_phy_id_82575 - Retrieve PHY addr and id
620  *  @hw: pointer to the HW structure
621  *
622  *  Retrieves the PHY address and ID for PHYs that do and do not use the
623  *  sgmii interface.
624  **/
625 static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
626 {
627         struct e1000_phy_info *phy = &hw->phy;
628         s32  ret_val = 0;
629         u16 phy_id;
630         u32 ctrl_ext;
631         u32 mdic;
632
633         /* For SGMII PHYs, we try the list of possible addresses until
634          * we find one that works.  For non-SGMII PHYs
635          * (e.g. integrated copper PHYs), an address of 1 should
636          * work.  The result of this function should mean phy->phy_addr
637          * and phy->id are set correctly.
638          */
639         if (!(igb_sgmii_active_82575(hw))) {
640                 phy->addr = 1;
641                 ret_val = igb_get_phy_id(hw);
642                 goto out;
643         }
644
645         if (igb_sgmii_uses_mdio_82575(hw)) {
646                 switch (hw->mac.type) {
647                 case e1000_82575:
648                 case e1000_82576:
649                         mdic = rd32(E1000_MDIC);
650                         mdic &= E1000_MDIC_PHY_MASK;
651                         phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
652                         break;
653                 case e1000_82580:
654                 case e1000_i350:
655                 case e1000_i354:
656                 case e1000_i210:
657                 case e1000_i211:
658                         mdic = rd32(E1000_MDICNFG);
659                         mdic &= E1000_MDICNFG_PHY_MASK;
660                         phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
661                         break;
662                 default:
663                         ret_val = -E1000_ERR_PHY;
664                         goto out;
665                         break;
666                 }
667                 ret_val = igb_get_phy_id(hw);
668                 goto out;
669         }
670
671         /* Power on sgmii phy if it is disabled */
672         ctrl_ext = rd32(E1000_CTRL_EXT);
673         wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
674         wrfl();
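        /* give the newly powered module time to come up before probing PHY
         * addresses (assumed purpose of the fixed 300 ms delay)
         */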
675         msleep(300);
676
677         /* The address field in the I2CCMD register is 3 bits and 0 is invalid.
678          * Therefore, we need to test 1-7
679          */
680         for (phy->addr = 1; phy->addr < 8; phy->addr++) {
681                 ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
682                 if (ret_val == 0) {
683                         hw_dbg("Vendor ID 0x%08X read at address %u\n",
684                                phy_id, phy->addr);
685                         /* At the time of this writing, The M88 part is
686                          * the only supported SGMII PHY product.
687                          */
688                         if (phy_id == M88_VENDOR)
689                                 break;
690                 } else {
691                         hw_dbg("PHY address %u was unreadable\n", phy->addr);
692                 }
693         }
694
695         /* A valid PHY type couldn't be found. */
696         if (phy->addr == 8) {
697                 phy->addr = 0;
698                 ret_val = -E1000_ERR_PHY;
699                 goto out;
700         } else {
701                 ret_val = igb_get_phy_id(hw);
702         }
703
704         /* restore previous sfp cage power state */
705         wr32(E1000_CTRL_EXT, ctrl_ext);
706
707 out:
708         return ret_val;
709 }
710
711 /**
712  *  igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset
713  *  @hw: pointer to the HW structure
714  *
715  *  Resets the PHY using the serial gigabit media independent interface.
716  **/
717 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
718 {
719         s32 ret_val;
720
721         /* This isn't a true "hard" reset, but is the only reset
722          * available to us at this time.
723          */
724
725         hw_dbg("Soft resetting SGMII attached PHY...\n");
726
727         /* SFP documentation requires the following to configure the SFP module
728          * to work on SGMII.  No further documentation is given.
729          */
730         ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
731         if (ret_val)
732                 goto out;
733
734         ret_val = igb_phy_sw_reset(hw);
735
736 out:
737         return ret_val;
738 }
739
740 /**
741  *  igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
742  *  @hw: pointer to the HW structure
743  *  @active: true to enable LPLU, false to disable
744  *
745  *  Sets the LPLU D0 state according to the active flag.  When
746  *  activating LPLU this function also disables smart speed
747  *  and vice versa.  LPLU will not be activated unless the
748  *  device autonegotiation advertisement meets standards of
749  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
750  *  This is a function pointer entry point only called by
751  *  PHY setup routines.
752  **/
753 static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
754 {
755         struct e1000_phy_info *phy = &hw->phy;
756         s32 ret_val;
757         u16 data;
758
759         ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
760         if (ret_val)
761                 goto out;
762
763         if (active) {
764                 data |= IGP02E1000_PM_D0_LPLU;
765                 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
766                                                  data);
767                 if (ret_val)
768                         goto out;
769
770                 /* When LPLU is enabled, we should disable SmartSpeed */
771                 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
772                                                 &data);
773                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
774                 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
775                                                  data);
776                 if (ret_val)
777                         goto out;
778         } else {
779                 data &= ~IGP02E1000_PM_D0_LPLU;
780                 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
781                                                  data);
782                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
783                  * during Dx states where the power conservation is most
784                  * important.  During driver activity we should enable
785                  * SmartSpeed, so performance is maintained.
786                  */
787                 if (phy->smart_speed == e1000_smart_speed_on) {
788                         ret_val = phy->ops.read_reg(hw,
789                                         IGP01E1000_PHY_PORT_CONFIG, &data);
790                         if (ret_val)
791                                 goto out;
792
793                         data |= IGP01E1000_PSCFR_SMART_SPEED;
794                         ret_val = phy->ops.write_reg(hw,
795                                         IGP01E1000_PHY_PORT_CONFIG, data);
796                         if (ret_val)
797                                 goto out;
798                 } else if (phy->smart_speed == e1000_smart_speed_off) {
799                         ret_val = phy->ops.read_reg(hw,
800                                         IGP01E1000_PHY_PORT_CONFIG, &data);
801                         if (ret_val)
802                                 goto out;
803
804                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
805                         ret_val = phy->ops.write_reg(hw,
806                                         IGP01E1000_PHY_PORT_CONFIG, data);
807                         if (ret_val)
808                                 goto out;
809                 }
810         }
811
812 out:
813         return ret_val;
814 }
815
816 /**
817  *  igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
818  *  @hw: pointer to the HW structure
819  *  @active: true to enable LPLU, false to disable
820  *
821  *  Sets the LPLU D0 state according to the active flag.  When
822  *  activating LPLU this function also disables smart speed
823  *  and vice versa.  LPLU will not be activated unless the
824  *  device autonegotiation advertisement meets standards of
825  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
826  *  This is a function pointer entry point only called by
827  *  PHY setup routines.
828  **/
829 static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
830 {
831         struct e1000_phy_info *phy = &hw->phy;
832         s32 ret_val = 0;
833         u16 data;
834
835         data = rd32(E1000_82580_PHY_POWER_MGMT);
836
837         if (active) {
838                 data |= E1000_82580_PM_D0_LPLU;
839
840                 /* When LPLU is enabled, we should disable SmartSpeed */
841                 data &= ~E1000_82580_PM_SPD;
842         } else {
843                 data &= ~E1000_82580_PM_D0_LPLU;
844
845                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
846                  * during Dx states where the power conservation is most
847                  * important.  During driver activity we should enable
848                  * SmartSpeed, so performance is maintained.
849                  */
850                 if (phy->smart_speed == e1000_smart_speed_on)
851                         data |= E1000_82580_PM_SPD;
852                 else if (phy->smart_speed == e1000_smart_speed_off)
853                         data &= ~E1000_82580_PM_SPD; }
854
855         wr32(E1000_82580_PHY_POWER_MGMT, data);
856         return ret_val;
857 }
858
859 /**
860  *  igb_set_d3_lplu_state_82580 - Sets low power link up state for D3
861  *  @hw: pointer to the HW structure
862  *  @active: boolean used to enable/disable lplu
863  *
864  *  Success returns 0, Failure returns 1
865  *
866  *  The low power link up (lplu) state is set to the power management level D3
867  *  and SmartSpeed is disabled when active is true, else clear lplu for D3
868  *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
869  *  is used during Dx states where the power conservation is most important.
870  *  During driver activity, SmartSpeed should be enabled so performance is
871  *  maintained.
872  **/
873 static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
874 {
875         struct e1000_phy_info *phy = &hw->phy;
876         s32 ret_val = 0;
877         u16 data;
878
879         data = rd32(E1000_82580_PHY_POWER_MGMT);
880
881         if (!active) {
882                 data &= ~E1000_82580_PM_D3_LPLU;
883                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
884                  * during Dx states where the power conservation is most
885                  * important.  During driver activity we should enable
886                  * SmartSpeed, so performance is maintained.
887                  */
888                 if (phy->smart_speed == e1000_smart_speed_on)
889                         data |= E1000_82580_PM_SPD;
890                 else if (phy->smart_speed == e1000_smart_speed_off)
891                         data &= ~E1000_82580_PM_SPD;
892         } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
893                    (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
894                    (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
895                 data |= E1000_82580_PM_D3_LPLU;
896                 /* When LPLU is enabled, we should disable SmartSpeed */
897                 data &= ~E1000_82580_PM_SPD;
898         }
899
900         wr32(E1000_82580_PHY_POWER_MGMT, data);
901         return ret_val;
902 }
903
904 /**
905  *  igb_acquire_nvm_82575 - Request for access to EEPROM
906  *  @hw: pointer to the HW structure
907  *
908  *  Acquire the necessary semaphores for exclusive access to the EEPROM.
909  *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
910  *  Return successful if access grant bit set, else clear the request for
911  *  EEPROM access and return -E1000_ERR_NVM (-1).
912  **/
913 static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
914 {
915         s32 ret_val;
916
917         ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
918         if (ret_val)
919                 goto out;
920
921         ret_val = igb_acquire_nvm(hw);
922
923         if (ret_val)
924                 hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
925
926 out:
927         return ret_val;
928 }
929
930 /**
931  *  igb_release_nvm_82575 - Release exclusive access to EEPROM
932  *  @hw: pointer to the HW structure
933  *
934  *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
935  *  then release the semaphores acquired.
936  **/
937 static void igb_release_nvm_82575(struct e1000_hw *hw)
938 {
939         igb_release_nvm(hw);
940         hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
941 }
942
943 /**
944  *  igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
945  *  @hw: pointer to the HW structure
946  *  @mask: specifies which semaphore to acquire
947  *
948  *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
949  *  will also specify which port we're acquiring the lock for.
950  **/
951 static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
952 {
953         u32 swfw_sync;
954         u32 swmask = mask;
955         u32 fwmask = mask << 16;
956         s32 ret_val = 0;
957         s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
958
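        /* Acquisition protocol: take the HW semaphore, check that neither the
         * software bit (low 16 bits of SW_FW_SYNC) nor the firmware bit (high
         * 16 bits) for this resource is set, then set our bit and drop the
         * semaphore.
         */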
959         while (i < timeout) {
960                 if (igb_get_hw_semaphore(hw)) {
961                         ret_val = -E1000_ERR_SWFW_SYNC;
962                         goto out;
963                 }
964
965                 swfw_sync = rd32(E1000_SW_FW_SYNC);
966                 if (!(swfw_sync & (fwmask | swmask)))
967                         break;
968
969                 /* Firmware currently using resource (fwmask)
970                  * or other software thread using resource (swmask)
971                  */
972                 igb_put_hw_semaphore(hw);
973                 mdelay(5);
974                 i++;
975         }
976
977         if (i == timeout) {
978                 hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
979                 ret_val = -E1000_ERR_SWFW_SYNC;
980                 goto out;
981         }
982
983         swfw_sync |= swmask;
984         wr32(E1000_SW_FW_SYNC, swfw_sync);
985
986         igb_put_hw_semaphore(hw);
987
988 out:
989         return ret_val;
990 }
991
992 /**
993  *  igb_release_swfw_sync_82575 - Release SW/FW semaphore
994  *  @hw: pointer to the HW structure
995  *  @mask: specifies which semaphore to acquire
996  *
997  *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
998  *  will also specify which port we're releasing the lock for.
999  **/
1000 static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
1001 {
1002         u32 swfw_sync;
1003
1004         while (igb_get_hw_semaphore(hw) != 0);
1005         /* Empty */
1006
1007         swfw_sync = rd32(E1000_SW_FW_SYNC);
1008         swfw_sync &= ~mask;
1009         wr32(E1000_SW_FW_SYNC, swfw_sync);
1010
1011         igb_put_hw_semaphore(hw);
1012 }
1013
1014 /**
1015  *  igb_get_cfg_done_82575 - Read config done bit
1016  *  @hw: pointer to the HW structure
1017  *
1018  *  Read the management control register for the config done bit for
1019  *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
1020  *  to read the config done bit, so an error is *ONLY* logged and returns
1021  *  to read the config done bit, so the error is *ONLY* logged and 0 is
1022  *  returned.  If we were to return an error, EEPROM-less silicon
1023  **/
1024 static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
1025 {
1026         s32 timeout = PHY_CFG_TIMEOUT;
1027         s32 ret_val = 0;
1028         u32 mask = E1000_NVM_CFG_DONE_PORT_0;
1029
1030         if (hw->bus.func == 1)
1031                 mask = E1000_NVM_CFG_DONE_PORT_1;
1032         else if (hw->bus.func == E1000_FUNC_2)
1033                 mask = E1000_NVM_CFG_DONE_PORT_2;
1034         else if (hw->bus.func == E1000_FUNC_3)
1035                 mask = E1000_NVM_CFG_DONE_PORT_3;
1036
1037         while (timeout) {
1038                 if (rd32(E1000_EEMNGCTL) & mask)
1039                         break;
1040                 msleep(1);
1041                 timeout--;
1042         }
1043         if (!timeout)
1044                 hw_dbg("MNG configuration cycle has not completed.\n");
1045
1046         /* If EEPROM is not marked present, init the PHY manually */
1047         if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
1048             (hw->phy.type == e1000_phy_igp_3))
1049                 igb_phy_init_script_igp3(hw);
1050
1051         return ret_val;
1052 }
1053
1054 /**
1055  *  igb_check_for_link_82575 - Check for link
1056  *  @hw: pointer to the HW structure
1057  *
1058  *  If sgmii is enabled, then use the pcs register to determine link, otherwise
1059  *  use the generic interface for determining link.
1060  **/
1061 static s32 igb_check_for_link_82575(struct e1000_hw *hw)
1062 {
1063         s32 ret_val;
1064         u16 speed, duplex;
1065
1066         if (hw->phy.media_type != e1000_media_type_copper) {
1067                 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
1068                                                              &duplex);
1069                 /* Use this flag to determine if link needs to be checked or
1070                  * not.  If we have link, clear the flag so that we do not
1071                  * continue to check for link.
1072                  */
1073                 hw->mac.get_link_status = !hw->mac.serdes_has_link;
1074
1075                 /* Configure Flow Control now that Auto-Neg has completed.
1076                  * First, we need to restore the desired flow control
1077                  * settings because we may have had to re-autoneg with a
1078                  * different link partner.
1079                  */
1080                 ret_val = igb_config_fc_after_link_up(hw);
1081                 if (ret_val)
1082                         hw_dbg("Error configuring flow control\n");
1083         } else {
1084                 ret_val = igb_check_for_copper_link(hw);
1085         }
1086
1087         return ret_val;
1088 }
1089
1090 /**
1091  *  igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
1092  *  @hw: pointer to the HW structure
1093  **/
1094 void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
1095 {
1096         u32 reg;
1097
1098
1099         if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1100             !igb_sgmii_active_82575(hw))
1101                 return;
1102
1103         /* Enable PCS to turn on link */
1104         reg = rd32(E1000_PCS_CFG0);
1105         reg |= E1000_PCS_CFG_PCS_EN;
1106         wr32(E1000_PCS_CFG0, reg);
1107
1108         /* Power up the laser */
1109         reg = rd32(E1000_CTRL_EXT);
1110         reg &= ~E1000_CTRL_EXT_SDP3_DATA;
1111         wr32(E1000_CTRL_EXT, reg);
1112
1113         /* flush the write to verify completion */
1114         wrfl();
1115         msleep(1);
1116 }
1117
1118 /**
1119  *  igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
1120  *  @hw: pointer to the HW structure
1121  *  @speed: stores the current speed
1122  *  @duplex: stores the current duplex
1123  *
1124  *  Using the physical coding sub-layer (PCS), retrieve the current speed and
1125  *  duplex, then store the values in the pointers provided.
1126  **/
1127 static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
1128                                                 u16 *duplex)
1129 {
1130         struct e1000_mac_info *mac = &hw->mac;
1131         u32 pcs;
1132
1133         /* Set up defaults for the return values of this function */
1134         mac->serdes_has_link = false;
1135         *speed = 0;
1136         *duplex = 0;
1137
1138         /* Read the PCS Status register for link state. For non-copper mode,
1139          * the STATUS register is not accurate, so the PCS status register is
1140          * used instead.
1141          */
1142         pcs = rd32(E1000_PCS_LSTAT);
1143
1144         /* The link up bit determines when link is up on autoneg. The sync ok
1145          * gets set once both sides sync up and agree upon link. Stable link
1146          * can be determined by checking for both link up and link sync ok
1147          */
1148         if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
1149                 mac->serdes_has_link = true;
1150
1151                 /* Detect and store PCS speed */
1152                 if (pcs & E1000_PCS_LSTS_SPEED_1000) {
1153                         *speed = SPEED_1000;
1154                 } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
1155                         *speed = SPEED_100;
1156                 } else {
1157                         *speed = SPEED_10;
1158                 }
1159
1160                 /* Detect and store PCS duplex */
1161                 if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
1162                         *duplex = FULL_DUPLEX;
1163                 } else {
1164                         *duplex = HALF_DUPLEX;
1165                 }
1166         }
1167
1168         return 0;
1169 }
1170
1171 /**
1172  *  igb_shutdown_serdes_link_82575 - Remove link during power down
1173  *  @hw: pointer to the HW structure
1174  *
1175  *  In the case of fiber serdes, shut down optics and PCS on driver unload
1176  *  when management pass thru is not enabled.
1177  **/
1178 void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
1179 {
1180         u32 reg;
1181
1182         if (hw->phy.media_type != e1000_media_type_internal_serdes &&
1183             !igb_sgmii_active_82575(hw))
1184                 return;
1185
1186         if (!igb_enable_mng_pass_thru(hw)) {
1187                 /* Disable PCS to turn off link */
1188                 reg = rd32(E1000_PCS_CFG0);
1189                 reg &= ~E1000_PCS_CFG_PCS_EN;
1190                 wr32(E1000_PCS_CFG0, reg);
1191
1192                 /* shutdown the laser */
1193                 reg = rd32(E1000_CTRL_EXT);
1194                 reg |= E1000_CTRL_EXT_SDP3_DATA;
1195                 wr32(E1000_CTRL_EXT, reg);
1196
1197                 /* flush the write to verify completion */
1198                 wrfl();
1199                 msleep(1);
1200         }
1201 }
1202
1203 /**
1204  *  igb_reset_hw_82575 - Reset hardware
1205  *  @hw: pointer to the HW structure
1206  *
1207  *  This resets the hardware into a known state.  This is a
1208  *  function pointer entry point called by the api module.
1209  **/
1210 static s32 igb_reset_hw_82575(struct e1000_hw *hw)
1211 {
1212         u32 ctrl, icr;
1213         s32 ret_val;
1214
1215         /* Prevent the PCI-E bus from sticking if there is no TLP connection
1216          * on the last TLP read/write transaction when MAC is reset.
1217          */
1218         ret_val = igb_disable_pcie_master(hw);
1219         if (ret_val)
1220                 hw_dbg("PCI-E Master disable polling has failed.\n");
1221
1222         /* set the completion timeout for interface */
1223         ret_val = igb_set_pcie_completion_timeout(hw);
1224         if (ret_val) {
1225                 hw_dbg("PCI-E Set completion timeout has failed.\n");
1226         }
1227
1228         hw_dbg("Masking off all interrupts\n");
1229         wr32(E1000_IMC, 0xffffffff);
1230
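        /* Disable receive and transmit; writing TCTL with only the PSP bit
         * clears TCTL_EN while keeping pad-short-packets set.
         */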
1231         wr32(E1000_RCTL, 0);
1232         wr32(E1000_TCTL, E1000_TCTL_PSP);
1233         wrfl();
1234
1235         msleep(10);
1236
1237         ctrl = rd32(E1000_CTRL);
1238
1239         hw_dbg("Issuing a global reset to MAC\n");
1240         wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);
1241
1242         ret_val = igb_get_auto_rd_done(hw);
1243         if (ret_val) {
1244                 /* When auto config read does not complete, do not
1245                  * return with an error. This can happen in situations
1246                  * where there is no eeprom and prevents getting link.
1247                  */
1248                 hw_dbg("Auto Read Done did not complete\n");
1249         }
1250
1251         /* If EEPROM is not present, run manual init scripts */
1252         if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
1253                 igb_reset_init_script_82575(hw);
1254
1255         /* Clear any pending interrupt events. */
1256         wr32(E1000_IMC, 0xffffffff);
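        /* ICR is clear-on-read, so this throwaway read flushes any causes
         * latched before the mask above took effect
         */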
1257         icr = rd32(E1000_ICR);
1258
1259         /* Install any alternate MAC address into RAR0 */
1260         ret_val = igb_check_alt_mac_addr(hw);
1261
1262         return ret_val;
1263 }
1264
1265 /**
1266  *  igb_init_hw_82575 - Initialize hardware
1267  *  @hw: pointer to the HW structure
1268  *
1269  *  This inits the hardware readying it for operation.
1270  **/
1271 static s32 igb_init_hw_82575(struct e1000_hw *hw)
1272 {
1273         struct e1000_mac_info *mac = &hw->mac;
1274         s32 ret_val;
1275         u16 i, rar_count = mac->rar_entry_count;
1276
1277         /* Initialize identification LED */
1278         ret_val = igb_id_led_init(hw);
1279         if (ret_val) {
1280                 hw_dbg("Error initializing identification LED\n");
1281                 /* This is not fatal and we should not stop init due to this */
1282         }
1283
1284         /* Disabling VLAN filtering */
1285         hw_dbg("Initializing the IEEE VLAN\n");
1286         if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
1287                 igb_clear_vfta_i350(hw);
1288         else
1289                 igb_clear_vfta(hw);
1290
1291         /* Setup the receive address */
1292         igb_init_rx_addrs(hw, rar_count);
1293
1294         /* Zero out the Multicast HASH table */
1295         hw_dbg("Zeroing the MTA\n");
1296         for (i = 0; i < mac->mta_reg_count; i++)
1297                 array_wr32(E1000_MTA, i, 0);
1298
1299         /* Zero out the Unicast HASH table */
1300         hw_dbg("Zeroing the UTA\n");
1301         for (i = 0; i < mac->uta_reg_count; i++)
1302                 array_wr32(E1000_UTA, i, 0);
1303
1304         /* Setup link and flow control */
1305         ret_val = igb_setup_link(hw);
1306
1307         /* Clear all of the statistics registers (clear on read).  It is
1308          * important that we do this after we have tried to establish link
1309          * because the symbol error count will increment wildly if there
1310          * is no link.
1311          */
1312         igb_clear_hw_cntrs_82575(hw);
1313         return ret_val;
1314 }
1315
1316 /**
1317  *  igb_setup_copper_link_82575 - Configure copper link settings
1318  *  @hw: pointer to the HW structure
1319  *
1320  *  Configures the link for auto-neg or forced speed and duplex.  Then we check
1321  *  for link, once link is established calls to configure collision distance
1322  *  and flow control are called.
1323  **/
1324 static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
1325 {
1326         u32 ctrl;
1327         s32  ret_val;
1328         u32 phpm_reg;
1329
1330         ctrl = rd32(E1000_CTRL);
1331         ctrl |= E1000_CTRL_SLU;
1332         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1333         wr32(E1000_CTRL, ctrl);
1334
1335         /* Clear Go Link Disconnect bit */
1336         if (hw->mac.type >= e1000_82580) {
1337                 phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
1338                 phpm_reg &= ~E1000_82580_PM_GO_LINKD;
1339                 wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
1340         }
1341
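        /* SGMII copper links still go through the PCS, so configure the
         * serdes/PCS side first; igb_setup_serdes_link_82575() returns
         * immediately for plain copper.
         */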
1342         ret_val = igb_setup_serdes_link_82575(hw);
1343         if (ret_val)
1344                 goto out;
1345
1346         if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
1347                 /* allow time for the SFP cage to power up the phy */
1348                 msleep(300);
1349
1350                 ret_val = hw->phy.ops.reset(hw);
1351                 if (ret_val) {
1352                         hw_dbg("Error resetting the PHY.\n");
1353                         goto out;
1354                 }
1355         }
1356         switch (hw->phy.type) {
1357         case e1000_phy_i210:
1358         case e1000_phy_m88:
1359                 switch (hw->phy.id) {
1360                 case I347AT4_E_PHY_ID:
1361                 case M88E1112_E_PHY_ID:
1362                 case M88E1545_E_PHY_ID:
1363                 case I210_I_PHY_ID:
1364                         ret_val = igb_copper_link_setup_m88_gen2(hw);
1365                         break;
1366                 default:
1367                         ret_val = igb_copper_link_setup_m88(hw);
1368                         break;
1369                 }
1370                 break;
1371         case e1000_phy_igp_3:
1372                 ret_val = igb_copper_link_setup_igp(hw);
1373                 break;
1374         case e1000_phy_82580:
1375                 ret_val = igb_copper_link_setup_82580(hw);
1376                 break;
1377         default:
1378                 ret_val = -E1000_ERR_PHY;
1379                 break;
1380         }
1381
1382         if (ret_val)
1383                 goto out;
1384
1385         ret_val = igb_setup_copper_link(hw);
1386 out:
1387         return ret_val;
1388 }
1389
1390 /**
1391  *  igb_setup_serdes_link_82575 - Setup link for serdes
1392  *  @hw: pointer to the HW structure
1393  *
1394  *  Configure the physical coding sub-layer (PCS) link.  The PCS link is
1395  *  used on copper connections where the serialized gigabit media independent
1396  *  interface (sgmii) is used, or on serdes fiber.  Configures the link
1397  *  for auto-negotiation or forces speed/duplex.
1398  **/
1399 static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1400 {
1401         u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
1402         bool pcs_autoneg;
1403         s32 ret_val = E1000_SUCCESS;
1404         u16 data;
1405
1406         if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1407             !igb_sgmii_active_82575(hw))
1408                 return ret_val;
1409
1410
1411         /* On the 82575, SerDes loopback mode persists until it is
1412          * explicitly turned off or a power cycle is performed.  A read to
1413          * the register does not indicate its status.  Therefore, we ensure
1414          * loopback mode is disabled during initialization.
1415          */
1416         wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1417
1418         /* power on the sfp cage if present and turn on I2C */
1419         ctrl_ext = rd32(E1000_CTRL_EXT);
1420         ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
1421         ctrl_ext |= E1000_CTRL_I2C_ENA;
1422         wr32(E1000_CTRL_EXT, ctrl_ext);
1423
1424         ctrl_reg = rd32(E1000_CTRL);
1425         ctrl_reg |= E1000_CTRL_SLU;
1426
1427         if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
1428                 /* set both sw defined pins */
1429                 ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
1430
1431                 /* Set switch control to serdes energy detect */
1432                 reg = rd32(E1000_CONNSW);
1433                 reg |= E1000_CONNSW_ENRGSRC;
1434                 wr32(E1000_CONNSW, reg);
1435         }
1436
1437         reg = rd32(E1000_PCS_LCTL);
1438
1439         /* default pcs_autoneg to the same setting as mac autoneg */
1440         pcs_autoneg = hw->mac.autoneg;
1441
1442         switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
1443         case E1000_CTRL_EXT_LINK_MODE_SGMII:
1444                 /* sgmii mode lets the phy handle forcing speed/duplex */
1445                 pcs_autoneg = true;
1446                 /* autoneg timeout should be disabled for SGMII mode */
1447                 reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
1448                 break;
1449         case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
1450                 /* disable PCS autoneg and support parallel detect only */
1451                 pcs_autoneg = false; /* fall through */
1452         default:
1453                 if (hw->mac.type == e1000_82575 ||
1454                     hw->mac.type == e1000_82576) {
1455                         ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
1456                         if (ret_val) {
1457                                 hw_dbg("NVM Read Error\n");
1458                                 return ret_val;
1459                         }
1460
1461                         if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
1462                                 pcs_autoneg = false;
1463                 }
1464
1465                 /* non-SGMII modes only support a speed of 1000/Full for the
1466                  * link so it is best to just force the MAC and let the pcs
1467                  * link either autoneg or be forced to 1000/Full
1468                  */
1469                 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1470                             E1000_CTRL_FD | E1000_CTRL_FRCDPX;
1471
1472                 /* set speed of 1000/Full if speed/duplex is forced */
1473                 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
1474                 break;
1475         }
1476
1477         wr32(E1000_CTRL, ctrl_reg);
1478
1479         /* New SerDes mode allows for forcing speed or autonegotiating speed
1480          * at 1gb. Autoneg is the default for most drivers and is the mode
1481          * compatible with older link partners and switches. However, both
1482          * modes are supported by the hardware and by some drivers/tools.
1483          */
1484         reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
1485                 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1486
1487         if (pcs_autoneg) {
1488                 /* Set PCS register for autoneg */
1489                 reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1490                        E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1491
1492                 /* Disable force flow control for autoneg */
1493                 reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
1494
1495                 /* Configure flow control advertisement for autoneg */
1496                 anadv_reg = rd32(E1000_PCS_ANADV);
1497                 anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
1498                 switch (hw->fc.requested_mode) {
1499                 case e1000_fc_full:
1500                 case e1000_fc_rx_pause:
1501                         anadv_reg |= E1000_TXCW_ASM_DIR;
1502                         anadv_reg |= E1000_TXCW_PAUSE;
1503                         break;
1504                 case e1000_fc_tx_pause:
1505                         anadv_reg |= E1000_TXCW_ASM_DIR;
1506                         break;
1507                 default:
1508                         break;
1509                 }
1510                 wr32(E1000_PCS_ANADV, anadv_reg);
1511
1512                 hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
1513         } else {
1514                 /* Set PCS register for forced link */
1515                 reg |= E1000_PCS_LCTL_FSD;        /* Force Speed */
1516
1517                 /* Force flow control for forced link */
1518                 reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1519
1520                 hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
1521         }
1522
1523         wr32(E1000_PCS_LCTL, reg);
1524
1525         if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
1526                 igb_force_mac_fc(hw);
1527
1528         return ret_val;
1529 }
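/* Note on the flow-control advertisement programmed above: a hedged summary
 * of the standard 802.3 PAUSE/ASM_DIR encoding this switch implements, not a
 * description of any additional hardware behaviour:
 *
 *        e1000_fc_full, e1000_fc_rx_pause  ->  PAUSE | ASM_DIR
 *        e1000_fc_tx_pause                 ->  ASM_DIR only
 *        anything else (e1000_fc_none)     ->  neither bit advertised
 *
 * The resolved flow-control mode then depends on what the link partner
 * advertises once PCS autonegotiation completes.
 */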
1530
1531 /**
1532  *  igb_sgmii_active_82575 - Return sgmii state
1533  *  @hw: pointer to the HW structure
1534  *
1535  *  82575 silicon has a serialized gigabit media independent interface (sgmii)
1536  *  which can be enabled for use in embedded applications.  Simply
1537  *  return the current state of the sgmii interface.
1538  **/
1539 static bool igb_sgmii_active_82575(struct e1000_hw *hw)
1540 {
1541         struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1542         return dev_spec->sgmii_active;
1543 }
1544
1545 /**
1546  *  igb_reset_init_script_82575 - Inits HW defaults after reset
1547  *  @hw: pointer to the HW structure
1548  *
1549  *  Inits recommended HW defaults after a reset when there is no EEPROM
1550  *  detected. This is only for the 82575.
1551  **/
1552 static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
1553 {
1554         if (hw->mac.type == e1000_82575) {
1555                 hw_dbg("Running reset init script for 82575\n");
1556                 /* SerDes configuration via SERDESCTRL */
1557                 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
1558                 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
1559                 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
1560                 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);
1561
1562                 /* CCM configuration via CCMCTL register */
1563                 igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
1564                 igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);
1565
1566                 /* PCIe lanes configuration */
1567                 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
1568                 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
1569                 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
1570                 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);
1571
1572                 /* PCIe PLL Configuration */
1573                 igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
1574                 igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
1575                 igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
1576         }
1577
1578         return 0;
1579 }
1580
1581 /**
1582  *  igb_read_mac_addr_82575 - Read device MAC address
1583  *  @hw: pointer to the HW structure
1584  **/
1585 static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
1586 {
1587         s32 ret_val = 0;
1588
1589         /* If there's an alternate MAC address, place it in RAR0
1590          * so that it will override the silicon-installed default
1591          * permanent address.
1592          */
1593         ret_val = igb_check_alt_mac_addr(hw);
1594         if (ret_val)
1595                 goto out;
1596
1597         ret_val = igb_read_mac_addr(hw);
1598
1599 out:
1600         return ret_val;
1601 }
1602
1603 /**
1604  * igb_power_down_phy_copper_82575 - Remove link during PHY power down
1605  * @hw: pointer to the HW structure
1606  *
1607  * Remove the link when the PHY is powered down to save power, when link is
1608  * turned off during a driver unload, or when Wake-on-LAN is not enabled.
1609  **/
1610 void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
1611 {
1612         /* If the management interface is not enabled, then power down */
1613         if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
1614                 igb_power_down_phy_copper(hw);
1615 }
1616
1617 /**
1618  *  igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
1619  *  @hw: pointer to the HW structure
1620  *
1621  *  Clears the hardware counters by reading the counter registers.
1622  **/
1623 static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
1624 {
1625         igb_clear_hw_cntrs_base(hw);
1626
1627         rd32(E1000_PRC64);
1628         rd32(E1000_PRC127);
1629         rd32(E1000_PRC255);
1630         rd32(E1000_PRC511);
1631         rd32(E1000_PRC1023);
1632         rd32(E1000_PRC1522);
1633         rd32(E1000_PTC64);
1634         rd32(E1000_PTC127);
1635         rd32(E1000_PTC255);
1636         rd32(E1000_PTC511);
1637         rd32(E1000_PTC1023);
1638         rd32(E1000_PTC1522);
1639
1640         rd32(E1000_ALGNERRC);
1641         rd32(E1000_RXERRC);
1642         rd32(E1000_TNCRS);
1643         rd32(E1000_CEXTERR);
1644         rd32(E1000_TSCTC);
1645         rd32(E1000_TSCTFC);
1646
1647         rd32(E1000_MGTPRC);
1648         rd32(E1000_MGTPDC);
1649         rd32(E1000_MGTPTC);
1650
1651         rd32(E1000_IAC);
1652         rd32(E1000_ICRXOC);
1653
1654         rd32(E1000_ICRXPTC);
1655         rd32(E1000_ICRXATC);
1656         rd32(E1000_ICTXPTC);
1657         rd32(E1000_ICTXATC);
1658         rd32(E1000_ICTXQEC);
1659         rd32(E1000_ICTXQMTC);
1660         rd32(E1000_ICRXDMTC);
1661
1662         rd32(E1000_CBTMPC);
1663         rd32(E1000_HTDPMC);
1664         rd32(E1000_CBRMPC);
1665         rd32(E1000_RPTHC);
1666         rd32(E1000_HGPTC);
1667         rd32(E1000_HTCBDPC);
1668         rd32(E1000_HGORCL);
1669         rd32(E1000_HGORCH);
1670         rd32(E1000_HGOTCL);
1671         rd32(E1000_HGOTCH);
1672         rd32(E1000_LENERRS);
1673
1674         /* This register should not be read in copper configurations */
1675         if (hw->phy.media_type == e1000_media_type_internal_serdes ||
1676             igb_sgmii_active_82575(hw))
1677                 rd32(E1000_SCVPC);
1678 }
1679
1680 /**
1681  *  igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
1682  *  @hw: pointer to the HW structure
1683  *
1684  *  After rx enable, if manageability is enabled then there is likely some
1685  *  bad data at the start of the fifo and possibly in the DMA fifo.  This
1686  *  function clears the fifos and flushes any packets that came in while rx
1687  *  was being enabled.
1688  **/
1689 void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
1690 {
1691         u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
1692         int i, ms_wait;
1693
1694         if (hw->mac.type != e1000_82575 ||
1695             !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
1696                 return;
1697
1698         /* Disable all RX queues */
1699         for (i = 0; i < 4; i++) {
1700                 rxdctl[i] = rd32(E1000_RXDCTL(i));
1701                 wr32(E1000_RXDCTL(i),
1702                      rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
1703         }
1704         /* Poll all queues to verify they have shut down */
1705         for (ms_wait = 0; ms_wait < 10; ms_wait++) {
1706                 msleep(1);
1707                 rx_enabled = 0;
1708                 for (i = 0; i < 4; i++)
1709                         rx_enabled |= rd32(E1000_RXDCTL(i));
1710                 if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
1711                         break;
1712         }
1713
1714         if (ms_wait == 10)
1715                 hw_dbg("Queue disable timed out after 10ms\n");
1716
1717         /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
1718          * incoming packets are rejected.  Set enable and wait 2ms so that
1719          * any packet that was coming in while RCTL.EN was set is flushed.
1720          */
1721         rfctl = rd32(E1000_RFCTL);
1722         wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
1723
1724         rlpml = rd32(E1000_RLPML);
1725         wr32(E1000_RLPML, 0);
1726
1727         rctl = rd32(E1000_RCTL);
1728         temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
1729         temp_rctl |= E1000_RCTL_LPE;
1730
1731         wr32(E1000_RCTL, temp_rctl);
1732         wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
1733         wrfl();
1734         msleep(2);
1735
1736         /* Enable RX queues that were previously enabled and restore our
1737          * previous state
1738          */
1739         for (i = 0; i < 4; i++)
1740                 wr32(E1000_RXDCTL(i), rxdctl[i]);
1741         wr32(E1000_RCTL, rctl);
1742         wrfl();
1743
1744         wr32(E1000_RLPML, rlpml);
1745         wr32(E1000_RFCTL, rfctl);
1746
1747         /* Flush receive errors generated by workaround */
1748         rd32(E1000_ROC);
1749         rd32(E1000_RNBC);
1750         rd32(E1000_MPC);
1751 }
1752
1753 /**
1754  *  igb_set_pcie_completion_timeout - set pci-e completion timeout
1755  *  @hw: pointer to the HW structure
1756  *
1757  *  The defaults for 82575 and 82576 should be in the range of 50us to 50ms;
1758  *  however, the hardware default for these parts is 500us to 1ms, which is
1759  *  less than the 10ms recommended by the pci-e spec.  To address this we need to
1760  *  increase the value to either 10ms to 200ms for capability version 1 config,
1761  *  or 16ms to 55ms for version 2.
1762  **/
1763 static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
1764 {
1765         u32 gcr = rd32(E1000_GCR);
1766         s32 ret_val = 0;
1767         u16 pcie_devctl2;
1768
1769         /* only take action if timeout value is defaulted to 0 */
1770         if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
1771                 goto out;
1772
1773         /* if capabilities version is type 1 we can write the
1774          * timeout of 10ms to 200ms through the GCR register
1775          */
1776         if (!(gcr & E1000_GCR_CAP_VER2)) {
1777                 gcr |= E1000_GCR_CMPL_TMOUT_10ms;
1778                 goto out;
1779         }
1780
1781         /* for version 2 capabilities we need to write the config space
1782          * directly in order to set the completion timeout value for
1783          * 16ms to 55ms
1784          */
1785         ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1786                                         &pcie_devctl2);
1787         if (ret_val)
1788                 goto out;
1789
1790         pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
1791
1792         ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1793                                          &pcie_devctl2);
1794 out:
1795         /* disable completion timeout resend */
1796         gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
1797
1798         wr32(E1000_GCR, gcr);
1799         return ret_val;
1800 }
1801
1802 /**
1803  *  igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
1804  *  @hw: pointer to the hardware struct
1805  *  @enable: state to enter, either enabled or disabled
1806  *  @pf: Physical Function pool - do not set anti-spoofing for the PF
1807  *
1808  *  enables/disables L2 switch anti-spoofing functionality.
1809  **/
1810 void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
1811 {
1812         u32 reg_val, reg_offset;
1813
1814         switch (hw->mac.type) {
1815         case e1000_82576:
1816                 reg_offset = E1000_DTXSWC;
1817                 break;
1818         case e1000_i350:
1819         case e1000_i354:
1820                 reg_offset = E1000_TXSWC;
1821                 break;
1822         default:
1823                 return;
1824         }
1825
1826         reg_val = rd32(reg_offset);
1827         if (enable) {
1828                 reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
1829                              E1000_DTXSWC_VLAN_SPOOF_MASK);
1830                 /* The PF can spoof - it has to in order to
1831                  * support emulation mode NICs
1832                  */
1833                 reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
1834         } else {
1835                 reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
1836                              E1000_DTXSWC_VLAN_SPOOF_MASK);
1837         }
1838         wr32(reg_offset, reg_val);
1839 }
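/* Illustrative call, hedged: the pool argument is normally the PF pool index
 * (the caller-side expression below is an assumption, not a quote from the
 * rest of the driver):
 *
 *        igb_vmdq_set_anti_spoofing_pf(hw, true, adapter->vfs_allocated_count);
 *
 * This enables MAC/VLAN spoof checking for the VF pools while the XOR above
 * clears the corresponding bits for the PF pool itself.
 */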
1840
1841 /**
1842  *  igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
1843  *  @hw: pointer to the hardware struct
1844  *  @enable: state to enter, either enabled or disabled
1845  *
1846  *  enables/disables L2 switch loopback functionality.
1847  **/
1848 void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
1849 {
1850         u32 dtxswc;
1851
1852         switch (hw->mac.type) {
1853         case e1000_82576:
1854                 dtxswc = rd32(E1000_DTXSWC);
1855                 if (enable)
1856                         dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1857                 else
1858                         dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1859                 wr32(E1000_DTXSWC, dtxswc);
1860                 break;
1861         case e1000_i354:
1862         case e1000_i350:
1863                 dtxswc = rd32(E1000_TXSWC);
1864                 if (enable)
1865                         dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1866                 else
1867                         dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1868                 wr32(E1000_TXSWC, dtxswc);
1869                 break;
1870         default:
1871                 /* Currently no other hardware supports loopback */
1872                 break;
1873         }
1874
1875 }
1876
1877 /**
1878  *  igb_vmdq_set_replication_pf - enable or disable vmdq replication
1879  *  @hw: pointer to the hardware struct
1880  *  @enable: state to enter, either enabled or disabled
1881  *
1882  *  enables/disables replication of packets across multiple pools.
1883  **/
1884 void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
1885 {
1886         u32 vt_ctl = rd32(E1000_VT_CTL);
1887
1888         if (enable)
1889                 vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
1890         else
1891                 vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
1892
1893         wr32(E1000_VT_CTL, vt_ctl);
1894 }
1895
1896 /**
1897  *  igb_read_phy_reg_82580 - Read 82580 MDI control register
1898  *  @hw: pointer to the HW structure
1899  *  @offset: register offset to be read
1900  *  @data: pointer to the read data
1901  *
1902  *  Reads the MDI control register in the PHY at offset and stores the
1903  *  information read to data.
1904  **/
1905 static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
1906 {
1907         s32 ret_val;
1908
1909         ret_val = hw->phy.ops.acquire(hw);
1910         if (ret_val)
1911                 goto out;
1912
1913         ret_val = igb_read_phy_reg_mdic(hw, offset, data);
1914
1915         hw->phy.ops.release(hw);
1916
1917 out:
1918         return ret_val;
1919 }
1920
1921 /**
1922  *  igb_write_phy_reg_82580 - Write 82580 MDI control register
1923  *  @hw: pointer to the HW structure
1924  *  @offset: register offset to write to
1925  *  @data: data to write to register at offset
1926  *
1927  *  Writes data to MDI control register in the PHY at offset.
1928  **/
1929 static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
1930 {
1931         s32 ret_val;
1932
1933
1934         ret_val = hw->phy.ops.acquire(hw);
1935         if (ret_val)
1936                 goto out;
1937
1938         ret_val = igb_write_phy_reg_mdic(hw, offset, data);
1939
1940         hw->phy.ops.release(hw);
1941
1942 out:
1943         return ret_val;
1944 }
1945
1946 /**
1947  *  igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
1948  *  @hw: pointer to the HW structure
1949  *
1950  *  This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
1951  *  the values found in the EEPROM.  This addresses an issue in which these
1952  *  bits are not restored from EEPROM after reset.
1953  **/
1954 static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
1955 {
1956         s32 ret_val = 0;
1957         u32 mdicnfg;
1958         u16 nvm_data = 0;
1959
1960         if (hw->mac.type != e1000_82580)
1961                 goto out;
1962         if (!igb_sgmii_active_82575(hw))
1963                 goto out;
1964
1965         ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
1966                                    NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
1967                                    &nvm_data);
1968         if (ret_val) {
1969                 hw_dbg("NVM Read Error\n");
1970                 goto out;
1971         }
1972
1973         mdicnfg = rd32(E1000_MDICNFG);
1974         if (nvm_data & NVM_WORD24_EXT_MDIO)
1975                 mdicnfg |= E1000_MDICNFG_EXT_MDIO;
1976         if (nvm_data & NVM_WORD24_COM_MDIO)
1977                 mdicnfg |= E1000_MDICNFG_COM_MDIO;
1978         wr32(E1000_MDICNFG, mdicnfg);
1979 out:
1980         return ret_val;
1981 }
1982
1983 /**
1984  *  igb_reset_hw_82580 - Reset hardware
1985  *  @hw: pointer to the HW structure
1986  *
1987  *  This resets function or entire device (all ports, etc.)
1988  *  to a known state.
1989  **/
1990 static s32 igb_reset_hw_82580(struct e1000_hw *hw)
1991 {
1992         s32 ret_val = 0;
1993         /* BH SW mailbox bit in SW_FW_SYNC */
1994         u16 swmbsw_mask = E1000_SW_SYNCH_MB;
1995         u32 ctrl, icr;
1996         bool global_device_reset = hw->dev_spec._82575.global_device_reset;
1997
1998
1999         hw->dev_spec._82575.global_device_reset = false;
2000
2001         /* due to hw errata, global device reset doesn't always
2002          * work on 82580
2003          */
2004         if (hw->mac.type == e1000_82580)
2005                 global_device_reset = false;
2006
2007         /* Get current control state. */
2008         ctrl = rd32(E1000_CTRL);
2009
2010         /* Prevent the PCI-E bus from sticking if there is no TLP connection
2011          * on the last TLP read/write transaction when MAC is reset.
2012          */
2013         ret_val = igb_disable_pcie_master(hw);
2014         if (ret_val)
2015                 hw_dbg("PCI-E Master disable polling has failed.\n");
2016
2017         hw_dbg("Masking off all interrupts\n");
2018         wr32(E1000_IMC, 0xffffffff);
2019         wr32(E1000_RCTL, 0);
2020         wr32(E1000_TCTL, E1000_TCTL_PSP);
2021         wrfl();
2022
2023         msleep(10);
2024
2025         /* Determine whether or not a global dev reset is requested */
2026         if (global_device_reset &&
2027             hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
2028                 global_device_reset = false;
2029
2030         if (global_device_reset &&
2031                 !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
2032                 ctrl |= E1000_CTRL_DEV_RST;
2033         else
2034                 ctrl |= E1000_CTRL_RST;
2035
2036         wr32(E1000_CTRL, ctrl);
2037         wrfl();
2038
2039         /* Add delay to ensure DEV_RST has time to complete */
2040         if (global_device_reset)
2041                 msleep(5);
2042
2043         ret_val = igb_get_auto_rd_done(hw);
2044         if (ret_val) {
2045                 /* When auto config read does not complete, do not
2046                  * return with an error. This can happen when there is no
2047                  * eeprom, and returning an error would prevent getting link.
2048                  */
2049                 hw_dbg("Auto Read Done did not complete\n");
2050         }
2051
2052         /* If EEPROM is not present, run manual init scripts */
2053         if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
2054                 igb_reset_init_script_82575(hw);
2055
2056         /* clear global device reset status bit */
2057         wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);
2058
2059         /* Clear any pending interrupt events. */
2060         wr32(E1000_IMC, 0xffffffff);
2061         icr = rd32(E1000_ICR);
2062
2063         ret_val = igb_reset_mdicnfg_82580(hw);
2064         if (ret_val)
2065                 hw_dbg("Could not reset MDICNFG based on EEPROM\n");
2066
2067         /* Install any alternate MAC address into RAR0 */
2068         ret_val = igb_check_alt_mac_addr(hw);
2069
2070         /* Release semaphore */
2071         if (global_device_reset)
2072                 hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);
2073
2074         return ret_val;
2075 }
2076
2077 /**
2078  *  igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
2079  *  @data: data received by reading RXPBS register
2080  *
2081  *  The 82580 uses a table based approach for packet buffer allocation sizes.
2082  *  This function converts the retrieved value into the correct table value:
2083  *     0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
2084  *  0x0 36  72 144   1   2   4   8  16
2085  *  0x8 35  70 140 rsv rsv rsv rsv rsv
2086  */
2087 u16 igb_rxpbs_adjust_82580(u32 data)
2088 {
2089         u16 ret_val = 0;
2090
2091         if (data < E1000_82580_RXPBS_TABLE_SIZE)
2092                 ret_val = e1000_82580_rxpbs_table[data];
2093
2094         return ret_val;
2095 }
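/* Illustrative usage sketch (the register read is an assumption about the
 * caller, and assumes RXPBS has already been reduced to the table index):
 *
 *        u32 rxpbs = rd32(E1000_RXPBS);
 *        u16 rx_pba_kb = igb_rxpbs_adjust_82580(rxpbs);
 *
 * For example, an index of 0x2 maps to 144 per the table above, while any
 * index outside the table returns 0.
 */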
2096
2097 /**
2098  *  igb_validate_nvm_checksum_with_offset - Validate EEPROM
2099  *  checksum
2100  *  @hw: pointer to the HW structure
2101  *  @offset: offset in words of the checksum protected region
2102  *
2103  *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
2104  *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
2105  **/
2106 static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
2107                                                  u16 offset)
2108 {
2109         s32 ret_val = 0;
2110         u16 checksum = 0;
2111         u16 i, nvm_data;
2112
2113         for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
2114                 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2115                 if (ret_val) {
2116                         hw_dbg("NVM Read Error\n");
2117                         goto out;
2118                 }
2119                 checksum += nvm_data;
2120         }
2121
2122         if (checksum != (u16) NVM_SUM) {
2123                 hw_dbg("NVM Checksum Invalid\n");
2124                 ret_val = -E1000_ERR_NVM;
2125                 goto out;
2126         }
2127
2128 out:
2129         return ret_val;
2130 }
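/* Worked example of the checksum invariant used above and in the update
 * helper below (the word values are made up for illustration):
 *
 *        if the data words in [offset, offset + NVM_CHECKSUM_REG) sum to
 *        0x1234, the checksum word stored at offset + NVM_CHECKSUM_REG must
 *        be 0xBABA - 0x1234 = 0xA886 so that the 16-bit total equals NVM_SUM.
 */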
2131
2132 /**
2133  *  igb_update_nvm_checksum_with_offset - Update EEPROM
2134  *  checksum
2135  *  @hw: pointer to the HW structure
2136  *  @offset: offset in words of the checksum protected region
2137  *
2138  *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
2139  *  up to the checksum.  Then calculates the EEPROM checksum and writes the
2140  *  value to the EEPROM.
2141  **/
2142 static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
2143 {
2144         s32 ret_val;
2145         u16 checksum = 0;
2146         u16 i, nvm_data;
2147
2148         for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
2149                 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2150                 if (ret_val) {
2151                         hw_dbg("NVM Read Error while updating checksum.\n");
2152                         goto out;
2153                 }
2154                 checksum += nvm_data;
2155         }
2156         checksum = (u16) NVM_SUM - checksum;
2157         ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
2158                                 &checksum);
2159         if (ret_val)
2160                 hw_dbg("NVM Write Error while updating checksum.\n");
2161
2162 out:
2163         return ret_val;
2164 }
2165
2166 /**
2167  *  igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
2168  *  @hw: pointer to the HW structure
2169  *
2170  *  Calculates the EEPROM section checksum by reading/adding each word of
2171  *  the EEPROM and then verifies that the sum of the EEPROM is
2172  *  equal to 0xBABA.
2173  **/
2174 static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
2175 {
2176         s32 ret_val = 0;
2177         u16 eeprom_regions_count = 1;
2178         u16 j, nvm_data;
2179         u16 nvm_offset;
2180
2181         ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2182         if (ret_val) {
2183                 hw_dbg("NVM Read Error\n");
2184                 goto out;
2185         }
2186
2187         if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
2188                 /* if checksums compatibility bit is set validate checksums
2189                  * for all 4 ports.
2190                  */
2191                 eeprom_regions_count = 4;
2192         }
2193
2194         for (j = 0; j < eeprom_regions_count; j++) {
2195                 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2196                 ret_val = igb_validate_nvm_checksum_with_offset(hw,
2197                                                                 nvm_offset);
2198                 if (ret_val != 0)
2199                         goto out;
2200         }
2201
2202 out:
2203         return ret_val;
2204 }
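/* Hedged note: assuming NVM_82580_LAN_FUNC_OFFSET(j) expands to 0 for
 * function 0 and 0x40 + 0x40 * j otherwise (its usual definition elsewhere
 * in this driver), the four regions validated above start at word offsets
 * 0x000, 0x080, 0x0C0 and 0x100.
 */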
2205
2206 /**
2207  *  igb_update_nvm_checksum_82580 - Update EEPROM checksum
2208  *  @hw: pointer to the HW structure
2209  *
2210  *  Updates the EEPROM section checksums for all 4 ports by reading/adding
2211  *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
2212  *  checksum and writes the value to the EEPROM.
2213  **/
2214 static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
2215 {
2216         s32 ret_val;
2217         u16 j, nvm_data;
2218         u16 nvm_offset;
2219
2220         ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2221         if (ret_val) {
2222                 hw_dbg("NVM Read Error while updating checksum"
2223                         " compatibility bit.\n");
2224                 goto out;
2225         }
2226
2227         if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
2228                 /* set compatibility bit to validate checksums appropriately */
2229                 nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
2230                 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
2231                                         &nvm_data);
2232                 if (ret_val) {
2233                         hw_dbg("NVM Write Error while updating checksum"
2234                                 " compatibility bit.\n");
2235                         goto out;
2236                 }
2237         }
2238
2239         for (j = 0; j < 4; j++) {
2240                 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2241                 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
2242                 if (ret_val)
2243                         goto out;
2244         }
2245
2246 out:
2247         return ret_val;
2248 }
2249
2250 /**
2251  *  igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
2252  *  @hw: pointer to the HW structure
2253  *
2254  *  Calculates the EEPROM section checksum by reading/adding each word of
2255  *  the EEPROM and then verifies that the sum of the EEPROM is
2256  *  equal to 0xBABA.
2257  **/
2258 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
2259 {
2260         s32 ret_val = 0;
2261         u16 j;
2262         u16 nvm_offset;
2263
2264         for (j = 0; j < 4; j++) {
2265                 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2266                 ret_val = igb_validate_nvm_checksum_with_offset(hw,
2267                                                                 nvm_offset);
2268                 if (ret_val != 0)
2269                         goto out;
2270         }
2271
2272 out:
2273         return ret_val;
2274 }
2275
2276 /**
2277  *  igb_update_nvm_checksum_i350 - Update EEPROM checksum
2278  *  @hw: pointer to the HW structure
2279  *
2280  *  Updates the EEPROM section checksums for all 4 ports by reading/adding
2281  *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
2282  *  checksum and writes the value to the EEPROM.
2283  **/
2284 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
2285 {
2286         s32 ret_val = 0;
2287         u16 j;
2288         u16 nvm_offset;
2289
2290         for (j = 0; j < 4; j++) {
2291                 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2292                 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
2293                 if (ret_val != 0)
2294                         goto out;
2295         }
2296
2297 out:
2298         return ret_val;
2299 }
2300
2301 /**
2302  *  __igb_access_emi_reg - Read/write EMI register
2303  *  @hw: pointer to the HW structure
2304  *  @address: EMI address to program
2305  *  @data: pointer to value to read/write from/to the EMI address
2306  *  @read: boolean flag to indicate read or write
2307  **/
2308 static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
2309                                   u16 *data, bool read)
2310 {
2311         s32 ret_val = E1000_SUCCESS;
2312
2313         ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
2314         if (ret_val)
2315                 return ret_val;
2316
2317         if (read)
2318                 ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
2319         else
2320                 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
2321
2322         return ret_val;
2323 }
2324
2325 /**
2326  *  igb_read_emi_reg - Read Extended Management Interface register
2327  *  @hw: pointer to the HW structure
2328  *  @addr: EMI address to program
2329  *  @data: value to be read from the EMI address
2330  **/
2331 s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
2332 {
2333         return __igb_access_emi_reg(hw, addr, data, true);
2334 }
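/* Minimal usage sketch for the EMI read accessor above; emi_address is a
 * placeholder, not a documented EMI register:
 *
 *        u16 emi_val;
 *        s32 err = igb_read_emi_reg(hw, emi_address, &emi_val);
 *
 * A write variant would simply call __igb_access_emi_reg() with read set to
 * false and the value to program in *data.
 */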
2335
2336 /**
2337  *  igb_set_eee_i350 - Enable/disable EEE support
2338  *  @hw: pointer to the HW structure
2339  *
2340  *  Enable/disable EEE based on setting in dev_spec structure.
2341  *
2342  **/
2343 s32 igb_set_eee_i350(struct e1000_hw *hw)
2344 {
2345         s32 ret_val = 0;
2346         u32 ipcnfg, eeer;
2347
2348         if ((hw->mac.type < e1000_i350) ||
2349             (hw->phy.media_type != e1000_media_type_copper))
2350                 goto out;
2351         ipcnfg = rd32(E1000_IPCNFG);
2352         eeer = rd32(E1000_EEER);
2353
2354         /* enable or disable per user setting */
2355         if (!(hw->dev_spec._82575.eee_disable)) {
2356                 u32 eee_su = rd32(E1000_EEE_SU);
2357
2358                 ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
2359                 eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
2360                         E1000_EEER_LPI_FC);
2361
2362                 /* This bit should not be set in normal operation. */
2363                 if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
2364                         hw_dbg("LPI Clock Stop Bit should not be set!\n");
2365
2366         } else {
2367                 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
2368                         E1000_IPCNFG_EEE_100M_AN);
2369                 eeer &= ~(E1000_EEER_TX_LPI_EN |
2370                         E1000_EEER_RX_LPI_EN |
2371                         E1000_EEER_LPI_FC);
2372         }
2373         wr32(E1000_IPCNFG, ipcnfg);
2374         wr32(E1000_EEER, eeer);
2375         rd32(E1000_IPCNFG);
2376         rd32(E1000_EEER);
2377 out:
2378
2379         return ret_val;
2380 }
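/* Usage sketch, assuming EEE is toggled through the dev_spec flag read
 * above (the call site shown is an assumption, not a quote from the rest of
 * the driver):
 *
 *        hw->dev_spec._82575.eee_disable = false;
 *        igb_set_eee_i350(hw);
 */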
2381
2382 /**
2383  *  igb_set_eee_i354 - Enable/disable EEE support
2384  *  @hw: pointer to the HW structure
2385  *
2386  *  Enable/disable EEE legacy mode based on setting in dev_spec structure.
2387  *
2388  **/
2389 s32 igb_set_eee_i354(struct e1000_hw *hw)
2390 {
2391         struct e1000_phy_info *phy = &hw->phy;
2392         s32 ret_val = 0;
2393         u16 phy_data;
2394
2395         if ((hw->phy.media_type != e1000_media_type_copper) ||
2396             (phy->id != M88E1545_E_PHY_ID))
2397                 goto out;
2398
2399         if (!hw->dev_spec._82575.eee_disable) {
2400                 /* Switch to PHY page 18. */
2401                 ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 18);
2402                 if (ret_val)
2403                         goto out;
2404
2405                 ret_val = phy->ops.read_reg(hw, E1000_M88E1545_EEE_CTRL_1,
2406                                             &phy_data);
2407                 if (ret_val)
2408                         goto out;
2409
2410                 phy_data |= E1000_M88E1545_EEE_CTRL_1_MS;
2411                 ret_val = phy->ops.write_reg(hw, E1000_M88E1545_EEE_CTRL_1,
2412                                              phy_data);
2413                 if (ret_val)
2414                         goto out;
2415
2416                 /* Return the PHY to page 0. */
2417                 ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 0);
2418                 if (ret_val)
2419                         goto out;
2420
2421                 /* Turn on EEE advertisement. */
2422                 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2423                                              E1000_EEE_ADV_DEV_I354,
2424                                              &phy_data);
2425                 if (ret_val)
2426                         goto out;
2427
2428                 phy_data |= E1000_EEE_ADV_100_SUPPORTED |
2429                             E1000_EEE_ADV_1000_SUPPORTED;
2430                 ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2431                                                 E1000_EEE_ADV_DEV_I354,
2432                                                 phy_data);
2433         } else {
2434                 /* Turn off EEE advertisement. */
2435                 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2436                                              E1000_EEE_ADV_DEV_I354,
2437                                              &phy_data);
2438                 if (ret_val)
2439                         goto out;
2440
2441                 phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
2442                               E1000_EEE_ADV_1000_SUPPORTED);
2443                 ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2444                                               E1000_EEE_ADV_DEV_I354,
2445                                               phy_data);
2446         }
2447
2448 out:
2449         return ret_val;
2450 }
2451
2452 /**
2453  *  igb_get_eee_status_i354 - Get EEE status
2454  *  @hw: pointer to the HW structure
2455  *  @status: EEE status
2456  *
2457  *  Get EEE status by guessing based on whether Tx or Rx LPI indications have
2458  *  been received.
2459  **/
2460 s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
2461 {
2462         struct e1000_phy_info *phy = &hw->phy;
2463         s32 ret_val = 0;
2464         u16 phy_data;
2465
2466         /* Check if EEE is supported on this device. */
2467         if ((hw->phy.media_type != e1000_media_type_copper) ||
2468             (phy->id != M88E1545_E_PHY_ID))
2469                 goto out;
2470
2471         ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
2472                                      E1000_PCS_STATUS_DEV_I354,
2473                                      &phy_data);
2474         if (ret_val)
2475                 goto out;
2476
2477         *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
2478                               E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
2479
2480 out:
2481         return ret_val;
2482 }
2483
2484 static const u8 e1000_emc_temp_data[4] = {
2485         E1000_EMC_INTERNAL_DATA,
2486         E1000_EMC_DIODE1_DATA,
2487         E1000_EMC_DIODE2_DATA,
2488         E1000_EMC_DIODE3_DATA
2489 };
2490 static const u8 e1000_emc_therm_limit[4] = {
2491         E1000_EMC_INTERNAL_THERM_LIMIT,
2492         E1000_EMC_DIODE1_THERM_LIMIT,
2493         E1000_EMC_DIODE2_THERM_LIMIT,
2494         E1000_EMC_DIODE3_THERM_LIMIT
2495 };
2496
2497 /**
2498  *  igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
2499  *  @hw: pointer to hardware structure
2500  *
2501  *  Updates the temperatures in mac.thermal_sensor_data
2502  **/
2503 s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2504 {
2505         s32 status = E1000_SUCCESS;
2506         u16 ets_offset;
2507         u16 ets_cfg;
2508         u16 ets_sensor;
2509         u8  num_sensors;
2510         u8  sensor_index;
2511         u8  sensor_location;
2512         u8  i;
2513         struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
2514
2515         if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
2516                 return E1000_NOT_IMPLEMENTED;
2517
2518         data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF);
2519
2520         /* Return the internal sensor only if ETS is unsupported */
2521         hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
2522         if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
2523                 return status;
2524
2525         hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
2526         if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
2527             != NVM_ETS_TYPE_EMC)
2528                 return E1000_NOT_IMPLEMENTED;
2529
2530         num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
2531         if (num_sensors > E1000_MAX_SENSORS)
2532                 num_sensors = E1000_MAX_SENSORS;
2533
2534         for (i = 1; i < num_sensors; i++) {
2535                 hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
2536                 sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
2537                                 NVM_ETS_DATA_INDEX_SHIFT);
2538                 sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
2539                                    NVM_ETS_DATA_LOC_SHIFT);
2540
2541                 if (sensor_location != 0)
2542                         hw->phy.ops.read_i2c_byte(hw,
2543                                         e1000_emc_temp_data[sensor_index],
2544                                         E1000_I2C_THERMAL_SENSOR_ADDR,
2545                                         &data->sensor[i].temp);
2546         }
2547         return status;
2548 }
2549
2550 /**
2551  *  igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
2552  *  @hw: pointer to hardware structure
2553  *
2554  *  Sets the thermal sensor thresholds according to the NVM map
2555  *  and saves off the threshold and location values into mac.thermal_sensor_data
2556  **/
2557 s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
2558 {
2559         s32 status = E1000_SUCCESS;
2560         u16 ets_offset;
2561         u16 ets_cfg;
2562         u16 ets_sensor;
2563         u8  low_thresh_delta;
2564         u8  num_sensors;
2565         u8  sensor_index;
2566         u8  sensor_location;
2567         u8  therm_limit;
2568         u8  i;
2569         struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
2570
2571         if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
2572                 return E1000_NOT_IMPLEMENTED;
2573
2574         memset(data, 0, sizeof(struct e1000_thermal_sensor_data));
2575
2576         data->sensor[0].location = 0x1;
2577         data->sensor[0].caution_thresh =
2578                 (rd32(E1000_THHIGHTC) & 0xFF);
2579         data->sensor[0].max_op_thresh =
2580                 (rd32(E1000_THLOWTC) & 0xFF);
2581
2582         /* Return the internal sensor only if ETS is unsupported */
2583         hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
2584         if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
2585                 return status;
2586
2587         hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
2588         if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
2589             != NVM_ETS_TYPE_EMC)
2590                 return E1000_NOT_IMPLEMENTED;
2591
2592         low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
2593                             NVM_ETS_LTHRES_DELTA_SHIFT);
2594         num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
2595
2596         for (i = 1; i <= num_sensors; i++) {
2597                 hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
2598                 sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
2599                                 NVM_ETS_DATA_INDEX_SHIFT);
2600                 sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
2601                                    NVM_ETS_DATA_LOC_SHIFT);
2602                 therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;
2603
2604                 hw->phy.ops.write_i2c_byte(hw,
2605                         e1000_emc_therm_limit[sensor_index],
2606                         E1000_I2C_THERMAL_SENSOR_ADDR,
2607                         therm_limit);
2608
2609                 if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) {
2610                         data->sensor[i].location = sensor_location;
2611                         data->sensor[i].caution_thresh = therm_limit;
2612                         data->sensor[i].max_op_thresh = therm_limit -
2613                                                         low_thresh_delta;
2614                 }
2615         }
2616         return status;
2617 }
2618
2619 static struct e1000_mac_operations e1000_mac_ops_82575 = {
2620         .init_hw              = igb_init_hw_82575,
2621         .check_for_link       = igb_check_for_link_82575,
2622         .rar_set              = igb_rar_set,
2623         .read_mac_addr        = igb_read_mac_addr_82575,
2624         .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
2625 #ifdef CONFIG_IGB_HWMON
2626         .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
2627         .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
2628 #endif
2629 };
2630
2631 static struct e1000_phy_operations e1000_phy_ops_82575 = {
2632         .acquire              = igb_acquire_phy_82575,
2633         .get_cfg_done         = igb_get_cfg_done_82575,
2634         .release              = igb_release_phy_82575,
2635         .write_i2c_byte       = igb_write_i2c_byte,
2636         .read_i2c_byte        = igb_read_i2c_byte,
2637 };
2638
2639 static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
2640         .acquire              = igb_acquire_nvm_82575,
2641         .read                 = igb_read_nvm_eerd,
2642         .release              = igb_release_nvm_82575,
2643         .write                = igb_write_nvm_spi,
2644 };
2645
2646 const struct e1000_info e1000_82575_info = {
2647         .get_invariants = igb_get_invariants_82575,
2648         .mac_ops = &e1000_mac_ops_82575,
2649         .phy_ops = &e1000_phy_ops_82575,
2650         .nvm_ops = &e1000_nvm_ops_82575,
2651 };
2652