2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 * Copyright (c) a lot of people too. Please respect their work.
8 * See MAINTAINERS file for support contact information.
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
23 #include <linux/tcp.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/firmware.h>
29 #include <linux/pci-aspm.h>
30 #include <linux/prefetch.h>
35 #define RTL8169_VERSION "2.3LK-NAPI"
36 #define MODULENAME "r8169"
37 #define PFX MODULENAME ": "
39 #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
40 #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
41 #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
42 #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
43 #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
44 #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45 #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46 #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47 #define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48 #define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
49 #define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
50 #define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
53 #define assert(expr) \
55 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
56 #expr,__FILE__,__func__,__LINE__); \
58 #define dprintk(fmt, args...) \
59 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
61 #define assert(expr) do {} while (0)
62 #define dprintk(fmt, args...) do {} while (0)
63 #endif /* RTL8169_DEBUG */
65 #define R8169_MSG_DEFAULT \
66 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
/*
 * Number of free slots in the Tx ring: distance between the producer
 * (cur_tx) and the consumer (dirty_tx). The macro argument is fully
 * parenthesized so any pointer expression may be passed safely.
 */
#define TX_SLOTS_AVAIL(tp) \
	((tp)->dirty_tx + NUM_TX_DESC - (tp)->cur_tx)
71 /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
/* +1 descriptor accounts for the linear (head) part of the skb. */
#define TX_FRAGS_READY_FOR(tp,nr_frags) \
	(TX_SLOTS_AVAIL(tp) >= ((nr_frags) + 1))
75 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
76 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
77 static const int multicast_filter_limit = 32;
79 #define MAX_READ_REQUEST_SHIFT 12
80 #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
81 #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
83 #define R8169_REGS_SIZE 256
84 #define R8169_NAPI_WEIGHT 64
85 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
86 #define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
87 #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
88 #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
90 #define RTL8169_TX_TIMEOUT (6*HZ)
91 #define RTL8169_PHY_TIMEOUT (10*HZ)
93 /* write/read MMIO register */
94 #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
95 #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
96 #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
97 #define RTL_R8(reg) readb (ioaddr + (reg))
98 #define RTL_R16(reg) readw (ioaddr + (reg))
99 #define RTL_R32(reg) readl (ioaddr + (reg))
102 RTL_GIGA_MAC_VER_01 = 0,
143 RTL_GIGA_MAC_NONE = 0xff,
146 enum rtl_tx_desc_version {
151 #define JUMBO_1K ETH_DATA_LEN
152 #define JUMBO_4K (4*1024 - ETH_HLEN - 2)
153 #define JUMBO_6K (6*1024 - ETH_HLEN - 2)
154 #define JUMBO_7K (7*1024 - ETH_HLEN - 2)
155 #define JUMBO_9K (9*1024 - ETH_HLEN - 2)
157 #define _R(NAME,TD,FW,SZ,B) { \
165 static const struct {
167 enum rtl_tx_desc_version txd_version;
171 } rtl_chip_infos[] = {
173 [RTL_GIGA_MAC_VER_01] =
174 _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
175 [RTL_GIGA_MAC_VER_02] =
176 _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
177 [RTL_GIGA_MAC_VER_03] =
178 _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
179 [RTL_GIGA_MAC_VER_04] =
180 _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
181 [RTL_GIGA_MAC_VER_05] =
182 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
183 [RTL_GIGA_MAC_VER_06] =
184 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
186 [RTL_GIGA_MAC_VER_07] =
187 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
188 [RTL_GIGA_MAC_VER_08] =
189 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
190 [RTL_GIGA_MAC_VER_09] =
191 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
192 [RTL_GIGA_MAC_VER_10] =
193 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
194 [RTL_GIGA_MAC_VER_11] =
195 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
196 [RTL_GIGA_MAC_VER_12] =
197 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
198 [RTL_GIGA_MAC_VER_13] =
199 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
200 [RTL_GIGA_MAC_VER_14] =
201 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
202 [RTL_GIGA_MAC_VER_15] =
203 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
204 [RTL_GIGA_MAC_VER_16] =
205 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
206 [RTL_GIGA_MAC_VER_17] =
207 _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
208 [RTL_GIGA_MAC_VER_18] =
209 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
210 [RTL_GIGA_MAC_VER_19] =
211 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
212 [RTL_GIGA_MAC_VER_20] =
213 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
214 [RTL_GIGA_MAC_VER_21] =
215 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
216 [RTL_GIGA_MAC_VER_22] =
217 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
218 [RTL_GIGA_MAC_VER_23] =
219 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
220 [RTL_GIGA_MAC_VER_24] =
221 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
222 [RTL_GIGA_MAC_VER_25] =
223 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
225 [RTL_GIGA_MAC_VER_26] =
226 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
228 [RTL_GIGA_MAC_VER_27] =
229 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
230 [RTL_GIGA_MAC_VER_28] =
231 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
232 [RTL_GIGA_MAC_VER_29] =
233 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
235 [RTL_GIGA_MAC_VER_30] =
236 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
238 [RTL_GIGA_MAC_VER_31] =
239 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
240 [RTL_GIGA_MAC_VER_32] =
241 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
243 [RTL_GIGA_MAC_VER_33] =
244 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
246 [RTL_GIGA_MAC_VER_34] =
247 _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
249 [RTL_GIGA_MAC_VER_35] =
250 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
252 [RTL_GIGA_MAC_VER_36] =
253 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
255 [RTL_GIGA_MAC_VER_37] =
256 _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
258 [RTL_GIGA_MAC_VER_38] =
259 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
261 [RTL_GIGA_MAC_VER_39] =
262 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
264 [RTL_GIGA_MAC_VER_40] =
265 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
267 [RTL_GIGA_MAC_VER_41] =
268 _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
278 static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
279 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
280 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
281 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
282 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
283 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
284 { PCI_VENDOR_ID_DLINK, 0x4300,
285 PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
286 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
287 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
288 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
289 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
290 { PCI_VENDOR_ID_LINKSYS, 0x1032,
291 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
293 PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
297 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
299 static int rx_buf_sz = 16383;
306 MAC0 = 0, /* Ethernet hardware address. */
308 MAR0 = 8, /* Multicast filter. */
309 CounterAddrLow = 0x10,
310 CounterAddrHigh = 0x14,
311 TxDescStartAddrLow = 0x20,
312 TxDescStartAddrHigh = 0x24,
313 TxHDescStartAddrLow = 0x28,
314 TxHDescStartAddrHigh = 0x2c,
323 #define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
324 #define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
327 #define RX128_INT_EN (1 << 15) /* 8111c and later */
328 #define RX_MULTI_EN (1 << 14) /* 8111c only */
329 #define RXCFG_FIFO_SHIFT 13
330 /* No threshold before first PCI xfer */
331 #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
332 #define RXCFG_DMA_SHIFT 8
333 /* Unlimited maximum PCI burst. */
334 #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
341 #define PME_SIGNAL (1 << 5) /* 8168c and later */
352 RxDescAddrLow = 0xe4,
353 RxDescAddrHigh = 0xe8,
354 EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
356 #define NoEarlyTx 0x3f /* Max value : no early transmit. */
358 MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
360 #define TxPacketMax (8064 >> 7)
361 #define EarlySize 0x27
364 FuncEventMask = 0xf4,
365 FuncPresetState = 0xf8,
366 FuncForceEvent = 0xfc,
369 enum rtl8110_registers {
375 enum rtl8168_8101_registers {
378 #define CSIAR_FLAG 0x80000000
379 #define CSIAR_WRITE_CMD 0x80000000
380 #define CSIAR_BYTE_ENABLE 0x0f
381 #define CSIAR_BYTE_ENABLE_SHIFT 12
382 #define CSIAR_ADDR_MASK 0x0fff
383 #define CSIAR_FUNC_CARD 0x00000000
384 #define CSIAR_FUNC_SDIO 0x00010000
385 #define CSIAR_FUNC_NIC 0x00020000
388 #define EPHYAR_FLAG 0x80000000
389 #define EPHYAR_WRITE_CMD 0x80000000
390 #define EPHYAR_REG_MASK 0x1f
391 #define EPHYAR_REG_SHIFT 16
392 #define EPHYAR_DATA_MASK 0xffff
394 #define PFM_EN (1 << 6)
396 #define FIX_NAK_1 (1 << 4)
397 #define FIX_NAK_2 (1 << 3)
400 #define NOW_IS_OOB (1 << 7)
401 #define TX_EMPTY (1 << 5)
402 #define RX_EMPTY (1 << 4)
403 #define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
404 #define EN_NDP (1 << 3)
405 #define EN_OOB_RESET (1 << 2)
406 #define LINK_LIST_RDY (1 << 1)
408 #define EFUSEAR_FLAG 0x80000000
409 #define EFUSEAR_WRITE_CMD 0x80000000
410 #define EFUSEAR_READ_CMD 0x00000000
411 #define EFUSEAR_REG_MASK 0x03ff
412 #define EFUSEAR_REG_SHIFT 8
413 #define EFUSEAR_DATA_MASK 0xff
416 enum rtl8168_registers {
421 #define ERIAR_FLAG 0x80000000
422 #define ERIAR_WRITE_CMD 0x80000000
423 #define ERIAR_READ_CMD 0x00000000
424 #define ERIAR_ADDR_BYTE_ALIGN 4
425 #define ERIAR_TYPE_SHIFT 16
426 #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
427 #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
428 #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
429 #define ERIAR_MASK_SHIFT 12
430 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
431 #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
432 #define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
433 #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
434 EPHY_RXER_NUM = 0x7c,
435 OCPDR = 0xb0, /* OCP GPHY access */
436 #define OCPDR_WRITE_CMD 0x80000000
437 #define OCPDR_READ_CMD 0x00000000
438 #define OCPDR_REG_MASK 0x7f
439 #define OCPDR_GPHY_REG_SHIFT 16
440 #define OCPDR_DATA_MASK 0xffff
442 #define OCPAR_FLAG 0x80000000
443 #define OCPAR_GPHY_WRITE_CMD 0x8000f060
444 #define OCPAR_GPHY_READ_CMD 0x0000f060
446 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
447 MISC = 0xf0, /* 8168e only. */
448 #define TXPLA_RST (1 << 29)
449 #define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
450 #define PWM_EN (1 << 22)
451 #define RXDV_GATED_EN (1 << 19)
452 #define EARLY_TALLY_EN (1 << 16)
453 #define FORCE_CLK (1 << 15) /* force clock request */
456 enum rtl_register_content {
457 /* InterruptStatusBits */
461 TxDescUnavail = 0x0080,
485 /* TXPoll register p.5 */
486 HPQ = 0x80, /* Poll cmd on the high prio queue */
487 NPQ = 0x40, /* Poll cmd on the low prio queue */
488 FSWInt = 0x01, /* Forced software interrupt */
492 Cfg9346_Unlock = 0xc0,
497 AcceptBroadcast = 0x08,
498 AcceptMulticast = 0x04,
500 AcceptAllPhys = 0x01,
501 #define RX_CONFIG_ACCEPT_MASK 0x3f
504 TxInterFrameGapShift = 24,
505 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
507 /* Config1 register p.24 */
510 Speed_down = (1 << 4),
514 PMEnable = (1 << 0), /* Power Management Enable */
516 /* Config2 register p. 25 */
517 ClkReqEn = (1 << 7), /* Clock Request Enable */
518 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
519 PCI_Clock_66MHz = 0x01,
520 PCI_Clock_33MHz = 0x00,
522 /* Config3 register p.25 */
523 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
524 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
525 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
526 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
528 /* Config4 register */
529 Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */
531 /* Config5 register p.27 */
532 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
533 MWF = (1 << 5), /* Accept Multicast wakeup frame */
534 UWF = (1 << 4), /* Accept Unicast wakeup frame */
536 LanWake = (1 << 1), /* LanWake enable/disable */
537 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
538 ASPM_en = (1 << 0), /* ASPM enable */
541 TBIReset = 0x80000000,
542 TBILoopback = 0x40000000,
543 TBINwEnable = 0x20000000,
544 TBINwRestart = 0x10000000,
545 TBILinkOk = 0x02000000,
546 TBINwComplete = 0x01000000,
549 EnableBist = (1 << 15), // 8168 8101
550 Mac_dbgo_oe = (1 << 14), // 8168 8101
551 Normal_mode = (1 << 13), // unused
552 Force_half_dup = (1 << 12), // 8168 8101
553 Force_rxflow_en = (1 << 11), // 8168 8101
554 Force_txflow_en = (1 << 10), // 8168 8101
555 Cxpl_dbg_sel = (1 << 9), // 8168 8101
556 ASF = (1 << 8), // 8168 8101
557 PktCntrDisable = (1 << 7), // 8168 8101
558 Mac_dbgo_sel = 0x001c, // 8168
563 INTT_0 = 0x0000, // 8168
564 INTT_1 = 0x0001, // 8168
565 INTT_2 = 0x0002, // 8168
566 INTT_3 = 0x0003, // 8168
568 /* rtl8169_PHYstatus */
579 TBILinkOK = 0x02000000,
581 /* DumpCounterCommand */
586 /* First doubleword. */
587 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
588 RingEnd = (1 << 30), /* End of descriptor ring */
589 FirstFrag = (1 << 29), /* First segment of a packet */
590 LastFrag = (1 << 28), /* Final segment of a packet */
594 enum rtl_tx_desc_bit {
595 /* First doubleword. */
596 TD_LSO = (1 << 27), /* Large Send Offload */
597 #define TD_MSS_MAX 0x07ffu /* MSS value */
599 /* Second doubleword. */
600 TxVlanTag = (1 << 17), /* Add VLAN tag */
603 /* 8169, 8168b and 810x except 8102e. */
604 enum rtl_tx_desc_bit_0 {
605 /* First doubleword. */
606 #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
607 TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
608 TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
609 TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
612 /* 8102e, 8168c and beyond. */
613 enum rtl_tx_desc_bit_1 {
614 /* Second doubleword. */
615 #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
616 TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
617 TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
618 TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
621 static const struct rtl_tx_desc_info {
628 } tx_desc_info [] = {
631 .udp = TD0_IP_CS | TD0_UDP_CS,
632 .tcp = TD0_IP_CS | TD0_TCP_CS
634 .mss_shift = TD0_MSS_SHIFT,
639 .udp = TD1_IP_CS | TD1_UDP_CS,
640 .tcp = TD1_IP_CS | TD1_TCP_CS
642 .mss_shift = TD1_MSS_SHIFT,
647 enum rtl_rx_desc_bit {
649 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
650 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
652 #define RxProtoUDP (PID1)
653 #define RxProtoTCP (PID0)
654 #define RxProtoIP (PID1 | PID0)
655 #define RxProtoMask RxProtoIP
657 IPFail = (1 << 16), /* IP checksum failed */
658 UDPFail = (1 << 15), /* UDP/IP checksum failed */
659 TCPFail = (1 << 14), /* TCP/IP checksum failed */
660 RxVlanTag = (1 << 16), /* VLAN tag available */
663 #define RsvdMask 0x3fffc000
680 u8 __pad[sizeof(void *) - sizeof(u32)];
684 RTL_FEATURE_WOL = (1 << 0),
685 RTL_FEATURE_MSI = (1 << 1),
686 RTL_FEATURE_GMII = (1 << 2),
689 struct rtl8169_counters {
696 __le32 tx_one_collision;
697 __le32 tx_multi_collision;
706 RTL_FLAG_TASK_ENABLED,
707 RTL_FLAG_TASK_SLOW_PENDING,
708 RTL_FLAG_TASK_RESET_PENDING,
709 RTL_FLAG_TASK_PHY_PENDING,
713 struct rtl8169_stats {
716 struct u64_stats_sync syncp;
719 struct rtl8169_private {
720 void __iomem *mmio_addr; /* memory map physical address */
721 struct pci_dev *pci_dev;
722 struct net_device *dev;
723 struct napi_struct napi;
727 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
728 u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
731 struct rtl8169_stats rx_stats;
732 struct rtl8169_stats tx_stats;
733 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
734 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
735 dma_addr_t TxPhyAddr;
736 dma_addr_t RxPhyAddr;
737 void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
738 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
739 struct timer_list timer;
745 void (*write)(struct rtl8169_private *, int, int);
746 int (*read)(struct rtl8169_private *, int);
749 struct pll_power_ops {
750 void (*down)(struct rtl8169_private *);
751 void (*up)(struct rtl8169_private *);
755 void (*enable)(struct rtl8169_private *);
756 void (*disable)(struct rtl8169_private *);
760 void (*write)(struct rtl8169_private *, int, int);
761 u32 (*read)(struct rtl8169_private *, int);
764 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
765 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
766 void (*phy_reset_enable)(struct rtl8169_private *tp);
767 void (*hw_start)(struct net_device *);
768 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
769 unsigned int (*link_ok)(void __iomem *);
770 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
773 DECLARE_BITMAP(flags, RTL_FLAG_MAX);
775 struct work_struct work;
780 struct mii_if_info mii;
781 struct rtl8169_counters counters;
786 const struct firmware *fw;
788 #define RTL_VER_SIZE 32
790 char version[RTL_VER_SIZE];
792 struct rtl_fw_phy_action {
797 #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
802 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
803 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
804 module_param(use_dac, int, 0);
805 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
806 module_param_named(debug, debug.msg_enable, int, 0);
807 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
808 MODULE_LICENSE("GPL");
809 MODULE_VERSION(RTL8169_VERSION);
810 MODULE_FIRMWARE(FIRMWARE_8168D_1);
811 MODULE_FIRMWARE(FIRMWARE_8168D_2);
812 MODULE_FIRMWARE(FIRMWARE_8168E_1);
813 MODULE_FIRMWARE(FIRMWARE_8168E_2);
814 MODULE_FIRMWARE(FIRMWARE_8168E_3);
815 MODULE_FIRMWARE(FIRMWARE_8105E_1);
816 MODULE_FIRMWARE(FIRMWARE_8168F_1);
817 MODULE_FIRMWARE(FIRMWARE_8168F_2);
818 MODULE_FIRMWARE(FIRMWARE_8402_1);
819 MODULE_FIRMWARE(FIRMWARE_8411_1);
820 MODULE_FIRMWARE(FIRMWARE_8106E_1);
821 MODULE_FIRMWARE(FIRMWARE_8168G_1);
823 static void rtl_lock_work(struct rtl8169_private *tp)
825 mutex_lock(&tp->wk.mutex);
828 static void rtl_unlock_work(struct rtl8169_private *tp)
830 mutex_unlock(&tp->wk.mutex);
833 static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
835 pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
836 PCI_EXP_DEVCTL_READRQ, force);
840 bool (*check)(struct rtl8169_private *);
844 static void rtl_udelay(unsigned int d)
849 static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
850 void (*delay)(unsigned int), unsigned int d, int n,
855 for (i = 0; i < n; i++) {
857 if (c->check(tp) == high)
860 netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
861 c->msg, !high, n, d);
865 static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
866 const struct rtl_cond *c,
867 unsigned int d, int n)
869 return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
872 static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
873 const struct rtl_cond *c,
874 unsigned int d, int n)
876 return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
879 static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
880 const struct rtl_cond *c,
881 unsigned int d, int n)
883 return rtl_loop_wait(tp, c, msleep, d, n, true);
886 static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
887 const struct rtl_cond *c,
888 unsigned int d, int n)
890 return rtl_loop_wait(tp, c, msleep, d, n, false);
/*
 * Declare a named poll condition for rtl_loop_wait(): forward-declares
 * name##_check(), defines a const struct rtl_cond wiring that callback
 * in, and opens the definition of name##_check() whose body follows the
 * macro invocation (see the DECLARE_RTL_COND(...) { ... } uses below).
 * NOTE(review): some continuation lines are elided in this extract.
 */
893 #define DECLARE_RTL_COND(name) \
894 static bool name ## _check(struct rtl8169_private *); \
896 static const struct rtl_cond name = { \
897 .check = name ## _check, \
901 static bool name ## _check(struct rtl8169_private *tp)
903 DECLARE_RTL_COND(rtl_ocpar_cond)
905 void __iomem *ioaddr = tp->mmio_addr;
907 return RTL_R32(OCPAR) & OCPAR_FLAG;
910 static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
912 void __iomem *ioaddr = tp->mmio_addr;
914 RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
916 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
920 static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
922 void __iomem *ioaddr = tp->mmio_addr;
924 RTL_W32(OCPDR, data);
925 RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
927 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
930 DECLARE_RTL_COND(rtl_eriar_cond)
932 void __iomem *ioaddr = tp->mmio_addr;
934 return RTL_R32(ERIAR) & ERIAR_FLAG;
937 static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
939 void __iomem *ioaddr = tp->mmio_addr;
942 RTL_W32(ERIAR, 0x800010e8);
945 if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
948 ocp_write(tp, 0x1, 0x30, 0x00000001);
951 #define OOB_CMD_RESET 0x00
952 #define OOB_CMD_DRIVER_START 0x05
953 #define OOB_CMD_DRIVER_STOP 0x06
955 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
957 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
960 DECLARE_RTL_COND(rtl_ocp_read_cond)
964 reg = rtl8168_get_ocp_reg(tp);
966 return ocp_read(tp, 0x0f, reg) & 0x00000800;
969 static void rtl8168_driver_start(struct rtl8169_private *tp)
971 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
973 rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
976 static void rtl8168_driver_stop(struct rtl8169_private *tp)
978 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
980 rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
983 static int r8168dp_check_dash(struct rtl8169_private *tp)
985 u16 reg = rtl8168_get_ocp_reg(tp);
987 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
990 static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
992 if (reg & 0xffff0001) {
993 netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
999 DECLARE_RTL_COND(rtl_ocp_gphy_cond)
1001 void __iomem *ioaddr = tp->mmio_addr;
1003 return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
1006 static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1008 void __iomem *ioaddr = tp->mmio_addr;
1010 if (rtl_ocp_reg_failure(tp, reg))
1013 RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
1015 rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
1018 static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
1020 void __iomem *ioaddr = tp->mmio_addr;
1022 if (rtl_ocp_reg_failure(tp, reg))
1025 RTL_W32(GPHY_OCP, reg << 15);
1027 return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
1028 (RTL_R32(GPHY_OCP) & 0xffff) : ~0;
1031 static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
1035 val = r8168_phy_ocp_read(tp, reg);
1036 r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
1039 static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1041 void __iomem *ioaddr = tp->mmio_addr;
1043 if (rtl_ocp_reg_failure(tp, reg))
1046 RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
1049 static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
1051 void __iomem *ioaddr = tp->mmio_addr;
1053 if (rtl_ocp_reg_failure(tp, reg))
1056 RTL_W32(OCPDR, reg << 15);
1058 return RTL_R32(OCPDR);
1061 #define OCP_STD_PHY_BASE 0xa400
1063 static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
1066 tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
1070 if (tp->ocp_base != OCP_STD_PHY_BASE)
1073 r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
1076 static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
1078 if (tp->ocp_base != OCP_STD_PHY_BASE)
1081 return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
1084 DECLARE_RTL_COND(rtl_phyar_cond)
1086 void __iomem *ioaddr = tp->mmio_addr;
1088 return RTL_R32(PHYAR) & 0x80000000;
1091 static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
1093 void __iomem *ioaddr = tp->mmio_addr;
1095 RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
1097 rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
1099 * According to hardware specs a 20us delay is required after write
1100 * complete indication, but before sending next command.
1105 static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
1107 void __iomem *ioaddr = tp->mmio_addr;
1110 RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);
1112 value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
1113 RTL_R32(PHYAR) & 0xffff : ~0;
1116 * According to hardware specs a 20us delay is required after read
1117 * complete indication, but before sending next command.
1124 static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
1126 void __iomem *ioaddr = tp->mmio_addr;
1128 RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
1129 RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
1130 RTL_W32(EPHY_RXER_NUM, 0);
1132 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
1135 static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
1137 r8168dp_1_mdio_access(tp, reg,
1138 OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
1141 static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
1143 void __iomem *ioaddr = tp->mmio_addr;
1145 r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
1148 RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
1149 RTL_W32(EPHY_RXER_NUM, 0);
1151 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
1152 RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
1155 #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
1157 static void r8168dp_2_mdio_start(void __iomem *ioaddr)
1159 RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
1162 static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
1164 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
1167 static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
1169 void __iomem *ioaddr = tp->mmio_addr;
1171 r8168dp_2_mdio_start(ioaddr);
1173 r8169_mdio_write(tp, reg, value);
1175 r8168dp_2_mdio_stop(ioaddr);
1178 static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
1180 void __iomem *ioaddr = tp->mmio_addr;
1183 r8168dp_2_mdio_start(ioaddr);
1185 value = r8169_mdio_read(tp, reg);
1187 r8168dp_2_mdio_stop(ioaddr);
1192 static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
1194 tp->mdio_ops.write(tp, location, val);
1197 static int rtl_readphy(struct rtl8169_private *tp, int location)
1199 return tp->mdio_ops.read(tp, location);
/* OR the given bits into a PHY register (read-modify-write). */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	int cur = rtl_readphy(tp, reg_addr);

	rtl_writephy(tp, reg_addr, cur | value);
}
/* Read-modify-write a PHY register: set bits @p, then clear bits @m. */
static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	int val = rtl_readphy(tp, reg_addr);

	rtl_writephy(tp, reg_addr, (val | p) & ~m);
}
/* mii_if_info write hook; phy_id is ignored (single internal PHY). */
static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
			   int val)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_writephy(tp, location, val);
}
/* mii_if_info read hook; phy_id is ignored (single internal PHY). */
static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return rtl_readphy(tp, location);
}
1230 DECLARE_RTL_COND(rtl_ephyar_cond)
1232 void __iomem *ioaddr = tp->mmio_addr;
1234 return RTL_R32(EPHYAR) & EPHYAR_FLAG;
1237 static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
1239 void __iomem *ioaddr = tp->mmio_addr;
1241 RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
1242 (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1244 rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
1249 static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
1251 void __iomem *ioaddr = tp->mmio_addr;
1253 RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1255 return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
1256 RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
1259 static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
1262 void __iomem *ioaddr = tp->mmio_addr;
1264 BUG_ON((addr & 3) || (mask == 0));
1265 RTL_W32(ERIDR, val);
1266 RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
1268 rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
1271 static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
1273 void __iomem *ioaddr = tp->mmio_addr;
1275 RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
1277 return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
1278 RTL_R32(ERIDR) : ~0;
1281 static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1286 val = rtl_eri_read(tp, addr, type);
1287 rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
1296 static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1297 const struct exgmac_reg *r, int len)
1300 rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1305 DECLARE_RTL_COND(rtl_efusear_cond)
1307 void __iomem *ioaddr = tp->mmio_addr;
1309 return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
1312 static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
1314 void __iomem *ioaddr = tp->mmio_addr;
1316 RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
1318 return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
1319 RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
1322 static u16 rtl_get_events(struct rtl8169_private *tp)
1324 void __iomem *ioaddr = tp->mmio_addr;
1326 return RTL_R16(IntrStatus);
1329 static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
1331 void __iomem *ioaddr = tp->mmio_addr;
1333 RTL_W16(IntrStatus, bits);
1337 static void rtl_irq_disable(struct rtl8169_private *tp)
1339 void __iomem *ioaddr = tp->mmio_addr;
1341 RTL_W16(IntrMask, 0);
1345 static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
1347 void __iomem *ioaddr = tp->mmio_addr;
1349 RTL_W16(IntrMask, bits);
1352 #define RTL_EVENT_NAPI_RX (RxOK | RxErr)
1353 #define RTL_EVENT_NAPI_TX (TxOK | TxErr)
1354 #define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1356 static void rtl_irq_enable_all(struct rtl8169_private *tp)
1358 rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
1361 static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1363 void __iomem *ioaddr = tp->mmio_addr;
1365 rtl_irq_disable(tp);
1366 rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
1370 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
1372 void __iomem *ioaddr = tp->mmio_addr;
1374 return RTL_R32(TBICSR) & TBIReset;
1377 static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
1379 return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
1382 static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
1384 return RTL_R32(TBICSR) & TBILinkOk;
1387 static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
1389 return RTL_R8(PHYstatus) & LinkStatus;
1392 static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
1394 void __iomem *ioaddr = tp->mmio_addr;
1396 RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
1399 static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1403 val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1404 rtl_writephy(tp, MII_BMCR, val & 0xffff);
1407 static void rtl_link_chg_patch(struct rtl8169_private *tp)
1409 void __iomem *ioaddr = tp->mmio_addr;
1410 struct net_device *dev = tp->dev;
1412 if (!netif_running(dev))
1415 if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1416 tp->mac_version == RTL_GIGA_MAC_VER_38) {
1417 if (RTL_R8(PHYstatus) & _1000bpsF) {
1418 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1420 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1422 } else if (RTL_R8(PHYstatus) & _100bps) {
1423 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1425 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1428 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1430 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1433 /* Reset packet filter */
1434 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
1436 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
1438 } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
1439 tp->mac_version == RTL_GIGA_MAC_VER_36) {
1440 if (RTL_R8(PHYstatus) & _1000bpsF) {
1441 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1443 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1446 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1448 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1451 } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1452 if (RTL_R8(PHYstatus) & _10bps) {
1453 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
1455 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
1458 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
1464 static void __rtl8169_check_link_status(struct net_device *dev,
1465 struct rtl8169_private *tp,
1466 void __iomem *ioaddr, bool pm)
1468 if (tp->link_ok(ioaddr)) {
1469 rtl_link_chg_patch(tp);
1470 /* This is to cancel a scheduled suspend if there's one. */
1472 pm_request_resume(&tp->pci_dev->dev);
1473 netif_carrier_on(dev);
1474 if (net_ratelimit())
1475 netif_info(tp, ifup, dev, "link up\n");
1477 netif_carrier_off(dev);
1478 netif_info(tp, ifdown, dev, "link down\n");
1480 pm_schedule_suspend(&tp->pci_dev->dev, 5000);
1484 static void rtl8169_check_link_status(struct net_device *dev,
1485 struct rtl8169_private *tp,
1486 void __iomem *ioaddr)
1488 __rtl8169_check_link_status(dev, tp, ioaddr, false);
1491 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1493 static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
1495 void __iomem *ioaddr = tp->mmio_addr;
1499 options = RTL_R8(Config1);
1500 if (!(options & PMEnable))
1503 options = RTL_R8(Config3);
1504 if (options & LinkUp)
1505 wolopts |= WAKE_PHY;
1506 if (options & MagicPacket)
1507 wolopts |= WAKE_MAGIC;
1509 options = RTL_R8(Config5);
1511 wolopts |= WAKE_UCAST;
1513 wolopts |= WAKE_BCAST;
1515 wolopts |= WAKE_MCAST;
1520 static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1522 struct rtl8169_private *tp = netdev_priv(dev);
1526 wol->supported = WAKE_ANY;
1527 wol->wolopts = __rtl8169_get_wol(tp);
1529 rtl_unlock_work(tp);
1532 static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1534 void __iomem *ioaddr = tp->mmio_addr;
1536 static const struct {
1541 { WAKE_PHY, Config3, LinkUp },
1542 { WAKE_MAGIC, Config3, MagicPacket },
1543 { WAKE_UCAST, Config5, UWF },
1544 { WAKE_BCAST, Config5, BWF },
1545 { WAKE_MCAST, Config5, MWF },
1546 { WAKE_ANY, Config5, LanWake }
1550 RTL_W8(Cfg9346, Cfg9346_Unlock);
1552 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
1553 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
1554 if (wolopts & cfg[i].opt)
1555 options |= cfg[i].mask;
1556 RTL_W8(cfg[i].reg, options);
1559 switch (tp->mac_version) {
1560 case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
1561 options = RTL_R8(Config1) & ~PMEnable;
1563 options |= PMEnable;
1564 RTL_W8(Config1, options);
1567 options = RTL_R8(Config2) & ~PME_SIGNAL;
1569 options |= PME_SIGNAL;
1570 RTL_W8(Config2, options);
1574 RTL_W8(Cfg9346, Cfg9346_Lock);
1577 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1579 struct rtl8169_private *tp = netdev_priv(dev);
1584 tp->features |= RTL_FEATURE_WOL;
1586 tp->features &= ~RTL_FEATURE_WOL;
1587 __rtl8169_set_wol(tp, wol->wolopts);
1589 rtl_unlock_work(tp);
1591 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
1596 static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
1598 return rtl_chip_infos[tp->mac_version].fw_name;
1601 static void rtl8169_get_drvinfo(struct net_device *dev,
1602 struct ethtool_drvinfo *info)
1604 struct rtl8169_private *tp = netdev_priv(dev);
1605 struct rtl_fw *rtl_fw = tp->rtl_fw;
1607 strlcpy(info->driver, MODULENAME, sizeof(info->driver));
1608 strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
1609 strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
1610 BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
1611 if (!IS_ERR_OR_NULL(rtl_fw))
1612 strlcpy(info->fw_version, rtl_fw->version,
1613 sizeof(info->fw_version));
1616 static int rtl8169_get_regs_len(struct net_device *dev)
1618 return R8169_REGS_SIZE;
1621 static int rtl8169_set_speed_tbi(struct net_device *dev,
1622 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1624 struct rtl8169_private *tp = netdev_priv(dev);
1625 void __iomem *ioaddr = tp->mmio_addr;
1629 reg = RTL_R32(TBICSR);
1630 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
1631 (duplex == DUPLEX_FULL)) {
1632 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
1633 } else if (autoneg == AUTONEG_ENABLE)
1634 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
1636 netif_warn(tp, link, dev,
1637 "incorrect speed setting refused in TBI mode\n");
1644 static int rtl8169_set_speed_xmii(struct net_device *dev,
1645 u8 autoneg, u16 speed, u8 duplex, u32 adv)
1647 struct rtl8169_private *tp = netdev_priv(dev);
1648 int giga_ctrl, bmcr;
1651 rtl_writephy(tp, 0x1f, 0x0000);
1653 if (autoneg == AUTONEG_ENABLE) {
1656 auto_nego = rtl_readphy(tp, MII_ADVERTISE);
1657 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
1658 ADVERTISE_100HALF | ADVERTISE_100FULL);
1660 if (adv & ADVERTISED_10baseT_Half)
1661 auto_nego |= ADVERTISE_10HALF;
1662 if (adv & ADVERTISED_10baseT_Full)
1663 auto_nego |= ADVERTISE_10FULL;
1664 if (adv & ADVERTISED_100baseT_Half)
1665 auto_nego |= ADVERTISE_100HALF;
1666 if (adv & ADVERTISED_100baseT_Full)
1667 auto_nego |= ADVERTISE_100FULL;
1669 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1671 giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
1672 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1674 /* The 8100e/8101e/8102e do Fast Ethernet only. */
1675 if (tp->mii.supports_gmii) {
1676 if (adv & ADVERTISED_1000baseT_Half)
1677 giga_ctrl |= ADVERTISE_1000HALF;
1678 if (adv & ADVERTISED_1000baseT_Full)
1679 giga_ctrl |= ADVERTISE_1000FULL;
1680 } else if (adv & (ADVERTISED_1000baseT_Half |
1681 ADVERTISED_1000baseT_Full)) {
1682 netif_info(tp, link, dev,
1683 "PHY does not support 1000Mbps\n");
1687 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
1689 rtl_writephy(tp, MII_ADVERTISE, auto_nego);
1690 rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
1694 if (speed == SPEED_10)
1696 else if (speed == SPEED_100)
1697 bmcr = BMCR_SPEED100;
1701 if (duplex == DUPLEX_FULL)
1702 bmcr |= BMCR_FULLDPLX;
1705 rtl_writephy(tp, MII_BMCR, bmcr);
1707 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
1708 tp->mac_version == RTL_GIGA_MAC_VER_03) {
1709 if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
1710 rtl_writephy(tp, 0x17, 0x2138);
1711 rtl_writephy(tp, 0x0e, 0x0260);
1713 rtl_writephy(tp, 0x17, 0x2108);
1714 rtl_writephy(tp, 0x0e, 0x0000);
1723 static int rtl8169_set_speed(struct net_device *dev,
1724 u8 autoneg, u16 speed, u8 duplex, u32 advertising)
1726 struct rtl8169_private *tp = netdev_priv(dev);
1729 ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
1733 if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
1734 (advertising & ADVERTISED_1000baseT_Full)) {
1735 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
1741 static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1743 struct rtl8169_private *tp = netdev_priv(dev);
1746 del_timer_sync(&tp->timer);
1749 ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
1750 cmd->duplex, cmd->advertising);
1751 rtl_unlock_work(tp);
1756 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1757 netdev_features_t features)
1759 struct rtl8169_private *tp = netdev_priv(dev);
1761 if (dev->mtu > TD_MSS_MAX)
1762 features &= ~NETIF_F_ALL_TSO;
1764 if (dev->mtu > JUMBO_1K &&
1765 !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
1766 features &= ~NETIF_F_IP_CSUM;
1771 static void __rtl8169_set_features(struct net_device *dev,
1772 netdev_features_t features)
1774 struct rtl8169_private *tp = netdev_priv(dev);
1775 netdev_features_t changed = features ^ dev->features;
1776 void __iomem *ioaddr = tp->mmio_addr;
1778 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
1781 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
1782 if (features & NETIF_F_RXCSUM)
1783 tp->cp_cmd |= RxChkSum;
1785 tp->cp_cmd &= ~RxChkSum;
1787 if (dev->features & NETIF_F_HW_VLAN_RX)
1788 tp->cp_cmd |= RxVlan;
1790 tp->cp_cmd &= ~RxVlan;
1792 RTL_W16(CPlusCmd, tp->cp_cmd);
1795 if (changed & NETIF_F_RXALL) {
1796 int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1797 if (features & NETIF_F_RXALL)
1798 tmp |= (AcceptErr | AcceptRunt);
1799 RTL_W32(RxConfig, tmp);
1803 static int rtl8169_set_features(struct net_device *dev,
1804 netdev_features_t features)
1806 struct rtl8169_private *tp = netdev_priv(dev);
1809 __rtl8169_set_features(dev, features);
1810 rtl_unlock_work(tp);
1816 static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
1818 return (vlan_tx_tag_present(skb)) ?
1819 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1822 static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1824 u32 opts2 = le32_to_cpu(desc->opts2);
1826 if (opts2 & RxVlanTag)
1827 __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
1830 static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
1832 struct rtl8169_private *tp = netdev_priv(dev);
1833 void __iomem *ioaddr = tp->mmio_addr;
1837 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
1838 cmd->port = PORT_FIBRE;
1839 cmd->transceiver = XCVR_INTERNAL;
1841 status = RTL_R32(TBICSR);
1842 cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
1843 cmd->autoneg = !!(status & TBINwEnable);
1845 ethtool_cmd_speed_set(cmd, SPEED_1000);
1846 cmd->duplex = DUPLEX_FULL; /* Always set */
1851 static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
1853 struct rtl8169_private *tp = netdev_priv(dev);
1855 return mii_ethtool_gset(&tp->mii, cmd);
1858 static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1860 struct rtl8169_private *tp = netdev_priv(dev);
1864 rc = tp->get_settings(dev, cmd);
1865 rtl_unlock_work(tp);
1870 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1873 struct rtl8169_private *tp = netdev_priv(dev);
1875 if (regs->len > R8169_REGS_SIZE)
1876 regs->len = R8169_REGS_SIZE;
1879 memcpy_fromio(p, tp->mmio_addr, regs->len);
1880 rtl_unlock_work(tp);
1883 static u32 rtl8169_get_msglevel(struct net_device *dev)
1885 struct rtl8169_private *tp = netdev_priv(dev);
1887 return tp->msg_enable;
1890 static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1892 struct rtl8169_private *tp = netdev_priv(dev);
1894 tp->msg_enable = value;
/*
 * ethtool statistics names, indexed in the same order as the data[] array
 * filled in rtl8169_get_ethtool_stats().
 * NOTE(review): this capture elides most of the table (orig lines
 * 1898-1903 and 1906-1912) — only two of the 13 entries are visible here;
 * confirm the full list against the tree.
 */
1897 static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1904 "tx_single_collisions",
1905 "tx_multi_collisions",
1913 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1917 return ARRAY_SIZE(rtl8169_gstrings);
1923 DECLARE_RTL_COND(rtl_counters_cond)
1925 void __iomem *ioaddr = tp->mmio_addr;
1927 return RTL_R32(CounterAddrLow) & CounterDump;
1930 static void rtl8169_update_counters(struct net_device *dev)
1932 struct rtl8169_private *tp = netdev_priv(dev);
1933 void __iomem *ioaddr = tp->mmio_addr;
1934 struct device *d = &tp->pci_dev->dev;
1935 struct rtl8169_counters *counters;
1940 * Some chips are unable to dump tally counters when the receiver
1943 if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
1946 counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
1950 RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
1951 cmd = (u64)paddr & DMA_BIT_MASK(32);
1952 RTL_W32(CounterAddrLow, cmd);
1953 RTL_W32(CounterAddrLow, cmd | CounterDump);
1955 if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
1956 memcpy(&tp->counters, counters, sizeof(*counters));
1958 RTL_W32(CounterAddrLow, 0);
1959 RTL_W32(CounterAddrHigh, 0);
1961 dma_free_coherent(d, sizeof(*counters), counters, paddr);
1964 static void rtl8169_get_ethtool_stats(struct net_device *dev,
1965 struct ethtool_stats *stats, u64 *data)
1967 struct rtl8169_private *tp = netdev_priv(dev);
1971 rtl8169_update_counters(dev);
1973 data[0] = le64_to_cpu(tp->counters.tx_packets);
1974 data[1] = le64_to_cpu(tp->counters.rx_packets);
1975 data[2] = le64_to_cpu(tp->counters.tx_errors);
1976 data[3] = le32_to_cpu(tp->counters.rx_errors);
1977 data[4] = le16_to_cpu(tp->counters.rx_missed);
1978 data[5] = le16_to_cpu(tp->counters.align_errors);
1979 data[6] = le32_to_cpu(tp->counters.tx_one_collision);
1980 data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
1981 data[8] = le64_to_cpu(tp->counters.rx_unicast);
1982 data[9] = le64_to_cpu(tp->counters.rx_broadcast);
1983 data[10] = le32_to_cpu(tp->counters.rx_multicast);
1984 data[11] = le16_to_cpu(tp->counters.tx_aborted);
1985 data[12] = le16_to_cpu(tp->counters.tx_underun);
1988 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1992 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
/*
 * ethtool operations table wiring the handlers defined above.
 * NOTE(review): the closing "};" (orig line ~2012) is elided from this
 * capture.
 */
1997 static const struct ethtool_ops rtl8169_ethtool_ops = {
1998 .get_drvinfo = rtl8169_get_drvinfo,
1999 .get_regs_len = rtl8169_get_regs_len,
2000 .get_link = ethtool_op_get_link,
2001 .get_settings = rtl8169_get_settings,
2002 .set_settings = rtl8169_set_settings,
2003 .get_msglevel = rtl8169_get_msglevel,
2004 .set_msglevel = rtl8169_set_msglevel,
2005 .get_regs = rtl8169_get_regs,
2006 .get_wol = rtl8169_get_wol,
2007 .set_wol = rtl8169_set_wol,
2008 .get_strings = rtl8169_get_strings,
2009 .get_sset_count = rtl8169_get_sset_count,
2010 .get_ethtool_stats = rtl8169_get_ethtool_stats,
2011 .get_ts_info = ethtool_op_get_ts_info,
/*
 * Identify the chip revision from TxConfig: walk mac_info[] (ordered
 * most-specific mask first within each family) until (reg & mask) == val,
 * falling back to @default_version for unrecognized silicon.
 * NOTE(review): this capture elides several lines — the struct fields and
 * family header comments, plus the loop body ("p++;" and "u32 reg;"
 * declarations around orig lines 2112-2116) — the visible entries are the
 * match table itself.
 */
2014 static void rtl8169_get_mac_version(struct rtl8169_private *tp,
2015 struct net_device *dev, u8 default_version)
2017 void __iomem *ioaddr = tp->mmio_addr;
2019 * The driver currently handles the 8168Bf and the 8168Be identically
2020 * but they can be identified more specifically through the test below
2023 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
2025 * Same thing for the 8101Eb and the 8101Ec:
2027 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
2029 static const struct rtl_mac_info {
2035 { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
2036 { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
2039 { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
2040 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
2041 { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 },
2044 { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 },
2045 { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 },
2046 { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
2047 { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
2050 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
2051 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
2052 { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
2054 /* 8168DP family. */
2055 { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
2056 { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
2057 { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
2060 { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
2061 { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
2062 { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
2063 { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },
2064 { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
2065 { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
2066 { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 },
2067 { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 },
2068 { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 },
2071 { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
2072 { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 },
2073 { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
2074 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
2077 { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 },
2078 { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 },
2079 { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
2080 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
2081 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
2082 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
2083 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
2084 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
2085 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
2086 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
2087 { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
2088 { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
2089 { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
2090 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
2091 { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
2092 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
2093 { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
2094 { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
2095 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
2096 /* FIXME: where did these entries come from ? -- FR */
2097 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
2098 { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 },
2101 { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 },
2102 { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 },
2103 { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 },
2104 { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 },
2105 { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
2106 { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
/* Catch-all terminator: matches everything, yields RTL_GIGA_MAC_NONE. */
2109 { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
2111 const struct rtl_mac_info *p = mac_info;
2114 reg = RTL_R32(TxConfig);
2115 while ((reg & p->mask) != p->val)
2117 tp->mac_version = p->mac_version;
2119 if (tp->mac_version == RTL_GIGA_MAC_NONE) {
2120 netif_notice(tp, probe, dev,
2121 "unknown MAC, using family default\n");
2122 tp->mac_version = default_version;
2126 static void rtl8169_print_mac_version(struct rtl8169_private *tp)
2128 dprintk("mac_version = 0x%02x\n", tp->mac_version);
2136 static void rtl_writephy_batch(struct rtl8169_private *tp,
2137 const struct phy_reg *regs, int len)
2140 rtl_writephy(tp, regs->reg, regs->val);
2145 #define PHY_READ 0x00000000
2146 #define PHY_DATA_OR 0x10000000
2147 #define PHY_DATA_AND 0x20000000
2148 #define PHY_BJMPN 0x30000000
2149 #define PHY_READ_EFUSE 0x40000000
2150 #define PHY_READ_MAC_BYTE 0x50000000
2151 #define PHY_WRITE_MAC_BYTE 0x60000000
2152 #define PHY_CLEAR_READCOUNT 0x70000000
2153 #define PHY_WRITE 0x80000000
2154 #define PHY_READCOUNT_EQ_SKIP 0x90000000
2155 #define PHY_COMP_EQ_SKIPN 0xa0000000
2156 #define PHY_COMP_NEQ_SKIPN 0xb0000000
2157 #define PHY_WRITE_PREVIOUS 0xc0000000
2158 #define PHY_SKIPN 0xd0000000
2159 #define PHY_DELAY_MS 0xe0000000
2160 #define PHY_WRITE_ERI_WORD 0xf0000000
2164 char version[RTL_VER_SIZE];
2170 #define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
2172 static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2174 const struct firmware *fw = rtl_fw->fw;
2175 struct fw_info *fw_info = (struct fw_info *)fw->data;
2176 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2177 char *version = rtl_fw->version;
2180 if (fw->size < FW_OPCODE_SIZE)
2183 if (!fw_info->magic) {
2184 size_t i, size, start;
2187 if (fw->size < sizeof(*fw_info))
2190 for (i = 0; i < fw->size; i++)
2191 checksum += fw->data[i];
2195 start = le32_to_cpu(fw_info->fw_start);
2196 if (start > fw->size)
2199 size = le32_to_cpu(fw_info->fw_len);
2200 if (size > (fw->size - start) / FW_OPCODE_SIZE)
2203 memcpy(version, fw_info->version, RTL_VER_SIZE);
2205 pa->code = (__le32 *)(fw->data + start);
2208 if (fw->size % FW_OPCODE_SIZE)
2211 strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);
2213 pa->code = (__le32 *)fw->data;
2214 pa->size = fw->size / FW_OPCODE_SIZE;
2216 version[RTL_VER_SIZE - 1] = 0;
2223 static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
2224 struct rtl_fw_phy_action *pa)
2229 for (index = 0; index < pa->size; index++) {
2230 u32 action = le32_to_cpu(pa->code[index]);
2231 u32 regno = (action & 0x0fff0000) >> 16;
2233 switch(action & 0xf0000000) {
2237 case PHY_READ_EFUSE:
2238 case PHY_CLEAR_READCOUNT:
2240 case PHY_WRITE_PREVIOUS:
2245 if (regno > index) {
2246 netif_err(tp, ifup, tp->dev,
2247 "Out of range of firmware\n");
2251 case PHY_READCOUNT_EQ_SKIP:
2252 if (index + 2 >= pa->size) {
2253 netif_err(tp, ifup, tp->dev,
2254 "Out of range of firmware\n");
2258 case PHY_COMP_EQ_SKIPN:
2259 case PHY_COMP_NEQ_SKIPN:
2261 if (index + 1 + regno >= pa->size) {
2262 netif_err(tp, ifup, tp->dev,
2263 "Out of range of firmware\n");
2268 case PHY_READ_MAC_BYTE:
2269 case PHY_WRITE_MAC_BYTE:
2270 case PHY_WRITE_ERI_WORD:
2272 netif_err(tp, ifup, tp->dev,
2273 "Invalid action 0x%08x\n", action);
2282 static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2284 struct net_device *dev = tp->dev;
2287 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2288 netif_err(tp, ifup, dev, "invalid firwmare\n");
2292 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
/*
 * Interpreter for the validated firmware opcode stream: executes each
 * PHY_* action against the PHY registers, tracking a one-deep read result
 * (predata) and a read counter (count) used by the skip opcodes.
 * NOTE(review): this capture elides large parts of the switch — the
 * declarations (predata/count/index), several case labels and their
 * index-advance statements, the PHY_BJMPN/PHY_SKIPN/PHY_DELAY_MS handling
 * and the default/BUG() arm (orig lines 2301-2376 gaps).  Only the lines
 * below are visible; do not infer the full control flow from them alone.
 */
2298 static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2300 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2304 predata = count = 0;
2306 for (index = 0; index < pa->size; ) {
2307 u32 action = le32_to_cpu(pa->code[index]);
2308 u32 data = action & 0x0000ffff;
2309 u32 regno = (action & 0x0fff0000) >> 16;
2314 switch(action & 0xf0000000) {
2316 predata = rtl_readphy(tp, regno);
2331 case PHY_READ_EFUSE:
2332 predata = rtl8168d_efuse_read(tp, regno);
2335 case PHY_CLEAR_READCOUNT:
2340 rtl_writephy(tp, regno, data);
2343 case PHY_READCOUNT_EQ_SKIP:
/* Skip two opcodes when the read counter equals the immediate. */
2344 index += (count == data) ? 2 : 1;
2346 case PHY_COMP_EQ_SKIPN:
2347 if (predata == data)
2351 case PHY_COMP_NEQ_SKIPN:
2352 if (predata != data)
2356 case PHY_WRITE_PREVIOUS:
2357 rtl_writephy(tp, regno, predata);
/* These opcodes are rejected by rtl_fw_data_ok() and never executed. */
2368 case PHY_READ_MAC_BYTE:
2369 case PHY_WRITE_MAC_BYTE:
2370 case PHY_WRITE_ERI_WORD:
2377 static void rtl_release_firmware(struct rtl8169_private *tp)
2379 if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2380 release_firmware(tp->rtl_fw->fw);
2383 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
2386 static void rtl_apply_firmware(struct rtl8169_private *tp)
2388 struct rtl_fw *rtl_fw = tp->rtl_fw;
2390 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2391 if (!IS_ERR_OR_NULL(rtl_fw))
2392 rtl_phy_write_fw(tp, rtl_fw);
2395 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2397 if (rtl_readphy(tp, reg) != val)
2398 netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2400 rtl_apply_firmware(tp);
/*
 * Vendor PHY initialization for the 8169S and 8169SB.
 * NOTE(review): the phy_reg_init[] table contents (orig lines 2406-2466
 * and 2473-2477) are elided from this capture — only the batch-write
 * calls are visible.
 */
2403 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
2405 static const struct phy_reg phy_reg_init[] = {
2467 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2470 static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
2472 static const struct phy_reg phy_reg_init[] = {
2478 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2481 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2483 struct pci_dev *pdev = tp->pci_dev;
2485 if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2486 (pdev->subsystem_device != 0xe000))
2489 rtl_writephy(tp, 0x1f, 0x0001);
2490 rtl_writephy(tp, 0x10, 0xf01b);
2491 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * Vendor PHY initialization for the 8169SCd/SCe and the 8168B/C family.
 * Each function batch-writes an opaque vendor-supplied register table and
 * some add individual rtl_patchphy() bit-sets.
 * NOTE(review): every phy_reg_init[] table body is elided from this
 * capture (orig line-number gaps after each "static const struct phy_reg"
 * line); the visible lines are only the call sequences.
 */
2494 static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
2496 static const struct phy_reg phy_reg_init[] = {
2536 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2538 rtl8169scd_hw_phy_config_quirk(tp);
2541 static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
2543 static const struct phy_reg phy_reg_init[] = {
2591 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2594 static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
2596 static const struct phy_reg phy_reg_init[] = {
2601 rtl_writephy(tp, 0x1f, 0x0001);
2602 rtl_patchphy(tp, 0x16, 1 << 0);
2604 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2607 static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
2609 static const struct phy_reg phy_reg_init[] = {
2615 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2618 static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
2620 static const struct phy_reg phy_reg_init[] = {
2628 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2631 static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
2633 static const struct phy_reg phy_reg_init[] = {
2639 rtl_writephy(tp, 0x1f, 0x0000);
2640 rtl_patchphy(tp, 0x14, 1 << 5);
2641 rtl_patchphy(tp, 0x0d, 1 << 5);
2643 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2646 static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
2648 static const struct phy_reg phy_reg_init[] = {
2668 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2670 rtl_patchphy(tp, 0x14, 1 << 5);
2671 rtl_patchphy(tp, 0x0d, 1 << 5);
2672 rtl_writephy(tp, 0x1f, 0x0000);
2675 static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
2677 static const struct phy_reg phy_reg_init[] = {
2695 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2697 rtl_patchphy(tp, 0x16, 1 << 0);
2698 rtl_patchphy(tp, 0x14, 1 << 5);
2699 rtl_patchphy(tp, 0x0d, 1 << 5);
2700 rtl_writephy(tp, 0x1f, 0x0000);
2703 static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
2705 static const struct phy_reg phy_reg_init[] = {
2717 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2719 rtl_patchphy(tp, 0x16, 1 << 0);
2720 rtl_patchphy(tp, 0x14, 1 << 5);
2721 rtl_patchphy(tp, 0x0d, 1 << 5);
2722 rtl_writephy(tp, 0x1f, 0x0000);
/* The 8168C rev 4 uses the same PHY initialization as rev 3. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
/*
 * Vendor PHY initialization for the 8168D family (variants 1-4).
 * Variants 1 and 2 branch on an eFuse byte (0x01 == 0xb1) to pick between
 * two register recipes and walk a channel-estimation value up through
 * set[] when the current 0x0d value is off target.
 * NOTE(review): this capture elides every phy_reg_init table body and
 * several statements (e.g. the "int val;" declarations, "val &= 0xff00;"
 * masking and the firmware/exit sequences implied by the orig line-number
 * gaps); the visible lines are the control-flow skeleton only.
 */
2730 static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2732 static const struct phy_reg phy_reg_init_0[] = {
2733 /* Channel Estimation */
2754 * Enhance line driver power
2763 * Can not link to 1Gbps with bad cable
2764 * Decrease SNR threshold form 21.07dB to 19.04dB
2773 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2777 * Fine Tune Switching regulator parameter
2779 rtl_writephy(tp, 0x1f, 0x0002);
2780 rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
2781 rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
2783 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2784 static const struct phy_reg phy_reg_init[] = {
2794 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2796 val = rtl_readphy(tp, 0x0d);
2798 if ((val & 0x00ff) != 0x006c) {
2799 static const u32 set[] = {
2800 0x0065, 0x0066, 0x0067, 0x0068,
2801 0x0069, 0x006a, 0x006b, 0x006c
2805 rtl_writephy(tp, 0x1f, 0x0002);
2808 for (i = 0; i < ARRAY_SIZE(set); i++)
2809 rtl_writephy(tp, 0x0d, val | set[i]);
2812 static const struct phy_reg phy_reg_init[] = {
2820 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2823 /* RSET couple improve */
2824 rtl_writephy(tp, 0x1f, 0x0002);
2825 rtl_patchphy(tp, 0x0d, 0x0300);
2826 rtl_patchphy(tp, 0x0f, 0x0010);
2828 /* Fine tune PLL performance */
2829 rtl_writephy(tp, 0x1f, 0x0002);
2830 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2831 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2833 rtl_writephy(tp, 0x1f, 0x0005);
2834 rtl_writephy(tp, 0x05, 0x001b);
2836 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
2838 rtl_writephy(tp, 0x1f, 0x0000);
2841 static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
2843 static const struct phy_reg phy_reg_init_0[] = {
2844 /* Channel Estimation */
2865 * Enhance line driver power
2874 * Can not link to 1Gbps with bad cable
2875 * Decrease SNR threshold form 21.07dB to 19.04dB
2884 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2886 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2887 static const struct phy_reg phy_reg_init[] = {
2898 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2900 val = rtl_readphy(tp, 0x0d);
2901 if ((val & 0x00ff) != 0x006c) {
2902 static const u32 set[] = {
2903 0x0065, 0x0066, 0x0067, 0x0068,
2904 0x0069, 0x006a, 0x006b, 0x006c
2908 rtl_writephy(tp, 0x1f, 0x0002);
2911 for (i = 0; i < ARRAY_SIZE(set); i++)
2912 rtl_writephy(tp, 0x0d, val | set[i]);
2915 static const struct phy_reg phy_reg_init[] = {
2923 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2926 /* Fine tune PLL performance */
2927 rtl_writephy(tp, 0x1f, 0x0002);
2928 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2929 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2931 /* Switching regulator Slew rate */
2932 rtl_writephy(tp, 0x1f, 0x0002);
2933 rtl_patchphy(tp, 0x0f, 0x0017);
2935 rtl_writephy(tp, 0x1f, 0x0005);
2936 rtl_writephy(tp, 0x05, 0x001b);
2938 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
2940 rtl_writephy(tp, 0x1f, 0x0000);
2943 static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
2945 static const struct phy_reg phy_reg_init[] = {
3001 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3004 static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
3006 static const struct phy_reg phy_reg_init[] = {
3016 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3017 rtl_patchphy(tp, 0x0d, 1 << 5);
/*
 * Vendor PHY initialization for the 8168E-1: apply the loaded firmware,
 * then a register recipe, followed by individual tweaks (DCO, impedance
 * matching, auto speed down, EEE-related writes via the 0x0d/0x0e
 * indirect access window).
 * NOTE(review): the phy_reg_init[] table body (orig lines 3024-3049) is
 * elided from this capture.
 */
3020 static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
3022 static const struct phy_reg phy_reg_init[] = {
3023 /* Enable Delay cap */
3029 /* Channel estimation fine tune */
3038 /* Update PFM & 10M TX idle timer */
3050 rtl_apply_firmware(tp);
3052 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3054 /* DCO enable for 10M IDLE Power */
3055 rtl_writephy(tp, 0x1f, 0x0007);
3056 rtl_writephy(tp, 0x1e, 0x0023);
3057 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
3058 rtl_writephy(tp, 0x1f, 0x0000);
3060 /* For impedance matching */
3061 rtl_writephy(tp, 0x1f, 0x0002);
3062 rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
3063 rtl_writephy(tp, 0x1f, 0x0000);
3065 /* PHY auto speed down */
3066 rtl_writephy(tp, 0x1f, 0x0007);
3067 rtl_writephy(tp, 0x1e, 0x002d);
3068 rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
3069 rtl_writephy(tp, 0x1f, 0x0000);
3070 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3072 rtl_writephy(tp, 0x1f, 0x0005);
3073 rtl_writephy(tp, 0x05, 0x8b86);
3074 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3075 rtl_writephy(tp, 0x1f, 0x0000);
3077 rtl_writephy(tp, 0x1f, 0x0005);
3078 rtl_writephy(tp, 0x05, 0x8b85);
3079 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3080 rtl_writephy(tp, 0x1f, 0x0007);
3081 rtl_writephy(tp, 0x1e, 0x0020);
3082 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
3083 rtl_writephy(tp, 0x1f, 0x0006);
3084 rtl_writephy(tp, 0x00, 0x5a00);
3085 rtl_writephy(tp, 0x1f, 0x0000);
3086 rtl_writephy(tp, 0x0d, 0x0007);
3087 rtl_writephy(tp, 0x0e, 0x003c);
3088 rtl_writephy(tp, 0x0d, 0x4007);
3089 rtl_writephy(tp, 0x0e, 0x0000);
3090 rtl_writephy(tp, 0x0d, 0x0000);
/*
 * Program the unicast MAC address (6 bytes from @addr) into the GigaMAC
 * EXGMAC registers 0xe0/0xe4 (and a duplicate at 0xf0/0xf4), packing the
 * bytes little-endian into three 16-bit words first.  Used as a
 * workaround where the BIOS leaves these registers stale (see callers).
 */
3093 static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
3096 addr[0] | (addr[1] << 8),
3097 addr[2] | (addr[3] << 8),
3098 addr[4] | (addr[5] << 8)
3100 const struct exgmac_reg e[] = {
3101 { .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) },
3102 { .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] },
3103 { .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 },
3104 { .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) }
3107 rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
/*
 * PHY tune-up for the RTL8168E-2 (VER_34): firmware load, static table,
 * then per-page fixes: 4-corner performance, auto speed down, 10M EEE
 * waveform, 2-pair detection, EEE setting, and a green-feature tweak.
 * Finally re-seeds the GigaMAC address registers (broken-BIOS workaround).
 */
3110 static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
3112 static const struct phy_reg phy_reg_init[] = {
3113 /* Enable Delay cap */
3122 /* Channel estimation fine tune */
3139 rtl_apply_firmware(tp);
3141 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3143 /* For 4-corner performance improve */
3144 rtl_writephy(tp, 0x1f, 0x0005);
3145 rtl_writephy(tp, 0x05, 0x8b80);
3146 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
3147 rtl_writephy(tp, 0x1f, 0x0000);
3149 /* PHY auto speed down */
3150 rtl_writephy(tp, 0x1f, 0x0004);
3151 rtl_writephy(tp, 0x1f, 0x0007);
3152 rtl_writephy(tp, 0x1e, 0x002d);
3153 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3154 rtl_writephy(tp, 0x1f, 0x0002);
3155 rtl_writephy(tp, 0x1f, 0x0000);
3156 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3158 /* improve 10M EEE waveform */
3159 rtl_writephy(tp, 0x1f, 0x0005);
3160 rtl_writephy(tp, 0x05, 0x8b86);
3161 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3162 rtl_writephy(tp, 0x1f, 0x0000);
3164 /* Improve 2-pair detection performance */
3165 rtl_writephy(tp, 0x1f, 0x0005);
3166 rtl_writephy(tp, 0x05, 0x8b85);
3167 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3168 rtl_writephy(tp, 0x1f, 0x0000);
/* EEE setting: clear ERI 0x1b0 bits then disable EEE in PHY pages. */
3171 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
3172 rtl_writephy(tp, 0x1f, 0x0005);
3173 rtl_writephy(tp, 0x05, 0x8b85);
3174 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3175 rtl_writephy(tp, 0x1f, 0x0004);
3176 rtl_writephy(tp, 0x1f, 0x0007);
3177 rtl_writephy(tp, 0x1e, 0x0020);
3178 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3179 rtl_writephy(tp, 0x1f, 0x0002);
3180 rtl_writephy(tp, 0x1f, 0x0000);
3181 rtl_writephy(tp, 0x0d, 0x0007);
3182 rtl_writephy(tp, 0x0e, 0x003c);
3183 rtl_writephy(tp, 0x0d, 0x4007);
3184 rtl_writephy(tp, 0x0e, 0x0000);
3185 rtl_writephy(tp, 0x0d, 0x0000);
/* Green feature tweak on page 3. */
3188 rtl_writephy(tp, 0x1f, 0x0003);
3189 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3190 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3191 rtl_writephy(tp, 0x1f, 0x0000);
3193 /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
3194 rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
/*
 * Common PHY tweaks shared by all RTL8168F-class configs (called from the
 * 8168f_1/8168f_2/8411 setup routines): 4-corner performance, PHY auto
 * speed down and 10M EEE waveform improvements.
 */
3197 static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
3199 /* For 4-corner performance improve */
3200 rtl_writephy(tp, 0x1f, 0x0005);
3201 rtl_writephy(tp, 0x05, 0x8b80);
3202 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
3203 rtl_writephy(tp, 0x1f, 0x0000);
3205 /* PHY auto speed down */
3206 rtl_writephy(tp, 0x1f, 0x0007);
3207 rtl_writephy(tp, 0x1e, 0x002d);
3208 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3209 rtl_writephy(tp, 0x1f, 0x0000);
3210 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3212 /* Improve 10M EEE waveform */
3213 rtl_writephy(tp, 0x1f, 0x0005);
3214 rtl_writephy(tp, 0x05, 0x8b86);
3215 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3216 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * RTL8168F-1 (VER_35) PHY setup: firmware, static table (channel
 * estimation / green table entries lost in extraction), the shared
 * 8168f tweaks, then a 2-pair detection improvement.
 */
3219 static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3221 static const struct phy_reg phy_reg_init[] = {
3222 /* Channel estimation fine tune */
3227 /* Modify green table for giga & fnet */
3244 /* Modify green table for 10M */
3250 /* Disable hiimpedance detection (RTCT) */
3256 rtl_apply_firmware(tp);
3258 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3260 rtl8168f_hw_phy_config(tp);
3262 /* Improve 2-pair detection performance */
3263 rtl_writephy(tp, 0x1f, 0x0005);
3264 rtl_writephy(tp, 0x05, 0x8b85);
3265 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3266 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * RTL8168F-2 (VER_36) PHY setup: just the firmware patch plus the
 * common 8168f tweaks — no chip-specific register table.
 */
3269 static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
3271 rtl_apply_firmware(tp);
3273 rtl8168f_hw_phy_config(tp);
/*
 * RTL8411 (VER_38) PHY setup: firmware, shared 8168f tweaks, 2-pair
 * detection, the static table, giga green-table modifications, the
 * "uc same-seed" fix, EEE disable and a green-feature tweak.
 * NOTE(review): the phy_reg_init[] entries were lost in extraction.
 */
3276 static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3278 static const struct phy_reg phy_reg_init[] = {
3279 /* Channel estimation fine tune */
3284 /* Modify green table for giga & fnet */
3301 /* Modify green table for 10M */
3307 /* Disable hiimpedance detection (RTCT) */
3314 rtl_apply_firmware(tp);
3316 rtl8168f_hw_phy_config(tp);
3318 /* Improve 2-pair detection performance */
3319 rtl_writephy(tp, 0x1f, 0x0005);
3320 rtl_writephy(tp, 0x05, 0x8b85);
3321 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3322 rtl_writephy(tp, 0x1f, 0x0000);
3324 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3326 /* Modify green table for giga */
3327 rtl_writephy(tp, 0x1f, 0x0005);
3328 rtl_writephy(tp, 0x05, 0x8b54);
3329 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3330 rtl_writephy(tp, 0x05, 0x8b5d);
3331 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3332 rtl_writephy(tp, 0x05, 0x8a7c);
3333 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3334 rtl_writephy(tp, 0x05, 0x8a7f);
3335 rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
3336 rtl_writephy(tp, 0x05, 0x8a82);
3337 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3338 rtl_writephy(tp, 0x05, 0x8a85);
3339 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3340 rtl_writephy(tp, 0x05, 0x8a88);
3341 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3342 rtl_writephy(tp, 0x1f, 0x0000);
3344 /* uc same-seed solution */
3345 rtl_writephy(tp, 0x1f, 0x0005);
3346 rtl_writephy(tp, 0x05, 0x8b85);
3347 rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
3348 rtl_writephy(tp, 0x1f, 0x0000);
/* EEE setting: clear ERI 0x1b0 bits then EEE-related PHY pages. */
3351 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
3352 rtl_writephy(tp, 0x1f, 0x0005);
3353 rtl_writephy(tp, 0x05, 0x8b85);
3354 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3355 rtl_writephy(tp, 0x1f, 0x0004);
3356 rtl_writephy(tp, 0x1f, 0x0007);
3357 rtl_writephy(tp, 0x1e, 0x0020);
3358 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3359 rtl_writephy(tp, 0x1f, 0x0000);
3360 rtl_writephy(tp, 0x0d, 0x0007);
3361 rtl_writephy(tp, 0x0e, 0x003c);
3362 rtl_writephy(tp, 0x0d, 0x4007);
3363 rtl_writephy(tp, 0x0e, 0x0000);
3364 rtl_writephy(tp, 0x0d, 0x0000);
/* Green feature tweak on page 3. */
3367 rtl_writephy(tp, 0x1f, 0x0003);
3368 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3369 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3370 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * RTL8168G-1 (VER_40) PHY setup: upload a MAC OCP patch blob for GPHY
 * reset into 0xf800.., arm it via 0xfc26/0xfc28, load the firmware,
 * then condition PHY OCP registers depending on strap/status bits read
 * back from OCP regs 0xa460 and 0xa466.
 * NOTE(review): some mac_ocp_patch[] rows were lost in extraction.
 */
3373 static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
3375 static const u16 mac_ocp_patch[] = {
3376 0xe008, 0xe01b, 0xe01d, 0xe01f,
3377 0xe021, 0xe023, 0xe025, 0xe027,
3378 0x49d2, 0xf10d, 0x766c, 0x49e2,
3379 0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
3381 0x77c0, 0x4870, 0x9fc0, 0x1ea0,
3382 0xc707, 0x8ee1, 0x9d6c, 0xc603,
3383 0xbe00, 0xb416, 0x0076, 0xe86c,
3384 0xc602, 0xbe00, 0x0000, 0xc602,
3386 0xbe00, 0x0000, 0xc602, 0xbe00,
3387 0x0000, 0xc602, 0xbe00, 0x0000,
3388 0xc602, 0xbe00, 0x0000, 0xc602,
3389 0xbe00, 0x0000, 0xc602, 0xbe00,
3391 0x0000, 0x0000, 0x0000, 0x0000
3395 /* Patch code for GPHY reset */
3396 for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
3397 r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
3398 r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
3399 r8168_mac_ocp_write(tp, 0xfc28, 0x0075);
3401 rtl_apply_firmware(tp);
/* Bit 8 of OCP 0xa460 selects whether bit 15 of 0xbcc4 is set/cleared. */
3403 if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
3404 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
3406 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);
/* Bit 8 of OCP 0xa466 selects whether bit 1 of 0xc41a is set/cleared.
 * Fix: the clear-branch wrote register 0xbcc4 instead of 0xc41a; both
 * arms of this test must target the same register (0xc41a). */
3408 if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
3409 rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
3411 rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0000, 0x0002);
3413 rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
3414 rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);
3416 r8168_phy_ocp_write(tp, 0xa436, 0x8012);
3417 rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);
3419 rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
/*
 * RTL8102E (VER_07..09) PHY setup: set individual bits on page 0
 * registers 0x11/0x19/0x10, then write the static table (entries lost
 * in extraction).  No firmware is involved for this chip.
 */
3422 static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
3424 static const struct phy_reg phy_reg_init[] = {
3431 rtl_writephy(tp, 0x1f, 0x0000);
3432 rtl_patchphy(tp, 0x11, 1 << 12);
3433 rtl_patchphy(tp, 0x19, 1 << 13);
3434 rtl_patchphy(tp, 0x10, 1 << 15);
3436 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/*
 * RTL8105E (VER_29/30) PHY setup.  ALDPS (link-down power saving) is
 * disabled via reg 0x18 before the ram code/firmware is loaded, then
 * the static table is applied (entries lost in extraction).
 */
3439 static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
3441 static const struct phy_reg phy_reg_init[] = {
3455 /* Disable ALDPS before ram code */
3456 rtl_writephy(tp, 0x1f, 0x0000);
3457 rtl_writephy(tp, 0x18, 0x0310);
3460 rtl_apply_firmware(tp);
3462 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/*
 * RTL8402 (VER_37) PHY setup: disable ALDPS, load firmware, clear the
 * EEE ERI register, then disable EEE via PHY page 4 registers.
 */
3465 static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3467 /* Disable ALDPS before setting firmware */
3468 rtl_writephy(tp, 0x1f, 0x0000);
3469 rtl_writephy(tp, 0x18, 0x0310);
3472 rtl_apply_firmware(tp);
/* EEE setting */
3475 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3476 rtl_writephy(tp, 0x1f, 0x0004);
3477 rtl_writephy(tp, 0x10, 0x401f);
3478 rtl_writephy(tp, 0x19, 0x7030);
3479 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * RTL8106E (VER_39) PHY setup: disable ALDPS, load firmware, clear the
 * 0x1b0 ERI register, apply the static table (entries lost in
 * extraction), then clear ERI 0x1d0.
 */
3482 static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
3484 static const struct phy_reg phy_reg_init[] = {
3491 /* Disable ALDPS before ram code */
3492 rtl_writephy(tp, 0x1f, 0x0000);
3493 rtl_writephy(tp, 0x18, 0x0310);
3496 rtl_apply_firmware(tp);
3498 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3499 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3501 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
/*
 * Dispatch the per-chip PHY configuration routine based on the detected
 * tp->mac_version.  Versions without an entry (e.g. VER_01, VER_31,
 * VER_41) intentionally get no PHY tuning.
 */
3504 static void rtl_hw_phy_config(struct net_device *dev)
3506 struct rtl8169_private *tp = netdev_priv(dev);
3508 rtl8169_print_mac_version(tp);
3510 switch (tp->mac_version) {
3511 case RTL_GIGA_MAC_VER_01:
3513 case RTL_GIGA_MAC_VER_02:
3514 case RTL_GIGA_MAC_VER_03:
3515 rtl8169s_hw_phy_config(tp);
3517 case RTL_GIGA_MAC_VER_04:
3518 rtl8169sb_hw_phy_config(tp);
3520 case RTL_GIGA_MAC_VER_05:
3521 rtl8169scd_hw_phy_config(tp);
3523 case RTL_GIGA_MAC_VER_06:
3524 rtl8169sce_hw_phy_config(tp);
3526 case RTL_GIGA_MAC_VER_07:
3527 case RTL_GIGA_MAC_VER_08:
3528 case RTL_GIGA_MAC_VER_09:
3529 rtl8102e_hw_phy_config(tp);
3531 case RTL_GIGA_MAC_VER_11:
3532 rtl8168bb_hw_phy_config(tp);
3534 case RTL_GIGA_MAC_VER_12:
3535 rtl8168bef_hw_phy_config(tp);
3537 case RTL_GIGA_MAC_VER_17:
3538 rtl8168bef_hw_phy_config(tp);
3540 case RTL_GIGA_MAC_VER_18:
3541 rtl8168cp_1_hw_phy_config(tp);
3543 case RTL_GIGA_MAC_VER_19:
3544 rtl8168c_1_hw_phy_config(tp);
3546 case RTL_GIGA_MAC_VER_20:
3547 rtl8168c_2_hw_phy_config(tp);
3549 case RTL_GIGA_MAC_VER_21:
3550 rtl8168c_3_hw_phy_config(tp);
3552 case RTL_GIGA_MAC_VER_22:
3553 rtl8168c_4_hw_phy_config(tp);
3555 case RTL_GIGA_MAC_VER_23:
3556 case RTL_GIGA_MAC_VER_24:
3557 rtl8168cp_2_hw_phy_config(tp);
3559 case RTL_GIGA_MAC_VER_25:
3560 rtl8168d_1_hw_phy_config(tp);
3562 case RTL_GIGA_MAC_VER_26:
3563 rtl8168d_2_hw_phy_config(tp);
3565 case RTL_GIGA_MAC_VER_27:
3566 rtl8168d_3_hw_phy_config(tp);
3568 case RTL_GIGA_MAC_VER_28:
3569 rtl8168d_4_hw_phy_config(tp);
3571 case RTL_GIGA_MAC_VER_29:
3572 case RTL_GIGA_MAC_VER_30:
3573 rtl8105e_hw_phy_config(tp);
3575 case RTL_GIGA_MAC_VER_31:
3578 case RTL_GIGA_MAC_VER_32:
3579 case RTL_GIGA_MAC_VER_33:
3580 rtl8168e_1_hw_phy_config(tp);
3582 case RTL_GIGA_MAC_VER_34:
3583 rtl8168e_2_hw_phy_config(tp);
3585 case RTL_GIGA_MAC_VER_35:
3586 rtl8168f_1_hw_phy_config(tp);
3588 case RTL_GIGA_MAC_VER_36:
3589 rtl8168f_2_hw_phy_config(tp);
3592 case RTL_GIGA_MAC_VER_37:
3593 rtl8402_hw_phy_config(tp);
3596 case RTL_GIGA_MAC_VER_38:
3597 rtl8411_hw_phy_config(tp);
3600 case RTL_GIGA_MAC_VER_39:
3601 rtl8106e_hw_phy_config(tp);
3604 case RTL_GIGA_MAC_VER_40:
3605 rtl8168g_1_hw_phy_config(tp);
3608 case RTL_GIGA_MAC_VER_41:
/*
 * Periodic PHY watchdog: while a PHY reset is pending, back off and
 * re-arm the timer rather than busy-wait; once the link is up, stop.
 * Otherwise warn and trigger another PHY reset, then re-arm the timer.
 * NOTE(review): the branch bodies between these calls were lost in
 * extraction — verify against the full source.
 */
3614 static void rtl_phy_work(struct rtl8169_private *tp)
3616 struct timer_list *timer = &tp->timer;
3617 void __iomem *ioaddr = tp->mmio_addr;
3618 unsigned long timeout = RTL8169_PHY_TIMEOUT;
3620 assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
3622 if (tp->phy_reset_pending(tp)) {
3624 * A busy loop could burn quite a few cycles on nowadays CPU.
3625 * Let's delay the execution of the timer for a few ticks.
3631 if (tp->link_ok(ioaddr))
3634 netif_warn(tp, link, tp->dev, "PHY reset until link up\n");
3636 tp->phy_reset_enable(tp);
3639 mod_timer(timer, jiffies + timeout);
/*
 * Queue the driver's deferred-work handler for @flag; the
 * test_and_set_bit() guard avoids rescheduling when the flag is
 * already pending.
 */
3642 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3644 if (!test_and_set_bit(flag, tp->wk.flags))
3645 schedule_work(&tp->wk.work);
/*
 * Timer callback: defer the actual PHY work to process context by
 * scheduling the PHY_PENDING task.
 */
3648 static void rtl8169_phy_timer(unsigned long __opaque)
3650 struct net_device *dev = (struct net_device *)__opaque;
3651 struct rtl8169_private *tp = netdev_priv(dev);
3653 rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
/*
 * Undo probe-time PCI setup: release BARs, clear MWI and disable the
 * device.  (The iounmap/free_netdev steps were dropped in extraction.)
 */
3656 static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
3657 void __iomem *ioaddr)
3660 pci_release_regions(pdev);
3661 pci_clear_mwi(pdev);
3662 pci_disable_device(pdev);
/* Poll condition: true while a PHY reset is still in progress. */
DECLARE_RTL_COND(rtl_phy_reset_cond)
3668 return tp->phy_reset_pending(tp);
/*
 * Trigger a PHY reset and wait (1 ms poll, up to 100 iterations) for
 * the reset-pending condition to clear.
 */
3671 static void rtl8169_phy_reset(struct net_device *dev,
3672 struct rtl8169_private *tp)
3674 tp->phy_reset_enable(tp);
3675 rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
/*
 * True when the chip is a VER_01 part running with the TBI (fiber)
 * interface enabled, per the PHYstatus register.
 */
3678 static bool rtl_tbi_enabled(struct rtl8169_private *tp)
3680 void __iomem *ioaddr = tp->mmio_addr;
3682 return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
3683 (RTL_R8(PHYstatus) & TBI_Enable);
/*
 * Bring the PHY up at open time: run the per-chip PHY config, apply
 * PCI latency/cache-line quirks for early (<= VER_06) chips, reset the
 * PHY and start autonegotiation advertising 10/100 (plus 1000 when the
 * MII reports GMII support).
 */
3686 static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
3688 void __iomem *ioaddr = tp->mmio_addr;
3690 rtl_hw_phy_config(dev);
3692 if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
3693 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3697 pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
3699 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
3700 pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
3702 if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
3703 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3705 dprintk("Set PHY Reg 0x0bh = 0x00h\n");
3706 rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
3709 rtl8169_phy_reset(dev, tp);
3711 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
3712 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3713 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
3714 (tp->mii.supports_gmii ?
3715 ADVERTISED_1000baseT_Half |
3716 ADVERTISED_1000baseT_Full : 0));
3718 if (rtl_tbi_enabled(tp))
3719 netif_info(tp, link, dev, "TBI auto-negotiating\n");
/*
 * Write the 6-byte station address into MAC0/MAC4 under the Cfg9346
 * unlock, mirroring it into the EXGMAC registers on VER_34 (broken
 * BIOS workaround).  Paired with rtl_unlock_work(); the matching
 * rtl_lock_work() call was dropped in extraction.
 */
3722 static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
3724 void __iomem *ioaddr = tp->mmio_addr;
3728 RTL_W8(Cfg9346, Cfg9346_Unlock);
3730 RTL_W32(MAC4, addr[4] | addr[5] << 8);
3733 RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
3736 if (tp->mac_version == RTL_GIGA_MAC_VER_34)
3737 rtl_rar_exgmac_set(tp, addr);
3739 RTL_W8(Cfg9346, Cfg9346_Lock);
3741 rtl_unlock_work(tp);
/*
 * ndo_set_mac_address hook: validate the new address, copy it into the
 * netdev and program it into the hardware.  Returns -EADDRNOTAVAIL for
 * an invalid ethernet address.
 */
3744 static int rtl_set_mac_address(struct net_device *dev, void *p)
3746 struct rtl8169_private *tp = netdev_priv(dev);
3747 struct sockaddr *addr = p;
3749 if (!is_valid_ether_addr(addr->sa_data))
3750 return -EADDRNOTAVAIL;
3752 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3754 rtl_rar_set(tp, dev->dev_addr);
/*
 * ndo_do_ioctl hook: forward MII ioctls to the chip-specific handler
 * (TBI or xMII); rejected with -ENODEV while the interface is down.
 */
3759 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3761 struct rtl8169_private *tp = netdev_priv(dev);
3762 struct mii_ioctl_data *data = if_mii(ifr);
3764 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
/*
 * MII ioctl handler for copper (xMII) chips: report the fixed internal
 * PHY id, and service register read/write requests (register numbers
 * masked to 5 bits).  The switch/case framing was lost in extraction.
 */
3767 static int rtl_xmii_ioctl(struct rtl8169_private *tp,
3768 struct mii_ioctl_data *data, int cmd)
3772 data->phy_id = 32; /* Internal PHY */
3776 data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
3780 rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
/* MII ioctl handler for TBI (fiber) chips; body lost in extraction —
 * presumably returns "not supported", verify against full source. */
3786 static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
/* Tear down MSI if it was enabled at probe time, and clear the flag. */
3791 static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3793 if (tp->features & RTL_FEATURE_MSI) {
3794 pci_disable_msi(pdev);
3795 tp->features &= ~RTL_FEATURE_MSI;
/*
 * Select the MDIO accessors for this chip generation: the 8168DP parts
 * (VER_27/28/31) and 8168G parts (VER_40/41) use indirect OCP-based
 * access; everything else uses the classic PHYAR interface.
 */
3799 static void rtl_init_mdio_ops(struct rtl8169_private *tp)
3801 struct mdio_ops *ops = &tp->mdio_ops;
3803 switch (tp->mac_version) {
3804 case RTL_GIGA_MAC_VER_27:
3805 ops->write = r8168dp_1_mdio_write;
3806 ops->read = r8168dp_1_mdio_read;
3808 case RTL_GIGA_MAC_VER_28:
3809 case RTL_GIGA_MAC_VER_31:
3810 ops->write = r8168dp_2_mdio_write;
3811 ops->read = r8168dp_2_mdio_read;
3813 case RTL_GIGA_MAC_VER_40:
3814 case RTL_GIGA_MAC_VER_41:
3815 ops->write = r8168g_mdio_write;
3816 ops->read = r8168g_mdio_read;
3819 ops->write = r8169_mdio_write;
3820 ops->read = r8169_mdio_read;
/*
 * On newer chips, keep broadcast/multicast/unicast reception enabled
 * while suspended so Wake-on-LAN frames can still be matched.
 */
3825 static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3827 void __iomem *ioaddr = tp->mmio_addr;
3829 switch (tp->mac_version) {
3830 case RTL_GIGA_MAC_VER_25:
3831 case RTL_GIGA_MAC_VER_26:
3832 case RTL_GIGA_MAC_VER_29:
3833 case RTL_GIGA_MAC_VER_30:
3834 case RTL_GIGA_MAC_VER_32:
3835 case RTL_GIGA_MAC_VER_33:
3836 case RTL_GIGA_MAC_VER_34:
3837 case RTL_GIGA_MAC_VER_37:
3838 case RTL_GIGA_MAC_VER_38:
3839 case RTL_GIGA_MAC_VER_39:
3840 case RTL_GIGA_MAC_VER_40:
3841 case RTL_GIGA_MAC_VER_41:
3842 RTL_W32(RxConfig, RTL_R32(RxConfig) |
3843 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
/*
 * WoL-aware PLL power down: bail out when no wake source is armed;
 * otherwise leave the PHY autonegotiating at its lowest state, apply
 * the suspend receive quirk, and report that WoL handling took over.
 */
3850 static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
3852 if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
3855 rtl_writephy(tp, 0x1f, 0x0000);
3856 rtl_writephy(tp, MII_BMCR, 0x0000);
3858 rtl_wol_suspend_quirk(tp);
/* Power down the 810x PHY via BMCR power-down bit on page 0. */
3863 static void r810x_phy_power_down(struct rtl8169_private *tp)
3865 rtl_writephy(tp, 0x1f, 0x0000);
3866 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
/* Power up the 810x PHY: clear power-down and re-enable autoneg. */
3869 static void r810x_phy_power_up(struct rtl8169_private *tp)
3871 rtl_writephy(tp, 0x1f, 0x0000);
3872 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
/*
 * Power down the 810x PLL unless WoL keeps the link alive.  On chip
 * versions with a PMCH control bit, also drop bit 7 to cut PLL power.
 */
3875 static void r810x_pll_power_down(struct rtl8169_private *tp)
3877 void __iomem *ioaddr = tp->mmio_addr;
3879 if (rtl_wol_pll_power_down(tp))
3882 r810x_phy_power_down(tp);
3884 switch (tp->mac_version) {
3885 case RTL_GIGA_MAC_VER_07:
3886 case RTL_GIGA_MAC_VER_08:
3887 case RTL_GIGA_MAC_VER_09:
3888 case RTL_GIGA_MAC_VER_10:
3889 case RTL_GIGA_MAC_VER_13:
3890 case RTL_GIGA_MAC_VER_16:
3893 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
/*
 * Power the 810x PLL and PHY back up; the PMCH bit is restored on
 * versions other than those listed in the case labels.
 */
3898 static void r810x_pll_power_up(struct rtl8169_private *tp)
3900 void __iomem *ioaddr = tp->mmio_addr;
3902 r810x_phy_power_up(tp);
3904 switch (tp->mac_version) {
3905 case RTL_GIGA_MAC_VER_07:
3906 case RTL_GIGA_MAC_VER_08:
3907 case RTL_GIGA_MAC_VER_09:
3908 case RTL_GIGA_MAC_VER_10:
3909 case RTL_GIGA_MAC_VER_13:
3910 case RTL_GIGA_MAC_VER_16:
3913 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
/*
 * Power up the 8168 PHY.  Pre-VER_32 chips additionally need register
 * 0x0e cleared before autonegotiation is re-enabled via BMCR.
 */
3918 static void r8168_phy_power_up(struct rtl8169_private *tp)
3920 rtl_writephy(tp, 0x1f, 0x0000);
3921 switch (tp->mac_version) {
3922 case RTL_GIGA_MAC_VER_11:
3923 case RTL_GIGA_MAC_VER_12:
3924 case RTL_GIGA_MAC_VER_17:
3925 case RTL_GIGA_MAC_VER_18:
3926 case RTL_GIGA_MAC_VER_19:
3927 case RTL_GIGA_MAC_VER_20:
3928 case RTL_GIGA_MAC_VER_21:
3929 case RTL_GIGA_MAC_VER_22:
3930 case RTL_GIGA_MAC_VER_23:
3931 case RTL_GIGA_MAC_VER_24:
3932 case RTL_GIGA_MAC_VER_25:
3933 case RTL_GIGA_MAC_VER_26:
3934 case RTL_GIGA_MAC_VER_27:
3935 case RTL_GIGA_MAC_VER_28:
3936 case RTL_GIGA_MAC_VER_31:
3937 rtl_writephy(tp, 0x0e, 0x0000);
3942 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
/*
 * Power down the 8168 PHY.  VER_32/33 keep autoneg enabled along with
 * power-down; the older chips first set reg 0x0e to 0x0200, then set
 * BMCR power-down alone.
 */
3945 static void r8168_phy_power_down(struct rtl8169_private *tp)
3947 rtl_writephy(tp, 0x1f, 0x0000);
3948 switch (tp->mac_version) {
3949 case RTL_GIGA_MAC_VER_32:
3950 case RTL_GIGA_MAC_VER_33:
3951 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
3954 case RTL_GIGA_MAC_VER_11:
3955 case RTL_GIGA_MAC_VER_12:
3956 case RTL_GIGA_MAC_VER_17:
3957 case RTL_GIGA_MAC_VER_18:
3958 case RTL_GIGA_MAC_VER_19:
3959 case RTL_GIGA_MAC_VER_20:
3960 case RTL_GIGA_MAC_VER_21:
3961 case RTL_GIGA_MAC_VER_22:
3962 case RTL_GIGA_MAC_VER_23:
3963 case RTL_GIGA_MAC_VER_24:
3964 case RTL_GIGA_MAC_VER_25:
3965 case RTL_GIGA_MAC_VER_26:
3966 case RTL_GIGA_MAC_VER_27:
3967 case RTL_GIGA_MAC_VER_28:
3968 case RTL_GIGA_MAC_VER_31:
3969 rtl_writephy(tp, 0x0e, 0x0200);
3971 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
/*
 * Power down the 8168 PLL, honouring several exclusions: DASH-managed
 * 8168DP parts stay powered, as do VER_23/24 with ASF active.  On
 * VER_32/33 an EPHY write precedes the power-down.  If WoL is armed,
 * rtl_wol_pll_power_down() takes over instead.
 */
3976 static void r8168_pll_power_down(struct rtl8169_private *tp)
3978 void __iomem *ioaddr = tp->mmio_addr;
3980 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
3981 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
3982 tp->mac_version == RTL_GIGA_MAC_VER_31) &&
3983 r8168dp_check_dash(tp)) {
3987 if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
3988 tp->mac_version == RTL_GIGA_MAC_VER_24) &&
3989 (RTL_R16(CPlusCmd) & ASF)) {
3993 if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
3994 tp->mac_version == RTL_GIGA_MAC_VER_33)
3995 rtl_ephy_write(tp, 0x19, 0xff64);
3997 if (rtl_wol_pll_power_down(tp))
4000 r8168_phy_power_down(tp);
4002 switch (tp->mac_version) {
4003 case RTL_GIGA_MAC_VER_25:
4004 case RTL_GIGA_MAC_VER_26:
4005 case RTL_GIGA_MAC_VER_27:
4006 case RTL_GIGA_MAC_VER_28:
4007 case RTL_GIGA_MAC_VER_31:
4008 case RTL_GIGA_MAC_VER_32:
4009 case RTL_GIGA_MAC_VER_33:
4010 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80)
/*
 * Power the 8168 PLL back up (set PMCH bit 7 on the chips that gate
 * it), then restore PHY power.
 */
4015 static void r8168_pll_power_up(struct rtl8169_private *tp)
4017 void __iomem *ioaddr = tp->mmio_addr;
4019 switch (tp->mac_version) {
4020 case RTL_GIGA_MAC_VER_25:
4021 case RTL_GIGA_MAC_VER_26:
4022 case RTL_GIGA_MAC_VER_27:
4023 case RTL_GIGA_MAC_VER_28:
4024 case RTL_GIGA_MAC_VER_31:
4025 case RTL_GIGA_MAC_VER_32:
4026 case RTL_GIGA_MAC_VER_33:
4027 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
4031 r8168_phy_power_up(tp);
/* Invoke an optional per-chip operation; the NULL-check body was lost
 * in extraction — presumably calls op(tp) only when op is non-NULL. */
4034 static void rtl_generic_op(struct rtl8169_private *tp,
4035 void (*op)(struct rtl8169_private *))
/* Dispatch the chip-specific PLL power-down handler (may be absent). */
4041 static void rtl_pll_power_down(struct rtl8169_private *tp)
4043 rtl_generic_op(tp, tp->pll_power_ops.down);
/* Dispatch the chip-specific PLL power-up handler (may be absent). */
4046 static void rtl_pll_power_up(struct rtl8169_private *tp)
4048 rtl_generic_op(tp, tp->pll_power_ops.up);
/*
 * Bind PLL power up/down handlers to the chip family: 810x-class parts
 * get the r810x pair, 8168-class parts get the r8168 pair; any version
 * not listed is left with no handlers.
 */
4051 static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
4053 struct pll_power_ops *ops = &tp->pll_power_ops;
4055 switch (tp->mac_version) {
4056 case RTL_GIGA_MAC_VER_07:
4057 case RTL_GIGA_MAC_VER_08:
4058 case RTL_GIGA_MAC_VER_09:
4059 case RTL_GIGA_MAC_VER_10:
4060 case RTL_GIGA_MAC_VER_16:
4061 case RTL_GIGA_MAC_VER_29:
4062 case RTL_GIGA_MAC_VER_30:
4063 case RTL_GIGA_MAC_VER_37:
4064 case RTL_GIGA_MAC_VER_39:
4065 ops->down = r810x_pll_power_down;
4066 ops->up = r810x_pll_power_up;
4069 case RTL_GIGA_MAC_VER_11:
4070 case RTL_GIGA_MAC_VER_12:
4071 case RTL_GIGA_MAC_VER_17:
4072 case RTL_GIGA_MAC_VER_18:
4073 case RTL_GIGA_MAC_VER_19:
4074 case RTL_GIGA_MAC_VER_20:
4075 case RTL_GIGA_MAC_VER_21:
4076 case RTL_GIGA_MAC_VER_22:
4077 case RTL_GIGA_MAC_VER_23:
4078 case RTL_GIGA_MAC_VER_24:
4079 case RTL_GIGA_MAC_VER_25:
4080 case RTL_GIGA_MAC_VER_26:
4081 case RTL_GIGA_MAC_VER_27:
4082 case RTL_GIGA_MAC_VER_28:
4083 case RTL_GIGA_MAC_VER_31:
4084 case RTL_GIGA_MAC_VER_32:
4085 case RTL_GIGA_MAC_VER_33:
4086 case RTL_GIGA_MAC_VER_34:
4087 case RTL_GIGA_MAC_VER_35:
4088 case RTL_GIGA_MAC_VER_36:
4089 case RTL_GIGA_MAC_VER_38:
4090 case RTL_GIGA_MAC_VER_40:
4091 case RTL_GIGA_MAC_VER_41:
4092 ops->down = r8168_pll_power_down;
4093 ops->up = r8168_pll_power_up;
/*
 * Program RxConfig DMA/FIFO settings per chip generation: 8169-era
 * parts use the FIFO threshold, the 8168C..8168E generation enables
 * RX128_INT_EN plus multi-descriptor receive, and everything else gets
 * RX128_INT_EN with the plain DMA burst.
 */
4103 static void rtl_init_rxcfg(struct rtl8169_private *tp)
4105 void __iomem *ioaddr = tp->mmio_addr;
4107 switch (tp->mac_version) {
4108 case RTL_GIGA_MAC_VER_01:
4109 case RTL_GIGA_MAC_VER_02:
4110 case RTL_GIGA_MAC_VER_03:
4111 case RTL_GIGA_MAC_VER_04:
4112 case RTL_GIGA_MAC_VER_05:
4113 case RTL_GIGA_MAC_VER_06:
4114 case RTL_GIGA_MAC_VER_10:
4115 case RTL_GIGA_MAC_VER_11:
4116 case RTL_GIGA_MAC_VER_12:
4117 case RTL_GIGA_MAC_VER_13:
4118 case RTL_GIGA_MAC_VER_14:
4119 case RTL_GIGA_MAC_VER_15:
4120 case RTL_GIGA_MAC_VER_16:
4121 case RTL_GIGA_MAC_VER_17:
4122 RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
4124 case RTL_GIGA_MAC_VER_18:
4125 case RTL_GIGA_MAC_VER_19:
4126 case RTL_GIGA_MAC_VER_20:
4127 case RTL_GIGA_MAC_VER_21:
4128 case RTL_GIGA_MAC_VER_22:
4129 case RTL_GIGA_MAC_VER_23:
4130 case RTL_GIGA_MAC_VER_24:
4131 case RTL_GIGA_MAC_VER_34:
4132 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
4135 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
/* Reset all TX/RX ring producer/consumer indexes to zero. */
4140 static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
4142 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
/* Run the chip-specific jumbo-frame enable under the Cfg9346 unlock. */
4145 static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
4147 void __iomem *ioaddr = tp->mmio_addr;
4149 RTL_W8(Cfg9346, Cfg9346_Unlock);
4150 rtl_generic_op(tp, tp->jumbo_ops.enable);
4151 RTL_W8(Cfg9346, Cfg9346_Lock);
/* Run the chip-specific jumbo-frame disable under the Cfg9346 unlock. */
4154 static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
4156 void __iomem *ioaddr = tp->mmio_addr;
4158 RTL_W8(Cfg9346, Cfg9346_Unlock);
4159 rtl_generic_op(tp, tp->jumbo_ops.disable);
4160 RTL_W8(Cfg9346, Cfg9346_Lock);
/* 8168C jumbo enable: set both jumbo bits and lower the PCIe max read
 * request size (0x2 encoding) for jumbo operation. */
4163 static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
4165 void __iomem *ioaddr = tp->mmio_addr;
4167 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4168 RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
4169 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
/* 8168C jumbo disable: clear both jumbo bits, restore the larger PCIe
 * max read request size (0x5 encoding). */
4172 static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
4174 void __iomem *ioaddr = tp->mmio_addr;
4176 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4177 RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
4178 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
/* 8168DP jumbo enable: only the Config3 jumbo bit is needed. */
4181 static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
4183 void __iomem *ioaddr = tp->mmio_addr;
4185 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
/* 8168DP jumbo disable: clear the Config3 jumbo bit. */
4188 static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
4190 void __iomem *ioaddr = tp->mmio_addr;
4192 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
/* 8168E jumbo enable: raise the TX packet size limit, set the jumbo
 * bits and reduce the PCIe max read request size. */
4195 static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
4197 void __iomem *ioaddr = tp->mmio_addr;
4199 RTL_W8(MaxTxPacketSize, 0x3f);
4200 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4201 RTL_W8(Config4, RTL_R8(Config4) | 0x01);
4202 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
/* 8168E jumbo disable: restore the default TX size limit, clear the
 * jumbo bits and restore the PCIe max read request size. */
4205 static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
4207 void __iomem *ioaddr = tp->mmio_addr;
4209 RTL_W8(MaxTxPacketSize, 0x0c);
4210 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4211 RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
4212 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
/* 8168B-0 jumbo enable: PCIe tweak only (smaller read request plus
 * no-snoop). */
4215 static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
4217 rtl_tx_performance_tweak(tp->pci_dev,
4218 (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
/* 8168B-0 jumbo disable: restore the larger read request encoding. */
4221 static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
4223 rtl_tx_performance_tweak(tp->pci_dev,
4224 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
/* 8168B-1 jumbo enable: the B-0 PCIe tweak plus Config4 bit 0. */
4227 static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
4229 void __iomem *ioaddr = tp->mmio_addr;
4231 r8168b_0_hw_jumbo_enable(tp);
4233 RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
/* 8168B-1 jumbo disable: undo the B-0 tweak and clear Config4 bit 0. */
4236 static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
4238 void __iomem *ioaddr = tp->mmio_addr;
4240 r8168b_0_hw_jumbo_disable(tp);
4242 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
/*
 * Bind the jumbo-frame enable/disable handlers by chip generation.
 * Plain 8169 needs nothing and 810x has no jumbo support at all, so
 * those versions (and VER_40/41) leave the ops NULL.
 */
4245 static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
4247 struct jumbo_ops *ops = &tp->jumbo_ops;
4249 switch (tp->mac_version) {
4250 case RTL_GIGA_MAC_VER_11:
4251 ops->disable = r8168b_0_hw_jumbo_disable;
4252 ops->enable = r8168b_0_hw_jumbo_enable;
4254 case RTL_GIGA_MAC_VER_12:
4255 case RTL_GIGA_MAC_VER_17:
4256 ops->disable = r8168b_1_hw_jumbo_disable;
4257 ops->enable = r8168b_1_hw_jumbo_enable;
4259 case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
4260 case RTL_GIGA_MAC_VER_19:
4261 case RTL_GIGA_MAC_VER_20:
4262 case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
4263 case RTL_GIGA_MAC_VER_22:
4264 case RTL_GIGA_MAC_VER_23:
4265 case RTL_GIGA_MAC_VER_24:
4266 case RTL_GIGA_MAC_VER_25:
4267 case RTL_GIGA_MAC_VER_26:
4268 ops->disable = r8168c_hw_jumbo_disable;
4269 ops->enable = r8168c_hw_jumbo_enable;
4271 case RTL_GIGA_MAC_VER_27:
4272 case RTL_GIGA_MAC_VER_28:
4273 ops->disable = r8168dp_hw_jumbo_disable;
4274 ops->enable = r8168dp_hw_jumbo_enable;
4276 case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
4277 case RTL_GIGA_MAC_VER_32:
4278 case RTL_GIGA_MAC_VER_33:
4279 case RTL_GIGA_MAC_VER_34:
4280 ops->disable = r8168e_hw_jumbo_disable;
4281 ops->enable = r8168e_hw_jumbo_enable;
4285 * No action needed for jumbo frames with 8169.
4286 * No jumbo for 810x at all.
4288 case RTL_GIGA_MAC_VER_40:
4289 case RTL_GIGA_MAC_VER_41:
4291 ops->disable = NULL;
/* Poll condition: true while the chip's soft reset bit is still set. */
DECLARE_RTL_COND(rtl_chipcmd_cond)
4299 void __iomem *ioaddr = tp->mmio_addr;
4301 return RTL_R8(ChipCmd) & CmdReset;
/*
 * Issue a chip soft reset and poll (100 us interval, up to 100 tries)
 * until the CmdReset bit self-clears.
 */
4304 static void rtl_hw_reset(struct rtl8169_private *tp)
4306 void __iomem *ioaddr = tp->mmio_addr;
4308 RTL_W8(ChipCmd, CmdReset);
4310 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
/*
 * Look up, load and validate the firmware patch for this chip, storing
 * it in tp->rtl_fw on success.  Error paths (goto labels) release the
 * firmware and warn; absence of firmware is non-fatal.  Several lines
 * (allocation check, free, sentinel assignment) were lost in
 * extraction.
 */
4313 static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
4315 struct rtl_fw *rtl_fw;
4319 name = rtl_lookup_firmware_name(tp);
4321 goto out_no_firmware;
4323 rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
4327 rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
4331 rc = rtl_check_firmware(tp, rtl_fw);
4333 goto err_release_firmware;
4335 tp->rtl_fw = rtl_fw;
4339 err_release_firmware:
4340 release_firmware(rtl_fw->fw);
4344 netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
/* Load firmware only on first use: tp->rtl_fw starts as an ERR_PTR
 * sentinel, replaced by the real blob (or NULL) after one attempt. */
4351 static void rtl_request_firmware(struct rtl8169_private *tp)
4353 if (IS_ERR(tp->rtl_fw))
4354 rtl_request_uncached_firmware(tp);
/* Stop packet reception by clearing all accept bits in RxConfig. */
4357 static void rtl_rx_close(struct rtl8169_private *tp)
4359 void __iomem *ioaddr = tp->mmio_addr;
4361 RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
/* Poll condition: true while the normal-priority TX queue poll bit is
 * still set (transmit in flight). */
DECLARE_RTL_COND(rtl_npq_cond)
4366 void __iomem *ioaddr = tp->mmio_addr;
4368 return RTL_R8(TxPoll) & NPQ;
/* Poll condition: true once the TX FIFO reports empty via TxConfig. */
DECLARE_RTL_COND(rtl_txcfg_empty_cond)
4373 void __iomem *ioaddr = tp->mmio_addr;
4375 return RTL_R32(TxConfig) & TXCFG_EMPTY;
/*
 * Quiesce the hardware before a reset: mask/ack interrupts, then drain
 * in-flight TX per chip family — 8168DP parts wait for the NPQ poll
 * bit to drop, newer parts assert StopReq and wait for the TX FIFO to
 * empty, the rest just assert StopReq (with a settle delay, lost in
 * extraction).  The final rtl_hw_reset() call also falls outside this
 * view.
 */
4378 static void rtl8169_hw_reset(struct rtl8169_private *tp)
4380 void __iomem *ioaddr = tp->mmio_addr;
4382 /* Disable interrupts */
4383 rtl8169_irq_mask_and_ack(tp);
4387 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
4388 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4389 tp->mac_version == RTL_GIGA_MAC_VER_31) {
4390 rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
4391 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
4392 tp->mac_version == RTL_GIGA_MAC_VER_35 ||
4393 tp->mac_version == RTL_GIGA_MAC_VER_36 ||
4394 tp->mac_version == RTL_GIGA_MAC_VER_37 ||
4395 tp->mac_version == RTL_GIGA_MAC_VER_40 ||
4396 tp->mac_version == RTL_GIGA_MAC_VER_41 ||
4397 tp->mac_version == RTL_GIGA_MAC_VER_38) {
4398 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4399 rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
4401 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
/* Program TxConfig with the DMA burst size and interframe gap. */
4408 static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
4410 void __iomem *ioaddr = tp->mmio_addr;
4412 /* Set DMA burst size and Interframe Gap Time */
4413 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
4414 (InterFrameGap << TxInterFrameGapShift));
/* Generic hardware start: dispatch the per-chip hw_start (call lost in
 * extraction), then enable all interrupt sources. */
4417 static void rtl_hw_start(struct net_device *dev)
4419 struct rtl8169_private *tp = netdev_priv(dev);
4423 rtl_irq_enable_all(tp);
/*
 * Load the 64-bit TX/RX descriptor ring base addresses into hardware.
 * The high halves must be written first (iop3xx quirk, see comment).
 */
4426 static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
4427 void __iomem *ioaddr)
4430 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
4431 * register to be written before TxDescAddrLow to work.
4432 * Switching from MMIO to I/O access fixes the issue as well.
4434 RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
4435 RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
4436 RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
4437 RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
/* Read-modify-write flush of CPlusCmd; returns the value read (the
 * `return cmd;` line was lost in extraction). */
4440 static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
4444 cmd = RTL_R16(CPlusCmd);
4445 RTL_W16(CPlusCmd, cmd);
/* Set the RX size filter one byte above the buffer size — a tight
 * limit hurts, so filtering is effectively disabled. */
4449 static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
4451 /* Low hurts. Let's disable the filtering. */
4452 RTL_W16(RxMaxSize, rx_buf_sz + 1);
/*
 * Write the undocumented register 0x7c with a value selected by the
 * (mac_version, PCI clock speed) pair for 8110SCd/SCe chips; other
 * versions match no table entry and are untouched.
 */
4455 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
4457 static const struct rtl_cfg2_info {
4462 { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
4463 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
4464 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
4465 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
4467 const struct rtl_cfg2_info *p = cfg2_info;
4471 clk = RTL_R8(Config2) & PCI_Clock_66MHz;
4472 for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
4473 if ((p->mac_version == mac_version) && (p->clk == clk)) {
4474 RTL_W32(0x7c, p->val);
/*
 * ndo_set_rx_mode hook: derive the RxConfig accept bits and the 64-bit
 * multicast hash from the device flags and multicast list.  Promiscuous
 * and "too many groups"/allmulti both open the hash fully; otherwise
 * each group address sets one hash bit (top 6 bits of its CRC32).
 * Post-VER_06 chips take the hash words swapped/byte-reversed, and
 * VER_35 always uses a fully-open hash.
 */
4480 static void rtl_set_rx_mode(struct net_device *dev)
4482 struct rtl8169_private *tp = netdev_priv(dev);
4483 void __iomem *ioaddr = tp->mmio_addr;
4484 u32 mc_filter[2]; /* Multicast hash filter */
4488 if (dev->flags & IFF_PROMISC) {
4489 /* Unconditionally log net taps. */
4490 netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
4492 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
4494 mc_filter[1] = mc_filter[0] = 0xffffffff;
4495 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
4496 (dev->flags & IFF_ALLMULTI)) {
4497 /* Too many to filter perfectly -- accept all multicasts. */
4498 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
4499 mc_filter[1] = mc_filter[0] = 0xffffffff;
4501 struct netdev_hw_addr *ha;
4503 rx_mode = AcceptBroadcast | AcceptMyPhys;
4504 mc_filter[1] = mc_filter[0] = 0;
4505 netdev_for_each_mc_addr(ha, dev) {
4506 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
4507 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
4508 rx_mode |= AcceptMulticast;
4512 if (dev->features & NETIF_F_RXALL)
4513 rx_mode |= (AcceptErr | AcceptRunt);
4515 tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
4517 if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
4518 u32 data = mc_filter[0];
4520 mc_filter[0] = swab32(mc_filter[1]);
4521 mc_filter[1] = swab32(data);
4524 if (tp->mac_version == RTL_GIGA_MAC_VER_35)
4525 mc_filter[1] = mc_filter[0] = 0xffffffff;
4527 RTL_W32(MAR0 + 4, mc_filter[1]);
4528 RTL_W32(MAR0 + 0, mc_filter[0]);
4530 RTL_W32(RxConfig, tmp);
/*
 * Hardware init for the original 8169 family (MAC VER_01..VER_06).
 * Register write ordering matters throughout; several steps are gated on
 * the exact MAC version because early chips need Tx/Rx enabled before
 * TxConfig is programmed, later ones after.
 */
4533 static void rtl_hw_start_8169(struct net_device *dev)
4535 struct rtl8169_private *tp = netdev_priv(dev);
4536 void __iomem *ioaddr = tp->mmio_addr;
4537 struct pci_dev *pdev = tp->pci_dev;
4539 if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
4540 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
4541 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4544 RTL_W8(Cfg9346, Cfg9346_Unlock);
/* Early chips: enable Tx/Rx before configuring them. */
4545 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4546 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4547 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4548 tp->mac_version == RTL_GIGA_MAC_VER_04)
4549 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4553 RTL_W8(EarlyTxThres, NoEarlyTx);
4555 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
4557 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4558 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4559 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4560 tp->mac_version == RTL_GIGA_MAC_VER_04)
4561 rtl_set_rx_tx_config_registers(tp);
4563 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
4565 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4566 tp->mac_version == RTL_GIGA_MAC_VER_03) {
4567 dprintk("Set MAC Reg C+CR Offset 0xE0. "
4568 "Bit-3 and bit-14 MUST be 1\n");
4569 tp->cp_cmd |= (1 << 14);
4572 RTL_W16(CPlusCmd, tp->cp_cmd);
4574 rtl8169_set_magic_reg(ioaddr, tp->mac_version);
4577 * Undocumented corner. Supposedly:
4578 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
4580 RTL_W16(IntrMitigate, 0x0000);
4582 rtl_set_rx_tx_desc_registers(tp, ioaddr);
/* Later chips: enable Tx/Rx only after descriptors are in place. */
4584 if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
4585 tp->mac_version != RTL_GIGA_MAC_VER_02 &&
4586 tp->mac_version != RTL_GIGA_MAC_VER_03 &&
4587 tp->mac_version != RTL_GIGA_MAC_VER_04) {
4588 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4589 rtl_set_rx_tx_config_registers(tp);
4592 RTL_W8(Cfg9346, Cfg9346_Lock);
4594 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
4597 RTL_W32(RxMissed, 0);
4599 rtl_set_rx_mode(dev);
4601 /* no early-rx interrupts */
4602 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
/* CSI (PCIe config-space indirect) write via the per-chip ops hook. */
4605 static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4607 if (tp->csi_ops.write)
4608 tp->csi_ops.write(tp, addr, value);
/* CSI read via ops hook; returns all-ones when no hook is installed. */
4611 static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4613 return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
/* RMW of CSI register 0x070c: keep low 24 bits, OR in the given bits. */
4616 static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
4620 csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
4621 rtl_csi_write(tp, 0x070c, csi | bits);
4624 static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
4626 rtl_csi_access_enable(tp, 0x17000000);
4629 static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
4631 rtl_csi_access_enable(tp, 0x27000000);
/* Completion flag for a CSI transaction. */
4634 DECLARE_RTL_COND(rtl_csiar_cond)
4636 void __iomem *ioaddr = tp->mmio_addr;
4638 return RTL_R32(CSIAR) & CSIAR_FLAG;
/* Standard CSI write: data, then command; poll FLAG low for completion. */
4641 static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
4643 void __iomem *ioaddr = tp->mmio_addr;
4645 RTL_W32(CSIDR, value);
4646 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4647 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4649 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
/* Standard CSI read: poll FLAG high; ~0 on timeout. */
4652 static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
4654 void __iomem *ioaddr = tp->mmio_addr;
4656 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
4657 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4659 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
4660 RTL_R32(CSIDR) : ~0;
/* 8402-family CSI write: same as standard plus a function-select bit. */
4663 static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
4665 void __iomem *ioaddr = tp->mmio_addr;
4667 RTL_W32(CSIDR, value);
4668 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4669 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
4672 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100)
4675 static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
4677 void __iomem *ioaddr = tp->mmio_addr;
4679 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
4680 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4682 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
4683 RTL_R32(CSIDR) : ~0;
/*
 * Select CSI accessors by MAC version: pre-PCIe chips get none (hooks
 * stay NULL), 8402/8411 need the FUNC_NIC variant, everything else uses
 * the standard accessors.
 */
4686 static void rtl_init_csi_ops(struct rtl8169_private *tp)
4688 struct csi_ops *ops = &tp->csi_ops;
4690 switch (tp->mac_version) {
4691 case RTL_GIGA_MAC_VER_01:
4692 case RTL_GIGA_MAC_VER_02:
4693 case RTL_GIGA_MAC_VER_03:
4694 case RTL_GIGA_MAC_VER_04:
4695 case RTL_GIGA_MAC_VER_05:
4696 case RTL_GIGA_MAC_VER_06:
4697 case RTL_GIGA_MAC_VER_10:
4698 case RTL_GIGA_MAC_VER_11:
4699 case RTL_GIGA_MAC_VER_12:
4700 case RTL_GIGA_MAC_VER_13:
4701 case RTL_GIGA_MAC_VER_14:
4702 case RTL_GIGA_MAC_VER_15:
4703 case RTL_GIGA_MAC_VER_16:
4704 case RTL_GIGA_MAC_VER_17:
4709 case RTL_GIGA_MAC_VER_37:
4710 case RTL_GIGA_MAC_VER_38:
4711 ops->write = r8402_csi_write;
4712 ops->read = r8402_csi_read;
4716 ops->write = r8169_csi_write;
4717 ops->read = r8169_csi_read;
4723 unsigned int offset;
/*
 * Apply a table of EPHY patches: for each entry, clear e->mask bits and
 * set e->bits at register e->offset.
 */
4728 static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
4734 w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4735 rtl_ephy_write(tp, e->offset, w);
/* Clear the PCIe CLKREQ enable bit in the Link Control register. */
4740 static void rtl_disable_clock_request(struct pci_dev *pdev)
4742 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
4743 PCI_EXP_LNKCTL_CLKREQ_EN);
/* Set the PCIe CLKREQ enable bit in the Link Control register. */
4746 static void rtl_enable_clock_request(struct pci_dev *pdev)
4748 pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
4749 PCI_EXP_LNKCTL_CLKREQ_EN);
4752 #define R8168_CPCMD_QUIRK_MASK (\
/* 8168B init: disable beacon, clear C+ quirk bits, tune PCIe MRRS. */
4763 static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
4765 void __iomem *ioaddr = tp->mmio_addr;
4766 struct pci_dev *pdev = tp->pci_dev;
4768 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4770 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4772 rtl_tx_performance_tweak(pdev,
4773 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
/* 8168B rev e/f: 8168bb init plus Tx packet size and a Config4 clear. */
4776 static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
4778 void __iomem *ioaddr = tp->mmio_addr;
4780 rtl_hw_start_8168bb(tp);
4782 RTL_W8(MaxTxPacketSize, TxPacketMax);
4784 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
/* Shared tail of all 8168C/CP variants. */
4787 static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
4789 void __iomem *ioaddr = tp->mmio_addr;
4790 struct pci_dev *pdev = tp->pci_dev;
4792 RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
4794 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4796 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4798 rtl_disable_clock_request(pdev);
4800 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168CP rev 1: EPHY patch table + common 8168cp tail. */
4803 static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
4805 static const struct ephy_info e_info_8168cp[] = {
4806 { 0x01, 0, 0x0001 },
4807 { 0x02, 0x0800, 0x1000 },
4808 { 0x03, 0, 0x0042 },
4809 { 0x06, 0x0080, 0x0000 },
4813 rtl_csi_access_enable_2(tp);
4815 rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
4817 __rtl_hw_start_8168cp(tp);
/* 8168CP rev 2: no EPHY patches, no clock-request change. */
4820 static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
4822 void __iomem *ioaddr = tp->mmio_addr;
4823 struct pci_dev *pdev = tp->pci_dev;
4825 rtl_csi_access_enable_2(tp);
4827 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4829 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4831 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168CP rev 3: like rev 2 plus a DBG_REG magic and Tx size limit. */
4834 static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
4836 void __iomem *ioaddr = tp->mmio_addr;
4837 struct pci_dev *pdev = tp->pci_dev;
4839 rtl_csi_access_enable_2(tp);
4841 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4844 RTL_W8(DBG_REG, 0x20);
4846 RTL_W8(MaxTxPacketSize, TxPacketMax);
4848 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4850 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168C rev 1: NAK fixups in DBG_REG plus its own EPHY table. */
4853 static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
4855 void __iomem *ioaddr = tp->mmio_addr;
4856 static const struct ephy_info e_info_8168c_1[] = {
4857 { 0x02, 0x0800, 0x1000 },
4858 { 0x03, 0, 0x0002 },
4859 { 0x06, 0x0080, 0x0000 }
4862 rtl_csi_access_enable_2(tp);
4864 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
4866 rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
4868 __rtl_hw_start_8168cp(tp);
/* 8168C rev 2. */
4871 static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
4873 static const struct ephy_info e_info_8168c_2[] = {
4874 { 0x01, 0, 0x0001 },
4875 { 0x03, 0x0400, 0x0220 }
4878 rtl_csi_access_enable_2(tp);
4880 rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
4882 __rtl_hw_start_8168cp(tp);
/* 8168C rev 3 is identical to rev 2. */
4885 static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
4887 rtl_hw_start_8168c_2(tp);
/* 8168C rev 4: common tail only, no EPHY patches. */
4890 static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
4892 rtl_csi_access_enable_2(tp);
4894 __rtl_hw_start_8168cp(tp);
/* 8168D init (MAC VER_25/26/27). */
4897 static void rtl_hw_start_8168d(struct rtl8169_private *tp)
4899 void __iomem *ioaddr = tp->mmio_addr;
4900 struct pci_dev *pdev = tp->pci_dev;
4902 rtl_csi_access_enable_2(tp);
4904 rtl_disable_clock_request(pdev);
4906 RTL_W8(MaxTxPacketSize, TxPacketMax);
4908 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4910 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168DP init (MAC VER_31). */
4913 static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
4915 void __iomem *ioaddr = tp->mmio_addr;
4916 struct pci_dev *pdev = tp->pci_dev;
4918 rtl_csi_access_enable_1(tp);
4920 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4922 RTL_W8(MaxTxPacketSize, TxPacketMax);
4924 rtl_disable_clock_request(pdev);
/* 8168D rev 4 (MAC VER_28): hand-rolled EPHY patch loop. */
4927 static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4929 void __iomem *ioaddr = tp->mmio_addr;
4930 struct pci_dev *pdev = tp->pci_dev;
4931 static const struct ephy_info e_info_8168d_4[] = {
4933 { 0x19, 0x20, 0x50 },
4938 rtl_csi_access_enable_1(tp);
4940 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4942 RTL_W8(MaxTxPacketSize, TxPacketMax);
4944 for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
4945 const struct ephy_info *e = e_info_8168d_4 + i;
/*
 * NOTE(review): unlike rtl_ephy_init(), this writes every patched
 * value to EPHY reg 0x03 (not e->offset) and keeps (w & e->mask)
 * instead of clearing ~e->mask. Looks deliberate for this chip but
 * is worth confirming against the vendor reference driver.
 */
4948 w = rtl_ephy_read(tp, e->offset);
4949 rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits);
4952 rtl_enable_clock_request(pdev);
/* 8168E rev 1 (MAC VER_32/33): large EPHY patch set, Tx FIFO reset. */
4955 static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
4957 void __iomem *ioaddr = tp->mmio_addr;
4958 struct pci_dev *pdev = tp->pci_dev;
4959 static const struct ephy_info e_info_8168e_1[] = {
4960 { 0x00, 0x0200, 0x0100 },
4961 { 0x00, 0x0000, 0x0004 },
4962 { 0x06, 0x0002, 0x0001 },
4963 { 0x06, 0x0000, 0x0030 },
4964 { 0x07, 0x0000, 0x2000 },
4965 { 0x00, 0x0000, 0x0020 },
4966 { 0x03, 0x5800, 0x2000 },
4967 { 0x03, 0x0000, 0x0001 },
4968 { 0x01, 0x0800, 0x1000 },
4969 { 0x07, 0x0000, 0x4000 },
4970 { 0x1e, 0x0000, 0x2000 },
4971 { 0x19, 0xffff, 0xfe6c },
4972 { 0x0a, 0x0000, 0x0040 }
4975 rtl_csi_access_enable_2(tp);
4977 rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
4979 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4981 RTL_W8(MaxTxPacketSize, TxPacketMax);
4983 rtl_disable_clock_request(pdev);
4985 /* Reset tx FIFO pointer */
4986 RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
4987 RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
4989 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
/* 8168E rev 2 (MAC VER_34): ERI tuning, auto-FIFO, EEE LED, ASPM. */
4992 static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
4994 void __iomem *ioaddr = tp->mmio_addr;
4995 struct pci_dev *pdev = tp->pci_dev;
4996 static const struct ephy_info e_info_8168e_2[] = {
4997 { 0x09, 0x0000, 0x0080 },
4998 { 0x19, 0x0000, 0x0224 }
5001 rtl_csi_access_enable_1(tp);
5003 rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
5005 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5007 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5008 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5009 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
5010 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5011 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
5012 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
5013 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5014 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
5016 RTL_W8(MaxTxPacketSize, EarlySize);
5018 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5019 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5021 /* Adjust EEE LED frequency */
5022 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5024 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5025 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
5026 RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
5027 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
/* Common 8168F init shared by 8168f_1 and 8411. */
5030 static void rtl_hw_start_8168f(struct rtl8169_private *tp)
5032 void __iomem *ioaddr = tp->mmio_addr;
5033 struct pci_dev *pdev = tp->pci_dev;
5035 rtl_csi_access_enable_2(tp);
5037 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5039 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5040 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5041 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
5042 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
/* Pulse ERI 0xdc bit 0 low then high. */
5043 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5044 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5045 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5046 rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5047 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
5048 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
5050 RTL_W8(MaxTxPacketSize, EarlySize);
5052 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5053 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5054 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5055 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN | FORCE_CLK);
5056 RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
5057 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
/* 8168F rev 1 (MAC VER_35/36): common init + EPHY patches + EEE LED. */
5060 static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
5062 void __iomem *ioaddr = tp->mmio_addr;
5063 static const struct ephy_info e_info_8168f_1[] = {
5064 { 0x06, 0x00c0, 0x0020 },
5065 { 0x08, 0x0001, 0x0002 },
5066 { 0x09, 0x0000, 0x0080 },
5067 { 0x19, 0x0000, 0x0224 }
5070 rtl_hw_start_8168f(tp);
5072 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5074 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
5076 /* Adjust EEE LED frequency */
5077 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
/* 8411 (MAC VER_38): 8168f common init with its own EPHY table. */
5080 static void rtl_hw_start_8411(struct rtl8169_private *tp)
5082 static const struct ephy_info e_info_8168f_1[] = {
5083 { 0x06, 0x00c0, 0x0020 },
5084 { 0x0f, 0xffff, 0x5200 },
5085 { 0x1e, 0x0000, 0x4000 },
5086 { 0x19, 0x0000, 0x0224 }
5089 rtl_hw_start_8168f(tp);
5091 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5093 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
/* 8168G rev 1 (MAC VER_40/41): ERI setup, ungate RXDV, EEE LED, ASPM. */
5096 static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5098 void __iomem *ioaddr = tp->mmio_addr;
5099 struct pci_dev *pdev = tp->pci_dev;
5101 rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
5102 rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
5103 rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
5104 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5106 rtl_csi_access_enable_1(tp);
5108 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
/* Pulse ERI 0xdc bit 0 low then high. */
5110 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5111 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5113 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5114 RTL_W32(MISC, (RTL_R32(MISC) | FORCE_CLK) & ~RXDV_GATED_EN);
5115 RTL_W8(MaxTxPacketSize, EarlySize);
5116 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5117 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5119 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5120 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5122 /* Adjust EEE LED frequency */
5123 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5125 rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
/*
 * Top-level init for the whole 8168 family: common register setup, then
 * dispatch to the per-revision routine by MAC version, then enable Tx/Rx.
 */
5128 static void rtl_hw_start_8168(struct net_device *dev)
5130 struct rtl8169_private *tp = netdev_priv(dev);
5131 void __iomem *ioaddr = tp->mmio_addr;
5133 RTL_W8(Cfg9346, Cfg9346_Unlock);
5135 RTL_W8(MaxTxPacketSize, TxPacketMax);
5137 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
5139 tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
5141 RTL_W16(CPlusCmd, tp->cp_cmd);
5143 RTL_W16(IntrMitigate, 0x5151);
5145 /* Work around for RxFIFO overflow. */
5146 if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
5147 tp->event_slow |= RxFIFOOver | PCSTimeout;
5148 tp->event_slow &= ~RxOverflow;
5151 rtl_set_rx_tx_desc_registers(tp, ioaddr);
5153 rtl_set_rx_mode(dev);
5155 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
5156 (InterFrameGap << TxInterFrameGapShift));
/* Per-revision quirks. */
5160 switch (tp->mac_version) {
5161 case RTL_GIGA_MAC_VER_11:
5162 rtl_hw_start_8168bb(tp);
5165 case RTL_GIGA_MAC_VER_12:
5166 case RTL_GIGA_MAC_VER_17:
5167 rtl_hw_start_8168bef(tp);
5170 case RTL_GIGA_MAC_VER_18:
5171 rtl_hw_start_8168cp_1(tp);
5174 case RTL_GIGA_MAC_VER_19:
5175 rtl_hw_start_8168c_1(tp);
5178 case RTL_GIGA_MAC_VER_20:
5179 rtl_hw_start_8168c_2(tp);
5182 case RTL_GIGA_MAC_VER_21:
5183 rtl_hw_start_8168c_3(tp);
5186 case RTL_GIGA_MAC_VER_22:
5187 rtl_hw_start_8168c_4(tp);
5190 case RTL_GIGA_MAC_VER_23:
5191 rtl_hw_start_8168cp_2(tp);
5194 case RTL_GIGA_MAC_VER_24:
5195 rtl_hw_start_8168cp_3(tp);
5198 case RTL_GIGA_MAC_VER_25:
5199 case RTL_GIGA_MAC_VER_26:
5200 case RTL_GIGA_MAC_VER_27:
5201 rtl_hw_start_8168d(tp);
5204 case RTL_GIGA_MAC_VER_28:
5205 rtl_hw_start_8168d_4(tp);
5208 case RTL_GIGA_MAC_VER_31:
5209 rtl_hw_start_8168dp(tp);
5212 case RTL_GIGA_MAC_VER_32:
5213 case RTL_GIGA_MAC_VER_33:
5214 rtl_hw_start_8168e_1(tp);
5216 case RTL_GIGA_MAC_VER_34:
5217 rtl_hw_start_8168e_2(tp);
5220 case RTL_GIGA_MAC_VER_35:
5221 case RTL_GIGA_MAC_VER_36:
5222 rtl_hw_start_8168f_1(tp);
5225 case RTL_GIGA_MAC_VER_38:
5226 rtl_hw_start_8411(tp);
5229 case RTL_GIGA_MAC_VER_40:
5230 case RTL_GIGA_MAC_VER_41:
5231 rtl_hw_start_8168g_1(tp);
/* Unknown MAC version: log and fall through with generic setup only. */
5235 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
5236 dev->name, tp->mac_version);
5240 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5242 RTL_W8(Cfg9346, Cfg9346_Lock);
/* No early-rx interrupts. */
5244 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
5247 #define R810X_CPCMD_QUIRK_MASK (\
/* 8102E rev 1: NAK fixup, Config1/3 setup, LED quirk, EPHY patches. */
5258 static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
5260 void __iomem *ioaddr = tp->mmio_addr;
5261 struct pci_dev *pdev = tp->pci_dev;
5262 static const struct ephy_info e_info_8102e_1[] = {
5263 { 0x01, 0, 0x6e65 },
5264 { 0x02, 0, 0x091f },
5265 { 0x03, 0, 0xc2f9 },
5266 { 0x06, 0, 0xafb5 },
5267 { 0x07, 0, 0x0e00 },
5268 { 0x19, 0, 0xec80 },
5269 { 0x01, 0, 0x2e65 },
5274 rtl_csi_access_enable_2(tp);
5276 RTL_W8(DBG_REG, FIX_NAK_1);
5278 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5281 LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
5282 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
/* Quirk: when both LED bits are set, clear LEDS0. */
5284 cfg1 = RTL_R8(Config1);
5285 if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
5286 RTL_W8(Config1, cfg1 & ~LEDS0);
5288 rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
/* 8102E rev 2: minimal init, no EPHY patches. */
5291 static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
5293 void __iomem *ioaddr = tp->mmio_addr;
5294 struct pci_dev *pdev = tp->pci_dev;
5296 rtl_csi_access_enable_2(tp);
5298 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5300 RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
5301 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
/* 8102E rev 3: rev 2 init plus one EPHY register write. */
5304 static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
5306 rtl_hw_start_8102e_2(tp);
5308 rtl_ephy_write(tp, 0x03, 0xc2f9);
/* 8105E rev 1: ASPM exit forcing, tally-counter tweak, EPHY patches. */
5311 static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
5313 void __iomem *ioaddr = tp->mmio_addr;
5314 static const struct ephy_info e_info_8105e_1[] = {
5315 { 0x07, 0, 0x4000 },
5316 { 0x19, 0, 0x0200 },
5317 { 0x19, 0, 0x0020 },
5318 { 0x1e, 0, 0x2000 },
5319 { 0x03, 0, 0x0001 },
5320 { 0x19, 0, 0x0100 },
5321 { 0x19, 0, 0x0004 },
5325 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5326 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5328 /* Disable Early Tally Counter */
5329 RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
5331 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5332 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5333 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5334 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5335 RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
5337 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
/* 8105E rev 2: rev 1 init plus one EPHY bit set. */
5340 static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
5342 rtl_hw_start_8105e_1(tp);
5343 rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
/* 8402 (MAC VER_37): EPHY patch, auto-FIFO, ERI tuning. */
5346 static void rtl_hw_start_8402(struct rtl8169_private *tp)
5348 void __iomem *ioaddr = tp->mmio_addr;
5349 static const struct ephy_info e_info_8402[] = {
5350 { 0x19, 0xffff, 0xff64 },
5354 rtl_csi_access_enable_2(tp);
5356 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5357 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5359 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5360 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5361 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5362 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5363 RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
5365 rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
5367 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
5369 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
5370 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
5371 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5372 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5373 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5374 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5375 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
/* 8106 (MAC VER_39): ASPM exit forcing, MISC/MCU setup. */
5378 static void rtl_hw_start_8106(struct rtl8169_private *tp)
5380 void __iomem *ioaddr = tp->mmio_addr;
5382 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5383 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5386 (RTL_R32(MISC) | DISABLE_LAN_EN | FORCE_CLK) & ~EARLY_TALLY_EN);
5387 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5388 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5389 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5390 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
/*
 * Top-level init for the 8101/810x family: per-revision quirks first
 * (inside the Cfg9346 unlock window), then common ring/filter setup.
 */
5393 static void rtl_hw_start_8101(struct net_device *dev)
5395 struct rtl8169_private *tp = netdev_priv(dev);
5396 void __iomem *ioaddr = tp->mmio_addr;
5397 struct pci_dev *pdev = tp->pci_dev;
/* Newer 810x chips do not report Rx FIFO overflow reliably. */
5399 if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
5400 tp->event_slow &= ~RxFIFOOver;
5402 if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
5403 tp->mac_version == RTL_GIGA_MAC_VER_16)
5404 pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
5405 PCI_EXP_DEVCTL_NOSNOOP_EN);
5407 RTL_W8(Cfg9346, Cfg9346_Unlock);
5409 switch (tp->mac_version) {
5410 case RTL_GIGA_MAC_VER_07:
5411 rtl_hw_start_8102e_1(tp);
5414 case RTL_GIGA_MAC_VER_08:
5415 rtl_hw_start_8102e_3(tp);
5418 case RTL_GIGA_MAC_VER_09:
5419 rtl_hw_start_8102e_2(tp);
5422 case RTL_GIGA_MAC_VER_29:
5423 rtl_hw_start_8105e_1(tp);
5425 case RTL_GIGA_MAC_VER_30:
5426 rtl_hw_start_8105e_2(tp);
5429 case RTL_GIGA_MAC_VER_37:
5430 rtl_hw_start_8402(tp);
5433 case RTL_GIGA_MAC_VER_39:
5434 rtl_hw_start_8106(tp);
5438 RTL_W8(Cfg9346, Cfg9346_Lock);
5440 RTL_W8(MaxTxPacketSize, TxPacketMax);
5442 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
5444 tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
5445 RTL_W16(CPlusCmd, tp->cp_cmd);
5447 RTL_W16(IntrMitigate, 0x0000);
5449 rtl_set_rx_tx_desc_registers(tp, ioaddr);
5451 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5452 rtl_set_rx_tx_config_registers(tp);
5456 rtl_set_rx_mode(dev);
/* No early-rx interrupts. */
5458 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
/*
 * ndo_change_mtu: validate against the chip's jumbo limit and toggle the
 * jumbo-frame hardware mode; features may change with the MTU.
 */
5461 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
5463 struct rtl8169_private *tp = netdev_priv(dev);
5465 if (new_mtu < ETH_ZLEN ||
5466 new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
5469 if (new_mtu > ETH_DATA_LEN)
5470 rtl_hw_jumbo_enable(tp);
5472 rtl_hw_jumbo_disable(tp);
5475 netdev_update_features(dev);
/*
 * Poison an Rx descriptor so the NIC cannot use it: recognizable bad
 * DMA address, ownership and size bits cleared.
 */
5480 static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
5482 desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
5483 desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
/* Unmap and release one Rx buffer, then poison its descriptor. */
5486 static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
5487 void **data_buff, struct RxDesc *desc)
5489 dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
5494 rtl8169_make_unusable_by_asic(desc);
/* Hand a descriptor to the NIC, preserving its RingEnd marker. */
5497 static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
5499 u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
5501 desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
/* Install a DMA mapping in a descriptor and give it to the NIC. */
5504 static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
5507 desc->addr = cpu_to_le64(mapping);
5509 rtl8169_mark_to_asic(desc, rx_buf_sz);
/* Round a buffer pointer up to the 16-byte alignment the NIC requires. */
5512 static inline void *rtl8169_align(void *data)
5514 return (void *)ALIGN((long)data, 16);
/*
 * Allocate one NUMA-local Rx buffer, retrying with slack when kmalloc
 * returns an unaligned block, map it for DMA and install it in the
 * descriptor. Returns the buffer (NULL on failure).
 */
5517 static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
5518 struct RxDesc *desc)
5522 struct device *d = &tp->pci_dev->dev;
5523 struct net_device *dev = tp->dev;
5524 int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
5526 data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
/* Unaligned block: reallocate with 15 bytes of slack and align by hand. */
5530 if (rtl8169_align(data) != data) {
5532 data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
5537 mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
5539 if (unlikely(dma_mapping_error(d, mapping))) {
5540 if (net_ratelimit())
5541 netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
5545 rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
/* Release every allocated Rx buffer in the ring. */
5553 static void rtl8169_rx_clear(struct rtl8169_private *tp)
5557 for (i = 0; i < NUM_RX_DESC; i++) {
5558 if (tp->Rx_databuff[i]) {
5559 rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
5560 tp->RxDescArray + i);
/* Flag a descriptor as the ring's last entry (EOR). */
5565 static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
5567 desc->opts1 |= cpu_to_le32(RingEnd);
/*
 * Populate every empty slot of the Rx ring; on allocation failure the
 * whole ring is torn down again.
 */
5570 static int rtl8169_rx_fill(struct rtl8169_private *tp)
5574 for (i = 0; i < NUM_RX_DESC; i++) {
5577 if (tp->Rx_databuff[i])
5580 data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
5582 rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
5585 tp->Rx_databuff[i] = data;
5588 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
5592 rtl8169_rx_clear(tp);
/* Reset ring bookkeeping and (re)fill the Rx ring. */
5596 static int rtl8169_init_ring(struct net_device *dev)
5598 struct rtl8169_private *tp = netdev_priv(dev);
5600 rtl8169_init_ring_indexes(tp);
5602 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
5603 memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
5605 return rtl8169_rx_fill(tp);
/* Undo the DMA mapping of one Tx slot. */
5608 static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
5609 struct TxDesc *desc)
5611 unsigned int len = tx_skb->len;
5613 dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
/*
 * Unmap and drop n consecutive Tx slots starting at ring index start,
 * accounting dropped skbs in the device stats.
 */
5621 static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
5626 for (i = 0; i < n; i++) {
5627 unsigned int entry = (start + i) % NUM_TX_DESC;
5628 struct ring_info *tx_skb = tp->tx_skb + entry;
5629 unsigned int len = tx_skb->len;
5632 struct sk_buff *skb = tx_skb->skb;
5634 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5635 tp->TxDescArray + entry);
5637 tp->dev->stats.tx_dropped++;
/* Drop the whole Tx ring and reset its cursors. */
5645 static void rtl8169_tx_clear(struct rtl8169_private *tp)
5647 rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
5648 tp->cur_tx = tp->dirty_tx = 0;
/*
 * Full reset path (task context): quiesce NAPI and the Tx queue, reset
 * the chip, reclaim both rings, restart and re-check the link.
 */
5651 static void rtl_reset_work(struct rtl8169_private *tp)
5653 struct net_device *dev = tp->dev;
5656 napi_disable(&tp->napi);
5657 netif_stop_queue(dev);
5658 synchronize_sched();
5660 rtl8169_hw_reset(tp);
/* Give all Rx buffers back to the NIC (they were never freed). */
5662 for (i = 0; i < NUM_RX_DESC; i++)
5663 rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
5665 rtl8169_tx_clear(tp);
5666 rtl8169_init_ring_indexes(tp);
5668 napi_enable(&tp->napi);
5670 netif_wake_queue(dev);
5671 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
/* ndo_tx_timeout: defer the heavy reset to the driver workqueue. */
5674 static void rtl8169_tx_timeout(struct net_device *dev)
5676 struct rtl8169_private *tp = netdev_priv(dev);
5678 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/*
 * Map every paged fragment of skb into consecutive Tx descriptors after
 * the head slot. The last fragment gets LastFrag and owns the skb
 * pointer. On DMA failure all already-mapped fragments are unwound.
 */
5681 static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
5684 struct skb_shared_info *info = skb_shinfo(skb);
5685 unsigned int cur_frag, entry;
5686 struct TxDesc * uninitialized_var(txd);
5687 struct device *d = &tp->pci_dev->dev;
5690 for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
5691 const skb_frag_t *frag = info->frags + cur_frag;
5696 entry = (entry + 1) % NUM_TX_DESC;
5698 txd = tp->TxDescArray + entry;
5699 len = skb_frag_size(frag);
5700 addr = skb_frag_address(frag);
5701 mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
5702 if (unlikely(dma_mapping_error(d, mapping))) {
5703 if (net_ratelimit())
5704 netif_err(tp, drv, tp->dev,
5705 "Failed to map TX fragments DMA!\n");
/* Build opts1 in a local first; last ring slot carries RingEnd. */
5709 /* Anti gcc 2.95.3 bugware (sic) */
5710 status = opts[0] | len |
5711 (RingEnd * !((entry + 1) % NUM_TX_DESC));
5713 txd->opts1 = cpu_to_le32(status);
5714 txd->opts2 = cpu_to_le32(opts[1]);
5715 txd->addr = cpu_to_le64(mapping);
5717 tp->tx_skb[entry].len = len;
/* Only the final fragment records the skb and LastFrag. */
5721 tp->tx_skb[entry].skb = skb;
5722 txd->opts1 |= cpu_to_le32(LastFrag);
/* Error unwind: release the fragments mapped so far. */
5728 rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
/*
 * Fill the TSO/checksum-offload bits of opts[] for this skb, using the
 * per-descriptor-version layout (offset/shift/flag values differ).
 */
5732 static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
5733 struct sk_buff *skb, u32 *opts)
5735 const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
5736 u32 mss = skb_shinfo(skb)->gso_size;
5737 int offset = info->opts_offset;
5741 opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
5742 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5743 const struct iphdr *ip = ip_hdr(skb);
5745 if (ip->protocol == IPPROTO_TCP)
5746 opts[offset] |= info->checksum.tcp;
5747 else if (ip->protocol == IPPROTO_UDP)
5748 opts[offset] |= info->checksum.udp;
/*
 * ndo_start_xmit: map the linear head, offload csum/TSO bits, queue any
 * fragments, publish the head descriptor last (DescOwn ordering), kick
 * the NIC, and stop the queue when the ring is nearly full.
 */
5754 static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5755 struct net_device *dev)
5757 struct rtl8169_private *tp = netdev_priv(dev);
5758 unsigned int entry = tp->cur_tx % NUM_TX_DESC;
5759 struct TxDesc *txd = tp->TxDescArray + entry;
5760 void __iomem *ioaddr = tp->mmio_addr;
5761 struct device *d = &tp->pci_dev->dev;
5767 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
5768 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
/* Head slot still owned by the NIC: cannot queue. */
5772 if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
5775 len = skb_headlen(skb);
5776 mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
5777 if (unlikely(dma_mapping_error(d, mapping))) {
5778 if (net_ratelimit())
5779 netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
5783 tp->tx_skb[entry].len = len;
5784 txd->addr = cpu_to_le64(mapping);
5786 opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
5789 rtl8169_tso_csum(tp, skb, opts);
5791 frags = rtl8169_xmit_frags(tp, skb, opts);
/* Multi-fragment: head gets FirstFrag only; single: both flags. */
5795 opts[0] |= FirstFrag;
5797 opts[0] |= FirstFrag | LastFrag;
5798 tp->tx_skb[entry].skb = skb;
5801 txd->opts2 = cpu_to_le32(opts[1]);
5803 skb_tx_timestamp(skb);
/* opts1 (with DescOwn) goes in last so the NIC sees a complete desc. */
5807 /* Anti gcc 2.95.3 bugware (sic) */
5808 status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
5809 txd->opts1 = cpu_to_le32(status);
5811 tp->cur_tx += frags + 1;
/* Doorbell: tell the NIC to poll the normal-priority Tx queue. */
5815 RTL_W8(TxPoll, NPQ);
5819 if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
5820 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
5821 * not miss a ring update when it notices a stopped queue.
5824 netif_stop_queue(dev);
5825 /* Sync with rtl_tx:
5826 * - publish queue status and cur_tx ring index (write barrier)
5827 * - refresh dirty_tx ring index (read barrier).
5828 * May the current thread have a pessimistic view of the ring
5829 * status and forget to wake up queue, a racing rtl_tx thread
5833 if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
5834 netif_wake_queue(dev);
5837 return NETDEV_TX_OK;
/* Error paths: unmap the head, drop the skb, or back-pressure. */
5840 rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
5843 dev->stats.tx_dropped++;
5844 return NETDEV_TX_OK;
5847 netif_stop_queue(dev);
5848 dev->stats.tx_dropped++;
5849 return NETDEV_TX_BUSY;
/*
 * rtl8169_pcierr_interrupt - recover from a PCI bus error (SYSErr).
 *
 * Logs PCI_COMMAND/PCI_STATUS, rewrites PCI_COMMAND (dropping parity
 * checking on known broken-parity devices), clears the sticky error
 * bits in PCI_STATUS, disables 64-bit DAC addressing if the error hit
 * at boot time, then schedules a full chip reset.
 */
5852 static void rtl8169_pcierr_interrupt(struct net_device *dev)
5854 struct rtl8169_private *tp = netdev_priv(dev);
5855 struct pci_dev *pdev = tp->pci_dev;
5856 u16 pci_status, pci_cmd;
5858 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
5859 pci_read_config_word(pdev, PCI_STATUS, &pci_status);
5861 netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
5862 pci_cmd, pci_status);
5865 * The recovery sequence below admits a very elaborated explanation:
5866 * - it seems to work;
5867 * - I did not see what else could be done;
5868 * - it makes iop3xx happy.
5870 * Feel free to adjust to your needs.
5872 if (pdev->broken_parity_status)
5873 pci_cmd &= ~PCI_COMMAND_PARITY;
5875 pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
5877 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
/* Write-one-to-clear the latched error bits. */
5879 pci_write_config_word(pdev, PCI_STATUS,
5880 pci_status & (PCI_STATUS_DETECTED_PARITY |
5881 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
5882 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
5884 /* The infamous DAC f*ckup only happens at boot time */
5885 if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
5886 void __iomem *ioaddr = tp->mmio_addr;
5888 netif_info(tp, intr, dev, "disabling PCI DAC\n");
5889 tp->cp_cmd &= ~PCIDAC;
5890 RTL_W16(CPlusCmd, tp->cp_cmd);
5891 dev->features &= ~NETIF_F_HIGHDMA;
5894 rtl8169_hw_reset(tp);
/* Defer the full recovery to process context. */
5896 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/*
 * rtl_tx - reclaim completed Tx descriptors (NAPI context).
 *
 * Walks the ring from dirty_tx toward cur_tx, unmapping each completed
 * buffer, accounting packet/byte stats and freeing the skb on the last
 * fragment.  Afterwards it wakes the queue if it was stopped and slots
 * are now available, and re-kicks TxPoll if new packets were queued
 * concurrently (8168 lost-TxPoll workaround).
 * NOTE(review): elided extract - the loop's dirty_tx++/tx_left--
 * advance and the memory barriers referenced by the comments are not
 * visible here.
 */
5899 static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
5901 unsigned int dirty_tx, tx_left;
5903 dirty_tx = tp->dirty_tx;
5905 tx_left = tp->cur_tx - dirty_tx;
5907 while (tx_left > 0) {
5908 unsigned int entry = dirty_tx % NUM_TX_DESC;
5909 struct ring_info *tx_skb = tp->tx_skb + entry;
5913 status = le32_to_cpu(tp->TxDescArray[entry].opts1);
/* NIC still owns this descriptor: nothing more to reclaim. */
5914 if (status & DescOwn)
5917 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5918 tp->TxDescArray + entry);
5919 if (status & LastFrag) {
5920 u64_stats_update_begin(&tp->tx_stats.syncp);
5921 tp->tx_stats.packets++;
5922 tp->tx_stats.bytes += tx_skb->skb->len;
5923 u64_stats_update_end(&tp->tx_stats.syncp);
5924 dev_kfree_skb(tx_skb->skb);
5931 if (tp->dirty_tx != dirty_tx) {
5932 tp->dirty_tx = dirty_tx;
5933 /* Sync with rtl8169_start_xmit:
5934 * - publish dirty_tx ring index (write barrier)
5935 * - refresh cur_tx ring index and queue status (read barrier)
5936 * May the current thread miss the stopped queue condition,
5937 * a racing xmit thread can only have a right view of the
5941 if (netif_queue_stopped(dev) &&
5942 TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
5943 netif_wake_queue(dev);
5946 * 8168 hack: TxPoll requests are lost when the Tx packets are
5947 * too close. Let's kick an extra TxPoll request when a burst
5948 * of start_xmit activity is detected (if it is not detected,
5949 * it is slow enough). -- FR
5951 if (tp->cur_tx != dirty_tx) {
5952 void __iomem *ioaddr = tp->mmio_addr;
5954 RTL_W8(TxPoll, NPQ);
/*
 * rtl8169_fragmented_frame - true if the Rx descriptor does not carry a
 * whole frame, i.e. FirstFrag and LastFrag are not both set in @status.
 */
5959 static inline int rtl8169_fragmented_frame(u32 status)
5961 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
/*
 * rtl8169_rx_csum - propagate the hardware Rx checksum verdict to @skb.
 *
 * Marks CHECKSUM_UNNECESSARY when the chip recognized a TCP or UDP
 * frame and did not flag a checksum failure; otherwise leaves the skb
 * with no checksum information.
 */
5964 static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
5966 u32 status = opts1 & RxProtoMask;
5968 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
5969 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
5970 skb->ip_summed = CHECKSUM_UNNECESSARY;
5972 skb_checksum_none_assert(skb);
/*
 * rtl8169_try_rx_copy - copy a received frame out of the DMA buffer
 * into a freshly allocated skb, so the original Rx buffer can be handed
 * straight back to the NIC.
 *
 * Syncs the buffer for CPU access before the copy and back for device
 * access afterwards.  Returns the new skb, or NULL on allocation
 * failure (allocation-failure branch elided in this extract).
 */
5975 static struct sk_buff *rtl8169_try_rx_copy(void *data,
5976 struct rtl8169_private *tp,
5980 struct sk_buff *skb;
5981 struct device *d = &tp->pci_dev->dev;
/* The raw buffer is over-allocated; align before copying. */
5983 data = rtl8169_align(data);
5984 dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
5986 skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
5988 memcpy(skb->data, data, pkt_size);
5989 dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
/*
 * rtl_rx - receive up to @budget frames (NAPI context).
 *
 * Walks the Rx ring from cur_rx, stopping at the first descriptor still
 * owned by the NIC.  Error descriptors update the error counters (and
 * schedule a reset on FIFO overflow); good frames are copied out via
 * rtl8169_try_rx_copy(), checksum/VLAN-annotated and handed to GRO.
 * Each processed descriptor is re-armed for the NIC.  Returns the count
 * of descriptors processed (return statement elided in this extract).
 */
5994 static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
5996 unsigned int cur_rx, rx_left;
5999 cur_rx = tp->cur_rx;
6000 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
6001 rx_left = min(rx_left, budget);
6003 for (; rx_left > 0; rx_left--, cur_rx++) {
6004 unsigned int entry = cur_rx % NUM_RX_DESC;
6005 struct RxDesc *desc = tp->RxDescArray + entry;
/* opts1_mask hides RxBOVF/RxFOVF on chips that misreport them. */
6009 status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
6011 if (status & DescOwn)
6013 if (unlikely(status & RxRES)) {
6014 netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
6016 dev->stats.rx_errors++;
6017 if (status & (RxRWT | RxRUNT))
6018 dev->stats.rx_length_errors++;
6020 dev->stats.rx_crc_errors++;
6021 if (status & RxFOVF) {
/* FIFO overflow: only a chip reset recovers reliably. */
6022 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
6023 dev->stats.rx_fifo_errors++;
/* RXALL: deliver runt/bad-CRC frames anyway when requested. */
6025 if ((status & (RxRUNT | RxCRC)) &&
6026 !(status & (RxRWT | RxFOVF)) &&
6027 (dev->features & NETIF_F_RXALL))
6030 struct sk_buff *skb;
6035 addr = le64_to_cpu(desc->addr);
/* Strip the trailing 4-byte FCS unless RXFCS asks to keep it. */
6036 if (likely(!(dev->features & NETIF_F_RXFCS)))
6037 pkt_size = (status & 0x00003fff) - 4;
6039 pkt_size = status & 0x00003fff;
6042 * The driver does not support incoming fragmented
6043 * frames. They are seen as a symptom of over-mtu
6046 if (unlikely(rtl8169_fragmented_frame(status))) {
6047 dev->stats.rx_dropped++;
6048 dev->stats.rx_length_errors++;
6049 goto release_descriptor;
6052 skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
6053 tp, pkt_size, addr);
6055 dev->stats.rx_dropped++;
6056 goto release_descriptor;
6059 rtl8169_rx_csum(skb, status);
6060 skb_put(skb, pkt_size);
6061 skb->protocol = eth_type_trans(skb, dev);
6063 rtl8169_rx_vlan_tag(desc, skb);
6065 napi_gro_receive(&tp->napi, skb);
6067 u64_stats_update_begin(&tp->rx_stats.syncp);
6068 tp->rx_stats.packets++;
6069 tp->rx_stats.bytes += pkt_size;
6070 u64_stats_update_end(&tp->rx_stats.syncp);
/* Give the descriptor back to the NIC. */
6075 rtl8169_mark_to_asic(desc, rx_buf_sz);
6078 count = cur_rx - tp->cur_rx;
6079 tp->cur_rx = cur_rx;
6081 tp->dirty_rx += count;
/*
 * rtl8169_interrupt - hard IRQ handler.
 *
 * Reads the event status (0xffff means the device is gone, e.g. during
 * hotplug) and, if any NAPI or slow event is pending, masks the chip's
 * interrupts and schedules NAPI.  All real work happens in
 * rtl8169_poll() / rtl_task().
 */
6086 static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
6088 struct net_device *dev = dev_instance;
6089 struct rtl8169_private *tp = netdev_priv(dev);
6093 status = rtl_get_events(tp);
6094 if (status && status != 0xffff) {
6095 status &= RTL_EVENT_NAPI | tp->event_slow;
6099 rtl_irq_disable(tp);
6100 napi_schedule(&tp->napi);
6103 return IRQ_RETVAL(handled);
/*
 * rtl_slow_event_work - handle rare events in workqueue context:
 * Rx FIFO overflow (with the MAC_VER_11 stop-queue workaround),
 * PCI system errors and link changes, then re-enable interrupts.
 */
6107 * Workqueue context.
6109 static void rtl_slow_event_work(struct rtl8169_private *tp)
6111 struct net_device *dev = tp->dev;
6114 status = rtl_get_events(tp) & tp->event_slow;
6115 rtl_ack_events(tp, status);
6117 if (unlikely(status & RxFIFOOver)) {
6118 switch (tp->mac_version) {
6119 /* Work around for rx fifo overflow */
6120 case RTL_GIGA_MAC_VER_11:
6121 netif_stop_queue(dev);
6122 /* XXX - Hack alert. See rtl_task(). */
6123 set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
6129 if (unlikely(status & SYSErr))
6130 rtl8169_pcierr_interrupt(dev);
6132 if (status & LinkChg)
6133 __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
6135 rtl_irq_enable_all(tp);
/*
 * rtl_task - single workqueue entry point dispatching the pending
 * deferred actions (slow events, reset, PHY work) in table order,
 * under the work mutex, only while the interface is running and
 * task processing is enabled.
 */
6138 static void rtl_task(struct work_struct *work)
6140 static const struct {
6142 void (*action)(struct rtl8169_private *);
6144 /* XXX - keep rtl_slow_event_work() as first element. */
6145 { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
6146 { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
6147 { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work }
6149 struct rtl8169_private *tp =
6150 container_of(work, struct rtl8169_private, wk.work);
6151 struct net_device *dev = tp->dev;
6156 if (!netif_running(dev) ||
6157 !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
6160 for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
/* Each flag is consumed exactly once per run. */
6163 pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
6165 rtl_work[i].action(tp);
6169 rtl_unlock_work(tp);
/*
 * rtl8169_poll - NAPI poll callback.
 *
 * Acks the fast events, runs Rx within @budget and Tx reclaim, defers
 * any slow event to the workqueue (leaving it masked so it cannot
 * re-fire before the work runs), and re-enables interrupts once the
 * budget is not exhausted.
 */
6172 static int rtl8169_poll(struct napi_struct *napi, int budget)
6174 struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
6175 struct net_device *dev = tp->dev;
6176 u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
6180 status = rtl_get_events(tp);
/* Slow events are acked by rtl_slow_event_work(), not here. */
6181 rtl_ack_events(tp, status & ~tp->event_slow);
6183 if (status & RTL_EVENT_NAPI_RX)
6184 work_done = rtl_rx(dev, tp, (u32) budget);
6186 if (status & RTL_EVENT_NAPI_TX)
6189 if (status & tp->event_slow) {
6190 enable_mask &= ~tp->event_slow;
6192 rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
6195 if (work_done < budget) {
6196 napi_complete(napi);
6198 rtl_irq_enable(tp, enable_mask);
/*
 * rtl8169_rx_missed - fold the chip's RxMissed counter into
 * dev->stats.rx_missed_errors and reset it.  Only chips up to
 * RTL_GIGA_MAC_VER_06 expose this register.
 */
6205 static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
6207 struct rtl8169_private *tp = netdev_priv(dev);
6209 if (tp->mac_version > RTL_GIGA_MAC_VER_06)
/* 24-bit hardware counter. */
6212 dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
6213 RTL_W32(RxMissed, 0);
/*
 * rtl8169_down - quiesce the device: stop the timer, NAPI and the
 * queue, reset the hardware, harvest the missed-Rx counter, wait out
 * any in-flight xmit, then free the Tx/Rx ring contents and power the
 * PLL down.
 */
6216 static void rtl8169_down(struct net_device *dev)
6218 struct rtl8169_private *tp = netdev_priv(dev);
6219 void __iomem *ioaddr = tp->mmio_addr;
6221 del_timer_sync(&tp->timer);
6223 napi_disable(&tp->napi);
6224 netif_stop_queue(dev);
6226 rtl8169_hw_reset(tp);
6228 * At this point device interrupts can not be enabled in any function,
6229 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
6230 * and napi is disabled (rtl8169_poll).
6232 rtl8169_rx_missed(dev, ioaddr);
6234 /* Give a racing hard_start_xmit a few cycles to complete. */
6235 synchronize_sched();
6237 rtl8169_tx_clear(tp);
6239 rtl8169_rx_clear(tp);
6241 rtl_pll_power_down(tp);
/*
 * rtl8169_close - ndo_stop handler: update hardware counters, disable
 * deferred task processing, free the IRQ and the descriptor rings.
 * Holds a runtime-PM reference for the duration so the device stays
 * accessible.  (The rtl8169_down() call is elided in this extract.)
 */
6244 static int rtl8169_close(struct net_device *dev)
6246 struct rtl8169_private *tp = netdev_priv(dev);
6247 struct pci_dev *pdev = tp->pci_dev;
6249 pm_runtime_get_sync(&pdev->dev);
6251 /* Update counters before going down */
6252 rtl8169_update_counters(dev);
6255 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6258 rtl_unlock_work(tp);
6260 free_irq(pdev->irq, dev);
6262 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
6264 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
/* NULL ring pointers double as "closed" markers for runtime PM. */
6266 tp->TxDescArray = NULL;
6267 tp->RxDescArray = NULL;
6269 pm_runtime_put_sync(&pdev->dev);
6274 #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * rtl8169_netpoll - netconsole/netpoll hook: run the interrupt handler
 * directly with interrupts unavailable.
 */
6275 static void rtl8169_netpoll(struct net_device *dev)
6277 struct rtl8169_private *tp = netdev_priv(dev);
6279 rtl8169_interrupt(tp->pci_dev->irq, dev);
/*
 * rtl_open - ndo_open handler.
 *
 * Allocates the 256-byte-aligned Tx/Rx descriptor rings, initializes
 * the rings and deferred work, loads firmware, requests the IRQ
 * (shared unless MSI is in use), then enables NAPI, programs the PHY
 * and features, powers up the PLL and starts the queue.  On failure,
 * unwinds in reverse order via the error labels (labels themselves are
 * elided in this extract).
 */
6283 static int rtl_open(struct net_device *dev)
6285 struct rtl8169_private *tp = netdev_priv(dev);
6286 void __iomem *ioaddr = tp->mmio_addr;
6287 struct pci_dev *pdev = tp->pci_dev;
6288 int retval = -ENOMEM;
6290 pm_runtime_get_sync(&pdev->dev);
6293 * Rx and Tx descriptors needs 256 bytes alignment.
6294 * dma_alloc_coherent provides more.
6296 tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
6297 &tp->TxPhyAddr, GFP_KERNEL);
6298 if (!tp->TxDescArray)
6299 goto err_pm_runtime_put;
6301 tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
6302 &tp->RxPhyAddr, GFP_KERNEL);
6303 if (!tp->RxDescArray)
6306 retval = rtl8169_init_ring(dev);
6310 INIT_WORK(&tp->wk.work, rtl_task);
6314 rtl_request_firmware(tp);
6316 retval = request_irq(pdev->irq, rtl8169_interrupt,
6317 (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
6320 goto err_release_fw_2;
6324 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6326 napi_enable(&tp->napi);
6328 rtl8169_init_phy(dev, tp);
6330 __rtl8169_set_features(dev, dev->features);
6332 rtl_pll_power_up(tp);
6336 netif_start_queue(dev);
6338 rtl_unlock_work(tp);
6340 tp->saved_wolopts = 0;
6341 pm_runtime_put_noidle(&pdev->dev);
6343 rtl8169_check_link_status(dev, tp, ioaddr);
/* --- error unwind path --- */
6348 rtl_release_firmware(tp);
6349 rtl8169_rx_clear(tp);
6351 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
6353 tp->RxDescArray = NULL;
6355 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
6357 tp->TxDescArray = NULL;
6359 pm_runtime_put_noidle(&pdev->dev);
/*
 * rtl8169_get_stats64 - ndo_get_stats64 handler.
 *
 * Snapshots the per-device 64-bit Rx/Tx packet and byte counters under
 * their seqcount retry loops, folds in the error counters kept in
 * dev->stats, and refreshes rx_missed_errors from hardware while the
 * interface is up.
 */
6363 static struct rtnl_link_stats64 *
6364 rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6366 struct rtl8169_private *tp = netdev_priv(dev);
6367 void __iomem *ioaddr = tp->mmio_addr;
6370 if (netif_running(dev))
6371 rtl8169_rx_missed(dev, ioaddr);
6374 start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
6375 stats->rx_packets = tp->rx_stats.packets;
6376 stats->rx_bytes = tp->rx_stats.bytes;
6377 } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
6381 start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
6382 stats->tx_packets = tp->tx_stats.packets;
6383 stats->tx_bytes = tp->tx_stats.bytes;
6384 } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
6386 stats->rx_dropped = dev->stats.rx_dropped;
6387 stats->tx_dropped = dev->stats.tx_dropped;
6388 stats->rx_length_errors = dev->stats.rx_length_errors;
6389 stats->rx_errors = dev->stats.rx_errors;
6390 stats->rx_crc_errors = dev->stats.rx_crc_errors;
6391 stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
6392 stats->rx_missed_errors = dev->stats.rx_missed_errors;
/*
 * rtl8169_net_suspend - common suspend path: detach the netdev, stop
 * the queue and NAPI, disable deferred tasks and power the PLL down.
 * No-op when the interface is not running.
 */
6397 static void rtl8169_net_suspend(struct net_device *dev)
6399 struct rtl8169_private *tp = netdev_priv(dev);
6401 if (!netif_running(dev))
6404 netif_device_detach(dev);
6405 netif_stop_queue(dev);
6408 napi_disable(&tp->napi);
6409 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6410 rtl_unlock_work(tp);
6412 rtl_pll_power_down(tp);
/*
 * rtl8169_suspend - system-sleep .suspend/.freeze/.poweroff callback;
 * delegates to the common net-suspend path.
 */
6417 static int rtl8169_suspend(struct device *device)
6419 struct pci_dev *pdev = to_pci_dev(device);
6420 struct net_device *dev = pci_get_drvdata(pdev);
6422 rtl8169_net_suspend(dev);
/*
 * __rtl8169_resume - common resume path: reattach the netdev, power
 * the PLL up, re-enable NAPI and deferred tasks, then schedule a chip
 * reset to bring the hardware back to a known state.
 */
6427 static void __rtl8169_resume(struct net_device *dev)
6429 struct rtl8169_private *tp = netdev_priv(dev);
6431 netif_device_attach(dev);
6433 rtl_pll_power_up(tp);
6436 napi_enable(&tp->napi);
6437 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6438 rtl_unlock_work(tp);
6440 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/*
 * rtl8169_resume - system-sleep .resume/.thaw/.restore callback:
 * reprogram the PHY, then run the common resume path if the interface
 * was running.
 */
6443 static int rtl8169_resume(struct device *device)
6445 struct pci_dev *pdev = to_pci_dev(device);
6446 struct net_device *dev = pci_get_drvdata(pdev);
6447 struct rtl8169_private *tp = netdev_priv(dev);
6449 rtl8169_init_phy(dev, tp);
6451 if (netif_running(dev))
6452 __rtl8169_resume(dev);
/*
 * rtl8169_runtime_suspend - runtime-PM suspend: save the current WoL
 * options, arm wake on any event so the link can wake us, then run the
 * common net-suspend path.  Bails out if the device is closed (no Tx
 * ring allocated).
 */
6457 static int rtl8169_runtime_suspend(struct device *device)
6459 struct pci_dev *pdev = to_pci_dev(device);
6460 struct net_device *dev = pci_get_drvdata(pdev);
6461 struct rtl8169_private *tp = netdev_priv(dev);
6463 if (!tp->TxDescArray)
6467 tp->saved_wolopts = __rtl8169_get_wol(tp);
6468 __rtl8169_set_wol(tp, WAKE_ANY);
6469 rtl_unlock_work(tp);
6471 rtl8169_net_suspend(dev);
/*
 * rtl8169_runtime_resume - runtime-PM resume: restore the WoL options
 * saved at runtime suspend, reprogram the PHY and run the common
 * resume path.  Bails out if the device is closed.
 */
6476 static int rtl8169_runtime_resume(struct device *device)
6478 struct pci_dev *pdev = to_pci_dev(device);
6479 struct net_device *dev = pci_get_drvdata(pdev);
6480 struct rtl8169_private *tp = netdev_priv(dev);
6482 if (!tp->TxDescArray)
6486 __rtl8169_set_wol(tp, tp->saved_wolopts);
6487 tp->saved_wolopts = 0;
6488 rtl_unlock_work(tp);
6490 rtl8169_init_phy(dev, tp);
6492 __rtl8169_resume(dev);
/*
 * rtl8169_runtime_idle - runtime-PM idle check: refuse runtime suspend
 * (-EBUSY) while the interface is open (Tx ring allocated), allow it
 * otherwise.
 */
6497 static int rtl8169_runtime_idle(struct device *device)
6499 struct pci_dev *pdev = to_pci_dev(device);
6500 struct net_device *dev = pci_get_drvdata(pdev);
6501 struct rtl8169_private *tp = netdev_priv(dev);
6503 return tp->TxDescArray ? -EBUSY : 0;
/* Power-management callbacks: system sleep states all map onto the
 * suspend/resume pair; runtime PM has its own trio. */
6506 static const struct dev_pm_ops rtl8169_pm_ops = {
6507 .suspend = rtl8169_suspend,
6508 .resume = rtl8169_resume,
6509 .freeze = rtl8169_suspend,
6510 .thaw = rtl8169_resume,
6511 .poweroff = rtl8169_suspend,
6512 .restore = rtl8169_resume,
6513 .runtime_suspend = rtl8169_runtime_suspend,
6514 .runtime_resume = rtl8169_runtime_resume,
6515 .runtime_idle = rtl8169_runtime_idle,
6518 #define RTL8169_PM_OPS (&rtl8169_pm_ops)
6520 #else /* !CONFIG_PM */
6522 #define RTL8169_PM_OPS NULL
6524 #endif /* !CONFIG_PM */
/*
 * rtl_wol_shutdown_quirk - 8168b-family shutdown quirk: WoL only works
 * if the receiver stays enabled, so stop bus mastering but re-enable
 * CmdRxEnb for the affected chip versions.
 */
6526 static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
6528 void __iomem *ioaddr = tp->mmio_addr;
6530 /* WoL fails with 8168b when the receiver is disabled. */
6531 switch (tp->mac_version) {
6532 case RTL_GIGA_MAC_VER_11:
6533 case RTL_GIGA_MAC_VER_12:
6534 case RTL_GIGA_MAC_VER_17:
6535 pci_clear_master(tp->pci_dev);
6537 RTL_W8(ChipCmd, CmdRxEnb);
/*
 * rtl_shutdown - PCI .shutdown callback: quiesce the NIC, restore the
 * permanent MAC address, reset the chip, and on power-off either arm
 * WoL (with its quirks) or put the device into D3hot.
 */
6546 static void rtl_shutdown(struct pci_dev *pdev)
6548 struct net_device *dev = pci_get_drvdata(pdev);
6549 struct rtl8169_private *tp = netdev_priv(dev);
6550 struct device *d = &pdev->dev;
6552 pm_runtime_get_sync(d);
6554 rtl8169_net_suspend(dev);
6556 /* Restore original MAC address */
6557 rtl_rar_set(tp, dev->perm_addr);
6559 rtl8169_hw_reset(tp);
6561 if (system_state == SYSTEM_POWER_OFF) {
6562 if (__rtl8169_get_wol(tp) & WAKE_ANY) {
6563 rtl_wol_suspend_quirk(tp);
6564 rtl_wol_shutdown_quirk(tp);
6567 pci_wake_from_d3(pdev, true);
6568 pci_set_power_state(pdev, PCI_D3hot);
6571 pm_runtime_put_noidle(d);
/*
 * rtl_remove_one - PCI .remove callback: stop the embedded driver on
 * DASH-capable chips, cancel deferred work, unregister the netdev,
 * release firmware, restore the permanent MAC address and free all
 * board resources.
 */
6574 static void rtl_remove_one(struct pci_dev *pdev)
6576 struct net_device *dev = pci_get_drvdata(pdev);
6577 struct rtl8169_private *tp = netdev_priv(dev);
/* Chips with an OOB management engine need an explicit driver stop. */
6579 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6580 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6581 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6582 rtl8168_driver_stop(tp);
6585 cancel_work_sync(&tp->wk.work);
6587 netif_napi_del(&tp->napi);
6589 unregister_netdev(dev);
6591 rtl_release_firmware(tp);
/* Balance the probe-time pm_runtime_put_noidle(). */
6593 if (pci_dev_run_wake(pdev))
6594 pm_runtime_get_noresume(&pdev->dev);
6596 /* restore original MAC address */
6597 rtl_rar_set(tp, dev->perm_addr);
6599 rtl_disable_msi(pdev, tp);
6600 rtl8169_release_board(pdev, dev, tp->mmio_addr);
6601 pci_set_drvdata(pdev, NULL);
/* Netdevice operations table wiring the driver entry points. */
6604 static const struct net_device_ops rtl_netdev_ops = {
6605 .ndo_open = rtl_open,
6606 .ndo_stop = rtl8169_close,
6607 .ndo_get_stats64 = rtl8169_get_stats64,
6608 .ndo_start_xmit = rtl8169_start_xmit,
6609 .ndo_tx_timeout = rtl8169_tx_timeout,
6610 .ndo_validate_addr = eth_validate_addr,
6611 .ndo_change_mtu = rtl8169_change_mtu,
6612 .ndo_fix_features = rtl8169_fix_features,
6613 .ndo_set_features = rtl8169_set_features,
6614 .ndo_set_mac_address = rtl_set_mac_address,
6615 .ndo_do_ioctl = rtl8169_ioctl,
6616 .ndo_set_rx_mode = rtl_set_rx_mode,
6617 #ifdef CONFIG_NET_POLL_CONTROLLER
6618 .ndo_poll_controller = rtl8169_netpoll,
/* Per-family configuration: hw_start routine, BAR region, slow-event
 * mask, feature flags and fallback chip version, indexed by the
 * pci_device_id driver_data (8169 / 8168 / 8101 families). */
6623 static const struct rtl_cfg_info {
6624 void (*hw_start)(struct net_device *);
6625 unsigned int region;
6630 } rtl_cfg_infos [] = {
6632 .hw_start = rtl_hw_start_8169,
6635 .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
6636 .features = RTL_FEATURE_GMII,
6637 .default_ver = RTL_GIGA_MAC_VER_01,
6640 .hw_start = rtl_hw_start_8168,
6643 .event_slow = SYSErr | LinkChg | RxOverflow,
6644 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
6645 .default_ver = RTL_GIGA_MAC_VER_11,
6648 .hw_start = rtl_hw_start_8101,
6651 .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
6653 .features = RTL_FEATURE_MSI,
6654 .default_ver = RTL_GIGA_MAC_VER_13,
/*
 * rtl_try_msi - try to enable MSI when the chip family supports it.
 * Falls back to INTx (legacy interrupts) when pci_enable_msi() fails.
 * Returns RTL_FEATURE_MSI if MSI is active, otherwise 0 (the elided
 * "msi" initialization).  Caller must hold Cfg9346 unlocked; the
 * Config2 MSIEnable bit is only rewritten on old (<= VER_06) chips.
 */
6658 /* Cfg9346_Unlock assumed. */
6659 static unsigned rtl_try_msi(struct rtl8169_private *tp,
6660 const struct rtl_cfg_info *cfg)
6662 void __iomem *ioaddr = tp->mmio_addr;
6666 cfg2 = RTL_R8(Config2) & ~MSIEnable;
6667 if (cfg->features & RTL_FEATURE_MSI) {
6668 if (pci_enable_msi(tp->pci_dev)) {
6669 netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
6672 msi = RTL_FEATURE_MSI;
6675 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
6676 RTL_W8(Config2, cfg2);
/* Poll condition: MCU reports the internal link list as ready. */
6680 DECLARE_RTL_COND(rtl_link_list_ready_cond)
6682 void __iomem *ioaddr = tp->mmio_addr;
6684 return RTL_R8(MCU) & LINK_LIST_RDY;
/* Poll condition: both Rx and Tx FIFOs are reported empty by the MCU. */
6687 DECLARE_RTL_COND(rtl_rxtx_empty_cond)
6689 void __iomem *ioaddr = tp->mmio_addr;
6691 return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
/*
 * rtl_hw_init_8168g - one-time probe initialization for 8168g:
 * gate RXDV, wait for Tx config and both FIFOs to drain, take the MAC
 * out of OOB mode and step the 0xe8de MAC-OCP register sequence,
 * waiting for the link list to come ready after each write.
 * NOTE(review): the bit manipulations applied to "data" between the
 * OCP read and write are elided in this extract.
 */
6694 static void rtl_hw_init_8168g(struct rtl8169_private *tp)
6696 void __iomem *ioaddr = tp->mmio_addr;
6699 tp->ocp_base = OCP_STD_PHY_BASE;
6701 RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);
6703 if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
6706 if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
6709 RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
6711 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
6713 data = r8168_mac_ocp_read(tp, 0xe8de);
6715 r8168_mac_ocp_write(tp, 0xe8de, data);
6717 if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
6720 data = r8168_mac_ocp_read(tp, 0xe8de);
6722 r8168_mac_ocp_write(tp, 0xe8de, data);
6724 if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
/*
 * rtl_hw_initialize - dispatch chip-specific one-time probe init;
 * currently only the 8168g family (VER_40/41) needs any.
 */
6728 static void rtl_hw_initialize(struct rtl8169_private *tp)
6730 switch (tp->mac_version) {
6731 case RTL_GIGA_MAC_VER_40:
6732 case RTL_GIGA_MAC_VER_41:
6733 rtl_hw_init_8168g(tp);
/*
 * rtl_init_one - PCI .probe callback.
 *
 * Allocates the netdev, enables and maps the PCI device (disabling
 * ASPM, which is known to hang some boards), configures DMA masks
 * (64-bit DAC only with use_dac), identifies the chip, programs
 * WoL/MSI, wires the PHY/ops tables, reads the MAC address, registers
 * the netdev and prints the probe banner.  Errors unwind through the
 * err_out_* labels at the bottom (most labels elided in this extract).
 */
6742 rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6744 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
6745 const unsigned int region = cfg->region;
6746 struct rtl8169_private *tp;
6747 struct mii_if_info *mii;
6748 struct net_device *dev;
6749 void __iomem *ioaddr;
6753 if (netif_msg_drv(&debug)) {
6754 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
6755 MODULENAME, RTL8169_VERSION);
6758 dev = alloc_etherdev(sizeof (*tp));
6764 SET_NETDEV_DEV(dev, &pdev->dev);
6765 dev->netdev_ops = &rtl_netdev_ops;
6766 tp = netdev_priv(dev);
6769 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
/* Hook the generic MII library to this driver's MDIO accessors. */
6773 mii->mdio_read = rtl_mdio_read;
6774 mii->mdio_write = rtl_mdio_write;
6775 mii->phy_id_mask = 0x1f;
6776 mii->reg_num_mask = 0x1f;
6777 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
6779 /* disable ASPM completely as that cause random device stop working
6780 * problems as well as full system hangs for some PCIe devices users */
6781 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6782 PCIE_LINK_STATE_CLKPM);
6784 /* enable device (incl. PCI PM wakeup and hotplug setup) */
6785 rc = pci_enable_device(pdev);
6787 netif_err(tp, probe, dev, "enable failure\n");
6788 goto err_out_free_dev_1;
/* MWI is best-effort; failure is only informational. */
6791 if (pci_set_mwi(pdev) < 0)
6792 netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
6794 /* make sure PCI base addr 1 is MMIO */
6795 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
6796 netif_err(tp, probe, dev,
6797 "region #%d not an MMIO resource, aborting\n",
6803 /* check for weird/broken PCI region reporting */
6804 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
6805 netif_err(tp, probe, dev,
6806 "Invalid PCI region size(s), aborting\n");
6811 rc = pci_request_regions(pdev, MODULENAME);
6813 netif_err(tp, probe, dev, "could not request regions\n");
6817 tp->cp_cmd = RxChkSum;
/* 64-bit DAC only when the arch has wide dma_addr_t AND use_dac=1. */
6819 if ((sizeof(dma_addr_t) > 4) &&
6820 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
6821 tp->cp_cmd |= PCIDAC;
6822 dev->features |= NETIF_F_HIGHDMA;
6824 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6826 netif_err(tp, probe, dev, "DMA configuration failed\n");
6827 goto err_out_free_res_3;
6831 /* ioremap MMIO region */
6832 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
6834 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
6836 goto err_out_free_res_3;
6838 tp->mmio_addr = ioaddr;
6840 if (!pci_is_pcie(pdev))
6841 netif_info(tp, probe, dev, "not PCI Express\n");
6843 /* Identify chip attached to board */
6844 rtl8169_get_mac_version(tp, dev, cfg->default_ver);
6848 rtl_irq_disable(tp);
6850 rtl_hw_initialize(tp);
6854 rtl_ack_events(tp, 0xffff);
6856 pci_set_master(pdev);
6859 * Pretend we are using VLANs; This bypasses a nasty bug where
6860 * Interrupts stop flowing on high load on 8110SCd controllers.
6862 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6863 tp->cp_cmd |= RxVlan;
6865 rtl_init_mdio_ops(tp);
6866 rtl_init_pll_power_ops(tp);
6867 rtl_init_jumbo_ops(tp);
6868 rtl_init_csi_ops(tp);
6870 rtl8169_print_mac_version(tp);
6872 chipset = tp->mac_version;
6873 tp->txd_version = rtl_chip_infos[chipset].txd_version;
/* Probe WoL capability and MSI under Cfg9346 unlock. */
6875 RTL_W8(Cfg9346, Cfg9346_Unlock);
6876 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
6877 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
6878 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
6879 tp->features |= RTL_FEATURE_WOL;
6880 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
6881 tp->features |= RTL_FEATURE_WOL;
6882 tp->features |= rtl_try_msi(tp, cfg);
6883 RTL_W8(Cfg9346, Cfg9346_Lock);
/* TBI (fiber) vs XMII (copper) accessor selection. */
6885 if (rtl_tbi_enabled(tp)) {
6886 tp->set_speed = rtl8169_set_speed_tbi;
6887 tp->get_settings = rtl8169_gset_tbi;
6888 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
6889 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
6890 tp->link_ok = rtl8169_tbi_link_ok;
6891 tp->do_ioctl = rtl_tbi_ioctl;
6893 tp->set_speed = rtl8169_set_speed_xmii;
6894 tp->get_settings = rtl8169_gset_xmii;
6895 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
6896 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
6897 tp->link_ok = rtl8169_xmii_link_ok;
6898 tp->do_ioctl = rtl_xmii_ioctl;
6901 mutex_init(&tp->wk.mutex);
6903 /* Get MAC address */
6904 for (i = 0; i < ETH_ALEN; i++)
6905 dev->dev_addr[i] = RTL_R8(MAC0 + i);
6906 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6908 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
6909 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
6911 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
6913 /* don't enable SG, IP_CSUM and TSO by default - it might not work
6914 * properly for all devices */
6915 dev->features |= NETIF_F_RXCSUM |
6916 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6918 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6919 NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6920 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6923 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6924 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
6925 dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
6927 dev->hw_features |= NETIF_F_RXALL;
6928 dev->hw_features |= NETIF_F_RXFCS;
6930 tp->hw_start = cfg->hw_start;
6931 tp->event_slow = cfg->event_slow;
6933 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
6934 ~(RxBOVF | RxFOVF) : ~0;
6936 init_timer(&tp->timer);
6937 tp->timer.data = (unsigned long) dev;
6938 tp->timer.function = rtl8169_phy_timer;
6940 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
6942 rc = register_netdev(dev);
6946 pci_set_drvdata(pdev, dev);
6948 netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
6949 rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
6950 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
6951 if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
6952 netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
6953 "tx checksumming: %s]\n",
6954 rtl_chip_infos[chipset].jumbo_max,
6955 rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
/* DASH-capable chips need an explicit driver start. */
6958 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6959 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6960 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6961 rtl8168_driver_start(tp);
6964 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
6966 if (pci_dev_run_wake(pdev))
6967 pm_runtime_put_noidle(&pdev->dev);
6969 netif_carrier_off(dev);
/* --- error unwind path --- */
6975 netif_napi_del(&tp->napi);
6976 rtl_disable_msi(pdev, tp);
6979 pci_release_regions(pdev);
6981 pci_clear_mwi(pdev);
6982 pci_disable_device(pdev);
/* PCI driver glue: probe/remove/shutdown entry points and PM ops;
 * module_pci_driver() generates the module init/exit boilerplate. */
6988 static struct pci_driver rtl8169_pci_driver = {
6990 .id_table = rtl8169_pci_tbl,
6991 .probe = rtl_init_one,
6992 .remove = rtl_remove_one,
6993 .shutdown = rtl_shutdown,
6994 .driver.pm = RTL8169_PM_OPS,
6997 module_pci_driver(rtl8169_pci_driver);