2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 * Copyright (c) a lot of people too. Please respect their work.
8 * See MAINTAINERS file for support contact information.
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
23 #include <linux/tcp.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/firmware.h>
29 #include <linux/pci-aspm.h>
30 #include <linux/prefetch.h>
35 #define RTL8169_VERSION "2.3LK-NAPI"
36 #define MODULENAME "r8169"
37 #define PFX MODULENAME ": "
39 #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
40 #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
41 #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
42 #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
43 #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
44 #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45 #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46 #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47 #define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48 #define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
49 #define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
50 #define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
53 #define assert(expr) \
55 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
56 #expr,__FILE__,__func__,__LINE__); \
58 #define dprintk(fmt, args...) \
59 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
61 #define assert(expr) do {} while (0)
62 #define dprintk(fmt, args...) do {} while (0)
63 #endif /* RTL8169_DEBUG */
65 #define R8169_MSG_DEFAULT \
66 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
68 #define TX_SLOTS_AVAIL(tp) \
69 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
71 /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
72 #define TX_FRAGS_READY_FOR(tp,nr_frags) \
73 (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
75 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
76 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
77 static const int multicast_filter_limit = 32;
79 #define MAX_READ_REQUEST_SHIFT 12
80 #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
81 #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
83 #define R8169_REGS_SIZE 256
84 #define R8169_NAPI_WEIGHT 64
85 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
86 #define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
87 #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
88 #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
90 #define RTL8169_TX_TIMEOUT (6*HZ)
91 #define RTL8169_PHY_TIMEOUT (10*HZ)
93 /* write/read MMIO register */
94 #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
95 #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
96 #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
97 #define RTL_R8(reg) readb (ioaddr + (reg))
98 #define RTL_R16(reg) readw (ioaddr + (reg))
99 #define RTL_R32(reg) readl (ioaddr + (reg))
102 RTL_GIGA_MAC_VER_01 = 0,
143 RTL_GIGA_MAC_NONE = 0xff,
146 enum rtl_tx_desc_version {
151 #define JUMBO_1K ETH_DATA_LEN
152 #define JUMBO_4K (4*1024 - ETH_HLEN - 2)
153 #define JUMBO_6K (6*1024 - ETH_HLEN - 2)
154 #define JUMBO_7K (7*1024 - ETH_HLEN - 2)
155 #define JUMBO_9K (9*1024 - ETH_HLEN - 2)
157 #define _R(NAME,TD,FW,SZ,B) { \
165 static const struct {
167 enum rtl_tx_desc_version txd_version;
171 } rtl_chip_infos[] = {
173 [RTL_GIGA_MAC_VER_01] =
174 _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
175 [RTL_GIGA_MAC_VER_02] =
176 _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
177 [RTL_GIGA_MAC_VER_03] =
178 _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
179 [RTL_GIGA_MAC_VER_04] =
180 _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
181 [RTL_GIGA_MAC_VER_05] =
182 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
183 [RTL_GIGA_MAC_VER_06] =
184 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
186 [RTL_GIGA_MAC_VER_07] =
187 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
188 [RTL_GIGA_MAC_VER_08] =
189 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
190 [RTL_GIGA_MAC_VER_09] =
191 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
192 [RTL_GIGA_MAC_VER_10] =
193 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
194 [RTL_GIGA_MAC_VER_11] =
195 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
196 [RTL_GIGA_MAC_VER_12] =
197 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
198 [RTL_GIGA_MAC_VER_13] =
199 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
200 [RTL_GIGA_MAC_VER_14] =
201 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
202 [RTL_GIGA_MAC_VER_15] =
203 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
204 [RTL_GIGA_MAC_VER_16] =
205 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
206 [RTL_GIGA_MAC_VER_17] =
207 _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
208 [RTL_GIGA_MAC_VER_18] =
209 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
210 [RTL_GIGA_MAC_VER_19] =
211 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
212 [RTL_GIGA_MAC_VER_20] =
213 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
214 [RTL_GIGA_MAC_VER_21] =
215 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
216 [RTL_GIGA_MAC_VER_22] =
217 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
218 [RTL_GIGA_MAC_VER_23] =
219 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
220 [RTL_GIGA_MAC_VER_24] =
221 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
222 [RTL_GIGA_MAC_VER_25] =
223 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
225 [RTL_GIGA_MAC_VER_26] =
226 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
228 [RTL_GIGA_MAC_VER_27] =
229 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
230 [RTL_GIGA_MAC_VER_28] =
231 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
232 [RTL_GIGA_MAC_VER_29] =
233 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
235 [RTL_GIGA_MAC_VER_30] =
236 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
238 [RTL_GIGA_MAC_VER_31] =
239 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
240 [RTL_GIGA_MAC_VER_32] =
241 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
243 [RTL_GIGA_MAC_VER_33] =
244 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
246 [RTL_GIGA_MAC_VER_34] =
247 _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
249 [RTL_GIGA_MAC_VER_35] =
250 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
252 [RTL_GIGA_MAC_VER_36] =
253 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
255 [RTL_GIGA_MAC_VER_37] =
256 _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
258 [RTL_GIGA_MAC_VER_38] =
259 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
261 [RTL_GIGA_MAC_VER_39] =
262 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
264 [RTL_GIGA_MAC_VER_40] =
265 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
267 [RTL_GIGA_MAC_VER_41] =
268 _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
278 static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
279 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
280 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
281 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
282 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
283 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
284 { PCI_VENDOR_ID_DLINK, 0x4300,
285 PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
286 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
287 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
288 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
289 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
290 { PCI_VENDOR_ID_LINKSYS, 0x1032,
291 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
293 PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
297 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
299 static int rx_buf_sz = 16383;
306 MAC0 = 0, /* Ethernet hardware address. */
308 MAR0 = 8, /* Multicast filter. */
309 CounterAddrLow = 0x10,
310 CounterAddrHigh = 0x14,
311 TxDescStartAddrLow = 0x20,
312 TxDescStartAddrHigh = 0x24,
313 TxHDescStartAddrLow = 0x28,
314 TxHDescStartAddrHigh = 0x2c,
323 #define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
324 #define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
327 #define RX128_INT_EN (1 << 15) /* 8111c and later */
328 #define RX_MULTI_EN (1 << 14) /* 8111c only */
329 #define RXCFG_FIFO_SHIFT 13
330 /* No threshold before first PCI xfer */
331 #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
332 #define RXCFG_DMA_SHIFT 8
333 /* Unlimited maximum PCI burst. */
334 #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
341 #define PME_SIGNAL (1 << 5) /* 8168c and later */
352 RxDescAddrLow = 0xe4,
353 RxDescAddrHigh = 0xe8,
354 EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
356 #define NoEarlyTx 0x3f /* Max value : no early transmit. */
358 MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
360 #define TxPacketMax (8064 >> 7)
361 #define EarlySize 0x27
364 FuncEventMask = 0xf4,
365 FuncPresetState = 0xf8,
366 FuncForceEvent = 0xfc,
369 enum rtl8110_registers {
375 enum rtl8168_8101_registers {
378 #define CSIAR_FLAG 0x80000000
379 #define CSIAR_WRITE_CMD 0x80000000
380 #define CSIAR_BYTE_ENABLE 0x0f
381 #define CSIAR_BYTE_ENABLE_SHIFT 12
382 #define CSIAR_ADDR_MASK 0x0fff
383 #define CSIAR_FUNC_CARD 0x00000000
384 #define CSIAR_FUNC_SDIO 0x00010000
385 #define CSIAR_FUNC_NIC 0x00020000
388 #define EPHYAR_FLAG 0x80000000
389 #define EPHYAR_WRITE_CMD 0x80000000
390 #define EPHYAR_REG_MASK 0x1f
391 #define EPHYAR_REG_SHIFT 16
392 #define EPHYAR_DATA_MASK 0xffff
394 #define PFM_EN (1 << 6)
396 #define FIX_NAK_1 (1 << 4)
397 #define FIX_NAK_2 (1 << 3)
400 #define NOW_IS_OOB (1 << 7)
401 #define TX_EMPTY (1 << 5)
402 #define RX_EMPTY (1 << 4)
403 #define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
404 #define EN_NDP (1 << 3)
405 #define EN_OOB_RESET (1 << 2)
406 #define LINK_LIST_RDY (1 << 1)
408 #define EFUSEAR_FLAG 0x80000000
409 #define EFUSEAR_WRITE_CMD 0x80000000
410 #define EFUSEAR_READ_CMD 0x00000000
411 #define EFUSEAR_REG_MASK 0x03ff
412 #define EFUSEAR_REG_SHIFT 8
413 #define EFUSEAR_DATA_MASK 0xff
416 enum rtl8168_registers {
421 #define ERIAR_FLAG 0x80000000
422 #define ERIAR_WRITE_CMD 0x80000000
423 #define ERIAR_READ_CMD 0x00000000
424 #define ERIAR_ADDR_BYTE_ALIGN 4
425 #define ERIAR_TYPE_SHIFT 16
426 #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
427 #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
428 #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
429 #define ERIAR_MASK_SHIFT 12
430 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
431 #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
432 #define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
433 #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
434 EPHY_RXER_NUM = 0x7c,
435 OCPDR = 0xb0, /* OCP GPHY access */
436 #define OCPDR_WRITE_CMD 0x80000000
437 #define OCPDR_READ_CMD 0x00000000
438 #define OCPDR_REG_MASK 0x7f
439 #define OCPDR_GPHY_REG_SHIFT 16
440 #define OCPDR_DATA_MASK 0xffff
442 #define OCPAR_FLAG 0x80000000
443 #define OCPAR_GPHY_WRITE_CMD 0x8000f060
444 #define OCPAR_GPHY_READ_CMD 0x0000f060
446 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
447 MISC = 0xf0, /* 8168e only. */
448 #define TXPLA_RST (1 << 29)
449 #define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
450 #define PWM_EN (1 << 22)
451 #define RXDV_GATED_EN (1 << 19)
452 #define EARLY_TALLY_EN (1 << 16)
453 #define FORCE_CLK (1 << 15) /* force clock request */
456 enum rtl_register_content {
457 /* InterruptStatusBits */
461 TxDescUnavail = 0x0080,
485 /* TXPoll register p.5 */
486 HPQ = 0x80, /* Poll cmd on the high prio queue */
487 NPQ = 0x40, /* Poll cmd on the low prio queue */
488 FSWInt = 0x01, /* Forced software interrupt */
492 Cfg9346_Unlock = 0xc0,
497 AcceptBroadcast = 0x08,
498 AcceptMulticast = 0x04,
500 AcceptAllPhys = 0x01,
501 #define RX_CONFIG_ACCEPT_MASK 0x3f
504 TxInterFrameGapShift = 24,
505 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
507 /* Config1 register p.24 */
510 Speed_down = (1 << 4),
514 PMEnable = (1 << 0), /* Power Management Enable */
516 /* Config2 register p. 25 */
517 ClkReqEn = (1 << 7), /* Clock Request Enable */
518 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
519 PCI_Clock_66MHz = 0x01,
520 PCI_Clock_33MHz = 0x00,
522 /* Config3 register p.25 */
523 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
524 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
525 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
526 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
528 /* Config4 register */
529 Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */
531 /* Config5 register p.27 */
532 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
533 MWF = (1 << 5), /* Accept Multicast wakeup frame */
534 UWF = (1 << 4), /* Accept Unicast wakeup frame */
536 LanWake = (1 << 1), /* LanWake enable/disable */
537 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
538 ASPM_en = (1 << 0), /* ASPM enable */
541 TBIReset = 0x80000000,
542 TBILoopback = 0x40000000,
543 TBINwEnable = 0x20000000,
544 TBINwRestart = 0x10000000,
545 TBILinkOk = 0x02000000,
546 TBINwComplete = 0x01000000,
549 EnableBist = (1 << 15), // 8168 8101
550 Mac_dbgo_oe = (1 << 14), // 8168 8101
551 Normal_mode = (1 << 13), // unused
552 Force_half_dup = (1 << 12), // 8168 8101
553 Force_rxflow_en = (1 << 11), // 8168 8101
554 Force_txflow_en = (1 << 10), // 8168 8101
555 Cxpl_dbg_sel = (1 << 9), // 8168 8101
556 ASF = (1 << 8), // 8168 8101
557 PktCntrDisable = (1 << 7), // 8168 8101
558 Mac_dbgo_sel = 0x001c, // 8168
563 INTT_0 = 0x0000, // 8168
564 INTT_1 = 0x0001, // 8168
565 INTT_2 = 0x0002, // 8168
566 INTT_3 = 0x0003, // 8168
568 /* rtl8169_PHYstatus */
579 TBILinkOK = 0x02000000,
581 /* DumpCounterCommand */
586 /* First doubleword. */
587 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
588 RingEnd = (1 << 30), /* End of descriptor ring */
589 FirstFrag = (1 << 29), /* First segment of a packet */
590 LastFrag = (1 << 28), /* Final segment of a packet */
594 enum rtl_tx_desc_bit {
595 /* First doubleword. */
596 TD_LSO = (1 << 27), /* Large Send Offload */
597 #define TD_MSS_MAX 0x07ffu /* MSS value */
599 /* Second doubleword. */
600 TxVlanTag = (1 << 17), /* Add VLAN tag */
603 /* 8169, 8168b and 810x except 8102e. */
604 enum rtl_tx_desc_bit_0 {
605 /* First doubleword. */
606 #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
607 TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
608 TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
609 TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
612 /* 8102e, 8168c and beyond. */
613 enum rtl_tx_desc_bit_1 {
614 /* Second doubleword. */
615 #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
616 TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
617 TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
618 TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
621 static const struct rtl_tx_desc_info {
628 } tx_desc_info [] = {
631 .udp = TD0_IP_CS | TD0_UDP_CS,
632 .tcp = TD0_IP_CS | TD0_TCP_CS
634 .mss_shift = TD0_MSS_SHIFT,
639 .udp = TD1_IP_CS | TD1_UDP_CS,
640 .tcp = TD1_IP_CS | TD1_TCP_CS
642 .mss_shift = TD1_MSS_SHIFT,
647 enum rtl_rx_desc_bit {
649 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
650 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
652 #define RxProtoUDP (PID1)
653 #define RxProtoTCP (PID0)
654 #define RxProtoIP (PID1 | PID0)
655 #define RxProtoMask RxProtoIP
657 IPFail = (1 << 16), /* IP checksum failed */
658 UDPFail = (1 << 15), /* UDP/IP checksum failed */
659 TCPFail = (1 << 14), /* TCP/IP checksum failed */
660 RxVlanTag = (1 << 16), /* VLAN tag available */
663 #define RsvdMask 0x3fffc000
680 u8 __pad[sizeof(void *) - sizeof(u32)];
684 RTL_FEATURE_WOL = (1 << 0),
685 RTL_FEATURE_MSI = (1 << 1),
686 RTL_FEATURE_GMII = (1 << 2),
687 RTL_FEATURE_FW_LOADED = (1 << 3),
690 struct rtl8169_counters {
697 __le32 tx_one_collision;
698 __le32 tx_multi_collision;
707 RTL_FLAG_TASK_ENABLED,
708 RTL_FLAG_TASK_SLOW_PENDING,
709 RTL_FLAG_TASK_RESET_PENDING,
710 RTL_FLAG_TASK_PHY_PENDING,
714 struct rtl8169_stats {
717 struct u64_stats_sync syncp;
720 struct rtl8169_private {
721 void __iomem *mmio_addr; /* memory map physical address */
722 struct pci_dev *pci_dev;
723 struct net_device *dev;
724 struct napi_struct napi;
728 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
729 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
732 struct rtl8169_stats rx_stats;
733 struct rtl8169_stats tx_stats;
734 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
735 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
736 dma_addr_t TxPhyAddr;
737 dma_addr_t RxPhyAddr;
738 void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
739 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
740 struct timer_list timer;
746 void (*write)(struct rtl8169_private *, int, int);
747 int (*read)(struct rtl8169_private *, int);
750 struct pll_power_ops {
751 void (*down)(struct rtl8169_private *);
752 void (*up)(struct rtl8169_private *);
756 void (*enable)(struct rtl8169_private *);
757 void (*disable)(struct rtl8169_private *);
761 void (*write)(struct rtl8169_private *, int, int);
762 u32 (*read)(struct rtl8169_private *, int);
765 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
766 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
767 void (*phy_reset_enable)(struct rtl8169_private *tp);
768 void (*hw_start)(struct net_device *);
769 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
770 unsigned int (*link_ok)(void __iomem *);
771 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
774 DECLARE_BITMAP(flags, RTL_FLAG_MAX);
776 struct work_struct work;
781 struct mii_if_info mii;
782 struct rtl8169_counters counters;
787 const struct firmware *fw;
789 #define RTL_VER_SIZE 32
791 char version[RTL_VER_SIZE];
793 struct rtl_fw_phy_action {
798 #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
803 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
804 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
805 module_param(use_dac, int, 0);
806 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
807 module_param_named(debug, debug.msg_enable, int, 0);
808 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
809 MODULE_LICENSE("GPL");
810 MODULE_VERSION(RTL8169_VERSION);
811 MODULE_FIRMWARE(FIRMWARE_8168D_1);
812 MODULE_FIRMWARE(FIRMWARE_8168D_2);
813 MODULE_FIRMWARE(FIRMWARE_8168E_1);
814 MODULE_FIRMWARE(FIRMWARE_8168E_2);
815 MODULE_FIRMWARE(FIRMWARE_8168E_3);
816 MODULE_FIRMWARE(FIRMWARE_8105E_1);
817 MODULE_FIRMWARE(FIRMWARE_8168F_1);
818 MODULE_FIRMWARE(FIRMWARE_8168F_2);
819 MODULE_FIRMWARE(FIRMWARE_8402_1);
820 MODULE_FIRMWARE(FIRMWARE_8411_1);
821 MODULE_FIRMWARE(FIRMWARE_8106E_1);
822 MODULE_FIRMWARE(FIRMWARE_8168G_1);
/*
 * Serialize access to the driver's deferred-work state (tp->wk) behind
 * its mutex.
 * NOTE(review): this extraction of the file has dropped lines (braces,
 * blank lines) throughout; code below is kept byte-identical.
 */
824 static void rtl_lock_work(struct rtl8169_private *tp)
826 	mutex_lock(&tp->wk.mutex);
/* Counterpart of rtl_lock_work(). */
829 static void rtl_unlock_work(struct rtl8169_private *tp)
831 	mutex_unlock(&tp->wk.mutex);
/*
 * Set the PCIe Max_Read_Request_Size field in the device control
 * register to @force (clears PCI_EXP_DEVCTL_READRQ, then sets @force).
 */
834 static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
836 	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
837 					   PCI_EXP_DEVCTL_READRQ, force);
841 bool (*check)(struct rtl8169_private *);
/* Busy-wait delay helper; body not visible in this extraction
 * (presumably udelay(d) — TODO confirm against full source). */
845 static void rtl_udelay(unsigned int d)
/*
 * Poll condition @c up to @n times, calling @delay(@d) between attempts,
 * until c->check(tp) matches the wanted level. On timeout, log which
 * condition never reached the expected state.
 * NOTE(review): the delay call, early-return and final return lines are
 * missing from this extraction.
 */
850 static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
851 			  void (*delay)(unsigned int), unsigned int d, int n,
856 	for (i = 0; i < n; i++) {
858 		if (c->check(tp) == high)
861 	netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
862 		  c->msg, !high, n, d);
/*
 * Convenience wrappers around rtl_loop_wait(): the four combinations of
 * delay granularity (udelay vs. msleep) and target level (high vs. low).
 */
866 static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
867 				      const struct rtl_cond *c,
868 				      unsigned int d, int n)
870 	return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
873 static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
874 				     const struct rtl_cond *c,
875 				     unsigned int d, int n)
877 	return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
880 static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
881 				      const struct rtl_cond *c,
882 				      unsigned int d, int n)
884 	return rtl_loop_wait(tp, c, msleep, d, n, true);
887 static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
888 				     const struct rtl_cond *c,
889 				     unsigned int d, int n)
891 	return rtl_loop_wait(tp, c, msleep, d, n, false);
/*
 * Declare a poll condition: forward-declares name##_check(), defines a
 * const struct rtl_cond wired to it, and opens the definition of the
 * check function itself (the caller supplies the body).
 * NOTE(review): the .msg initializer / closing lines of the macro are
 * missing from this extraction.
 */
894 #define DECLARE_RTL_COND(name)				\
895 static bool name ## _check(struct rtl8169_private *);	\
897 static const struct rtl_cond name = {			\
898 	.check = name ## _check,			\
902 static bool name ## _check(struct rtl8169_private *tp)
/* Condition: OCPAR busy/flag bit is set. */
904 DECLARE_RTL_COND(rtl_ocpar_cond)
906 	void __iomem *ioaddr = tp->mmio_addr;
908 	return RTL_R32(OCPAR) & OCPAR_FLAG;
/*
 * Read an OCP register: program byte-enable mask and 12-bit address into
 * OCPAR, then poll the flag high. The data fetch on success (presumably
 * RTL_R32(OCPDR)) is on a line missing from this extraction.
 */
911 static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
913 	void __iomem *ioaddr = tp->mmio_addr;
915 	RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
917 	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
/*
 * Write an OCP register: data first into OCPDR, then address + write
 * flag into OCPAR; wait for the flag to clear.
 */
921 static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
923 	void __iomem *ioaddr = tp->mmio_addr;
925 	RTL_W32(OCPDR, data);
926 	RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
928 	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
/* Condition: ERIAR busy/flag bit is set. */
931 DECLARE_RTL_COND(rtl_eriar_cond)
933 	void __iomem *ioaddr = tp->mmio_addr;
935 	return RTL_R32(ERIAR) & ERIAR_FLAG;
/*
 * Notify the out-of-band (DASH) firmware of @cmd: write it through
 * ERIDR/ERIAR (magic address 0x800010e8), then ring the OOB doorbell via
 * ocp_write. The ERIDR write of @cmd is on a line missing from this
 * extraction.
 */
938 static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
940 	void __iomem *ioaddr = tp->mmio_addr;
943 	RTL_W32(ERIAR, 0x800010e8);
946 	if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
949 	ocp_write(tp, 0x1, 0x30, 0x00000001);
/* Out-of-band commands understood by the DASH firmware. */
952 #define OOB_CMD_RESET		0x00
953 #define OOB_CMD_DRIVER_START	0x05
954 #define OOB_CMD_DRIVER_STOP	0x06
/* The OCP status register lives at a different offset on VER_31. */
956 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
958 	return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
/* Condition: firmware "driver started" acknowledgment bit (0x800). */
961 DECLARE_RTL_COND(rtl_ocp_read_cond)
965 	reg = rtl8168_get_ocp_reg(tp);
967 	return ocp_read(tp, 0x0f, reg) & 0x00000800;
/* Tell the OOB firmware the driver is taking over, and wait for the ack. */
970 static void rtl8168_driver_start(struct rtl8169_private *tp)
972 	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
974 	rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
/* Tell the OOB firmware the driver is letting go, and wait for the ack. */
977 static void rtl8168_driver_stop(struct rtl8169_private *tp)
979 	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
981 	rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
/* Return 1 if DASH management firmware is present (bit 0x8000), else 0. */
984 static int r8168dp_check_dash(struct rtl8169_private *tp)
986 	u16 reg = rtl8168_get_ocp_reg(tp);
988 	return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
/*
 * Validate an OCP register address: it must fit in 16 bits and be even.
 * Logs and (presumably — return lines missing here) reports failure.
 */
991 static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
993 	if (reg & 0xffff0001) {
994 		netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
/* Condition: GPHY_OCP busy/flag bit is set. */
1000 DECLARE_RTL_COND(rtl_ocp_gphy_cond)
1002 	void __iomem *ioaddr = tp->mmio_addr;
1004 	return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
/*
 * Write a PHY register through the GPHY OCP window: command word is
 * flag | (reg << 15) | data; then wait for the flag to clear.
 */
1007 static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1009 	void __iomem *ioaddr = tp->mmio_addr;
1011 	if (rtl_ocp_reg_failure(tp, reg))
1014 	RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
1016 	rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
/*
 * Read a PHY register through the GPHY OCP window. Returns the low
 * 16 bits on success, ~0 (0xffff once truncated to u16) on timeout.
 */
1019 static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
1021 	void __iomem *ioaddr = tp->mmio_addr;
1023 	if (rtl_ocp_reg_failure(tp, reg))
1026 	RTL_W32(GPHY_OCP, reg << 15);
1028 	return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
1029 		(RTL_R32(GPHY_OCP) & 0xffff) : ~0;
/* Read-modify-write a PHY OCP register: set bits @p, clear bits @m. */
1032 static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
1036 	val = r8168_phy_ocp_read(tp, reg);
1037 	r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
/*
 * Write a MAC OCP register via OCPDR, using the same
 * flag | (reg << 15) | data command layout as the GPHY window.
 */
1040 static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1042 	void __iomem *ioaddr = tp->mmio_addr;
1044 	if (rtl_ocp_reg_failure(tp, reg))
1047 	RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
/*
 * Read a MAC OCP register. No busy-wait here: write the address, then
 * read the data straight back from OCPDR.
 */
1050 static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
1052 	void __iomem *ioaddr = tp->mmio_addr;
1054 	if (rtl_ocp_reg_failure(tp, reg))
1057 	RTL_W32(OCPDR, reg << 15);
1059 	return RTL_R32(OCPDR);
/* Default OCP window mapping the standard MII register set. */
1062 #define OCP_STD_PHY_BASE	0xa400
/*
 * 8168g-style MDIO write. Writing the page-select register switches
 * tp->ocp_base (value << 4, or back to the standard window for the
 * default page); other registers are mapped at ocp_base + reg * 2.
 * NOTE(review): the page-select comparison and an early return are on
 * lines missing from this extraction.
 */
1064 static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
1067 		tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
1071 	if (tp->ocp_base != OCP_STD_PHY_BASE)
1074 	r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
/* 8168g-style MDIO read; same ocp_base + reg * 2 mapping as the write. */
1077 static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
1079 	if (tp->ocp_base != OCP_STD_PHY_BASE)
1082 	return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
/* Condition: PHYAR busy/flag bit (bit 31) is set. */
1085 DECLARE_RTL_COND(rtl_phyar_cond)
1087 	void __iomem *ioaddr = tp->mmio_addr;
1089 	return RTL_R32(PHYAR) & 0x80000000;
/*
 * Classic 8169 MDIO write through PHYAR: bit 31 = write command,
 * bits 20:16 = register, bits 15:0 = value; wait for bit 31 to clear.
 */
1092 static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
1094 	void __iomem *ioaddr = tp->mmio_addr;
1096 	RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
1098 	rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
1100 	 * According to hardware specs a 20us delay is required after write
1101 	 * complete indication, but before sending next command.
/*
 * Classic 8169 MDIO read through PHYAR. Returns the 16-bit value, or ~0
 * on timeout. The trailing 20us settling delay (see comment below) is on
 * a line missing from this extraction.
 */
1106 static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
1108 	void __iomem *ioaddr = tp->mmio_addr;
1111 	RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);
1113 	value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
1114 		RTL_R32(PHYAR) & 0xffff : ~0;
1117 	 * According to hardware specs a 20us delay is required after read
1118 	 * complete indication, but before sending next command.
/*
 * 8168dp (v1) MDIO access: the PHY is reached indirectly through
 * OCPDR/OCPAR plus a dummy write to EPHY_RXER_NUM, then a poll on
 * the OCPAR flag.
 */
1125 static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
1127 	void __iomem *ioaddr = tp->mmio_addr;
1129 	RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
1130 	RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
1131 	RTL_W32(EPHY_RXER_NUM, 0);
1133 	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
/* Write = access with the write command and the 16-bit value. */
1136 static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
1138 	r8168dp_1_mdio_access(tp, reg,
1139 			      OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
/*
 * Read = issue the read command, then a second GPHY-read cycle to fetch
 * the result from OCPDR. Returns ~0 on timeout.
 */
1142 static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
1144 	void __iomem *ioaddr = tp->mmio_addr;
1146 	r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
1149 	RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
1150 	RTL_W32(EPHY_RXER_NUM, 0);
1152 	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
1153 		RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
/* Gate bit in register 0xd0 that must be cleared to use PHYAR on 8168dp v2. */
1156 #define R8168DP_1_MDIO_ACCESS_BIT	0x00020000
/* Open the MDIO access window (clear the gate bit). */
1158 static void r8168dp_2_mdio_start(void __iomem *ioaddr)
1160 	RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
/* Close the MDIO access window (set the gate bit). */
1163 static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
1165 	RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
/* 8168dp v2 write: bracket the classic PHYAR write with start/stop. */
1168 static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
1170 	void __iomem *ioaddr = tp->mmio_addr;
1172 	r8168dp_2_mdio_start(ioaddr);
1174 	r8169_mdio_write(tp, reg, value);
1176 	r8168dp_2_mdio_stop(ioaddr);
/* 8168dp v2 read: same bracketing; the return line is missing here. */
1179 static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
1181 	void __iomem *ioaddr = tp->mmio_addr;
1184 	r8168dp_2_mdio_start(ioaddr);
1186 	value = r8169_mdio_read(tp, reg);
1188 	r8168dp_2_mdio_stop(ioaddr);
/* PHY write dispatched through the per-chip mdio_ops table. */
1193 static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
1195 	tp->mdio_ops.write(tp, location, val);
/* PHY read dispatched through the per-chip mdio_ops table. */
1198 static int rtl_readphy(struct rtl8169_private *tp, int location)
1200 	return tp->mdio_ops.read(tp, location);
/* OR @value into a PHY register (read-modify-write, set only). */
1203 static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
1205 	rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
/* Read-modify-write a PHY register: set bits @p, clear bits @m. */
1208 static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
1212 	val = rtl_readphy(tp, reg_addr);
1213 	rtl_writephy(tp, reg_addr, (val | p) & ~m);
/* mii_if_info callback: write (phy_id unused — single internal PHY). */
1216 static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
1219 	struct rtl8169_private *tp = netdev_priv(dev);
1221 	rtl_writephy(tp, location, val);
/* mii_if_info callback: read (phy_id unused — single internal PHY). */
1224 static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
1226 	struct rtl8169_private *tp = netdev_priv(dev);
1228 	return rtl_readphy(tp, location);
/* Condition: EPHYAR busy/flag bit is set. */
1231 DECLARE_RTL_COND(rtl_ephyar_cond)
1233 	void __iomem *ioaddr = tp->mmio_addr;
1235 	return RTL_R32(EPHYAR) & EPHYAR_FLAG;
/* Write a PCIe EPHY register via EPHYAR; wait for the flag to clear. */
1238 static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
1240 	void __iomem *ioaddr = tp->mmio_addr;
1242 	RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
1243 		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1245 	rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
/* Read a PCIe EPHY register via EPHYAR. Returns ~0 on timeout. */
1250 static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
1252 	void __iomem *ioaddr = tp->mmio_addr;
1254 	RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1256 	return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
1257 		RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
/*
 * ERI (extended register interface) write: data to ERIDR, then command
 * (write flag | type | byte-enable mask | dword-aligned addr) to ERIAR.
 * The address must be 4-byte aligned and the mask non-zero.
 */
1260 static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
1263 	void __iomem *ioaddr = tp->mmio_addr;
1265 	BUG_ON((addr & 3) || (mask == 0));
1266 	RTL_W32(ERIDR, val);
1267 	RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
1269 	rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
/* ERI read: always a full-dword (mask 1111) read. Returns ~0 on timeout. */
1272 static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
1274 	void __iomem *ioaddr = tp->mmio_addr;
1276 	RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
1278 	return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
1279 		RTL_R32(ERIDR) : ~0;
/* Read-modify-write an ERI register: set bits @p, clear bits @m. */
1282 static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1287 	val = rtl_eri_read(tp, addr, type);
1288 	rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
/*
 * Apply a table of EXGMAC register writes. The loop header iterating
 * @len entries is on a line missing from this extraction.
 */
1297 static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1298 				   const struct exgmac_reg *r, int len)
1301 		rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
/* Condition: EFUSEAR busy/flag bit is set. */
1306 DECLARE_RTL_COND(rtl_efusear_cond)
1308 	void __iomem *ioaddr = tp->mmio_addr;
1310 	return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
/*
 * Read one byte from the 8168d efuse at @reg_addr. Returns the byte, or
 * ~0 (0xff once truncated to u8) on timeout.
 */
1313 static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
1315 	void __iomem *ioaddr = tp->mmio_addr;
1317 	RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
1319 	return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
1320 		RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
/* Snapshot the pending interrupt status bits. */
1323 static u16 rtl_get_events(struct rtl8169_private *tp)
1325 	void __iomem *ioaddr = tp->mmio_addr;
1327 	return RTL_R16(IntrStatus);
/* Acknowledge (clear) the given interrupt status bits (write-1-to-clear). */
1330 static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
1332 	void __iomem *ioaddr = tp->mmio_addr;
1334 	RTL_W16(IntrStatus, bits);
/* Mask all interrupts. */
1338 static void rtl_irq_disable(struct rtl8169_private *tp)
1340 	void __iomem *ioaddr = tp->mmio_addr;
1342 	RTL_W16(IntrMask, 0);
/* Unmask exactly the given interrupt bits. */
1346 static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
1348 	void __iomem *ioaddr = tp->mmio_addr;
1350 	RTL_W16(IntrMask, bits);
/* Event sets handled by the NAPI poll path vs. the slow (work) path. */
1353 #define RTL_EVENT_NAPI_RX	(RxOK | RxErr)
1354 #define RTL_EVENT_NAPI_TX	(TxOK | TxErr)
1355 #define RTL_EVENT_NAPI		(RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
/* Unmask both the NAPI events and the chip-specific slow events. */
1357 static void rtl_irq_enable_all(struct rtl8169_private *tp)
1359 	rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
/*
 * Quiesce interrupts: mask everything, then acknowledge any NAPI or
 * slow events already latched in IntrStatus.
 */
1362 static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1364 	void __iomem *ioaddr = tp->mmio_addr;
1366 	rtl_irq_disable(tp);
1367 	rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
/* TBI (fiber) variant: reset still pending while TBICSR.TBIReset is set. */
1371 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
1373 	void __iomem *ioaddr = tp->mmio_addr;
1375 	return RTL_R32(TBICSR) & TBIReset;
/* XMII (copper) variant: reset pending while BMCR_RESET self-clears. */
1378 static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
1380 	return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
/* TBI link state from TBICSR. */
1383 static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
1385 	return RTL_R32(TBICSR) & TBILinkOk;
/* XMII link state from the PHYstatus register. */
1388 static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
1390 	return RTL_R8(PHYstatus) & LinkStatus;
/* Kick off a TBI reset by setting TBICSR.TBIReset. */
1393 static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
1395 	void __iomem *ioaddr = tp->mmio_addr;
1397 	RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
/* Kick off a PHY reset by setting BMCR_RESET (value kept to 16 bits). */
1400 static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1404 	val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1405 	rtl_writephy(tp, MII_BMCR, val & 0xffff);
/*
 * Apply chip-family-specific ERI register fixups after a link change.
 * NOTE(review): this chunk is truncated - the trailing argument of each
 * rtl_eri_write()/rtl_w1w0_eri() call (presumably ERIAR_EXGMAC), the else
 * branches and closing braces fall on elided lines. Do not restructure
 * without the complete file.
 */
1408 static void rtl_link_chg_patch(struct rtl8169_private *tp)
1410 void __iomem *ioaddr = tp->mmio_addr;
1411 struct net_device *dev = tp->dev;
/* Nothing to patch while the interface is down. */
1413 if (!netif_running(dev))
1416 if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1417 tp->mac_version == RTL_GIGA_MAC_VER_38) {
/* Speed-dependent ERI tuning: 1000 full vs 100 vs everything else. */
1418 if (RTL_R8(PHYstatus) & _1000bpsF) {
1419 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1421 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1423 } else if (RTL_R8(PHYstatus) & _100bps) {
1424 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1426 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1429 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1431 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1434 /* Reset packet filter */
1435 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
1437 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
1439 } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
1440 tp->mac_version == RTL_GIGA_MAC_VER_36) {
1441 if (RTL_R8(PHYstatus) & _1000bpsF) {
1442 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1444 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1447 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1449 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1452 } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
/* 8402: distinct tuning when the link negotiated 10 Mbps. */
1453 if (RTL_R8(PHYstatus) & _10bps) {
1454 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
1456 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
1459 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
1465 static void __rtl8169_check_link_status(struct net_device *dev,
1466 struct rtl8169_private *tp,
1467 void __iomem *ioaddr, bool pm)
1469 if (tp->link_ok(ioaddr)) {
1470 rtl_link_chg_patch(tp);
1471 /* This is to cancel a scheduled suspend if there's one. */
1473 pm_request_resume(&tp->pci_dev->dev);
1474 netif_carrier_on(dev);
1475 if (net_ratelimit())
1476 netif_info(tp, ifup, dev, "link up\n");
1478 netif_carrier_off(dev);
1479 netif_info(tp, ifdown, dev, "link down\n");
1481 pm_schedule_suspend(&tp->pci_dev->dev, 5000);
1485 static void rtl8169_check_link_status(struct net_device *dev,
1486 struct rtl8169_private *tp,
1487 void __iomem *ioaddr)
1489 __rtl8169_check_link_status(dev, tp, ioaddr, false);
1492 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1494 static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
1496 void __iomem *ioaddr = tp->mmio_addr;
1500 options = RTL_R8(Config1);
1501 if (!(options & PMEnable))
1504 options = RTL_R8(Config3);
1505 if (options & LinkUp)
1506 wolopts |= WAKE_PHY;
1507 if (options & MagicPacket)
1508 wolopts |= WAKE_MAGIC;
1510 options = RTL_R8(Config5);
1512 wolopts |= WAKE_UCAST;
1514 wolopts |= WAKE_BCAST;
1516 wolopts |= WAKE_MCAST;
1521 static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1523 struct rtl8169_private *tp = netdev_priv(dev);
1527 wol->supported = WAKE_ANY;
1528 wol->wolopts = __rtl8169_get_wol(tp);
1530 rtl_unlock_work(tp);
1533 static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1535 void __iomem *ioaddr = tp->mmio_addr;
1537 static const struct {
1542 { WAKE_PHY, Config3, LinkUp },
1543 { WAKE_MAGIC, Config3, MagicPacket },
1544 { WAKE_UCAST, Config5, UWF },
1545 { WAKE_BCAST, Config5, BWF },
1546 { WAKE_MCAST, Config5, MWF },
1547 { WAKE_ANY, Config5, LanWake }
1551 RTL_W8(Cfg9346, Cfg9346_Unlock);
1553 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
1554 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
1555 if (wolopts & cfg[i].opt)
1556 options |= cfg[i].mask;
1557 RTL_W8(cfg[i].reg, options);
1560 switch (tp->mac_version) {
1561 case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
1562 options = RTL_R8(Config1) & ~PMEnable;
1564 options |= PMEnable;
1565 RTL_W8(Config1, options);
1568 options = RTL_R8(Config2) & ~PME_SIGNAL;
1570 options |= PME_SIGNAL;
1571 RTL_W8(Config2, options);
1575 RTL_W8(Cfg9346, Cfg9346_Lock);
1578 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1580 struct rtl8169_private *tp = netdev_priv(dev);
1585 tp->features |= RTL_FEATURE_WOL;
1587 tp->features &= ~RTL_FEATURE_WOL;
1588 __rtl8169_set_wol(tp, wol->wolopts);
1590 rtl_unlock_work(tp);
1592 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
1597 static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
1599 return rtl_chip_infos[tp->mac_version].fw_name;
1602 static void rtl8169_get_drvinfo(struct net_device *dev,
1603 struct ethtool_drvinfo *info)
1605 struct rtl8169_private *tp = netdev_priv(dev);
1606 struct rtl_fw *rtl_fw = tp->rtl_fw;
1608 strlcpy(info->driver, MODULENAME, sizeof(info->driver));
1609 strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
1610 strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
1611 BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
1612 if (!IS_ERR_OR_NULL(rtl_fw))
1613 strlcpy(info->fw_version, rtl_fw->version,
1614 sizeof(info->fw_version));
1617 static int rtl8169_get_regs_len(struct net_device *dev)
1619 return R8169_REGS_SIZE;
1622 static int rtl8169_set_speed_tbi(struct net_device *dev,
1623 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1625 struct rtl8169_private *tp = netdev_priv(dev);
1626 void __iomem *ioaddr = tp->mmio_addr;
1630 reg = RTL_R32(TBICSR);
1631 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
1632 (duplex == DUPLEX_FULL)) {
1633 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
1634 } else if (autoneg == AUTONEG_ENABLE)
1635 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
1637 netif_warn(tp, link, dev,
1638 "incorrect speed setting refused in TBI mode\n");
1645 static int rtl8169_set_speed_xmii(struct net_device *dev,
1646 u8 autoneg, u16 speed, u8 duplex, u32 adv)
1648 struct rtl8169_private *tp = netdev_priv(dev);
1649 int giga_ctrl, bmcr;
1652 rtl_writephy(tp, 0x1f, 0x0000);
1654 if (autoneg == AUTONEG_ENABLE) {
1657 auto_nego = rtl_readphy(tp, MII_ADVERTISE);
1658 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
1659 ADVERTISE_100HALF | ADVERTISE_100FULL);
1661 if (adv & ADVERTISED_10baseT_Half)
1662 auto_nego |= ADVERTISE_10HALF;
1663 if (adv & ADVERTISED_10baseT_Full)
1664 auto_nego |= ADVERTISE_10FULL;
1665 if (adv & ADVERTISED_100baseT_Half)
1666 auto_nego |= ADVERTISE_100HALF;
1667 if (adv & ADVERTISED_100baseT_Full)
1668 auto_nego |= ADVERTISE_100FULL;
1670 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1672 giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
1673 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1675 /* The 8100e/8101e/8102e do Fast Ethernet only. */
1676 if (tp->mii.supports_gmii) {
1677 if (adv & ADVERTISED_1000baseT_Half)
1678 giga_ctrl |= ADVERTISE_1000HALF;
1679 if (adv & ADVERTISED_1000baseT_Full)
1680 giga_ctrl |= ADVERTISE_1000FULL;
1681 } else if (adv & (ADVERTISED_1000baseT_Half |
1682 ADVERTISED_1000baseT_Full)) {
1683 netif_info(tp, link, dev,
1684 "PHY does not support 1000Mbps\n");
1688 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
1690 rtl_writephy(tp, MII_ADVERTISE, auto_nego);
1691 rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
1695 if (speed == SPEED_10)
1697 else if (speed == SPEED_100)
1698 bmcr = BMCR_SPEED100;
1702 if (duplex == DUPLEX_FULL)
1703 bmcr |= BMCR_FULLDPLX;
1706 rtl_writephy(tp, MII_BMCR, bmcr);
1708 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
1709 tp->mac_version == RTL_GIGA_MAC_VER_03) {
1710 if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
1711 rtl_writephy(tp, 0x17, 0x2138);
1712 rtl_writephy(tp, 0x0e, 0x0260);
1714 rtl_writephy(tp, 0x17, 0x2108);
1715 rtl_writephy(tp, 0x0e, 0x0000);
1724 static int rtl8169_set_speed(struct net_device *dev,
1725 u8 autoneg, u16 speed, u8 duplex, u32 advertising)
1727 struct rtl8169_private *tp = netdev_priv(dev);
1730 ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
1734 if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
1735 (advertising & ADVERTISED_1000baseT_Full)) {
1736 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
1742 static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1744 struct rtl8169_private *tp = netdev_priv(dev);
1747 del_timer_sync(&tp->timer);
1750 ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
1751 cmd->duplex, cmd->advertising);
1752 rtl_unlock_work(tp);
1757 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1758 netdev_features_t features)
1760 struct rtl8169_private *tp = netdev_priv(dev);
1762 if (dev->mtu > TD_MSS_MAX)
1763 features &= ~NETIF_F_ALL_TSO;
1765 if (dev->mtu > JUMBO_1K &&
1766 !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
1767 features &= ~NETIF_F_IP_CSUM;
1772 static void __rtl8169_set_features(struct net_device *dev,
1773 netdev_features_t features)
1775 struct rtl8169_private *tp = netdev_priv(dev);
1776 netdev_features_t changed = features ^ dev->features;
1777 void __iomem *ioaddr = tp->mmio_addr;
1779 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
1782 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
1783 if (features & NETIF_F_RXCSUM)
1784 tp->cp_cmd |= RxChkSum;
1786 tp->cp_cmd &= ~RxChkSum;
1788 if (dev->features & NETIF_F_HW_VLAN_RX)
1789 tp->cp_cmd |= RxVlan;
1791 tp->cp_cmd &= ~RxVlan;
1793 RTL_W16(CPlusCmd, tp->cp_cmd);
1796 if (changed & NETIF_F_RXALL) {
1797 int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1798 if (features & NETIF_F_RXALL)
1799 tmp |= (AcceptErr | AcceptRunt);
1800 RTL_W32(RxConfig, tmp);
1804 static int rtl8169_set_features(struct net_device *dev,
1805 netdev_features_t features)
1807 struct rtl8169_private *tp = netdev_priv(dev);
1810 __rtl8169_set_features(dev, features);
1811 rtl_unlock_work(tp);
1817 static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
1819 return (vlan_tx_tag_present(skb)) ?
1820 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1823 static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1825 u32 opts2 = le32_to_cpu(desc->opts2);
1827 if (opts2 & RxVlanTag)
1828 __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
1831 static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
1833 struct rtl8169_private *tp = netdev_priv(dev);
1834 void __iomem *ioaddr = tp->mmio_addr;
1838 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
1839 cmd->port = PORT_FIBRE;
1840 cmd->transceiver = XCVR_INTERNAL;
1842 status = RTL_R32(TBICSR);
1843 cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
1844 cmd->autoneg = !!(status & TBINwEnable);
1846 ethtool_cmd_speed_set(cmd, SPEED_1000);
1847 cmd->duplex = DUPLEX_FULL; /* Always set */
1852 static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
1854 struct rtl8169_private *tp = netdev_priv(dev);
1856 return mii_ethtool_gset(&tp->mii, cmd);
1859 static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1861 struct rtl8169_private *tp = netdev_priv(dev);
1865 rc = tp->get_settings(dev, cmd);
1866 rtl_unlock_work(tp);
1871 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1874 struct rtl8169_private *tp = netdev_priv(dev);
1876 if (regs->len > R8169_REGS_SIZE)
1877 regs->len = R8169_REGS_SIZE;
1880 memcpy_fromio(p, tp->mmio_addr, regs->len);
1881 rtl_unlock_work(tp);
1884 static u32 rtl8169_get_msglevel(struct net_device *dev)
1886 struct rtl8169_private *tp = netdev_priv(dev);
1888 return tp->msg_enable;
1891 static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1893 struct rtl8169_private *tp = netdev_priv(dev);
1895 tp->msg_enable = value;
/*
 * ethtool statistics names, index-matched to the data[] slots filled by
 * rtl8169_get_ethtool_stats(). NOTE(review): most entries of this table
 * fall on elided lines in this chunk - only two are visible.
 */
1898 static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1905 "tx_single_collisions",
1906 "tx_multi_collisions",
/*
 * ethtool get_sset_count: number of statistic strings.
 * NOTE(review): the switch on @sset (presumably case ETH_SS_STATS, plus a
 * default returning an error) sits on elided lines in this chunk.
 */
1914 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1918 return ARRAY_SIZE(rtl8169_gstrings);
1924 DECLARE_RTL_COND(rtl_counters_cond)
1926 void __iomem *ioaddr = tp->mmio_addr;
1928 return RTL_R32(CounterAddrLow) & CounterDump;
1931 static void rtl8169_update_counters(struct net_device *dev)
1933 struct rtl8169_private *tp = netdev_priv(dev);
1934 void __iomem *ioaddr = tp->mmio_addr;
1935 struct device *d = &tp->pci_dev->dev;
1936 struct rtl8169_counters *counters;
1941 * Some chips are unable to dump tally counters when the receiver
1944 if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
1947 counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
1951 RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
1952 cmd = (u64)paddr & DMA_BIT_MASK(32);
1953 RTL_W32(CounterAddrLow, cmd);
1954 RTL_W32(CounterAddrLow, cmd | CounterDump);
1956 if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
1957 memcpy(&tp->counters, counters, sizeof(*counters));
1959 RTL_W32(CounterAddrLow, 0);
1960 RTL_W32(CounterAddrHigh, 0);
1962 dma_free_coherent(d, sizeof(*counters), counters, paddr);
1965 static void rtl8169_get_ethtool_stats(struct net_device *dev,
1966 struct ethtool_stats *stats, u64 *data)
1968 struct rtl8169_private *tp = netdev_priv(dev);
1972 rtl8169_update_counters(dev);
1974 data[0] = le64_to_cpu(tp->counters.tx_packets);
1975 data[1] = le64_to_cpu(tp->counters.rx_packets);
1976 data[2] = le64_to_cpu(tp->counters.tx_errors);
1977 data[3] = le32_to_cpu(tp->counters.rx_errors);
1978 data[4] = le16_to_cpu(tp->counters.rx_missed);
1979 data[5] = le16_to_cpu(tp->counters.align_errors);
1980 data[6] = le32_to_cpu(tp->counters.tx_one_collision);
1981 data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
1982 data[8] = le64_to_cpu(tp->counters.rx_unicast);
1983 data[9] = le64_to_cpu(tp->counters.rx_broadcast);
1984 data[10] = le32_to_cpu(tp->counters.rx_multicast);
1985 data[11] = le16_to_cpu(tp->counters.tx_aborted);
1986 data[12] = le16_to_cpu(tp->counters.tx_underun);
1989 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1993 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1998 static const struct ethtool_ops rtl8169_ethtool_ops = {
1999 .get_drvinfo = rtl8169_get_drvinfo,
2000 .get_regs_len = rtl8169_get_regs_len,
2001 .get_link = ethtool_op_get_link,
2002 .get_settings = rtl8169_get_settings,
2003 .set_settings = rtl8169_set_settings,
2004 .get_msglevel = rtl8169_get_msglevel,
2005 .set_msglevel = rtl8169_set_msglevel,
2006 .get_regs = rtl8169_get_regs,
2007 .get_wol = rtl8169_get_wol,
2008 .set_wol = rtl8169_set_wol,
2009 .get_strings = rtl8169_get_strings,
2010 .get_sset_count = rtl8169_get_sset_count,
2011 .get_ethtool_stats = rtl8169_get_ethtool_stats,
2012 .get_ts_info = ethtool_op_get_ts_info,
/*
 * Identify the chip revision by matching TxConfig against the mask/value
 * table below (first match wins; the all-zero terminator matches anything
 * and maps to RTL_GIGA_MAC_NONE, which falls back to @default_version).
 * NOTE(review): several table rows and family-label comments fall on
 * elided lines in this chunk (e.g. the 8168F rows between 2037 and 2040);
 * do not edit the table without the complete file.
 */
2015 static void rtl8169_get_mac_version(struct rtl8169_private *tp,
2016 struct net_device *dev, u8 default_version)
2018 void __iomem *ioaddr = tp->mmio_addr;
2020 * The driver currently handles the 8168Bf and the 8168Be identically
2021 * but they can be identified more specifically through the test below
2024 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
2026 * Same thing for the 8101Eb and the 8101Ec:
2028 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
2030 static const struct rtl_mac_info {
2036 { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
2037 { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
2040 { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
2041 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
2042 { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 },
2045 { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 },
2046 { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 },
2047 { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
2048 { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
2051 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
2052 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
2053 { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
2055 /* 8168DP family. */
2056 { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
2057 { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
2058 { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
2061 { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
2062 { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
2063 { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
2064 { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },
2065 { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
2066 { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
2067 { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 },
2068 { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 },
2069 { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 },
2072 { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
2073 { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 },
2074 { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
2075 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
2078 { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 },
2079 { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 },
2080 { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
2081 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
2082 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
2083 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
2084 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
2085 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
2086 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
2087 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
2088 { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
2089 { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
2090 { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
2091 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
2092 { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
2093 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
2094 { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
2095 { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
2096 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
2097 /* FIXME: where did these entries come from ? -- FR */
2098 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
2099 { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 },
2102 { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 },
2103 { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 },
2104 { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 },
2105 { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 },
2106 { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
2107 { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
/* Catch-all terminator: matches any TxConfig value. */
2110 { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
2112 const struct rtl_mac_info *p = mac_info;
2115 reg = RTL_R32(TxConfig);
/* Walk the table until the masked TxConfig matches (terminator always does). */
2116 while ((reg & p->mask) != p->val)
2118 tp->mac_version = p->mac_version;
2120 if (tp->mac_version == RTL_GIGA_MAC_NONE) {
2121 netif_notice(tp, probe, dev,
2122 "unknown MAC, using family default\n");
2123 tp->mac_version = default_version;
2127 static void rtl8169_print_mac_version(struct rtl8169_private *tp)
2129 dprintk("mac_version = 0x%02x\n", tp->mac_version);
2137 static void rtl_writephy_batch(struct rtl8169_private *tp,
2138 const struct phy_reg *regs, int len)
2141 rtl_writephy(tp, regs->reg, regs->val);
2146 #define PHY_READ 0x00000000
2147 #define PHY_DATA_OR 0x10000000
2148 #define PHY_DATA_AND 0x20000000
2149 #define PHY_BJMPN 0x30000000
2150 #define PHY_READ_EFUSE 0x40000000
2151 #define PHY_READ_MAC_BYTE 0x50000000
2152 #define PHY_WRITE_MAC_BYTE 0x60000000
2153 #define PHY_CLEAR_READCOUNT 0x70000000
2154 #define PHY_WRITE 0x80000000
2155 #define PHY_READCOUNT_EQ_SKIP 0x90000000
2156 #define PHY_COMP_EQ_SKIPN 0xa0000000
2157 #define PHY_COMP_NEQ_SKIPN 0xb0000000
2158 #define PHY_WRITE_PREVIOUS 0xc0000000
2159 #define PHY_SKIPN 0xd0000000
2160 #define PHY_DELAY_MS 0xe0000000
2161 #define PHY_WRITE_ERI_WORD 0xf0000000
2165 char version[RTL_VER_SIZE];
2171 #define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
/*
 * Validate a firmware image and locate its PHY opcode stream.
 * New-style images carry a fw_info header (magic == 0): verify overall
 * size, checksum, and that fw_start/fw_len stay within the blob, then take
 * the embedded version string. Old-style raw images must simply be a whole
 * number of opcodes; the version falls back to the lookup-table name.
 * NOTE(review): this chunk is truncated - the rc variable, checksum
 * declaration, goto/return statements and closing braces are on elided
 * lines; treat the control flow below as incomplete.
 */
2173 static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2175 const struct firmware *fw = rtl_fw->fw;
2176 struct fw_info *fw_info = (struct fw_info *)fw->data;
2177 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2178 char *version = rtl_fw->version;
/* Too small to hold even one opcode. */
2181 if (fw->size < FW_OPCODE_SIZE)
2184 if (!fw_info->magic) {
2185 size_t i, size, start;
2188 if (fw->size < sizeof(*fw_info))
/* Whole-image additive checksum. */
2191 for (i = 0; i < fw->size; i++)
2192 checksum += fw->data[i];
2196 start = le32_to_cpu(fw_info->fw_start);
2197 if (start > fw->size)
2200 size = le32_to_cpu(fw_info->fw_len);
2201 if (size > (fw->size - start) / FW_OPCODE_SIZE)
2204 memcpy(version, fw_info->version, RTL_VER_SIZE);
2206 pa->code = (__le32 *)(fw->data + start);
/* Old-style image: must divide evenly into opcodes. */
2209 if (fw->size % FW_OPCODE_SIZE)
2212 strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);
2214 pa->code = (__le32 *)fw->data;
2215 pa->size = fw->size / FW_OPCODE_SIZE;
/* Ensure NUL-termination regardless of source. */
2217 version[RTL_VER_SIZE - 1] = 0;
/*
 * Static sanity pass over the firmware opcode stream: every jump/skip
 * target must stay inside pa->code[0..size), and opcodes this driver does
 * not implement are rejected. Returns false (via elided paths) on the
 * first bad opcode.
 * NOTE(review): truncated chunk - return statements, break/fall-through
 * markers and closing braces are on elided lines.
 */
2224 static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
2225 struct rtl_fw_phy_action *pa)
2230 for (index = 0; index < pa->size; index++) {
2231 u32 action = le32_to_cpu(pa->code[index]);
/* regno doubles as a backward/forward jump distance for some opcodes. */
2232 u32 regno = (action & 0x0fff0000) >> 16;
2234 switch(action & 0xf0000000) {
2238 case PHY_READ_EFUSE:
2239 case PHY_CLEAR_READCOUNT:
2241 case PHY_WRITE_PREVIOUS:
/* PHY_BJMPN: backward jump must not precede the start of the stream. */
2246 if (regno > index) {
2247 netif_err(tp, ifup, tp->dev,
2248 "Out of range of firmware\n");
2252 case PHY_READCOUNT_EQ_SKIP:
/* Skips two opcodes; both must exist. */
2253 if (index + 2 >= pa->size) {
2254 netif_err(tp, ifup, tp->dev,
2255 "Out of range of firmware\n");
2259 case PHY_COMP_EQ_SKIPN:
2260 case PHY_COMP_NEQ_SKIPN:
/* Skips regno opcodes forward; the landing point must exist. */
2262 if (index + 1 + regno >= pa->size) {
2263 netif_err(tp, ifup, tp->dev,
2264 "Out of range of firmware\n");
/* Opcodes defined by the format but not supported by this driver. */
2269 case PHY_READ_MAC_BYTE:
2270 case PHY_WRITE_MAC_BYTE:
2271 case PHY_WRITE_ERI_WORD:
2273 netif_err(tp, ifup, tp->dev,
2274 "Invalid action 0x%08x\n", action);
2283 static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2285 struct net_device *dev = tp->dev;
2288 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2289 netif_err(tp, ifup, dev, "invalid firwmare\n");
2293 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
/*
 * Interpreter for the validated firmware opcode stream: drives the PHY
 * using a one-register state machine (predata = last value read,
 * count = write counter used by PHY_READCOUNT_EQ_SKIP).
 * NOTE(review): truncated chunk - several opcode cases (PHY_DATA_OR,
 * PHY_DATA_AND, PHY_BJMPN, PHY_SKIPN, PHY_DELAY_MS, ...), the index
 * adjustments and break statements are on elided lines; do not
 * restructure without the complete file.
 */
2299 static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2301 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2305 predata = count = 0;
2307 for (index = 0; index < pa->size; ) {
2308 u32 action = le32_to_cpu(pa->code[index]);
2309 u32 data = action & 0x0000ffff;
2310 u32 regno = (action & 0x0fff0000) >> 16;
2315 switch(action & 0xf0000000) {
2317 predata = rtl_readphy(tp, regno);
2332 case PHY_READ_EFUSE:
2333 predata = rtl8168d_efuse_read(tp, regno);
2336 case PHY_CLEAR_READCOUNT:
2341 rtl_writephy(tp, regno, data);
/* Skip 2 opcodes when the write counter matches @data. */
2344 case PHY_READCOUNT_EQ_SKIP:
2345 index += (count == data) ? 2 : 1;
2347 case PHY_COMP_EQ_SKIPN:
2348 if (predata == data)
2352 case PHY_COMP_NEQ_SKIPN:
2353 if (predata != data)
2357 case PHY_WRITE_PREVIOUS:
2358 rtl_writephy(tp, regno, predata);
/* Rejected earlier by rtl_fw_data_ok(); unreachable here. */
2369 case PHY_READ_MAC_BYTE:
2370 case PHY_WRITE_MAC_BYTE:
2371 case PHY_WRITE_ERI_WORD:
2378 static void rtl_release_firmware(struct rtl8169_private *tp)
2380 if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2381 release_firmware(tp->rtl_fw->fw);
2384 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
2387 static void rtl_apply_firmware(struct rtl8169_private *tp)
2389 struct rtl_fw *rtl_fw = tp->rtl_fw;
2391 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2392 if (!IS_ERR_OR_NULL(rtl_fw)) {
2393 rtl_phy_write_fw(tp, rtl_fw);
2394 tp->features |= RTL_FEATURE_FW_LOADED;
2398 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2400 if (rtl_readphy(tp, reg) != val)
2401 netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2403 rtl_apply_firmware(tp);
/* Disable ALDPS (link-down power saving) on 810x-class PHYs. */
static void r810x_aldps_disable(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	/* PHY needs time to leave power-saving (delay line was elided in
	 * this chunk; 100ms matches the upstream driver - TODO confirm). */
	msleep(100);
}
2413 static void r810x_aldps_enable(struct rtl8169_private *tp)
2415 if (!(tp->features & RTL_FEATURE_FW_LOADED))
2418 rtl_writephy(tp, 0x1f, 0x0000);
2419 rtl_writephy(tp, 0x18, 0x8310);
2422 static void r8168_aldps_enable_1(struct rtl8169_private *tp)
2424 if (!(tp->features & RTL_FEATURE_FW_LOADED))
2427 rtl_writephy(tp, 0x1f, 0x0000);
2428 rtl_w1w0_phy(tp, 0x15, 0x1000, 0x0000);
/*
 * PHY setup for the 8169s. NOTE(review): the phy_reg_init[] table body
 * (lines 2434-2494) is elided in this chunk.
 */
2431 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
2433 static const struct phy_reg phy_reg_init[] = {
2495 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/*
 * PHY setup for the 8169sb. NOTE(review): the phy_reg_init[] table body
 * is elided in this chunk.
 */
2498 static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
2500 static const struct phy_reg phy_reg_init[] = {
2506 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2509 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2511 struct pci_dev *pdev = tp->pci_dev;
2513 if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2514 (pdev->subsystem_device != 0xe000))
2517 rtl_writephy(tp, 0x1f, 0x0001);
2518 rtl_writephy(tp, 0x10, 0xf01b);
2519 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * PHY setup for the 8169scd, followed by the Gigabyte board quirk.
 * NOTE(review): the phy_reg_init[] table body is elided in this chunk.
 */
2522 static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
2524 static const struct phy_reg phy_reg_init[] = {
2564 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2566 rtl8169scd_hw_phy_config_quirk(tp);
/*
 * PHY setup for the 8169sce. NOTE(review): the phy_reg_init[] table body
 * is elided in this chunk.
 */
2569 static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
2571 static const struct phy_reg phy_reg_init[] = {
2619 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/*
 * PHY setup for the 8168bb: set bit 0 of reg 0x16 on page 1, then replay
 * the (elided) init table.
 */
2622 static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
2624 static const struct phy_reg phy_reg_init[] = {
2629 rtl_writephy(tp, 0x1f, 0x0001);
2630 rtl_patchphy(tp, 0x16, 1 << 0);
2632 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/*
 * PHY setup for the 8168be/bf. NOTE(review): the phy_reg_init[] table
 * body is elided in this chunk.
 */
2635 static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
2637 static const struct phy_reg phy_reg_init[] = {
2643 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/*
 * PHY setup for the first 8168cp variant. NOTE(review): the
 * phy_reg_init[] table body is elided in this chunk.
 */
2646 static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
2648 static const struct phy_reg phy_reg_init[] = {
2656 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/*
 * PHY setup for the second 8168cp variant: set bit 5 in regs 0x14/0x0d
 * on page 0, then replay the (elided) init table.
 */
2659 static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
2661 static const struct phy_reg phy_reg_init[] = {
2667 rtl_writephy(tp, 0x1f, 0x0000);
2668 rtl_patchphy(tp, 0x14, 1 << 5);
2669 rtl_patchphy(tp, 0x0d, 1 << 5);
2671 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/*
 * PHY setup for the 8168c rev 1: replay the (elided) init table, then
 * set bit 5 in regs 0x14/0x0d and return to page 0.
 */
2674 static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
2676 static const struct phy_reg phy_reg_init[] = {
2696 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2698 rtl_patchphy(tp, 0x14, 1 << 5);
2699 rtl_patchphy(tp, 0x0d, 1 << 5);
2700 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * PHY setup for the 8168c rev 2: replay the (elided) init table, patch
 * bits in regs 0x16/0x14/0x0d and return to page 0.
 */
2703 static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
2705 static const struct phy_reg phy_reg_init[] = {
2723 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2725 rtl_patchphy(tp, 0x16, 1 << 0);
2726 rtl_patchphy(tp, 0x14, 1 << 5);
2727 rtl_patchphy(tp, 0x0d, 1 << 5);
2728 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * PHY setup for the 8168c rev 3 (also reused verbatim by rev 4): replay
 * the (elided) init table, patch bits in regs 0x16/0x14/0x0d, page 0.
 */
2731 static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
2733 static const struct phy_reg phy_reg_init[] = {
2745 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2747 rtl_patchphy(tp, 0x16, 1 << 0);
2748 rtl_patchphy(tp, 0x14, 1 << 5);
2749 rtl_patchphy(tp, 0x0d, 1 << 5);
2750 rtl_writephy(tp, 0x1f, 0x0000);
/* The 8168c rev 4 uses the exact same PHY setup as rev 3. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
/*
 * PHY setup for the 8168d rev 1. The flow: replay the base table, tune
 * the switching regulator, then branch on an eFuse byte (0xb1 marks one
 * PHY build) to select extra register fixups, adjust RSET coupling and
 * PLL, and finally load firmware if the PHY is in the expected state.
 * NOTE(review): table bodies, else branches and closing braces are on
 * elided lines in this chunk.
 */
2758 static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2760 static const struct phy_reg phy_reg_init_0[] = {
2761 /* Channel Estimation */
2782 * Enhance line driver power
2791 * Can not link to 1Gbps with bad cable
2792 * Decrease SNR threshold form 21.07dB to 19.04dB
2801 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2805 * Fine Tune Switching regulator parameter
2807 rtl_writephy(tp, 0x1f, 0x0002);
2808 rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
2809 rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
/* eFuse byte 0x01 == 0xb1 identifies a specific PHY build. */
2811 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2812 static const struct phy_reg phy_reg_init[] = {
2822 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2824 val = rtl_readphy(tp, 0x0d);
/* Walk reg 0x0d through 0x0065..0x006c unless already at 0x6c. */
2826 if ((val & 0x00ff) != 0x006c) {
2827 static const u32 set[] = {
2828 0x0065, 0x0066, 0x0067, 0x0068,
2829 0x0069, 0x006a, 0x006b, 0x006c
2833 rtl_writephy(tp, 0x1f, 0x0002);
2836 for (i = 0; i < ARRAY_SIZE(set); i++)
2837 rtl_writephy(tp, 0x0d, val | set[i]);
2840 static const struct phy_reg phy_reg_init[] = {
2848 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2851 /* RSET couple improve */
2852 rtl_writephy(tp, 0x1f, 0x0002);
2853 rtl_patchphy(tp, 0x0d, 0x0300);
2854 rtl_patchphy(tp, 0x0f, 0x0010);
2856 /* Fine tune PLL performance */
2857 rtl_writephy(tp, 0x1f, 0x0002);
2858 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2859 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2861 rtl_writephy(tp, 0x1f, 0x0005);
2862 rtl_writephy(tp, 0x05, 0x001b);
/* Load firmware only if MII_EXPANSION reads back the expected 0xbf00. */
2864 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
2866 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * PHY setup for the 8168d rev 2. Same shape as rev 1 (base table, eFuse
 * branch, PLL tuning, conditional firmware) but without the switching
 * regulator fine-tune step and with a different expected MII_EXPANSION
 * value (0xb300). NOTE(review): table bodies, else branches and closing
 * braces are on elided lines in this chunk.
 */
2869 static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
2871 static const struct phy_reg phy_reg_init_0[] = {
2872 /* Channel Estimation */
2893 * Enhance line driver power
2902 * Can not link to 1Gbps with bad cable
2903 * Decrease SNR threshold form 21.07dB to 19.04dB
2912 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
/* eFuse byte 0x01 == 0xb1 identifies a specific PHY build. */
2914 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2915 static const struct phy_reg phy_reg_init[] = {
2926 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2928 val = rtl_readphy(tp, 0x0d);
/* Walk reg 0x0d through 0x0065..0x006c unless already at 0x6c. */
2929 if ((val & 0x00ff) != 0x006c) {
2930 static const u32 set[] = {
2931 0x0065, 0x0066, 0x0067, 0x0068,
2932 0x0069, 0x006a, 0x006b, 0x006c
2936 rtl_writephy(tp, 0x1f, 0x0002);
2939 for (i = 0; i < ARRAY_SIZE(set); i++)
2940 rtl_writephy(tp, 0x0d, val | set[i]);
2943 static const struct phy_reg phy_reg_init[] = {
2951 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2954 /* Fine tune PLL performance */
2955 rtl_writephy(tp, 0x1f, 0x0002);
2956 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2957 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2959 /* Switching regulator Slew rate */
2960 rtl_writephy(tp, 0x1f, 0x0002);
2961 rtl_patchphy(tp, 0x0f, 0x0017);
2963 rtl_writephy(tp, 0x1f, 0x0005);
2964 rtl_writephy(tp, 0x05, 0x001b);
/* Load firmware only if MII_EXPANSION reads back the expected 0xb300. */
2966 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
2968 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * PHY setup for the 8168d rev 3. NOTE(review): the phy_reg_init[] table
 * body (lines 2974-3028) is elided in this chunk.
 */
2971 static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
2973 static const struct phy_reg phy_reg_init[] = {
3029 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/*
 * PHY setup for the 8168d rev 4: replay the (elided) init table, then
 * set bit 5 of reg 0x0d.
 */
3032 static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
3034 static const struct phy_reg phy_reg_init[] = {
3044 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3045 rtl_patchphy(tp, 0x0d, 1 << 5);
/*
 * PHY setup for the 8168e rev 1: load firmware, replay the base table,
 * then a series of targeted tweaks (10M idle DCO, impedance matching,
 * auto speed down, EEE-related bits) ending with an MMD-style indirect
 * write sequence via regs 0x0d/0x0e.
 * NOTE(review): table bodies are on elided lines in this chunk.
 */
3048 static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
3050 static const struct phy_reg phy_reg_init[] = {
3051 /* Enable Delay cap */
3057 /* Channel estimation fine tune */
3066 /* Update PFM & 10M TX idle timer */
3078 rtl_apply_firmware(tp);
3080 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3082 /* DCO enable for 10M IDLE Power */
3083 rtl_writephy(tp, 0x1f, 0x0007);
3084 rtl_writephy(tp, 0x1e, 0x0023);
3085 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
3086 rtl_writephy(tp, 0x1f, 0x0000);
3088 /* For impedance matching */
3089 rtl_writephy(tp, 0x1f, 0x0002);
3090 rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
3091 rtl_writephy(tp, 0x1f, 0x0000);
3093 /* PHY auto speed down */
3094 rtl_writephy(tp, 0x1f, 0x0007);
3095 rtl_writephy(tp, 0x1e, 0x002d);
3096 rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
3097 rtl_writephy(tp, 0x1f, 0x0000);
3098 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3100 rtl_writephy(tp, 0x1f, 0x0005);
3101 rtl_writephy(tp, 0x05, 0x8b86);
3102 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3103 rtl_writephy(tp, 0x1f, 0x0000);
3105 rtl_writephy(tp, 0x1f, 0x0005);
3106 rtl_writephy(tp, 0x05, 0x8b85);
3107 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3108 rtl_writephy(tp, 0x1f, 0x0007);
3109 rtl_writephy(tp, 0x1e, 0x0020);
3110 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
3111 rtl_writephy(tp, 0x1f, 0x0006);
3112 rtl_writephy(tp, 0x00, 0x5a00);
3113 rtl_writephy(tp, 0x1f, 0x0000);
/* MMD indirect access: select device/reg via 0x0d, data via 0x0e. */
3114 rtl_writephy(tp, 0x0d, 0x0007);
3115 rtl_writephy(tp, 0x0e, 0x003c);
3116 rtl_writephy(tp, 0x0d, 0x4007);
3117 rtl_writephy(tp, 0x0e, 0x0000);
3118 rtl_writephy(tp, 0x0d, 0x0000);
3121 static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
3124 addr[0] | (addr[1] << 8),
3125 addr[2] | (addr[3] << 8),
3126 addr[4] | (addr[5] << 8)
3128 const struct exgmac_reg e[] = {
3129 { .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) },
3130 { .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] },
3131 { .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 },
3132 { .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) }
3135 rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
/*
 * PHY setup for RTL8168E rev 2 (RTL_GIGA_MAC_VER_34).
 * NOTE(review): braces and the phy_reg_init[] rows are elided from this
 * extract; only the table's section comments survive.
 */
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		/* (rows elided) */
		/* Channel estimation fine tune */
		/* (rows elided) */

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE disable via ERI 0x1b0, extended page 0x8b85 bit 13 clear,
	 * and the 0x0d/0x0e MMD-style sequence. */
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Page 3: clear bit 0 of reg 0x19, clear bit 10 of reg 0x10. */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);

	r8168_aldps_enable_1(tp);

	/* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
	rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
/*
 * Tweaks shared by every RTL8168F flavour: 4-corner performance,
 * automatic PHY speed-down, and the 10M EEE waveform fix.  Register
 * write order is significant and must not change.
 */
static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
{
	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* Improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
/*
 * PHY setup for RTL8168F rev 1 (RTL_GIGA_MAC_VER_35).
 * NOTE(review): braces and the phy_reg_init[] rows are elided from this
 * extract; only the table's section comments survive.
 */
static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		/* (rows elided) */
		/* Modify green table for giga & fnet */
		/* (rows elided) */
		/* Modify green table for 10M */
		/* (rows elided) */
		/* Disable hiimpedance detection (RTCT) */
		/* (rows elided) */

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Common 8168F tweaks (4-corner, speed-down, EEE waveform). */
	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	r8168_aldps_enable_1(tp);
/*
 * PHY setup for RTL8168F rev 2 (RTL_GIGA_MAC_VER_36): firmware plus the
 * common 8168F tweaks; no extra register table for this revision.
 */
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);

	r8168_aldps_enable_1(tp);
}
/*
 * PHY setup for RTL8411 (RTL_GIGA_MAC_VER_38).
 * NOTE(review): braces and the phy_reg_init[] rows are elided from this
 * extract; only the table's section comments survive.
 */
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		/* (rows elided) */
		/* Modify green table for giga & fnet */
		/* (rows elided) */
		/* Modify green table for 10M */
		/* (rows elided) */
		/* Disable hiimpedance detection (RTCT) */
		/* (rows elided) */

	rtl_apply_firmware(tp);

	/* Common 8168F tweaks (4-corner, speed-down, EEE waveform). */
	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Modify green table for giga */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b54);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8b5d);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8a7c);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a7f);
	rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
	rtl_writephy(tp, 0x05, 0x8a82);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a88);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* uc same-seed solution */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE disable via ERI 0x1b0, page 0x8b85 bit 13 clear, and the
	 * 0x0d/0x0e MMD-style sequence. */
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Page 3: clear bit 0 of reg 0x19, clear bit 10 of reg 0x10. */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);

	r8168_aldps_enable_1(tp);
/*
 * PHY setup for RTL8168G rev 1 (RTL_GIGA_MAC_VER_40): loads a MAC OCP
 * patch for GPHY reset, applies firmware, then conditions several OCP
 * PHY registers.
 * NOTE(review): braces, the loop-index declaration and several rows of
 * mac_ocp_patch[] (e.g. original lines 3416, 3421, 3426) are elided from
 * this extract — the array below is incomplete as shown.
 */
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
	static const u16 mac_ocp_patch[] = {
		0xe008, 0xe01b, 0xe01d, 0xe01f,
		0xe021, 0xe023, 0xe025, 0xe027,
		0x49d2, 0xf10d, 0x766c, 0x49e2,
		0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
		/* (row elided) */
		0x77c0, 0x4870, 0x9fc0, 0x1ea0,
		0xc707, 0x8ee1, 0x9d6c, 0xc603,
		0xbe00, 0xb416, 0x0076, 0xe86c,
		0xc602, 0xbe00, 0x0000, 0xc602,
		/* (row elided) */
		0xbe00, 0x0000, 0xc602, 0xbe00,
		0x0000, 0xc602, 0xbe00, 0x0000,
		0xc602, 0xbe00, 0x0000, 0xc602,
		0xbe00, 0x0000, 0xc602, 0xbe00,
		/* (row elided) */
		0x0000, 0x0000, 0x0000, 0x0000

	/* Patch code for GPHY reset */
	for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
		r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
	/* Arm the patch: enable flag at 0xfc26, entry point at 0xfc28. */
	r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
	r8168_mac_ocp_write(tp, 0xfc28, 0x0075);

	rtl_apply_firmware(tp);

	/* Mirror bit 8 of OCP 0xa460 into bit 15 of 0xbcc4. */
	if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);
	/* NOTE(review): the "else" keyword line appears elided above. */

	if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
		rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);
	/* NOTE(review): ditto — an "else" line is elided here too. */

	rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
	rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);

	r8168_phy_ocp_write(tp, 0xa436, 0x8012);
	rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);

	rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
/*
 * PHY setup for the RTL8102E family (RTL_GIGA_MAC_VER_07/08/09).
 * NOTE(review): braces and the phy_reg_init[] rows (original lines
 * ~3461-3466) are elided from this extract.
 */
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
	static const struct phy_reg phy_reg_init[] = {
	/* (reg, value) rows elided in this extract */
	/* Select page 0, then set individual bits before the batch. */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x11, 1 << 12);
	rtl_patchphy(tp, 0x19, 1 << 13);
	rtl_patchphy(tp, 0x10, 1 << 15);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/*
 * PHY setup for RTL8105E (RTL_GIGA_MAC_VER_29/30).
 * NOTE(review): braces and the phy_reg_init[] rows are elided from this
 * extract.
 */
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
	static const struct phy_reg phy_reg_init[] = {
	/* (reg, value) rows elided in this extract */

	/* Disable ALDPS before ram code */
	r810x_aldps_disable(tp);

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	r810x_aldps_enable(tp);
3501 static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3503 /* Disable ALDPS before setting firmware */
3504 r810x_aldps_disable(tp);
3506 rtl_apply_firmware(tp);
3509 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3510 rtl_writephy(tp, 0x1f, 0x0004);
3511 rtl_writephy(tp, 0x10, 0x401f);
3512 rtl_writephy(tp, 0x19, 0x7030);
3513 rtl_writephy(tp, 0x1f, 0x0000);
3515 r810x_aldps_enable(tp);
/*
 * PHY setup for RTL8106E (RTL_GIGA_MAC_VER_39).
 * NOTE(review): braces and the phy_reg_init[] rows (original lines
 * ~3521-3526) are elided from this extract.
 */
static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
	static const struct phy_reg phy_reg_init[] = {
	/* (reg, value) rows elided in this extract */

	/* Disable ALDPS before ram code */
	r810x_aldps_disable(tp);

	rtl_apply_firmware(tp);

	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

	r810x_aldps_enable(tp);
/*
 * Dispatch to the chip-specific PHY configuration routine according to
 * the detected tp->mac_version.
 * NOTE(review): the per-case "break;" lines, the "default:" tail and the
 * closing braces are elided from this extract — do NOT read the cases
 * below as falling through; each arm is terminated in the full source.
 */
static void rtl_hw_phy_config(struct net_device *dev)
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_print_mac_version(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
		/* original 8169: no PHY tweaks (break elided) */
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
		rtl8169s_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_04:
		rtl8169sb_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_05:
		rtl8169scd_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_06:
		rtl8169sce_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
		rtl8102e_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_11:
		rtl8168bb_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_12:
		rtl8168bef_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_17:
		rtl8168bef_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_18:
		rtl8168cp_1_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_19:
		rtl8168c_1_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_20:
		rtl8168c_2_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_21:
		rtl8168c_3_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_22:
		rtl8168c_4_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
		rtl8168cp_2_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_25:
		rtl8168d_1_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_26:
		rtl8168d_2_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_27:
		rtl8168d_3_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_28:
		rtl8168d_4_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
		rtl8105e_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_31:
		/* 8168DP: nothing to do here (body elided) */
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl8168e_1_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_34:
		rtl8168e_2_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_35:
		rtl8168f_1_hw_phy_config(tp);
	case RTL_GIGA_MAC_VER_36:
		rtl8168f_2_hw_phy_config(tp);

	case RTL_GIGA_MAC_VER_37:
		rtl8402_hw_phy_config(tp);

	case RTL_GIGA_MAC_VER_38:
		rtl8411_hw_phy_config(tp);

	case RTL_GIGA_MAC_VER_39:
		rtl8106e_hw_phy_config(tp);

	case RTL_GIGA_MAC_VER_40:
		rtl8168g_1_hw_phy_config(tp);

	case RTL_GIGA_MAC_VER_41:
	/* NOTE(review): the default arm and closing braces are elided. */
/*
 * PHY watchdog, re-armed via tp->timer (see mod_timer below): while the
 * link stays down it keeps kicking PHY resets until link-up is observed.
 * NOTE(review): several lines are elided from this extract — the body of
 * the phy_reset_pending branch (which shortens the timeout and jumps to
 * the re-arm), the early return after link_ok(), and the label the jump
 * targets.  The control flow shown here is incomplete.
 */
static void rtl_phy_work(struct rtl8169_private *tp)
	struct timer_list *timer = &tp->timer;
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long timeout = RTL8169_PHY_TIMEOUT;

	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);

	if (tp->phy_reset_pending(tp)) {
		/*
		 * A busy loop could burn quite a few cycles on nowadays CPU.
		 * Let's delay the execution of the timer for a few ticks.
		 */

	if (tp->link_ok(ioaddr))

	netif_warn(tp, link, tp->dev, "PHY reset until link up\n");

	tp->phy_reset_enable(tp);

	mod_timer(timer, jiffies + timeout);
3678 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3680 if (!test_and_set_bit(flag, tp->wk.flags))
3681 schedule_work(&tp->wk.work);
3684 static void rtl8169_phy_timer(unsigned long __opaque)
3686 struct net_device *dev = (struct net_device *)__opaque;
3687 struct rtl8169_private *tp = netdev_priv(dev);
3689 rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
/*
 * Undo the PCI-level setup performed at probe time.
 * NOTE(review): lines are elided from this extract — as shown, @ioaddr
 * and @dev go entirely unused; presumably the full source also
 * iounmap()s @ioaddr and frees @dev.  Confirm against the full file.
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
				  void __iomem *ioaddr)
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
3702 DECLARE_RTL_COND(rtl_phy_reset_cond)
3704 return tp->phy_reset_pending(tp);
3707 static void rtl8169_phy_reset(struct net_device *dev,
3708 struct rtl8169_private *tp)
3710 tp->phy_reset_enable(tp);
3711 rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
3714 static bool rtl_tbi_enabled(struct rtl8169_private *tp)
3716 void __iomem *ioaddr = tp->mmio_addr;
3718 return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
3719 (RTL_R8(PHYstatus) & TBI_Enable);
/*
 * Full PHY bring-up: chip-specific config, PCI latency/cache-line fixups
 * for the old 8169 family, a PHY reset, then advertise autoneg for all
 * speeds the chip supports.
 * NOTE(review): lines are elided from this extract — in particular the
 * register writes that the two dprintk() messages announce (the "C+CR
 * Offset 0x82h = 0x01h" writes) do not appear below.
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_phy_config(dev);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		/* NOTE(review): the write itself is elided here. */

	pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		/* NOTE(review): the write itself is elided here. */
		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
		rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0

	rtl8169_phy_reset(dev, tp);

	/* Advertise everything; 1000 half/full only on GMII-capable parts. */
	rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
			  ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			  (tp->mii.supports_gmii ?
			   ADVERTISED_1000baseT_Half |
			   ADVERTISED_1000baseT_Full : 0));

	if (rtl_tbi_enabled(tp))
		netif_info(tp, link, dev, "TBI auto-negotiating\n");
/*
 * Program the station MAC address registers (MAC0/MAC4), mirroring it to
 * the GigaMAC registers on VER_34 where the BIOS may leave them stale.
 * NOTE(review): lines are elided from this extract — rtl_unlock_work()
 * appears below without a visible matching rtl_lock_work(), and the
 * post-write readbacks are missing; presumably both were elided.
 */
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
	void __iomem *ioaddr = tp->mmio_addr;

	/* Config registers only accept writes while unlocked. */
	RTL_W8(Cfg9346, Cfg9346_Unlock);

	/* High two octets first, then the low four. */
	RTL_W32(MAC4, addr[4] | addr[5] << 8);

	RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);

	if (tp->mac_version == RTL_GIGA_MAC_VER_34)
		rtl_rar_exgmac_set(tp, addr);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	rtl_unlock_work(tp);
3780 static int rtl_set_mac_address(struct net_device *dev, void *p)
3782 struct rtl8169_private *tp = netdev_priv(dev);
3783 struct sockaddr *addr = p;
3785 if (!is_valid_ether_addr(addr->sa_data))
3786 return -EADDRNOTAVAIL;
3788 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3790 rtl_rar_set(tp, dev->dev_addr);
3795 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3797 struct rtl8169_private *tp = netdev_priv(dev);
3798 struct mii_ioctl_data *data = if_mii(ifr);
3800 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
/*
 * MII ioctl handler for xMII-mode chips.
 * NOTE(review): the switch (cmd) skeleton — the SIOCGMIIPHY/SIOCGMIIREG/
 * SIOCSMIIREG case labels, their return statements and the fallback
 * return — is elided from this extract; only the per-case payload lines
 * survive below.
 */
static int rtl_xmii_ioctl(struct rtl8169_private *tp,
			  struct mii_ioctl_data *data, int cmd)
	data->phy_id = 32; /* Internal PHY */

	data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);

	rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
/*
 * MII ioctl stub for TBI-mode chips (no MII to speak to).
 * NOTE(review): the body is elided from this extract; presumably it just
 * rejects the request (returning -EOPNOTSUPP) — confirm against the full
 * source.
 */
static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
3827 static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3829 if (tp->features & RTL_FEATURE_MSI) {
3830 pci_disable_msi(pdev);
3831 tp->features &= ~RTL_FEATURE_MSI;
3835 static void rtl_init_mdio_ops(struct rtl8169_private *tp)
3837 struct mdio_ops *ops = &tp->mdio_ops;
3839 switch (tp->mac_version) {
3840 case RTL_GIGA_MAC_VER_27:
3841 ops->write = r8168dp_1_mdio_write;
3842 ops->read = r8168dp_1_mdio_read;
3844 case RTL_GIGA_MAC_VER_28:
3845 case RTL_GIGA_MAC_VER_31:
3846 ops->write = r8168dp_2_mdio_write;
3847 ops->read = r8168dp_2_mdio_read;
3849 case RTL_GIGA_MAC_VER_40:
3850 case RTL_GIGA_MAC_VER_41:
3851 ops->write = r8168g_mdio_write;
3852 ops->read = r8168g_mdio_read;
3855 ops->write = r8169_mdio_write;
3856 ops->read = r8169_mdio_read;
3861 static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3863 void __iomem *ioaddr = tp->mmio_addr;
3865 switch (tp->mac_version) {
3866 case RTL_GIGA_MAC_VER_25:
3867 case RTL_GIGA_MAC_VER_26:
3868 case RTL_GIGA_MAC_VER_29:
3869 case RTL_GIGA_MAC_VER_30:
3870 case RTL_GIGA_MAC_VER_32:
3871 case RTL_GIGA_MAC_VER_33:
3872 case RTL_GIGA_MAC_VER_34:
3873 case RTL_GIGA_MAC_VER_37:
3874 case RTL_GIGA_MAC_VER_38:
3875 case RTL_GIGA_MAC_VER_39:
3876 case RTL_GIGA_MAC_VER_40:
3877 case RTL_GIGA_MAC_VER_41:
3878 RTL_W32(RxConfig, RTL_R32(RxConfig) |
3879 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
3886 static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
3888 if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
3891 rtl_writephy(tp, 0x1f, 0x0000);
3892 rtl_writephy(tp, MII_BMCR, 0x0000);
3894 rtl_wol_suspend_quirk(tp);
3899 static void r810x_phy_power_down(struct rtl8169_private *tp)
3901 rtl_writephy(tp, 0x1f, 0x0000);
3902 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
3905 static void r810x_phy_power_up(struct rtl8169_private *tp)
3907 rtl_writephy(tp, 0x1f, 0x0000);
3908 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
3911 static void r810x_pll_power_down(struct rtl8169_private *tp)
3913 void __iomem *ioaddr = tp->mmio_addr;
3915 if (rtl_wol_pll_power_down(tp))
3918 r810x_phy_power_down(tp);
3920 switch (tp->mac_version) {
3921 case RTL_GIGA_MAC_VER_07:
3922 case RTL_GIGA_MAC_VER_08:
3923 case RTL_GIGA_MAC_VER_09:
3924 case RTL_GIGA_MAC_VER_10:
3925 case RTL_GIGA_MAC_VER_13:
3926 case RTL_GIGA_MAC_VER_16:
3929 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
3934 static void r810x_pll_power_up(struct rtl8169_private *tp)
3936 void __iomem *ioaddr = tp->mmio_addr;
3938 r810x_phy_power_up(tp);
3940 switch (tp->mac_version) {
3941 case RTL_GIGA_MAC_VER_07:
3942 case RTL_GIGA_MAC_VER_08:
3943 case RTL_GIGA_MAC_VER_09:
3944 case RTL_GIGA_MAC_VER_10:
3945 case RTL_GIGA_MAC_VER_13:
3946 case RTL_GIGA_MAC_VER_16:
3949 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
3954 static void r8168_phy_power_up(struct rtl8169_private *tp)
3956 rtl_writephy(tp, 0x1f, 0x0000);
3957 switch (tp->mac_version) {
3958 case RTL_GIGA_MAC_VER_11:
3959 case RTL_GIGA_MAC_VER_12:
3960 case RTL_GIGA_MAC_VER_17:
3961 case RTL_GIGA_MAC_VER_18:
3962 case RTL_GIGA_MAC_VER_19:
3963 case RTL_GIGA_MAC_VER_20:
3964 case RTL_GIGA_MAC_VER_21:
3965 case RTL_GIGA_MAC_VER_22:
3966 case RTL_GIGA_MAC_VER_23:
3967 case RTL_GIGA_MAC_VER_24:
3968 case RTL_GIGA_MAC_VER_25:
3969 case RTL_GIGA_MAC_VER_26:
3970 case RTL_GIGA_MAC_VER_27:
3971 case RTL_GIGA_MAC_VER_28:
3972 case RTL_GIGA_MAC_VER_31:
3973 rtl_writephy(tp, 0x0e, 0x0000);
3978 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
3981 static void r8168_phy_power_down(struct rtl8169_private *tp)
3983 rtl_writephy(tp, 0x1f, 0x0000);
3984 switch (tp->mac_version) {
3985 case RTL_GIGA_MAC_VER_32:
3986 case RTL_GIGA_MAC_VER_33:
3987 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
3990 case RTL_GIGA_MAC_VER_11:
3991 case RTL_GIGA_MAC_VER_12:
3992 case RTL_GIGA_MAC_VER_17:
3993 case RTL_GIGA_MAC_VER_18:
3994 case RTL_GIGA_MAC_VER_19:
3995 case RTL_GIGA_MAC_VER_20:
3996 case RTL_GIGA_MAC_VER_21:
3997 case RTL_GIGA_MAC_VER_22:
3998 case RTL_GIGA_MAC_VER_23:
3999 case RTL_GIGA_MAC_VER_24:
4000 case RTL_GIGA_MAC_VER_25:
4001 case RTL_GIGA_MAC_VER_26:
4002 case RTL_GIGA_MAC_VER_27:
4003 case RTL_GIGA_MAC_VER_28:
4004 case RTL_GIGA_MAC_VER_31:
4005 rtl_writephy(tp, 0x0e, 0x0200);
4007 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
4012 static void r8168_pll_power_down(struct rtl8169_private *tp)
4014 void __iomem *ioaddr = tp->mmio_addr;
4016 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
4017 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4018 tp->mac_version == RTL_GIGA_MAC_VER_31) &&
4019 r8168dp_check_dash(tp)) {
4023 if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
4024 tp->mac_version == RTL_GIGA_MAC_VER_24) &&
4025 (RTL_R16(CPlusCmd) & ASF)) {
4029 if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
4030 tp->mac_version == RTL_GIGA_MAC_VER_33)
4031 rtl_ephy_write(tp, 0x19, 0xff64);
4033 if (rtl_wol_pll_power_down(tp))
4036 r8168_phy_power_down(tp);
4038 switch (tp->mac_version) {
4039 case RTL_GIGA_MAC_VER_25:
4040 case RTL_GIGA_MAC_VER_26:
4041 case RTL_GIGA_MAC_VER_27:
4042 case RTL_GIGA_MAC_VER_28:
4043 case RTL_GIGA_MAC_VER_31:
4044 case RTL_GIGA_MAC_VER_32:
4045 case RTL_GIGA_MAC_VER_33:
4046 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
4051 static void r8168_pll_power_up(struct rtl8169_private *tp)
4053 void __iomem *ioaddr = tp->mmio_addr;
4055 switch (tp->mac_version) {
4056 case RTL_GIGA_MAC_VER_25:
4057 case RTL_GIGA_MAC_VER_26:
4058 case RTL_GIGA_MAC_VER_27:
4059 case RTL_GIGA_MAC_VER_28:
4060 case RTL_GIGA_MAC_VER_31:
4061 case RTL_GIGA_MAC_VER_32:
4062 case RTL_GIGA_MAC_VER_33:
4063 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
4067 r8168_phy_power_up(tp);
/*
 * Run a chip-specific operation callback on @tp.
 * NOTE(review): the body is elided from this extract.  Callers install
 * possibly-NULL callbacks (see rtl_init_jumbo_ops setting ops->disable
 * to NULL), so presumably the body NULL-checks @op before invoking
 * op(tp) — confirm against the full source.
 */
static void rtl_generic_op(struct rtl8169_private *tp,
			   void (*op)(struct rtl8169_private *))
4077 static void rtl_pll_power_down(struct rtl8169_private *tp)
4079 rtl_generic_op(tp, tp->pll_power_ops.down);
4082 static void rtl_pll_power_up(struct rtl8169_private *tp)
4084 rtl_generic_op(tp, tp->pll_power_ops.up);
/*
 * Install the PLL power up/down hooks for the chip generation: 810x-class
 * chips get the r810x_* pair, 8168-class chips the r8168_* pair.
 * NOTE(review): the "break;" lines and the default arm (which in the full
 * source leaves both hooks unset for chips with no PLL control) are
 * elided from this extract.
 */
static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
	struct pll_power_ops *ops = &tp->pll_power_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_39:
		ops->down = r810x_pll_power_down;
		ops->up = r810x_pll_power_up;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		ops->down = r8168_pll_power_down;
		ops->up = r8168_pll_power_up;
	/* NOTE(review): default arm and closing braces elided. */
4139 static void rtl_init_rxcfg(struct rtl8169_private *tp)
4141 void __iomem *ioaddr = tp->mmio_addr;
4143 switch (tp->mac_version) {
4144 case RTL_GIGA_MAC_VER_01:
4145 case RTL_GIGA_MAC_VER_02:
4146 case RTL_GIGA_MAC_VER_03:
4147 case RTL_GIGA_MAC_VER_04:
4148 case RTL_GIGA_MAC_VER_05:
4149 case RTL_GIGA_MAC_VER_06:
4150 case RTL_GIGA_MAC_VER_10:
4151 case RTL_GIGA_MAC_VER_11:
4152 case RTL_GIGA_MAC_VER_12:
4153 case RTL_GIGA_MAC_VER_13:
4154 case RTL_GIGA_MAC_VER_14:
4155 case RTL_GIGA_MAC_VER_15:
4156 case RTL_GIGA_MAC_VER_16:
4157 case RTL_GIGA_MAC_VER_17:
4158 RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
4160 case RTL_GIGA_MAC_VER_18:
4161 case RTL_GIGA_MAC_VER_19:
4162 case RTL_GIGA_MAC_VER_20:
4163 case RTL_GIGA_MAC_VER_21:
4164 case RTL_GIGA_MAC_VER_22:
4165 case RTL_GIGA_MAC_VER_23:
4166 case RTL_GIGA_MAC_VER_24:
4167 case RTL_GIGA_MAC_VER_34:
4168 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
4171 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
4176 static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
4178 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
4181 static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
4183 void __iomem *ioaddr = tp->mmio_addr;
4185 RTL_W8(Cfg9346, Cfg9346_Unlock);
4186 rtl_generic_op(tp, tp->jumbo_ops.enable);
4187 RTL_W8(Cfg9346, Cfg9346_Lock);
4190 static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
4192 void __iomem *ioaddr = tp->mmio_addr;
4194 RTL_W8(Cfg9346, Cfg9346_Unlock);
4195 rtl_generic_op(tp, tp->jumbo_ops.disable);
4196 RTL_W8(Cfg9346, Cfg9346_Lock);
4199 static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
4201 void __iomem *ioaddr = tp->mmio_addr;
4203 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4204 RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
4205 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
4208 static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
4210 void __iomem *ioaddr = tp->mmio_addr;
4212 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4213 RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
4214 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
4217 static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
4219 void __iomem *ioaddr = tp->mmio_addr;
4221 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4224 static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
4226 void __iomem *ioaddr = tp->mmio_addr;
4228 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4231 static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
4233 void __iomem *ioaddr = tp->mmio_addr;
4235 RTL_W8(MaxTxPacketSize, 0x3f);
4236 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4237 RTL_W8(Config4, RTL_R8(Config4) | 0x01);
4238 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
4241 static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
4243 void __iomem *ioaddr = tp->mmio_addr;
4245 RTL_W8(MaxTxPacketSize, 0x0c);
4246 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4247 RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
4248 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
4251 static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
4253 rtl_tx_performance_tweak(tp->pci_dev,
4254 (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
4257 static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
4259 rtl_tx_performance_tweak(tp->pci_dev,
4260 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
4263 static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
4265 void __iomem *ioaddr = tp->mmio_addr;
4267 r8168b_0_hw_jumbo_enable(tp);
4269 RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
4272 static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
4274 void __iomem *ioaddr = tp->mmio_addr;
4276 r8168b_0_hw_jumbo_disable(tp);
4278 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
/*
 * Install the jumbo-frame enable/disable hooks for the chip generation.
 * NOTE(review): the "break;" lines and the tail of the final arm (in the
 * full source it also clears ops->enable and serves as the default) are
 * elided from this extract.
 */
static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
	struct jumbo_ops *ops = &tp->jumbo_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		ops->disable = r8168b_0_hw_jumbo_disable;
		ops->enable = r8168b_0_hw_jumbo_enable;
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		ops->disable = r8168b_1_hw_jumbo_disable;
		ops->enable = r8168b_1_hw_jumbo_enable;
	case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
		ops->disable = r8168c_hw_jumbo_disable;
		ops->enable = r8168c_hw_jumbo_enable;
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
		ops->disable = r8168dp_hw_jumbo_disable;
		ops->enable = r8168dp_hw_jumbo_enable;
	case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
		ops->disable = r8168e_hw_jumbo_disable;
		ops->enable = r8168e_hw_jumbo_enable;

	/*
	 * No action needed for jumbo frames with 8169.
	 * No jumbo for 810x at all.
	 */
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		ops->disable = NULL;
	/* NOTE(review): ops->enable = NULL, break and braces elided. */
4333 DECLARE_RTL_COND(rtl_chipcmd_cond)
4335 void __iomem *ioaddr = tp->mmio_addr;
4337 return RTL_R8(ChipCmd) & CmdReset;
4340 static void rtl_hw_reset(struct rtl8169_private *tp)
4342 void __iomem *ioaddr = tp->mmio_addr;
4344 RTL_W8(ChipCmd, CmdReset);
4346 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
/*
 * Look up, fetch and validate the firmware patch for this chip, storing
 * the result in tp->rtl_fw.
 * NOTE(review): this function's goto-based cleanup chain is largely
 * elided from this extract — the local declarations, the conditional
 * gotos after each failing step, the kfree()/label lines and the final
 * tp->rtl_fw = NULL fallback are missing.  The fragments below are not
 * the complete control flow.
 */
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
	struct rtl_fw *rtl_fw;

	name = rtl_lookup_firmware_name(tp);
		goto out_no_firmware;

	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);

	rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);

	rc = rtl_check_firmware(tp, rtl_fw);
		goto err_release_firmware;

	tp->rtl_fw = rtl_fw;

err_release_firmware:
	release_firmware(rtl_fw->fw);

	netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
4387 static void rtl_request_firmware(struct rtl8169_private *tp)
4389 if (IS_ERR(tp->rtl_fw))
4390 rtl_request_uncached_firmware(tp);
4393 static void rtl_rx_close(struct rtl8169_private *tp)
4395 void __iomem *ioaddr = tp->mmio_addr;
4397 RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
4400 DECLARE_RTL_COND(rtl_npq_cond)
4402 void __iomem *ioaddr = tp->mmio_addr;
4404 return RTL_R8(TxPoll) & NPQ;
4407 DECLARE_RTL_COND(rtl_txcfg_empty_cond)
4409 void __iomem *ioaddr = tp->mmio_addr;
4411 return RTL_R32(TxConfig) & TXCFG_EMPTY;
/*
 * Quiesce and reset the chip: mask interrupts, drain TX per chip family,
 * then soft-reset.
 * NOTE(review): several lines are elided from this extract — notably the
 * rtl_rx_close() call after the irq mask, the final "else" branch
 * (StopReq + fixed delay) and the closing rtl_hw_reset() call, so the
 * if/else chain below is incomplete as shown.
 */
static void rtl8169_hw_reset(struct rtl8169_private *tp)
	void __iomem *ioaddr = tp->mmio_addr;

	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(tp);

	/* DP parts: wait for the NPQ poll bit to clear. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
	/* Newer parts: request a stop and wait for the TX FIFO to drain. */
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_37 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_40 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_41 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_38) {
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
		/* NOTE(review): trailing else branch elided below. */
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4444 static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
4446 void __iomem *ioaddr = tp->mmio_addr;
4448 /* Set DMA burst size and Interframe Gap Time */
4449 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
4450 (InterFrameGap << TxInterFrameGapShift));
/*
 * Bring the hardware up, then enable all interrupts.
 * NOTE(review): a line is elided between netdev_priv() and
 * rtl_irq_enable_all() — presumably the tp->hw_start(dev) call that
 * actually starts the chip; as shown nothing does.  Confirm against the
 * full source.
 */
static void rtl_hw_start(struct net_device *dev)
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_irq_enable_all(tp);
4462 static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
4463 void __iomem *ioaddr)
4466 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
4467 * register to be written before TxDescAddrLow to work.
4468 * Switching from MMIO to I/O access fixes the issue as well.
4470 RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
4471 RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
4472 RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
4473 RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
4476 static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
4480 cmd = RTL_R16(CPlusCmd);
4481 RTL_W16(CPlusCmd, cmd);
4485 static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
4487 /* Low hurts. Let's disable the filtering. */
4488 RTL_W16(RxMaxSize, rx_buf_sz + 1);
/*
 * Program the undocumented register at offset 0x7c with a value chosen
 * from a (mac_version, PCI clock) lookup table — 8110SCd/SCe only.
 * NOTE(review): the struct member declarations, the loop's "break;" and
 * closing braces are elided from this extract.
 */
static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
	static const struct rtl_cfg2_info {
	/* NOTE(review): members (mac_version, clk, val) elided. */
		{ RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
		{ RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
		{ RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
		{ RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
	const struct rtl_cfg2_info *p = cfg2_info;

	/* The PCI bus clock selects between the 33 MHz and 66 MHz rows. */
	clk = RTL_R8(Config2) & PCI_Clock_66MHz;
	for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
		if ((p->mac_version == mac_version) && (p->clk == clk)) {
			RTL_W32(0x7c, p->val);
/*
 * Sync the hardware RX filter (RxConfig accept bits + 64-bit multicast
 * hash in MAR0/MAR4) with the net_device flags and multicast list.
 */
4516 static void rtl_set_rx_mode(struct net_device *dev)
4518 struct rtl8169_private *tp = netdev_priv(dev);
4519 void __iomem *ioaddr = tp->mmio_addr;
4520 u32 mc_filter[2]; /* Multicast hash filter */
/* Promiscuous: accept everything, open the hash filter fully. */
4524 if (dev->flags & IFF_PROMISC) {
4525 /* Unconditionally log net taps. */
4526 netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
4528 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
4530 mc_filter[1] = mc_filter[0] = 0xffffffff;
4531 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
4532 (dev->flags & IFF_ALLMULTI)) {
4533 /* Too many to filter perfectly -- accept all multicasts. */
4534 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
4535 mc_filter[1] = mc_filter[0] = 0xffffffff;
4537 struct netdev_hw_addr *ha;
4539 rx_mode = AcceptBroadcast | AcceptMyPhys;
4540 mc_filter[1] = mc_filter[0] = 0;
/* Hash each multicast address: top 6 bits of the CRC pick the filter bit. */
4541 netdev_for_each_mc_addr(ha, dev) {
4542 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
4543 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
4544 rx_mode |= AcceptMulticast;
4548 if (dev->features & NETIF_F_RXALL)
4549 rx_mode |= (AcceptErr | AcceptRunt);
4551 tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
/* Chips newer than VER_06 expect the two filter words swapped/byte-reversed. */
4553 if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
4554 u32 data = mc_filter[0];
4556 mc_filter[0] = swab32(mc_filter[1]);
4557 mc_filter[1] = swab32(data);
/* VER_35 workaround: force the hash filter wide open. */
4560 if (tp->mac_version == RTL_GIGA_MAC_VER_35)
4561 mc_filter[1] = mc_filter[0] = 0xffffffff;
4563 RTL_W32(MAR0 + 4, mc_filter[1]);
4564 RTL_W32(MAR0 + 0, mc_filter[0]);
4566 RTL_W32(RxConfig, tmp);
/*
 * Hardware bring-up for the original 8169/8110 family (VER_01..VER_06).
 * The order of register writes follows the vendor init sequence; in
 * particular Tx/Rx enable vs. config programming differs for VER_01..04.
 */
4569 static void rtl_hw_start_8169(struct net_device *dev)
4571 struct rtl8169_private *tp = netdev_priv(dev);
4572 void __iomem *ioaddr = tp->mmio_addr;
4573 struct pci_dev *pdev = tp->pci_dev;
/* VER_05 needs PCI multiple read/write and an 8-byte cache line size. */
4575 if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
4576 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
4577 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4580 RTL_W8(Cfg9346, Cfg9346_Unlock);
4581 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4582 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4583 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4584 tp->mac_version == RTL_GIGA_MAC_VER_04)
4585 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4589 RTL_W8(EarlyTxThres, NoEarlyTx);
4591 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
4593 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4594 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4595 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4596 tp->mac_version == RTL_GIGA_MAC_VER_04)
4597 rtl_set_rx_tx_config_registers(tp);
4599 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
4601 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4602 tp->mac_version == RTL_GIGA_MAC_VER_03) {
4603 dprintk("Set MAC Reg C+CR Offset 0xE0. "
4604 "Bit-3 and bit-14 MUST be 1\n");
4605 tp->cp_cmd |= (1 << 14);
4608 RTL_W16(CPlusCmd, tp->cp_cmd);
4610 rtl8169_set_magic_reg(ioaddr, tp->mac_version);
4613 * Undocumented corner. Supposedly:
4614 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
4616 RTL_W16(IntrMitigate, 0x0000);
4618 rtl_set_rx_tx_desc_registers(tp, ioaddr);
/* Later chips enable Tx/Rx only after the descriptor rings are set up. */
4620 if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
4621 tp->mac_version != RTL_GIGA_MAC_VER_02 &&
4622 tp->mac_version != RTL_GIGA_MAC_VER_03 &&
4623 tp->mac_version != RTL_GIGA_MAC_VER_04) {
4624 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4625 rtl_set_rx_tx_config_registers(tp);
4628 RTL_W8(Cfg9346, Cfg9346_Lock);
4630 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
4633 RTL_W32(RxMissed, 0);
4635 rtl_set_rx_mode(dev);
4637 /* no early-rx interrupts */
4638 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
/* CSI write dispatcher: no-op when the chip has no CSI write op installed. */
4641 static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4643 if (tp->csi_ops.write)
4644 tp->csi_ops.write(tp, addr, value);
/* CSI read dispatcher: returns ~0 (all-ones) when no CSI read op exists. */
4647 static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4649 return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
/* RMW CSI register 0x070c: keep the low 24 bits, OR in the caller's bits. */
4652 static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
4656 csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
4657 rtl_csi_write(tp, 0x070c, csi | bits);
/* Convenience wrapper: set the 0x17 pattern in CSI 0x070c's top byte. */
4660 static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
4662 rtl_csi_access_enable(tp, 0x17000000);
/* Convenience wrapper: set the 0x27 pattern in CSI 0x070c's top byte. */
4665 static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
4667 rtl_csi_access_enable(tp, 0x27000000);
/* Poll condition: true while the CSIAR busy/complete flag is set. */
4670 DECLARE_RTL_COND(rtl_csiar_cond)
4672 void __iomem *ioaddr = tp->mmio_addr;
4674 return RTL_R32(CSIAR) & CSIAR_FLAG;
/*
 * Standard CSI write: load CSIDR, kick CSIAR with the write command,
 * then poll (10 us x 100) until the flag clears.
 */
4677 static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
4679 void __iomem *ioaddr = tp->mmio_addr;
4681 RTL_W32(CSIDR, value);
4682 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4683 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4685 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
/*
 * Standard CSI read: kick CSIAR, poll until the flag rises, then fetch
 * CSIDR. Returns ~0 on timeout.
 */
4688 static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
4690 void __iomem *ioaddr = tp->mmio_addr;
4692 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
4693 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4695 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
4696 RTL_R32(CSIDR) : ~0;
/* 8402-flavoured CSI write: like r8169_csi_write but with extra CSIAR bits. */
4699 static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
4701 void __iomem *ioaddr = tp->mmio_addr;
4703 RTL_W32(CSIDR, value);
4704 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4705 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
4708 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
/* 8402-flavoured CSI read: addresses the NIC function via CSIAR_FUNC_NIC. */
4711 static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
4713 void __iomem *ioaddr = tp->mmio_addr;
4715 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
4716 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4718 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
4719 RTL_R32(CSIDR) : ~0;
/*
 * Select the CSI accessors for this chip: none for pre-PCIe parts
 * (VER_01..17), the 8402 variants for VER_37/38, the standard r8169
 * variants for everything else.
 */
4722 static void rtl_init_csi_ops(struct rtl8169_private *tp)
4724 struct csi_ops *ops = &tp->csi_ops;
4726 switch (tp->mac_version) {
4727 case RTL_GIGA_MAC_VER_01:
4728 case RTL_GIGA_MAC_VER_02:
4729 case RTL_GIGA_MAC_VER_03:
4730 case RTL_GIGA_MAC_VER_04:
4731 case RTL_GIGA_MAC_VER_05:
4732 case RTL_GIGA_MAC_VER_06:
4733 case RTL_GIGA_MAC_VER_10:
4734 case RTL_GIGA_MAC_VER_11:
4735 case RTL_GIGA_MAC_VER_12:
4736 case RTL_GIGA_MAC_VER_13:
4737 case RTL_GIGA_MAC_VER_14:
4738 case RTL_GIGA_MAC_VER_15:
4739 case RTL_GIGA_MAC_VER_16:
4740 case RTL_GIGA_MAC_VER_17:
4745 case RTL_GIGA_MAC_VER_37:
4746 case RTL_GIGA_MAC_VER_38:
4747 ops->write = r8402_csi_write;
4748 ops->read = r8402_csi_read;
4752 ops->write = r8169_csi_write;
4753 ops->read = r8169_csi_read;
4759 unsigned int offset;
/*
 * Apply a table of EPHY fixups: for each entry, clear e->mask in the
 * current register value and OR in e->bits, then write it back.
 */
4764 static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
4770 w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4771 rtl_ephy_write(tp, e->offset, w);
/* Clear the PCIe CLKREQ enable bit in the device's Link Control register. */
4776 static void rtl_disable_clock_request(struct pci_dev *pdev)
4778 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
4779 PCI_EXP_LNKCTL_CLKREQ_EN);
/* Set the PCIe CLKREQ enable bit in the device's Link Control register. */
4782 static void rtl_enable_clock_request(struct pci_dev *pdev)
4784 pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
4785 PCI_EXP_LNKCTL_CLKREQ_EN);
4788 #define R8168_CPCMD_QUIRK_MASK (\
/* Chip-specific init for 8168B (VER_11); also the base for 8168bef. */
4799 static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
4801 void __iomem *ioaddr = tp->mmio_addr;
4802 struct pci_dev *pdev = tp->pci_dev;
4804 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4806 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4808 rtl_tx_performance_tweak(pdev,
4809 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
/* Init for 8168B-E/F (VER_12/VER_17): 8168bb sequence plus TX size/Config4. */
4812 static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
4814 void __iomem *ioaddr = tp->mmio_addr;
4816 rtl_hw_start_8168bb(tp);
4818 RTL_W8(MaxTxPacketSize, TxPacketMax);
4820 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
/* Common tail of the 8168C/CP init sequences. */
4823 static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
4825 void __iomem *ioaddr = tp->mmio_addr;
4826 struct pci_dev *pdev = tp->pci_dev;
4828 RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
4830 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4832 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4834 rtl_disable_clock_request(pdev);
4836 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* Init for 8168CP rev 1 (VER_18): CSI enable, EPHY fixups, common tail. */
4839 static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
4841 static const struct ephy_info e_info_8168cp[] = {
4842 { 0x01, 0, 0x0001 },
4843 { 0x02, 0x0800, 0x1000 },
4844 { 0x03, 0, 0x0042 },
4845 { 0x06, 0x0080, 0x0000 },
4849 rtl_csi_access_enable_2(tp);
4851 rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
4853 __rtl_hw_start_8168cp(tp);
/* Init for 8168CP rev 2 (VER_23): no EPHY fixups, no clock-request change. */
4856 static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
4858 void __iomem *ioaddr = tp->mmio_addr;
4859 struct pci_dev *pdev = tp->pci_dev;
4861 rtl_csi_access_enable_2(tp);
4863 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4865 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4867 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* Init for 8168CP rev 3 (VER_24): adds a DBG_REG poke and TX packet size. */
4870 static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
4872 void __iomem *ioaddr = tp->mmio_addr;
4873 struct pci_dev *pdev = tp->pci_dev;
4875 rtl_csi_access_enable_2(tp);
4877 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4880 RTL_W8(DBG_REG, 0x20);
4882 RTL_W8(MaxTxPacketSize, TxPacketMax);
4884 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4886 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* Init for 8168C rev 1 (VER_19): NAK fixes in DBG_REG plus EPHY fixups. */
4889 static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
4891 void __iomem *ioaddr = tp->mmio_addr;
4892 static const struct ephy_info e_info_8168c_1[] = {
4893 { 0x02, 0x0800, 0x1000 },
4894 { 0x03, 0, 0x0002 },
4895 { 0x06, 0x0080, 0x0000 }
4898 rtl_csi_access_enable_2(tp);
4900 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
4902 rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
4904 __rtl_hw_start_8168cp(tp);
/* Init for 8168C rev 2 (VER_20); also reused verbatim by rev 3. */
4907 static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
4909 static const struct ephy_info e_info_8168c_2[] = {
4910 { 0x01, 0, 0x0001 },
4911 { 0x03, 0x0400, 0x0220 }
4914 rtl_csi_access_enable_2(tp);
4916 rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
4918 __rtl_hw_start_8168cp(tp);
/* Init for 8168C rev 3 (VER_21): identical to rev 2. */
4921 static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
4923 rtl_hw_start_8168c_2(tp);
/* Init for 8168C rev 4 (VER_22): CSI enable plus the common tail only. */
4926 static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
4928 rtl_csi_access_enable_2(tp);
4930 __rtl_hw_start_8168cp(tp);
/* Init for 8168D (VER_25/26/27). */
4933 static void rtl_hw_start_8168d(struct rtl8169_private *tp)
4935 void __iomem *ioaddr = tp->mmio_addr;
4936 struct pci_dev *pdev = tp->pci_dev;
4938 rtl_csi_access_enable_2(tp);
4940 rtl_disable_clock_request(pdev);
4942 RTL_W8(MaxTxPacketSize, TxPacketMax);
4944 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4946 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* Init for 8168DP (VER_31). */
4949 static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
4951 void __iomem *ioaddr = tp->mmio_addr;
4952 struct pci_dev *pdev = tp->pci_dev;
4954 rtl_csi_access_enable_1(tp);
4956 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4958 RTL_W8(MaxTxPacketSize, TxPacketMax);
4960 rtl_disable_clock_request(pdev);
/*
 * Init for the 8168DP variant handled as VER_28 (see rtl_hw_start_8168).
 * Applies a small table of EPHY read-modify-write fixups, keeping only
 * the bits in e->mask and OR-ing in e->bits.
 */
4963 static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4965 void __iomem *ioaddr = tp->mmio_addr;
4966 struct pci_dev *pdev = tp->pci_dev;
4967 static const struct ephy_info e_info_8168d_4[] = {
4969 { 0x19, 0x20, 0x50 },
4974 rtl_csi_access_enable_1(tp);
4976 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4978 RTL_W8(MaxTxPacketSize, TxPacketMax);
4980 for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
4981 const struct ephy_info *e = e_info_8168d_4 + i;
4984 w = rtl_ephy_read(tp, e->offset);
/* Fix: write the modified value back to the register that was read
 * (e->offset). The old code wrote every entry to EPHY reg 0x03,
 * clobbering it and leaving the intended registers untouched. */
4985 rtl_ephy_write(tp, e->offset, (w & e->mask) | e->bits);
4988 rtl_enable_clock_request(pdev);
/* Init for 8168E rev 1 (VER_32/VER_33): EPHY fixups plus TX FIFO reset. */
4991 static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
4993 void __iomem *ioaddr = tp->mmio_addr;
4994 struct pci_dev *pdev = tp->pci_dev;
4995 static const struct ephy_info e_info_8168e_1[] = {
4996 { 0x00, 0x0200, 0x0100 },
4997 { 0x00, 0x0000, 0x0004 },
4998 { 0x06, 0x0002, 0x0001 },
4999 { 0x06, 0x0000, 0x0030 },
5000 { 0x07, 0x0000, 0x2000 },
5001 { 0x00, 0x0000, 0x0020 },
5002 { 0x03, 0x5800, 0x2000 },
5003 { 0x03, 0x0000, 0x0001 },
5004 { 0x01, 0x0800, 0x1000 },
5005 { 0x07, 0x0000, 0x4000 },
5006 { 0x1e, 0x0000, 0x2000 },
5007 { 0x19, 0xffff, 0xfe6c },
5008 { 0x0a, 0x0000, 0x0040 }
5011 rtl_csi_access_enable_2(tp);
5013 rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
5015 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5017 RTL_W8(MaxTxPacketSize, TxPacketMax);
5019 rtl_disable_clock_request(pdev);
5021 /* Reset tx FIFO pointer */
5022 RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
5023 RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
5025 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
/*
 * Init for 8168E rev 2 (VER_34): EPHY fixups, a block of ERI (extended
 * register interface) writes, early-TX size, EEE LED tuning and
 * power-management bits (ASPM/ClkReq).
 */
5028 static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
5030 void __iomem *ioaddr = tp->mmio_addr;
5031 struct pci_dev *pdev = tp->pci_dev;
5032 static const struct ephy_info e_info_8168e_2[] = {
5033 { 0x09, 0x0000, 0x0080 },
5034 { 0x19, 0x0000, 0x0224 }
5037 rtl_csi_access_enable_1(tp);
5039 rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
5041 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5043 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5044 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5045 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
5046 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5047 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
5048 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
5049 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5050 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
5052 RTL_W8(MaxTxPacketSize, EarlySize);
5054 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5055 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5057 /* Adjust EEE LED frequency */
5058 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5060 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5061 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
5062 RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
5063 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
/* Common init for the 8168F family; completed by 8168f_1 / 8411 wrappers. */
5066 static void rtl_hw_start_8168f(struct rtl8169_private *tp)
5068 void __iomem *ioaddr = tp->mmio_addr;
5069 struct pci_dev *pdev = tp->pci_dev;
5071 rtl_csi_access_enable_2(tp);
5073 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5075 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5076 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5077 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
5078 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
/* Pulse ERI 0xdc bit 0 low then high. */
5079 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5080 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5081 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5082 rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5083 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
5084 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
5086 RTL_W8(MaxTxPacketSize, EarlySize);
5088 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5089 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5090 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5091 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN | FORCE_CLK);
5092 RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
5093 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
/* Init for 8168F (VER_35/VER_36): common 8168f sequence plus EPHY fixups. */
5096 static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
5098 void __iomem *ioaddr = tp->mmio_addr;
5099 static const struct ephy_info e_info_8168f_1[] = {
5100 { 0x06, 0x00c0, 0x0020 },
5101 { 0x08, 0x0001, 0x0002 },
5102 { 0x09, 0x0000, 0x0080 },
5103 { 0x19, 0x0000, 0x0224 }
5106 rtl_hw_start_8168f(tp);
5108 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5110 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
5112 /* Adjust EEE LED frequency */
5113 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
/*
 * Init for 8411 (VER_38): common 8168f sequence plus 8411-specific EPHY
 * fixups and an ERI 0x0d4 tweak.
 * Renamed the local table from e_info_8168f_1 to e_info_8411: the old
 * name was copy-pasted from rtl_hw_start_8168f_1 but holds different
 * entries, which was misleading. Behavior is unchanged.
 */
5116 static void rtl_hw_start_8411(struct rtl8169_private *tp)
5118 static const struct ephy_info e_info_8411[] = {
5119 { 0x06, 0x00c0, 0x0020 },
5120 { 0x0f, 0xffff, 0x5200 },
5121 { 0x1e, 0x0000, 0x4000 },
5122 { 0x19, 0x0000, 0x0224 }
5125 rtl_hw_start_8168f(tp);
5127 rtl_ephy_init(tp, e_info_8411, ARRAY_SIZE(e_info_8411));
5129 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
/* Init for 8168G rev 1 (VER_40/VER_41). */
5132 static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5134 void __iomem *ioaddr = tp->mmio_addr;
5135 struct pci_dev *pdev = tp->pci_dev;
5137 rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
5138 rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
5139 rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
5140 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5142 rtl_csi_access_enable_1(tp);
5144 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
/* Pulse ERI 0xdc bit 0 low then high. */
5146 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5147 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5149 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5150 RTL_W32(MISC, (RTL_R32(MISC) | FORCE_CLK) & ~RXDV_GATED_EN);
5151 RTL_W8(MaxTxPacketSize, EarlySize);
5152 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5153 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5155 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5156 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5158 /* Adjust EEE LED frequency */
5159 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5161 rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
/*
 * Common bring-up for the whole 8168 family: generic register setup,
 * then dispatch to the chip-revision-specific init routine.
 */
5164 static void rtl_hw_start_8168(struct net_device *dev)
5166 struct rtl8169_private *tp = netdev_priv(dev);
5167 void __iomem *ioaddr = tp->mmio_addr;
5169 RTL_W8(Cfg9346, Cfg9346_Unlock);
5171 RTL_W8(MaxTxPacketSize, TxPacketMax);
5173 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
5175 tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
5177 RTL_W16(CPlusCmd, tp->cp_cmd);
5179 RTL_W16(IntrMitigate, 0x5151);
5181 /* Work around for RxFIFO overflow. */
5182 if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
5183 tp->event_slow |= RxFIFOOver | PCSTimeout;
5184 tp->event_slow &= ~RxOverflow;
5187 rtl_set_rx_tx_desc_registers(tp, ioaddr);
5189 rtl_set_rx_mode(dev);
5191 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
5192 (InterFrameGap << TxInterFrameGapShift));
/* Per-revision init. */
5196 switch (tp->mac_version) {
5197 case RTL_GIGA_MAC_VER_11:
5198 rtl_hw_start_8168bb(tp);
5201 case RTL_GIGA_MAC_VER_12:
5202 case RTL_GIGA_MAC_VER_17:
5203 rtl_hw_start_8168bef(tp);
5206 case RTL_GIGA_MAC_VER_18:
5207 rtl_hw_start_8168cp_1(tp);
5210 case RTL_GIGA_MAC_VER_19:
5211 rtl_hw_start_8168c_1(tp);
5214 case RTL_GIGA_MAC_VER_20:
5215 rtl_hw_start_8168c_2(tp);
5218 case RTL_GIGA_MAC_VER_21:
5219 rtl_hw_start_8168c_3(tp);
5222 case RTL_GIGA_MAC_VER_22:
5223 rtl_hw_start_8168c_4(tp);
5226 case RTL_GIGA_MAC_VER_23:
5227 rtl_hw_start_8168cp_2(tp);
5230 case RTL_GIGA_MAC_VER_24:
5231 rtl_hw_start_8168cp_3(tp);
5234 case RTL_GIGA_MAC_VER_25:
5235 case RTL_GIGA_MAC_VER_26:
5236 case RTL_GIGA_MAC_VER_27:
5237 rtl_hw_start_8168d(tp);
5240 case RTL_GIGA_MAC_VER_28:
5241 rtl_hw_start_8168d_4(tp);
5244 case RTL_GIGA_MAC_VER_31:
5245 rtl_hw_start_8168dp(tp);
5248 case RTL_GIGA_MAC_VER_32:
5249 case RTL_GIGA_MAC_VER_33:
5250 rtl_hw_start_8168e_1(tp);
5252 case RTL_GIGA_MAC_VER_34:
5253 rtl_hw_start_8168e_2(tp);
5256 case RTL_GIGA_MAC_VER_35:
5257 case RTL_GIGA_MAC_VER_36:
5258 rtl_hw_start_8168f_1(tp);
5261 case RTL_GIGA_MAC_VER_38:
5262 rtl_hw_start_8411(tp);
5265 case RTL_GIGA_MAC_VER_40:
5266 case RTL_GIGA_MAC_VER_41:
5267 rtl_hw_start_8168g_1(tp);
5271 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
5272 dev->name, tp->mac_version);
5276 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5278 RTL_W8(Cfg9346, Cfg9346_Lock);
5280 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
5283 #define R810X_CPCMD_QUIRK_MASK (\
/*
 * Init for 8102E rev 1 (VER_07): DBG_REG NAK fix, Config1/3 setup,
 * LED bit juggling and EPHY fixups.
 */
5294 static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
5296 void __iomem *ioaddr = tp->mmio_addr;
5297 struct pci_dev *pdev = tp->pci_dev;
5298 static const struct ephy_info e_info_8102e_1[] = {
5299 { 0x01, 0, 0x6e65 },
5300 { 0x02, 0, 0x091f },
5301 { 0x03, 0, 0xc2f9 },
5302 { 0x06, 0, 0xafb5 },
5303 { 0x07, 0, 0x0e00 },
5304 { 0x19, 0, 0xec80 },
5305 { 0x01, 0, 0x2e65 },
5310 rtl_csi_access_enable_2(tp);
5312 RTL_W8(DBG_REG, FIX_NAK_1);
5314 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5317 LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
5318 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
/* If both LED bits came up set, drop LEDS0. */
5320 cfg1 = RTL_R8(Config1);
5321 if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
5322 RTL_W8(Config1, cfg1 & ~LEDS0);
5324 rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
/* Init for 8102E rev 2 (VER_09); also the base for rev 3. */
5327 static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
5329 void __iomem *ioaddr = tp->mmio_addr;
5330 struct pci_dev *pdev = tp->pci_dev;
5332 rtl_csi_access_enable_2(tp);
5334 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5336 RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
5337 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
/* Init for 8102E rev 3 (VER_08): rev 2 sequence plus one EPHY write. */
5340 static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
5342 rtl_hw_start_8102e_2(tp);
5344 rtl_ephy_write(tp, 0x03, 0xc2f9);
/* Init for 8105E rev 1 (VER_29); also the base for rev 2. */
5347 static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
5349 void __iomem *ioaddr = tp->mmio_addr;
5350 static const struct ephy_info e_info_8105e_1[] = {
5351 { 0x07, 0, 0x4000 },
5352 { 0x19, 0, 0x0200 },
5353 { 0x19, 0, 0x0020 },
5354 { 0x1e, 0, 0x2000 },
5355 { 0x03, 0, 0x0001 },
5356 { 0x19, 0, 0x0100 },
5357 { 0x19, 0, 0x0004 },
5361 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5362 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800)
5364 /* Disable Early Tally Counter */
5365 RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
5367 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5368 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5369 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5370 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5371 RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
5373 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
/* Init for 8105E rev 2 (VER_30): rev 1 sequence plus one EPHY bit set. */
5376 static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
5378 rtl_hw_start_8105e_1(tp);
5379 rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
/* Init for 8402 (VER_37). */
5382 static void rtl_hw_start_8402(struct rtl8169_private *tp)
5384 void __iomem *ioaddr = tp->mmio_addr;
5385 static const struct ephy_info e_info_8402[] = {
5386 { 0x19, 0xffff, 0xff64 },
5390 rtl_csi_access_enable_2(tp);
5392 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5393 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5395 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5396 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5397 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5398 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5399 RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
5401 rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
5403 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
5405 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
5406 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
/* Pulse ERI 0xdc bit 0 low then high. */
5407 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5408 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5409 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5410 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5411 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
/* Init for 8106E (VER_39). */
5414 static void rtl_hw_start_8106(struct rtl8169_private *tp)
5416 void __iomem *ioaddr = tp->mmio_addr;
5418 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5419 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5422 (RTL_R32(MISC) | DISABLE_LAN_EN | FORCE_CLK) & ~EARLY_TALLY_EN);
5423 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5424 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5425 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5426 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
/*
 * Common bring-up for the 8101/810x family: per-revision init first,
 * then the generic register setup (sizes, rings, RX mode).
 */
5429 static void rtl_hw_start_8101(struct net_device *dev)
5431 struct rtl8169_private *tp = netdev_priv(dev);
5432 void __iomem *ioaddr = tp->mmio_addr;
5433 struct pci_dev *pdev = tp->pci_dev;
/* VER_30+ do not signal RxFIFOOver reliably; mask it from slow events. */
5435 if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
5436 tp->event_slow &= ~RxFIFOOver;
5438 if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
5439 tp->mac_version == RTL_GIGA_MAC_VER_16)
5440 pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
5441 PCI_EXP_DEVCTL_NOSNOOP_EN);
5443 RTL_W8(Cfg9346, Cfg9346_Unlock);
5445 switch (tp->mac_version) {
5446 case RTL_GIGA_MAC_VER_07:
5447 rtl_hw_start_8102e_1(tp);
5450 case RTL_GIGA_MAC_VER_08:
5451 rtl_hw_start_8102e_3(tp);
5454 case RTL_GIGA_MAC_VER_09:
5455 rtl_hw_start_8102e_2(tp);
5458 case RTL_GIGA_MAC_VER_29:
5459 rtl_hw_start_8105e_1(tp);
5461 case RTL_GIGA_MAC_VER_30:
5462 rtl_hw_start_8105e_2(tp);
5465 case RTL_GIGA_MAC_VER_37:
5466 rtl_hw_start_8402(tp);
5469 case RTL_GIGA_MAC_VER_39:
5470 rtl_hw_start_8106(tp);
5474 RTL_W8(Cfg9346, Cfg9346_Lock);
5476 RTL_W8(MaxTxPacketSize, TxPacketMax);
5478 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
5480 tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
5481 RTL_W16(CPlusCmd, tp->cp_cmd);
5483 RTL_W16(IntrMitigate, 0x0000);
5485 rtl_set_rx_tx_desc_registers(tp, ioaddr);
5487 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5488 rtl_set_rx_tx_config_registers(tp);
5492 rtl_set_rx_mode(dev);
5494 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
/*
 * ndo_change_mtu: validate against [ETH_ZLEN, chip jumbo_max], then
 * switch jumbo mode on/off and refresh offload features accordingly.
 */
5497 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
5499 struct rtl8169_private *tp = netdev_priv(dev);
5501 if (new_mtu < ETH_ZLEN ||
5502 new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
5505 if (new_mtu > ETH_DATA_LEN)
5506 rtl_hw_jumbo_enable(tp);
5508 rtl_hw_jumbo_disable(tp);
5511 netdev_update_features(dev);
/*
 * Poison an RX descriptor so the NIC will never use it: bogus DMA
 * address, DescOwn cleared.
 */
5516 static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
5518 desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
5519 desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
/* Unmap one RX buffer's DMA mapping and poison its descriptor. */
5522 static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
5523 void **data_buff, struct RxDesc *desc)
5525 dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
5530 rtl8169_make_unusable_by_asic(desc);
/* Hand a descriptor back to the NIC: set DescOwn + size, keep RingEnd. */
5533 static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
5535 u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
5537 desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
/* Store a buffer's DMA address in the descriptor, then give it to the NIC. */
5540 static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
5543 desc->addr = cpu_to_le64(mapping);
5545 rtl8169_mark_to_asic(desc, rx_buf_sz);
/* Round a pointer up to the next 16-byte boundary (RX buffer alignment). */
5548 static inline void *rtl8169_align(void *data)
5550 return (void *)ALIGN((long)data, 16);
/*
 * Allocate one RX data buffer on the device's NUMA node, retrying with
 * 15 bytes of slack if the first allocation is not 16-byte aligned,
 * DMA-map it and publish it in the descriptor. Returns NULL on failure
 * (allocation or DMA mapping).
 */
5553 static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
5554 struct RxDesc *desc)
5558 struct device *d = &tp->pci_dev->dev;
5559 struct net_device *dev = tp->dev;
5560 int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
5562 data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
5566 if (rtl8169_align(data) != data) {
5568 data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
5573 mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
5575 if (unlikely(dma_mapping_error(d, mapping))) {
5576 if (net_ratelimit())
5577 netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
5581 rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
/* Release every allocated RX buffer and poison its descriptor. */
5589 static void rtl8169_rx_clear(struct rtl8169_private *tp)
5593 for (i = 0; i < NUM_RX_DESC; i++) {
5594 if (tp->Rx_databuff[i]) {
5595 rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
5596 tp->RxDescArray + i);
/* Set RingEnd on a descriptor so the NIC wraps back to the ring start. */
5601 static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
5603 desc->opts1 |= cpu_to_le32(RingEnd);
/*
 * Populate any empty RX slots with fresh buffers and terminate the ring.
 * On allocation failure, all RX buffers are released before returning.
 */
5606 static int rtl8169_rx_fill(struct rtl8169_private *tp)
5610 for (i = 0; i < NUM_RX_DESC; i++) {
5613 if (tp->Rx_databuff[i])
5616 data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
5618 rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
5621 tp->Rx_databuff[i] = data;
5624 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
5628 rtl8169_rx_clear(tp);
/* Reset ring indexes, zero the bookkeeping arrays, and fill the RX ring. */
5632 static int rtl8169_init_ring(struct net_device *dev)
5634 struct rtl8169_private *tp = netdev_priv(dev);
5636 rtl8169_init_ring_indexes(tp);
5638 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
5639 memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
5641 return rtl8169_rx_fill(tp);
/* Undo the DMA mapping of one TX slot using the length cached in tx_skb. */
5644 static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
5645 struct TxDesc *desc)
5647 unsigned int len = tx_skb->len;
5649 dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
/*
 * Drop n in-flight TX entries starting at ring index 'start': unmap the
 * DMA, and count a tx_dropped for each entry that carried an skb.
 */
5657 static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
5662 for (i = 0; i < n; i++) {
5663 unsigned int entry = (start + i) % NUM_TX_DESC;
5664 struct ring_info *tx_skb = tp->tx_skb + entry;
5665 unsigned int len = tx_skb->len;
5668 struct sk_buff *skb = tx_skb->skb;
5670 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5671 tp->TxDescArray + entry);
5673 tp->dev->stats.tx_dropped++;
/* Drop the whole TX ring and reset the cur/dirty indexes. */
5681 static void rtl8169_tx_clear(struct rtl8169_private *tp)
5683 rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
5684 tp->cur_tx = tp->dirty_tx = 0;
/*
 * Full device reset from process context: quiesce NAPI and the TX
 * queue, reset the chip, recycle RX descriptors back to the NIC,
 * drop pending TX, then restart everything.
 */
5687 static void rtl_reset_work(struct rtl8169_private *tp)
5689 struct net_device *dev = tp->dev;
5692 napi_disable(&tp->napi);
5693 netif_stop_queue(dev);
/* Wait for in-flight softirq handlers before touching the rings. */
5694 synchronize_sched();
5696 rtl8169_hw_reset(tp);
5698 for (i = 0; i < NUM_RX_DESC; i++)
5699 rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
5701 rtl8169_tx_clear(tp);
5702 rtl8169_init_ring_indexes(tp);
5704 napi_enable(&tp->napi);
5706 netif_wake_queue(dev);
5707 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
/* ndo_tx_timeout: defer a full reset to the driver's work queue. */
5710 static void rtl8169_tx_timeout(struct net_device *dev)
5712 struct rtl8169_private *tp = netdev_priv(dev);
5714 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/*
 * Map and queue the paged fragments of an skb into consecutive TX
 * descriptors; the head fragment was already queued by the caller.
 * The last fragment gets LastFrag and owns the skb pointer. On DMA
 * failure, unwinds every fragment queued so far.
 */
5717 static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
5720 struct skb_shared_info *info = skb_shinfo(skb);
5721 unsigned int cur_frag, entry;
5722 struct TxDesc * uninitialized_var(txd);
5723 struct device *d = &tp->pci_dev->dev;
5726 for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
5727 const skb_frag_t *frag = info->frags + cur_frag;
5732 entry = (entry + 1) % NUM_TX_DESC;
5734 txd = tp->TxDescArray + entry;
5735 len = skb_frag_size(frag);
5736 addr = skb_frag_address(frag);
5737 mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
5738 if (unlikely(dma_mapping_error(d, mapping))) {
5739 if (net_ratelimit())
5740 netif_err(tp, drv, tp->dev,
5741 "Failed to map TX fragments DMA!\n");
5745 /* Anti gcc 2.95.3 bugware (sic) */
5746 status = opts[0] | len |
5747 (RingEnd * !((entry + 1) % NUM_TX_DESC));
5749 txd->opts1 = cpu_to_le32(status);
5750 txd->opts2 = cpu_to_le32(opts[1]);
5751 txd->addr = cpu_to_le64(mapping);
5753 tp->tx_skb[entry].len = len;
5757 tp->tx_skb[entry].skb = skb;
5758 txd->opts1 |= cpu_to_le32(LastFrag);
5764 rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
/*
 * Fill the TSO / checksum-offload bits of the TX descriptor options.
 * The option word index and bit layout depend on tp->txd_version.
 * NOTE(review): the CHECKSUM_PARTIAL path reads ip_hdr() only — looks
 * like IPv6 packets would be mis-handled here; confirm against callers.
 */
5768 static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
5769 struct sk_buff *skb, u32 *opts)
5771 const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
5772 u32 mss = skb_shinfo(skb)->gso_size;
5773 int offset = info->opts_offset;
5777 opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
5778 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5779 const struct iphdr *ip = ip_hdr(skb);
5781 if (ip->protocol == IPPROTO_TCP)
5782 opts[offset] |= info->checksum.tcp;
5783 else if (ip->protocol == IPPROTO_UDP)
5784 opts[offset] |= info->checksum.udp;
/*
 * ndo_start_xmit: map the skb head, queue any fragments, publish the
 * descriptors to the NIC (DescOwn last), kick TxPoll, and stop the
 * queue when fewer than MAX_SKB_FRAGS slots remain.
 */
5790 static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5791 struct net_device *dev)
5793 struct rtl8169_private *tp = netdev_priv(dev);
5794 unsigned int entry = tp->cur_tx % NUM_TX_DESC;
5795 struct TxDesc *txd = tp->TxDescArray + entry;
5796 void __iomem *ioaddr = tp->mmio_addr;
5797 struct device *d = &tp->pci_dev->dev;
/* Queue should have been stopped before the ring ran out of slots. */
5803 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
5804 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
5808 if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
5811 len = skb_headlen(skb);
5812 mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
5813 if (unlikely(dma_mapping_error(d, mapping))) {
5814 if (net_ratelimit())
5815 netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
5819 tp->tx_skb[entry].len = len;
5820 txd->addr = cpu_to_le64(mapping);
5822 opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
5825 rtl8169_tso_csum(tp, skb, opts);
5827 frags = rtl8169_xmit_frags(tp, skb, opts);
/* Single-fragment skb: head descriptor is also the last one. */
5831 opts[0] |= FirstFrag;
5833 opts[0] |= FirstFrag | LastFrag;
5834 tp->tx_skb[entry].skb = skb;
5837 txd->opts2 = cpu_to_le32(opts[1]);
5839 skb_tx_timestamp(skb);
5843 /* Anti gcc 2.95.3 bugware (sic) */
5844 status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
/* Transfers ownership of the head descriptor to the NIC. */
5845 txd->opts1 = cpu_to_le32(status);
5847 tp->cur_tx += frags + 1;
5851 RTL_W8(TxPoll, NPQ);
5855 if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
5856 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
5857 * not miss a ring update when it notices a stopped queue.
5860 netif_stop_queue(dev);
5861 /* Sync with rtl_tx:
5862 * - publish queue status and cur_tx ring index (write barrier)
5863 * - refresh dirty_tx ring index (read barrier).
5864 * May the current thread have a pessimistic view of the ring
5865 * status and forget to wake up queue, a racing rtl_tx thread
5869 if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
5870 netif_wake_queue(dev);
5873 return NETDEV_TX_OK;
5876 rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
5879 dev->stats.tx_dropped++;
5880 return NETDEV_TX_OK;
5883 netif_stop_queue(dev);
5884 dev->stats.tx_dropped++;
5885 return NETDEV_TX_BUSY;
/*
 * Handle a PCI SYSErr event: log the config-space state, attempt the
 * empirically-derived recovery sequence below, then schedule a reset.
 */
5888 static void rtl8169_pcierr_interrupt(struct net_device *dev)
5890 struct rtl8169_private *tp = netdev_priv(dev);
5891 struct pci_dev *pdev = tp->pci_dev;
5892 u16 pci_status, pci_cmd;
5894 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
5895 pci_read_config_word(pdev, PCI_STATUS, &pci_status);
5897 netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
5898 pci_cmd, pci_status);
5901 * The recovery sequence below admits a very elaborated explanation:
5902 * - it seems to work;
5903 * - I did not see what else could be done;
5904 * - it makes iop3xx happy.
5906 * Feel free to adjust to your needs.
/* On parity-broken buses, disable parity instead of re-arming it. */
5908 if (pdev->broken_parity_status)
5909 pci_cmd &= ~PCI_COMMAND_PARITY;
5911 pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
5913 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
/* Writing the error bits back clears them (write-one-to-clear). */
5915 pci_write_config_word(pdev, PCI_STATUS,
5916 pci_status & (PCI_STATUS_DETECTED_PARITY |
5917 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
5918 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
5920 /* The infamous DAC f*ckup only happens at boot time */
5921 if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
5922 void __iomem *ioaddr = tp->mmio_addr;
5924 netif_info(tp, intr, dev, "disabling PCI DAC\n");
5925 tp->cp_cmd &= ~PCIDAC;
5926 RTL_W16(CPlusCmd, tp->cp_cmd);
5927 dev->features &= ~NETIF_F_HIGHDMA;
5930 rtl8169_hw_reset(tp);
5932 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/*
 * Tx completion reaper (NAPI context): walk descriptors from dirty_tx
 * to cur_tx, unmap and free completed skbs, update stats, and wake the
 * queue if it was stopped for lack of ring space.
 * NOTE(review): the per-iteration dirty_tx++/tx_left-- lines and some
 * barriers are elided in this excerpt.
 */
5935 static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
5937 unsigned int dirty_tx, tx_left;
5939 dirty_tx = tp->dirty_tx;
5941 tx_left = tp->cur_tx - dirty_tx;
5943 while (tx_left > 0) {
5944 unsigned int entry = dirty_tx % NUM_TX_DESC;
5945 struct ring_info *tx_skb = tp->tx_skb + entry;
5949 status = le32_to_cpu(tp->TxDescArray[entry].opts1);
/* Hardware still owns this descriptor: stop reaping here. */
5950 if (status & DescOwn)
5953 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5954 tp->TxDescArray + entry);
/* The skb is counted and freed only on its LastFrag descriptor. */
5955 if (status & LastFrag) {
5956 u64_stats_update_begin(&tp->tx_stats.syncp);
5957 tp->tx_stats.packets++;
5958 tp->tx_stats.bytes += tx_skb->skb->len;
5959 u64_stats_update_end(&tp->tx_stats.syncp);
5960 dev_kfree_skb(tx_skb->skb);
5967 if (tp->dirty_tx != dirty_tx) {
5968 tp->dirty_tx = dirty_tx;
5969 /* Sync with rtl8169_start_xmit:
5970 * - publish dirty_tx ring index (write barrier)
5971 * - refresh cur_tx ring index and queue status (read barrier)
5972 * May the current thread miss the stopped queue condition,
5973 * a racing xmit thread can only have a right view of the
5977 if (netif_queue_stopped(dev) &&
5978 TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
5979 netif_wake_queue(dev);
5982 * 8168 hack: TxPoll requests are lost when the Tx packets are
5983 * too close. Let's kick an extra TxPoll request when a burst
5984 * of start_xmit activity is detected (if it is not detected,
5985 * it is slow enough). -- FR
5987 if (tp->cur_tx != dirty_tx) {
5988 void __iomem *ioaddr = tp->mmio_addr;
5990 RTL_W8(TxPoll, NPQ);
/*
 * True when the Rx descriptor status does NOT carry both FirstFrag and
 * LastFrag, i.e. the frame spans multiple descriptors (unsupported;
 * typically a symptom of an over-MTU peer -- see caller in rtl_rx()).
 */
5995 static inline int rtl8169_fragmented_frame(u32 status)
5997 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
/*
 * Translate the hardware Rx checksum verdict: mark the skb
 * CHECKSUM_UNNECESSARY when the chip validated a TCP or UDP checksum,
 * otherwise leave it unchecked for the stack to verify.
 * NOTE(review): the "else" line before skb_checksum_none_assert() is
 * elided in this excerpt.
 */
6000 static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
6002 u32 status = opts1 & RxProtoMask;
6004 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
6005 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
6006 skb->ip_summed = CHECKSUM_UNNECESSARY;
6008 skb_checksum_none_assert(skb);
/*
 * Copy-on-receive: sync the DMA buffer for CPU access, allocate a new
 * skb and memcpy the packet into it, then hand the buffer back to the
 * device.  The original Rx buffer stays mapped and is reused.
 * NOTE(review): the allocation-failure check between the alloc and the
 * memcpy is elided in this excerpt -- presumably skb is NULL-checked;
 * confirm against full source.
 */
6011 static struct sk_buff *rtl8169_try_rx_copy(void *data,
6012 struct rtl8169_private *tp,
6016 struct sk_buff *skb;
6017 struct device *d = &tp->pci_dev->dev;
/* Buffers are stored unaligned; realign before copying out. */
6019 data = rtl8169_align(data);
6020 dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
6022 skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
6024 memcpy(skb->data, data, pkt_size);
6025 dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
/*
 * Rx processing loop (NAPI context): consume up to @budget completed
 * Rx descriptors, handling hardware-reported errors, dropping
 * multi-descriptor frames, copying each good packet into a fresh skb
 * and feeding it to GRO.  Returns the number of packets processed
 * (via the elided "count" return -- see NOTE below).
 * NOTE(review): several lines are elided in this excerpt ("int count",
 * error-path continue/process_pkt labels, the final return) -- confirm
 * control flow against full source.
 */
6030 static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
6032 unsigned int cur_rx, rx_left;
6035 cur_rx = tp->cur_rx;
/* Slots available to scan: ring size minus the in-flight window. */
6036 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
6037 rx_left = min(rx_left, budget);
6039 for (; rx_left > 0; rx_left--, cur_rx++) {
6040 unsigned int entry = cur_rx % NUM_RX_DESC;
6041 struct RxDesc *desc = tp->RxDescArray + entry;
6045 status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
/* Still owned by hardware: no more completed packets. */
6047 if (status & DescOwn)
6049 if (unlikely(status & RxRES)) {
6050 netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
6052 dev->stats.rx_errors++;
6053 if (status & (RxRWT | RxRUNT))
6054 dev->stats.rx_length_errors++;
6056 dev->stats.rx_crc_errors++;
/* FIFO overflow needs a chip reset to recover. */
6057 if (status & RxFOVF) {
6058 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
6059 dev->stats.rx_fifo_errors++;
/* With RXALL, deliver runt/CRC-error frames anyway. */
6061 if ((status & (RxRUNT | RxCRC)) &&
6062 !(status & (RxRWT | RxFOVF)) &&
6063 (dev->features & NETIF_F_RXALL))
6066 struct sk_buff *skb;
6071 addr = le64_to_cpu(desc->addr);
/* Strip the 4-byte FCS unless the user asked to keep it. */
6072 if (likely(!(dev->features & NETIF_F_RXFCS)))
6073 pkt_size = (status & 0x00003fff) - 4;
6075 pkt_size = status & 0x00003fff;
6078 * The driver does not support incoming fragmented
6079 * frames. They are seen as a symptom of over-mtu
6082 if (unlikely(rtl8169_fragmented_frame(status))) {
6083 dev->stats.rx_dropped++;
6084 dev->stats.rx_length_errors++;
6085 goto release_descriptor;
6088 skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
6089 tp, pkt_size, addr);
6091 dev->stats.rx_dropped++;
6092 goto release_descriptor;
6095 rtl8169_rx_csum(skb, status);
6096 skb_put(skb, pkt_size);
6097 skb->protocol = eth_type_trans(skb, dev);
6099 rtl8169_rx_vlan_tag(desc, skb);
6101 napi_gro_receive(&tp->napi, skb);
6103 u64_stats_update_begin(&tp->rx_stats.syncp);
6104 tp->rx_stats.packets++;
6105 tp->rx_stats.bytes += pkt_size;
6106 u64_stats_update_end(&tp->rx_stats.syncp);
/* Hand the descriptor back to the hardware for reuse. */
6111 rtl8169_mark_to_asic(desc, rx_buf_sz);
6114 count = cur_rx - tp->cur_rx;
6115 tp->cur_rx = cur_rx;
6117 tp->dirty_rx += count;
/*
 * Hard IRQ handler: read the event register, filter to the events we
 * service, then mask the chip and hand off to NAPI.  0xffff means the
 * device is gone (e.g. surprise removal) and is ignored.
 * NOTE(review): "handled" initialization and the status-nonzero guard
 * around the disable/schedule pair are elided in this excerpt.
 */
6122 static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
6124 struct net_device *dev = dev_instance;
6125 struct rtl8169_private *tp = netdev_priv(dev);
6129 status = rtl_get_events(tp);
6130 if (status && status != 0xffff) {
6131 status &= RTL_EVENT_NAPI | tp->event_slow;
6135 rtl_irq_disable(tp);
6136 napi_schedule(&tp->napi);
6139 return IRQ_RETVAL(handled);
6143 * Workqueue context.
/*
 * Service slow (non-NAPI) events from process context: Rx FIFO
 * overflow workarounds, PCI system errors, and link changes.
 * Re-enables the full interrupt mask when done.
 */
6145 static void rtl_slow_event_work(struct rtl8169_private *tp)
6147 struct net_device *dev = tp->dev;
6150 status = rtl_get_events(tp) & tp->event_slow;
6151 rtl_ack_events(tp, status);
6153 if (unlikely(status & RxFIFOOver)) {
6154 switch (tp->mac_version) {
6155 /* Work around for rx fifo overflow */
6156 case RTL_GIGA_MAC_VER_11:
6157 netif_stop_queue(dev);
6158 /* XXX - Hack alert. See rtl_task(). */
6159 set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
6165 if (unlikely(status & SYSErr))
6166 rtl8169_pcierr_interrupt(dev);
6168 if (status & LinkChg)
6169 __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
6171 rtl_irq_enable_all(tp);
/*
 * Workqueue entry point: dispatch each pending task flag (slow events,
 * reset, PHY work) in table order, under the work mutex, but only
 * while the interface is running and tasks are enabled.
 * NOTE(review): rtl_lock_work() and the loop-local "bool pending" are
 * elided in this excerpt.
 */
6174 static void rtl_task(struct work_struct *work)
6176 static const struct {
6178 void (*action)(struct rtl8169_private *);
6180 /* XXX - keep rtl_slow_event_work() as first element. */
6181 { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
6182 { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
6183 { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work }
6185 struct rtl8169_private *tp =
6186 container_of(work, struct rtl8169_private, wk.work);
6187 struct net_device *dev = tp->dev;
6192 if (!netif_running(dev) ||
6193 !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
6196 for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
6199 pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
6201 rtl_work[i].action(tp);
6205 rtl_unlock_work(tp);
/*
 * NAPI poll: acknowledge fast events, run Rx within budget and reap
 * Tx, defer slow events to the workqueue (keeping them masked), and
 * complete NAPI + re-enable interrupts when under budget.
 */
6208 static int rtl8169_poll(struct napi_struct *napi, int budget)
6210 struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
6211 struct net_device *dev = tp->dev;
6212 u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
6216 status = rtl_get_events(tp);
/* Ack fast events only; slow ones are acked by rtl_slow_event_work. */
6217 rtl_ack_events(tp, status & ~tp->event_slow);
6219 if (status & RTL_EVENT_NAPI_RX)
6220 work_done = rtl_rx(dev, tp, (u32) budget);
6222 if (status & RTL_EVENT_NAPI_TX)
/* Keep slow events masked until the workqueue has handled them. */
6225 if (status & tp->event_slow) {
6226 enable_mask &= ~tp->event_slow;
6228 rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
6231 if (work_done < budget) {
6232 napi_complete(napi);
6234 rtl_irq_enable(tp, enable_mask);
/*
 * Accumulate the chip's RxMissed counter into netdev stats and reset
 * it.  The RxMissed register is only read on early 8169 chips
 * (<= VER_06); newer MACs bail out at the version check.
 * NOTE(review): the "return;" under the version check is elided in
 * this excerpt.
 */
6241 static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
6243 struct rtl8169_private *tp = netdev_priv(dev);
6245 if (tp->mac_version > RTL_GIGA_MAC_VER_06)
6248 dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
6249 RTL_W32(RxMissed, 0);
/*
 * Bring the interface down: stop the timer, NAPI and the Tx queue,
 * reset the hardware, drain both rings, and power the PLL down.
 * Runs with device interrupts already impossible (see comment below).
 */
6252 static void rtl8169_down(struct net_device *dev)
6254 struct rtl8169_private *tp = netdev_priv(dev);
6255 void __iomem *ioaddr = tp->mmio_addr;
6257 del_timer_sync(&tp->timer);
6259 napi_disable(&tp->napi);
6260 netif_stop_queue(dev);
6262 rtl8169_hw_reset(tp);
6264 * At this point device interrupts can not be enabled in any function,
6265 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
6266 * and napi is disabled (rtl8169_poll).
6268 rtl8169_rx_missed(dev, ioaddr);
6270 /* Give a racing hard_start_xmit a few cycles to complete. */
6271 synchronize_sched();
6273 rtl8169_tx_clear(tp);
6275 rtl8169_rx_clear(tp);
6277 rtl_pll_power_down(tp);
/*
 * ndo_stop: snapshot hardware counters, disable the task machinery
 * under the work lock, free the IRQ and both descriptor rings.
 * Bracketed by a runtime-PM get/put so the device stays awake.
 * NOTE(review): rtl_lock_work() / rtl8169_down() calls and the final
 * "return 0;" are elided in this excerpt.
 */
6280 static int rtl8169_close(struct net_device *dev)
6282 struct rtl8169_private *tp = netdev_priv(dev);
6283 struct pci_dev *pdev = tp->pci_dev;
6285 pm_runtime_get_sync(&pdev->dev);
6287 /* Update counters before going down */
6288 rtl8169_update_counters(dev);
6291 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6294 rtl_unlock_work(tp);
6296 free_irq(pdev->irq, dev);
6298 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
6300 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
6302 tp->TxDescArray = NULL;
6303 tp->RxDescArray = NULL;
6305 pm_runtime_put_sync(&pdev->dev);
/* Netconsole/netpoll support: drive the IRQ handler synchronously. */
6310 #ifdef CONFIG_NET_POLL_CONTROLLER
6311 static void rtl8169_netpoll(struct net_device *dev)
6313 struct rtl8169_private *tp = netdev_priv(dev);
6315 rtl8169_interrupt(tp->pci_dev->irq, dev);
/*
 * ndo_open: allocate both descriptor rings, initialise the ring
 * buffers, request firmware and the IRQ, then bring the PHY and MAC
 * up and start the queue.  Error paths unwind in reverse order via
 * the goto labels (mostly elided in this excerpt).
 */
6319 static int rtl_open(struct net_device *dev)
6321 struct rtl8169_private *tp = netdev_priv(dev);
6322 void __iomem *ioaddr = tp->mmio_addr;
6323 struct pci_dev *pdev = tp->pci_dev;
6324 int retval = -ENOMEM;
6326 pm_runtime_get_sync(&pdev->dev);
6329 * Rx and Tx descriptors needs 256 bytes alignment.
6330 * dma_alloc_coherent provides more.
6332 tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
6333 &tp->TxPhyAddr, GFP_KERNEL);
6334 if (!tp->TxDescArray)
6335 goto err_pm_runtime_put;
6337 tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
6338 &tp->RxPhyAddr, GFP_KERNEL);
6339 if (!tp->RxDescArray)
6342 retval = rtl8169_init_ring(dev);
6346 INIT_WORK(&tp->wk.work, rtl_task);
6350 rtl_request_firmware(tp);
/* Share the line unless MSI is in use (MSI never shares). */
6352 retval = request_irq(pdev->irq, rtl8169_interrupt,
6353 (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
6356 goto err_release_fw_2;
6360 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6362 napi_enable(&tp->napi);
6364 rtl8169_init_phy(dev, tp);
6366 __rtl8169_set_features(dev, dev->features);
6368 rtl_pll_power_up(tp);
6372 netif_start_queue(dev);
6374 rtl_unlock_work(tp);
6376 tp->saved_wolopts = 0;
6377 pm_runtime_put_noidle(&pdev->dev);
6379 rtl8169_check_link_status(dev, tp, ioaddr);
/* Error unwind: firmware, Rx buffers, then both coherent rings. */
6384 rtl_release_firmware(tp);
6385 rtl8169_rx_clear(tp);
6387 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
6389 tp->RxDescArray = NULL;
6391 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
6393 tp->TxDescArray = NULL;
6395 pm_runtime_put_noidle(&pdev->dev);
/*
 * ndo_get_stats64: fold the chip's RxMissed counter in (if running),
 * then copy the per-cpu-synced Rx/Tx byte/packet counters using
 * u64_stats seqcount retry loops, plus the plain netdev error counts.
 */
6399 static struct rtnl_link_stats64 *
6400 rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6402 struct rtl8169_private *tp = netdev_priv(dev);
6403 void __iomem *ioaddr = tp->mmio_addr;
6406 if (netif_running(dev))
6407 rtl8169_rx_missed(dev, ioaddr);
/* Retry loop guarantees a consistent packets/bytes snapshot. */
6410 start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
6411 stats->rx_packets = tp->rx_stats.packets;
6412 stats->rx_bytes = tp->rx_stats.bytes;
6413 } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
6417 start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
6418 stats->tx_packets = tp->tx_stats.packets;
6419 stats->tx_bytes = tp->tx_stats.bytes;
6420 } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
6422 stats->rx_dropped = dev->stats.rx_dropped;
6423 stats->tx_dropped = dev->stats.tx_dropped;
6424 stats->rx_length_errors = dev->stats.rx_length_errors;
6425 stats->rx_errors = dev->stats.rx_errors;
6426 stats->rx_crc_errors = dev->stats.rx_crc_errors;
6427 stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
6428 stats->rx_missed_errors = dev->stats.rx_missed_errors;
/*
 * Common suspend path (system sleep, runtime PM, shutdown): detach the
 * netdev, quiesce NAPI and the task machinery, power the PLL down.
 * No-op if the interface is not running.
 */
6433 static void rtl8169_net_suspend(struct net_device *dev)
6435 struct rtl8169_private *tp = netdev_priv(dev);
6437 if (!netif_running(dev))
6440 netif_device_detach(dev);
6441 netif_stop_queue(dev);
6444 napi_disable(&tp->napi);
6445 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6446 rtl_unlock_work(tp);
6448 rtl_pll_power_down(tp);
/* System sleep .suspend/.freeze/.poweroff callback: thin wrapper. */
6453 static int rtl8169_suspend(struct device *device)
6455 struct pci_dev *pdev = to_pci_dev(device);
6456 struct net_device *dev = pci_get_drvdata(pdev);
6458 rtl8169_net_suspend(dev);
/*
 * Common resume tail: reattach the netdev, power the PLL up, re-enable
 * NAPI and tasks, then schedule a reset to reprogram the chip.
 */
6463 static void __rtl8169_resume(struct net_device *dev)
6465 struct rtl8169_private *tp = netdev_priv(dev);
6467 netif_device_attach(dev);
6469 rtl_pll_power_up(tp);
6472 napi_enable(&tp->napi);
6473 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6474 rtl_unlock_work(tp);
6476 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/* System sleep .resume/.thaw/.restore: re-init the PHY, then resume. */
6479 static int rtl8169_resume(struct device *device)
6481 struct pci_dev *pdev = to_pci_dev(device);
6482 struct net_device *dev = pci_get_drvdata(pdev);
6483 struct rtl8169_private *tp = netdev_priv(dev);
6485 rtl8169_init_phy(dev, tp);
6487 if (netif_running(dev))
6488 __rtl8169_resume(dev);
/*
 * Runtime-PM suspend: save the current WoL options and force WAKE_ANY
 * so the link can wake the device, then run the common suspend path.
 * Bails out if the rings were never allocated (interface never opened).
 */
6493 static int rtl8169_runtime_suspend(struct device *device)
6495 struct pci_dev *pdev = to_pci_dev(device);
6496 struct net_device *dev = pci_get_drvdata(pdev);
6497 struct rtl8169_private *tp = netdev_priv(dev);
6499 if (!tp->TxDescArray)
6503 tp->saved_wolopts = __rtl8169_get_wol(tp);
6504 __rtl8169_set_wol(tp, WAKE_ANY);
6505 rtl_unlock_work(tp);
6507 rtl8169_net_suspend(dev);
/*
 * Runtime-PM resume: restore the WoL options saved at runtime suspend,
 * re-init the PHY, and run the common resume path.
 */
6512 static int rtl8169_runtime_resume(struct device *device)
6514 struct pci_dev *pdev = to_pci_dev(device);
6515 struct net_device *dev = pci_get_drvdata(pdev);
6516 struct rtl8169_private *tp = netdev_priv(dev);
6518 if (!tp->TxDescArray)
6522 __rtl8169_set_wol(tp, tp->saved_wolopts);
6523 tp->saved_wolopts = 0;
6524 rtl_unlock_work(tp);
6526 rtl8169_init_phy(dev, tp);
6528 __rtl8169_resume(dev);
/*
 * Runtime-PM idle check: refuse to idle while the rings exist (i.e.
 * the interface is open); allow it otherwise.
 */
6533 static int rtl8169_runtime_idle(struct device *device)
6535 struct pci_dev *pdev = to_pci_dev(device);
6536 struct net_device *dev = pci_get_drvdata(pdev);
6537 struct rtl8169_private *tp = netdev_priv(dev);
6539 return tp->TxDescArray ? -EBUSY : 0;
/*
 * Power-management operations: system sleep transitions all map onto
 * the same suspend/resume pair; runtime PM has its own handlers.
 * RTL8169_PM_OPS is NULL when CONFIG_PM is off.
 */
6542 static const struct dev_pm_ops rtl8169_pm_ops = {
6543 .suspend = rtl8169_suspend,
6544 .resume = rtl8169_resume,
6545 .freeze = rtl8169_suspend,
6546 .thaw = rtl8169_resume,
6547 .poweroff = rtl8169_suspend,
6548 .restore = rtl8169_resume,
6549 .runtime_suspend = rtl8169_runtime_suspend,
6550 .runtime_resume = rtl8169_runtime_resume,
6551 .runtime_idle = rtl8169_runtime_idle,
6554 #define RTL8169_PM_OPS (&rtl8169_pm_ops)
6556 #else /* !CONFIG_PM */
6558 #define RTL8169_PM_OPS NULL
6560 #endif /* !CONFIG_PM */
/*
 * 8168b-family shutdown quirk: WoL only works if the receiver stays
 * enabled, so clear bus mastering and leave only CmdRxEnb set.
 * Other MAC versions fall through untouched (default case elided).
 */
6562 static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
6564 void __iomem *ioaddr = tp->mmio_addr;
6566 /* WoL fails with 8168b when the receiver is disabled. */
6567 switch (tp->mac_version) {
6568 case RTL_GIGA_MAC_VER_11:
6569 case RTL_GIGA_MAC_VER_12:
6570 case RTL_GIGA_MAC_VER_17:
6571 pci_clear_master(tp->pci_dev);
6573 RTL_W8(ChipCmd, CmdRxEnb);
/*
 * PCI .shutdown hook: quiesce the device, restore the permanent MAC
 * address, and if the system is powering off with WoL armed, apply the
 * WoL quirks and put the device into D3hot with wake enabled.
 */
6582 static void rtl_shutdown(struct pci_dev *pdev)
6584 struct net_device *dev = pci_get_drvdata(pdev);
6585 struct rtl8169_private *tp = netdev_priv(dev);
6586 struct device *d = &pdev->dev;
6588 pm_runtime_get_sync(d);
6590 rtl8169_net_suspend(dev);
6592 /* Restore original MAC address */
6593 rtl_rar_set(tp, dev->perm_addr);
6595 rtl8169_hw_reset(tp);
6597 if (system_state == SYSTEM_POWER_OFF) {
6598 if (__rtl8169_get_wol(tp) & WAKE_ANY) {
6599 rtl_wol_suspend_quirk(tp);
6600 rtl_wol_shutdown_quirk(tp);
6603 pci_wake_from_d3(pdev, true);
6604 pci_set_power_state(pdev, PCI_D3hot);
6607 pm_runtime_put_noidle(d);
/*
 * PCI .remove hook: stop the DASH/OOB firmware on the MACs that have
 * it, flush pending work, unregister the netdev, release firmware,
 * restore the permanent MAC and free all board resources.
 */
6610 static void rtl_remove_one(struct pci_dev *pdev)
6612 struct net_device *dev = pci_get_drvdata(pdev);
6613 struct rtl8169_private *tp = netdev_priv(dev);
6615 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6616 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6617 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6618 rtl8168_driver_stop(tp);
6621 cancel_work_sync(&tp->wk.work);
6623 netif_napi_del(&tp->napi);
6625 unregister_netdev(dev);
6627 rtl_release_firmware(tp);
/* Balance the pm_runtime_put_noidle() done at probe time. */
6629 if (pci_dev_run_wake(pdev))
6630 pm_runtime_get_noresume(&pdev->dev);
6632 /* restore original MAC address */
6633 rtl_rar_set(tp, dev->perm_addr);
6635 rtl_disable_msi(pdev, tp);
6636 rtl8169_release_board(pdev, dev, tp->mmio_addr);
6637 pci_set_drvdata(pdev, NULL);
/* Netdev operations table wired into every registered r8169 device. */
6640 static const struct net_device_ops rtl_netdev_ops = {
6641 .ndo_open = rtl_open,
6642 .ndo_stop = rtl8169_close,
6643 .ndo_get_stats64 = rtl8169_get_stats64,
6644 .ndo_start_xmit = rtl8169_start_xmit,
6645 .ndo_tx_timeout = rtl8169_tx_timeout,
6646 .ndo_validate_addr = eth_validate_addr,
6647 .ndo_change_mtu = rtl8169_change_mtu,
6648 .ndo_fix_features = rtl8169_fix_features,
6649 .ndo_set_features = rtl8169_set_features,
6650 .ndo_set_mac_address = rtl_set_mac_address,
6651 .ndo_do_ioctl = rtl8169_ioctl,
6652 .ndo_set_rx_mode = rtl_set_rx_mode,
6653 #ifdef CONFIG_NET_POLL_CONTROLLER
6654 .ndo_poll_controller = rtl8169_netpoll,
/*
 * Per-family configuration table (indexed by PCI id driver_data):
 * hw_start routine, slow-event mask, feature flags and the fallback
 * MAC version for the 8169, 8168 and 8101 families respectively.
 * NOTE(review): region/align fields and some initializers are elided
 * in this excerpt.
 */
6659 static const struct rtl_cfg_info {
6660 void (*hw_start)(struct net_device *);
6661 unsigned int region;
6666 } rtl_cfg_infos [] = {
6668 .hw_start = rtl_hw_start_8169,
6671 .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
6672 .features = RTL_FEATURE_GMII,
6673 .default_ver = RTL_GIGA_MAC_VER_01,
6676 .hw_start = rtl_hw_start_8168,
6679 .event_slow = SYSErr | LinkChg | RxOverflow,
6680 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
6681 .default_ver = RTL_GIGA_MAC_VER_11,
6684 .hw_start = rtl_hw_start_8101,
6687 .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
6689 .features = RTL_FEATURE_MSI,
6690 .default_ver = RTL_GIGA_MAC_VER_13,
6694 /* Cfg9346_Unlock assumed. */
/*
 * Try to enable MSI when the config allows it; fall back to INTx on
 * failure.  Returns the RTL_FEATURE_MSI flag actually obtained (0 if
 * none).  Old 8169 chips (<= VER_06) also get the MSIEnable bit
 * updated in Config2.
 * NOTE(review): "unsigned msi = 0" and the Config2 MSIEnable set on
 * success are elided in this excerpt.
 */
6695 static unsigned rtl_try_msi(struct rtl8169_private *tp,
6696 const struct rtl_cfg_info *cfg)
6698 void __iomem *ioaddr = tp->mmio_addr;
6702 cfg2 = RTL_R8(Config2) & ~MSIEnable;
6703 if (cfg->features & RTL_FEATURE_MSI) {
6704 if (pci_enable_msi(tp->pci_dev)) {
6705 netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
6708 msi = RTL_FEATURE_MSI;
6711 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
6712 RTL_W8(Config2, cfg2);
/* Poll condition: MCU reports the link list is ready. */
6716 DECLARE_RTL_COND(rtl_link_list_ready_cond)
6718 void __iomem *ioaddr = tp->mmio_addr;
6720 return RTL_R8(MCU) & LINK_LIST_RDY;
/* Poll condition: both Rx and Tx FIFOs report empty. */
6723 DECLARE_RTL_COND(rtl_rxtx_empty_cond)
6725 void __iomem *ioaddr = tp->mmio_addr;
6727 return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
/*
 * One-time hardware init for 8168g-class MACs: gate RXDV, wait for the
 * FIFOs to drain, take the MCU out of OOB mode and poke the 0xe8de
 * mac-ocp register (bit twiddles elided here), waiting for link-list
 * readiness after each step.  Timeouts fall through silently.
 */
6730 static void rtl_hw_init_8168g(struct rtl8169_private *tp)
6732 void __iomem *ioaddr = tp->mmio_addr;
6735 tp->ocp_base = OCP_STD_PHY_BASE;
6737 RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);
6739 if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
6742 if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
6745 RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb))
6747 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
/* Read-modify-write of mac-ocp 0xe8de; the modify lines are elided. */
6749 data = r8168_mac_ocp_read(tp, 0xe8de);
6751 r8168_mac_ocp_write(tp, 0xe8de, data);
6753 if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
6756 data = r8168_mac_ocp_read(tp, 0xe8de);
6758 r8168_mac_ocp_write(tp, 0xe8de, data);
6760 if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
/*
 * Dispatch one-time hardware init by MAC version; only the 8168g
 * family (VER_40/41) needs it, all others are a no-op.
 */
6764 static void rtl_hw_initialize(struct rtl8169_private *tp)
6766 switch (tp->mac_version) {
6767 case RTL_GIGA_MAC_VER_40:
6768 case RTL_GIGA_MAC_VER_41:
6769 rtl_hw_init_8168g(tp);
/*
 * PCI probe: allocate the netdev, enable and map the device, detect
 * the MAC version, configure DMA width / MSI / WoL, fill in the
 * PHY-access function pointers, register the netdev and announce it.
 * Error paths unwind through the numbered err_out_* labels (several
 * of which are elided in this excerpt).
 */
6778 rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6780 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
6781 const unsigned int region = cfg->region;
6782 struct rtl8169_private *tp;
6783 struct mii_if_info *mii;
6784 struct net_device *dev;
6785 void __iomem *ioaddr;
/* One-time driver banner, printed on first probe only. */
6789 if (netif_msg_drv(&debug)) {
6790 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
6791 MODULENAME, RTL8169_VERSION);
6794 dev = alloc_etherdev(sizeof (*tp));
6800 SET_NETDEV_DEV(dev, &pdev->dev);
6801 dev->netdev_ops = &rtl_netdev_ops;
6802 tp = netdev_priv(dev);
6805 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
/* Wire up the generic MII library accessors. */
6809 mii->mdio_read = rtl_mdio_read;
6810 mii->mdio_write = rtl_mdio_write;
6811 mii->phy_id_mask = 0x1f;
6812 mii->reg_num_mask = 0x1f;
6813 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
6815 /* disable ASPM completely as that cause random device stop working
6816 * problems as well as full system hangs for some PCIe devices users */
6817 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6818 PCIE_LINK_STATE_CLKPM);
6820 /* enable device (incl. PCI PM wakeup and hotplug setup) */
6821 rc = pci_enable_device(pdev);
6823 netif_err(tp, probe, dev, "enable failure\n");
6824 goto err_out_free_dev_1;
6827 if (pci_set_mwi(pdev) < 0)
6828 netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
6830 /* make sure PCI base addr 1 is MMIO */
6831 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
6832 netif_err(tp, probe, dev,
6833 "region #%d not an MMIO resource, aborting\n",
6839 /* check for weird/broken PCI region reporting */
6840 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
6841 netif_err(tp, probe, dev,
6842 "Invalid PCI region size(s), aborting\n");
6847 rc = pci_request_regions(pdev, MODULENAME);
6849 netif_err(tp, probe, dev, "could not request regions\n");
6853 tp->cp_cmd = RxChkSum;
/* Prefer 64-bit DMA when available and requested; else force 32-bit. */
6855 if ((sizeof(dma_addr_t) > 4) &&
6856 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
6857 tp->cp_cmd |= PCIDAC;
6858 dev->features |= NETIF_F_HIGHDMA;
6860 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6862 netif_err(tp, probe, dev, "DMA configuration failed\n");
6863 goto err_out_free_res_3;
6867 /* ioremap MMIO region */
6868 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
6870 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
6872 goto err_out_free_res_3;
6874 tp->mmio_addr = ioaddr;
6876 if (!pci_is_pcie(pdev))
6877 netif_info(tp, probe, dev, "not PCI Express\n");
6879 /* Identify chip attached to board */
6880 rtl8169_get_mac_version(tp, dev, cfg->default_ver);
6884 rtl_irq_disable(tp);
6886 rtl_hw_initialize(tp);
6890 rtl_ack_events(tp, 0xffff);
6892 pci_set_master(pdev);
6895 * Pretend we are using VLANs; This bypasses a nasty bug where
6896 * Interrupts stop flowing on high load on 8110SCd controllers.
6898 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6899 tp->cp_cmd |= RxVlan;
6901 rtl_init_mdio_ops(tp);
6902 rtl_init_pll_power_ops(tp);
6903 rtl_init_jumbo_ops(tp);
6904 rtl_init_csi_ops(tp);
6906 rtl8169_print_mac_version(tp);
6908 chipset = tp->mac_version;
6909 tp->txd_version = rtl_chip_infos[chipset].txd_version;
/* Probe the WoL capability bits while the config regs are unlocked. */
6911 RTL_W8(Cfg9346, Cfg9346_Unlock);
6912 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
6913 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
6914 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
6915 tp->features |= RTL_FEATURE_WOL;
6916 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
6917 tp->features |= RTL_FEATURE_WOL;
6918 tp->features |= rtl_try_msi(tp, cfg);
6919 RTL_W8(Cfg9346, Cfg9346_Lock);
/* Select TBI (fiber) vs MII (copper) accessor functions. */
6921 if (rtl_tbi_enabled(tp)) {
6922 tp->set_speed = rtl8169_set_speed_tbi;
6923 tp->get_settings = rtl8169_gset_tbi;
6924 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
6925 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
6926 tp->link_ok = rtl8169_tbi_link_ok;
6927 tp->do_ioctl = rtl_tbi_ioctl;
6929 tp->set_speed = rtl8169_set_speed_xmii;
6930 tp->get_settings = rtl8169_gset_xmii;
6931 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
6932 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
6933 tp->link_ok = rtl8169_xmii_link_ok;
6934 tp->do_ioctl = rtl_xmii_ioctl;
6937 mutex_init(&tp->wk.mutex);
6939 /* Get MAC address */
6940 for (i = 0; i < ETH_ALEN; i++)
6941 dev->dev_addr[i] = RTL_R8(MAC0 + i);
6942 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6944 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
6945 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
6947 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
6949 /* don't enable SG, IP_CSUM and TSO by default - it might not work
6950 * properly for all devices */
6951 dev->features |= NETIF_F_RXCSUM |
6952 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6954 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6955 NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6956 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6959 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6960 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
6961 dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
6963 dev->hw_features |= NETIF_F_RXALL;
6964 dev->hw_features |= NETIF_F_RXFCS;
6966 tp->hw_start = cfg->hw_start;
6967 tp->event_slow = cfg->event_slow;
/* VER_01 has no Rx overflow status bits to mask off. */
6969 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
6970 ~(RxBOVF | RxFOVF) : ~0;
6972 init_timer(&tp->timer);
6973 tp->timer.data = (unsigned long) dev;
6974 tp->timer.function = rtl8169_phy_timer;
6976 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
6978 rc = register_netdev(dev);
6982 pci_set_drvdata(pdev, dev);
6984 netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
6985 rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
6986 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
6987 if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
6988 netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
6989 "tx checksumming: %s]\n",
6990 rtl_chip_infos[chipset].jumbo_max,
6991 rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
/* Start the DASH/OOB firmware on the MACs that carry it. */
6994 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6995 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6996 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6997 rtl8168_driver_start(tp);
7000 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
7002 if (pci_dev_run_wake(pdev))
7003 pm_runtime_put_noidle(&pdev->dev);
7005 netif_carrier_off(dev);
/* Error unwind: NAPI, MSI, regions, MWI, device, netdev (labels elided). */
7011 netif_napi_del(&tp->napi);
7012 rtl_disable_msi(pdev, tp);
7015 pci_release_regions(pdev);
7017 pci_clear_mwi(pdev);
7018 pci_disable_device(pdev);
/* PCI driver descriptor; module_pci_driver() generates init/exit. */
7024 static struct pci_driver rtl8169_pci_driver = {
7026 .id_table = rtl8169_pci_tbl,
7027 .probe = rtl_init_one,
7028 .remove = rtl_remove_one,
7029 .shutdown = rtl_shutdown,
7030 .driver.pm = RTL8169_PM_OPS,
7033 module_pci_driver(rtl8169_pci_driver);