/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)				"bcmgenet: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"
/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_DEFAULT_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt)

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)
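
/* In other words, the descriptor array sits at (t|r)dma_offset and spans
 * TOTAL_DESC descriptors of DMA_DESC_SIZE bytes each; the TDMA/RDMA control
 * registers follow immediately after it, which is what the two offsets
 * above encode.
 */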
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, save these expensive
	 * writes unless the platform is explicitly configured for
	 * 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_length_status(priv, d, val);
	dmadesc_set_addr(priv, d, addr);
}

static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register reads from the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, save these expensive
	 * reads unless the platform is explicitly configured for
	 * 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}
#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)

static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}

/* These macros are defined to deal with the register map change
 * between GENET1.1 and GENET2. Only those currently being used
 * by the driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return __raw_readl(priv->base +
				   priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		__raw_writel(val, priv->base +
			     priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return __raw_readl(priv->base +
				   priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		__raw_writel(val, priv->base +
			     priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;
static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			   DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
		     DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			   DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
		     DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
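
/* Example: bcmgenet_tdma_readl(priv, DMA_CTRL) resolves to
 * base + GENET_TDMA_REG_OFF + DMA_RINGS_SIZE + bcmgenet_dma_regs[DMA_CTRL],
 * so the per-version offset table above is the only part of the address
 * computation that changes across GENET revisions.
 */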
/* RDMA/TDMA ring registers and accessors
 * we merge the common fields and just prefix with T/D the registers
 * having different meaning depending on the direction
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI,
};

/* GENET v4 supports 40-bits pointer addressing
 * for obvious reasons the LO and HI word parts
 * are contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_READ_PTR_HI]	= 0x04,
	[TDMA_CONS_INDEX]	= 0x08,
	[TDMA_PROD_INDEX]	= 0x0C,
	[DMA_RING_BUF_SIZE]	= 0x10,
	[DMA_START_ADDR]	= 0x14,
	[DMA_START_ADDR_HI]	= 0x18,
	[DMA_END_ADDR]		= 0x1C,
	[DMA_END_ADDR_HI]	= 0x20,
	[DMA_MBUF_DONE_THRESH]	= 0x24,
	[TDMA_FLOW_PERIOD]	= 0x28,
	[TDMA_WRITE_PTR]	= 0x2C,
	[TDMA_WRITE_PTR_HI]	= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_CONS_INDEX]	= 0x04,
	[TDMA_PROD_INDEX]	= 0x08,
	[DMA_RING_BUF_SIZE]	= 0x0C,
	[DMA_START_ADDR]	= 0x10,
	[DMA_END_ADDR]		= 0x14,
	[DMA_MBUF_DONE_THRESH]	= 0x18,
	[TDMA_FLOW_PERIOD]	= 0x1C,
	[TDMA_WRITE_PTR]	= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;
static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			   (DMA_RING_SIZE * ring) +
			   genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
		     (DMA_RING_SIZE * ring) +
		     genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			   (DMA_RING_SIZE * ring) +
			   genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
		     (DMA_RING_SIZE * ring) +
		     genet_dma_ring_regs[r]);
}
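
/* Example: bcmgenet_rdma_ring_readl(priv, DESC_INDEX, RDMA_PROD_INDEX) reads
 * base + GENET_RDMA_REG_OFF + DESC_INDEX * DMA_RING_SIZE +
 * genet_dma_ring_regs[RDMA_PROD_INDEX], where RDMA_PROD_INDEX is an alias of
 * TDMA_CONS_INDEX in the merged enum above.
 */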
static int bcmgenet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcmgenet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}
static int bcmgenet_set_rx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 rbuf_chk_ctrl;
	bool rx_csum_en;

	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
	if (rx_csum_en)
		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
	else
		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
	priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (rx_csum_en && priv->crc_fwd_en)
		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
	else
		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);

	return 0;
}

static int bcmgenet_set_tx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	bool desc_64b_en;
	u32 tbuf_ctrl, rbuf_ctrl;

	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

	/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
	if (desc_64b_en) {
		tbuf_ctrl |= RBUF_64B_EN;
		rbuf_ctrl |= RBUF_64B_EN;
	} else {
		tbuf_ctrl &= ~RBUF_64B_EN;
		rbuf_ctrl &= ~RBUF_64B_EN;
	}
	priv->desc_64b_en = desc_64b_en;

	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);

	return 0;
}

static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcmgenet_set_tx_csum(dev, wanted);
	if (changed & (NETIF_F_RXCSUM))
		ret = bcmgenet_set_rx_csum(dev, wanted);

	return ret;
}
static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}

/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};

#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}

/* There is a 0xC gap between the end of RX and beginning of TX stats and then
 * between the end of TX stats and the beginning of the RX RUNT
 */
#define BCMGENET_STAT_OFFSET	0xc
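
/* bcmgenet_update_mib_counters() below accounts for these gaps by adding
 * BCMGENET_STAT_OFFSET to the running MIB offset once it leaves the RX
 * block, i.e. for the TX and RUNT counters.
 */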
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
	info->n_stats = BCMGENET_STATS_LEN;
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
			continue;
		case BCMGENET_STAT_MIB_RX:
		case BCMGENET_STAT_MIB_TX:
		case BCMGENET_STAT_RUNT:
			if (s->type != BCMGENET_STAT_MIB_RX)
				offset = BCMGENET_STAT_OFFSET;
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			break;
		case BCMGENET_STAT_MISC:
			val = bcmgenet_umac_readl(priv, s->reg_offset);
			/* clear if overflowed */
			if (val == ~0)
				bcmgenet_umac_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}

static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u32 *)p;
	}
}
static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27MHz clock automatically */
	reg = __raw_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	__raw_writel(reg, priv->base + off);

	/* Do the same thing for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}
static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(priv->phydev, e);
}

static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(priv->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(priv->phydev, e);
}
/* standard ethtool support functions. */
static struct ethtool_ops bcmgenet_ethtool_ops = {
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_settings		= bcmgenet_get_settings,
	.set_settings		= bcmgenet_set_settings,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
};
/* Power down the unimac, based on mode. */
static void bcmgenet_power_down(struct bcmgenet_priv *priv,
				enum bcmgenet_power_mode mode)
{
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			reg |= (EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		}
		break;
	default:
		break;
	}
}

static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
			 EXT_PWR_DOWN_BIAS);
		/* fallthrough */
	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		reg |= EXT_PWR_DN_EN_LD;
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}

	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

	if (mode == GENET_POWER_PASSIVE)
		bcmgenet_mii_reset(priv->dev);
}
/* ioctl handles special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int val = 0;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			val = -ENODEV;
		else
			val = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;

	default:
		val = -EINVAL;
		break;
	}

	return val;
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
	tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE;
	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}

/* Simple helper to free a control block's resources */
static void bcmgenet_free_cb(struct enet_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}
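
/* Ring 16, the default descriptor ring, is signalled through the INTRL2_0
 * controller, while each priority ring owns one bit in INTRL2_1; the four
 * helpers below mask and unmask the matching interrupt sources.
 */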
static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
						  struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(priv,
				 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
						 struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(priv,
				 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
					       struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
				 INTRL2_CPU_MASK_CLEAR);
	priv->int1_mask &= ~(1 << ring->index);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
						struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
				 INTRL2_CPU_MASK_SET);
	priv->int1_mask |= (1 << ring->index);
}
/* Unlocked version of the reclaim routine */
static void __bcmgenet_tx_reclaim(struct net_device *dev,
				  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int last_tx_cn, last_c_index, num_tx_bds;
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	unsigned int bds_compl;
	unsigned int c_index;

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	txq = netdev_get_tx_queue(dev, ring->queue);

	last_c_index = ring->c_index;
	num_tx_bds = ring->size;

	c_index &= (num_tx_bds - 1);

	if (c_index >= last_c_index)
		last_tx_cn = c_index - last_c_index;
	else
		last_tx_cn = num_tx_bds - last_c_index + c_index;

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
		  __func__, ring->index,
		  c_index, last_tx_cn, last_c_index);

	/* Reclaim transmitted buffers */
	while (last_tx_cn-- > 0) {
		tx_cb_ptr = ring->cbs + last_c_index;
		bds_compl = 0;
		if (tx_cb_ptr->skb) {
			bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
			dma_unmap_single(&dev->dev,
					 dma_unmap_addr(tx_cb_ptr, dma_addr),
					 tx_cb_ptr->skb->len,
					 DMA_TO_DEVICE);
			bcmgenet_free_cb(tx_cb_ptr);
		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
			dev->stats.tx_bytes +=
				dma_unmap_len(tx_cb_ptr, dma_len);
			dma_unmap_page(&dev->dev,
				       dma_unmap_addr(tx_cb_ptr, dma_addr),
				       dma_unmap_len(tx_cb_ptr, dma_len),
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
		}
		dev->stats.tx_packets++;
		ring->free_bds += bds_compl;

		last_c_index++;
		last_c_index &= (num_tx_bds - 1);
	}

	if (ring->free_bds > (MAX_SKB_FRAGS + 1))
		ring->int_disable(priv, ring);

	if (netif_tx_queue_stopped(txq))
		netif_tx_wake_queue(txq);

	ring->c_index = c_index;
}
static void bcmgenet_tx_reclaim(struct net_device *dev,
				struct bcmgenet_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}
/* Transmits a single SKB (either head of a fragment or a single SKB)
 * caller must hold priv->lock
 */
static int bcmgenet_xmit_single(struct net_device *dev,
				struct sk_buff *skb,
				u16 dma_desc_flags,
				struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int skb_len;
	dma_addr_t mapping;
	u32 length_status;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = skb;

	skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
		dev_kfree_skb(skb);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
			DMA_TX_APPEND_CRC;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		length_status |= DMA_TX_DO_CSUM;

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= 1;
	ring->prod_index += 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	return 0;
}

/* Transmit a SKB fragment */
static int bcmgenet_xmit_frag(struct net_device *dev,
			      skb_frag_t *frag,
			      u16 dma_desc_flags,
			      struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	dma_addr_t mapping;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = NULL;

	mapping = skb_frag_dma_map(kdev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
			  __func__);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
		    (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));

	ring->free_bds -= 1;
	ring->prod_index += 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	return 0;
}
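
/* Note that only the head descriptor carries DMA_SOP, DMA_TX_APPEND_CRC and
 * the optional DMA_TX_DO_CSUM; fragment descriptors only add the qtag mask
 * and, on the last fragment, DMA_EOP.
 */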
/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
					    struct sk_buff *skb)
{
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	u16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		dev_kfree_skb(skb);
		if (!new_skb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = new_skb;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
				(offset + skb->csum_offset);

		/* Set the length valid bit for TCP and UDP and just set
		 * the special UDP flag for IPv4, else just set to 0.
		 */
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			tx_csum_info |= STATUS_TX_CSUM_LV;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
		} else {
			tx_csum_info = 0;
		}

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}
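
/* When priv->desc_64b_en is set, the hardware consumes this 64B status
 * block placed in front of the frame: tx_csum_info packs the checksum start
 * offset and the offset at which the computed checksum must be stored.
 */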
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_tx_ring *ring = NULL;
	struct netdev_queue *txq;
	unsigned long flags = 0;
	int nr_frags, index;
	u16 dma_desc_flags;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet transmitted through ring16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;

	nr_frags = skb_shinfo(skb)->nr_frags;
	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->free_bds <= nr_frags + 1) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
			   __func__, index, ring->queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* set the SKB transmit checksum */
	if (priv->desc_64b_en) {
		skb = bcmgenet_put_tx_csum(dev, skb);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	dma_desc_flags = DMA_SOP;
	if (nr_frags == 0)
		dma_desc_flags |= DMA_EOP;

	/* Transmit single SKB or head of fragment list */
	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
	if (ret) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* xmit fragments */
	for (i = 0; i < nr_frags; i++) {
		ret = bcmgenet_xmit_frag(dev,
					 &skb_shinfo(skb)->frags[i],
					 (i == nr_frags - 1) ? DMA_EOP : 0,
					 ring);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_tx_timestamp(skb);

	/* we kept a software copy of how much we should advance the TDMA
	 * producer index, now write it down to the hardware
	 */
	bcmgenet_tdma_ring_writel(priv, ring->index,
				  ring->prod_index, TDMA_PROD_INDEX);

	if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
		netif_tx_stop_queue(txq);
		ring->int_enable(priv, ring);
	}

out:
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}
static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret;

	skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
	if (!skb)
		return -ENOMEM;

	/* a caller did not release this control block */
	WARN_ON(cb->skb != NULL);
	cb->skb = skb;
	mapping = dma_map_single(kdev, skb->data,
				 priv->rx_buf_len, DMA_FROM_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.rx_dma_failed++;
		bcmgenet_free_cb(cb);
		netif_err(priv, rx_err, priv->dev,
			  "%s DMA map failed\n", __func__);
		return ret;
	}

	dma_unmap_addr_set(cb, dma_addr, mapping);
	/* assign packet, prepare descriptor, and advance pointer */
	dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);

	/* turn on the newly assigned BD for DMA to use */
	priv->rx_bd_assign_index++;
	priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);

	priv->rx_bd_assign_ptr = priv->rx_bds +
		(priv->rx_bd_assign_index * DMA_DESC_SIZE);

	return 0;
}
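
/* The refill path keeps skb and DMA mapping strictly paired: a control
 * block either has both or neither, which is what bcmgenet_desc_rx() relies
 * on when it encounters cb->skb == NULL below.
 */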
/* bcmgenet_desc_rx - descriptor based rx process.
 * this could be called from bottom half, or from NAPI polling method.
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
				     unsigned int budget)
{
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len, err;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int p_index;
	unsigned int chksum_ok = 0;

	p_index = bcmgenet_rdma_ring_readl(priv, DESC_INDEX, RDMA_PROD_INDEX);
	p_index &= DMA_P_INDEX_MASK;

	if (p_index < priv->rx_c_index)
		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) -
			priv->rx_c_index + p_index;
	else
		rxpkttoprocess = p_index - priv->rx_c_index;

	netif_dbg(priv, rx_status, dev,
		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

	while ((rxpktprocessed < rxpkttoprocess) &&
	       (rxpktprocessed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = cb->skb;

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcmgenet_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			goto refill;
		}

		/* Unmap the packet contents such that we can use the
		 * RSV from the 64 bytes descriptor when enabled and save
		 * a 32-bits register read
		 */
		dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
				 priv->rx_buf_len, DMA_FROM_DEVICE);

		if (!priv->desc_64b_en) {
			dma_length_status =
				dmadesc_get_length_status(priv,
							  priv->rx_bds +
							  (priv->rx_read_ptr *
							   DMA_DESC_SIZE));
		} else {
			struct status_64 *status;

			status = (struct status_64 *)skb->data;
			dma_length_status = status->length_status;
		}

		/* DMA flags and length are still valid no matter how
		 * we got the Receive Status Vector (64B RSB or register)
		 */
		dma_flag = dma_length_status & 0xffff;
		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

		netif_dbg(priv, rx_status, dev,
			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			  __func__, p_index, priv->rx_c_index,
			  priv->rx_read_ptr, dma_length_status);

		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
			netif_err(priv, rx_status, dev,
				  "dropping fragmented packet!\n");
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			goto refill;
		}
		/* report errors */
		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
					 DMA_RX_OV |
					 DMA_RX_NO |
					 DMA_RX_LG |
					 DMA_RX_RXER))) {
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
				  (unsigned int)dma_flag);
			if (dma_flag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dma_flag & DMA_RX_OV)
				dev->stats.rx_over_errors++;
			if (dma_flag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dma_flag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;

			/* discard the packet and advance consumer index. */
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			goto refill;
		} /* error packet */

		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
			    priv->desc_rxchk_en;

		skb_put(skb, len);
		if (priv->desc_64b_en) {
			skb_pull(skb, 64);
			len -= 64;
		}

		if (likely(chksum_ok))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* remove hardware 2bytes added for IP alignment */
		skb_pull(skb, 2);
		len -= 2;

		if (priv->crc_fwd_en) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		/* Finish setting up the received SKB and send it to the
		 * kernel
		 */
		skb->protocol = eth_type_trans(skb, priv->dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		if (dma_flag & DMA_RX_MULT)
			dev->stats.multicast++;

		/* Notify kernel */
		napi_gro_receive(&priv->napi, skb);
		cb->skb = NULL;
		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");

		/* refill RX path on the current control block */
refill:
		err = bcmgenet_rx_refill(priv, cb);
		if (err) {
			priv->mib.alloc_rx_buff_failed++;
			netif_err(priv, rx_err, dev, "Rx refill failed\n");
		}

		rxpktprocessed++;
		priv->rx_read_ptr++;
		priv->rx_read_ptr &= (priv->num_rx_bds - 1);
	}

	return rxpktprocessed;
}
/* Assign skb to RX DMA descriptor. */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int ret = 0;
	int i;

	netif_dbg(priv, hw, priv->dev, "%s:\n", __func__);

	/* loop here for each buffer needing assign */
	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[priv->rx_bd_assign_index];
		if (cb->skb)
			continue;

		ret = bcmgenet_rx_refill(priv, cb);
		if (ret)
			break;
	}

	return ret;
}

static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];

		if (dma_unmap_addr(cb, dma_addr)) {
			dma_unmap_single(&priv->dev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 priv->rx_buf_len, DMA_FROM_DEVICE);
			dma_unmap_addr_set(cb, dma_addr, 0);
		}

		if (cb->skb)
			bcmgenet_free_cb(cb);
	}
}
static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
	u32 reg;

	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-size packet
	 * to be processed
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static int reset_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	unsigned int timeout = 0;
	u32 reg;

	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
	bcmgenet_rbuf_ctrl_set(priv, 0);
	udelay(10);

	/* disable MAC while updating its registers */
	bcmgenet_umac_writel(priv, 0, UMAC_CMD);

	/* issue soft reset, wait for it to complete */
	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
	while (timeout++ < 1000) {
		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
		if (!(reg & CMD_SW_RESET))
			return 0;

		udelay(1);
	}

	/* the loop above only falls through on timeout */
	dev_err(kdev, "timeout waiting for MAC to come out of reset\n");
	return -ETIMEDOUT;
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
{
	/* Mask all interrupts. */
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
static int init_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	int ret;
	u32 reg, cpu_mask_clear;

	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

	ret = reset_umac(priv);
	if (ret)
		return ret;

	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
	/* clear tx/rx counter */
	bcmgenet_umac_writel(priv,
			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
			     UMAC_MIB_CTRL);
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);

	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* init rx registers, enable ip header optimization */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
	reg |= RBUF_ALIGN_2B;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);

	bcmgenet_intr_disable(priv);

	cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;

	dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);

	/* Monitor cable plug/unplugged event for internal PHY */
	if (phy_is_internal(priv->phydev)) {
		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
	} else if (priv->ext_phy) {
		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		reg = bcmgenet_bp_mc_get(priv);
		reg |= BIT(priv->hw_params->bp_in_en_shift);

		/* bp_mask: back pressure mask */
		if (netif_is_multiqueue(priv->dev))
			reg |= priv->hw_params->bp_in_mask;
		else
			reg &= ~priv->hw_params->bp_in_mask;
		bcmgenet_bp_mc_set(priv, reg);
	}

	/* Enable MDIO interrupts on GENET v3+ */
	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
		cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;

	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);

	/* Enable rx/tx engine. */
	dev_dbg(kdev, "done init umac\n");

	return 0;
}
/* Initialize all house-keeping variables for a TX ring, along
 * with corresponding hardware registers
 */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index, unsigned int size,
				  unsigned int write_ptr, unsigned int end_ptr)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	u32 flow_period_val = 0;
	unsigned int first_bd;

	spin_lock_init(&ring->lock);
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->queue = 0;
		ring->int_enable = bcmgenet_tx_ring16_int_enable;
		ring->int_disable = bcmgenet_tx_ring16_int_disable;
	} else {
		ring->queue = index + 1;
		ring->int_enable = bcmgenet_tx_ring_int_enable;
		ring->int_disable = bcmgenet_tx_ring_int_disable;
	}
	ring->cbs = priv->tx_cbs + write_ptr;
	ring->size = size;
	ring->c_index = 0;
	ring->free_bds = size;
	ring->write_ptr = write_ptr;
	ring->cb_ptr = write_ptr;
	ring->end_ptr = end_ptr - 1;
	ring->prod_index = 0;

	/* Set flow period for ring != 16 */
	if (index != DESC_INDEX)
		flow_period_val = ENET_MAX_MTU_SIZE << 16;

	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	/* Disable rate control for now */
	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
				  TDMA_FLOW_PERIOD);
	/* Unclassified traffic goes to ring 16 */
	bcmgenet_tdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);

	first_bd = write_ptr;

	/* Set start and end address, read and write pointers */
	bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
				  TDMA_READ_PTR);
	bcmgenet_tdma_ring_writel(priv, index, first_bd,
				  TDMA_WRITE_PTR);
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);
}
/* Initialize a RDMA ring */
static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
				 unsigned int index, unsigned int size)
{
	u32 words_per_bd = WORDS_PER_BD(priv);
	int ret;

	priv->num_rx_bds = TOTAL_DESC;
	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
	priv->rx_bd_assign_ptr = priv->rx_bds;
	priv->rx_bd_assign_index = 0;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs)
		return -ENOMEM;

	ret = bcmgenet_alloc_rx_buffers(priv);
	if (ret) {
		kfree(priv->rx_cbs);
		return ret;
	}

	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
	bcmgenet_rdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
	bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
	bcmgenet_rdma_ring_writel(priv, index,
				  words_per_bd * size - 1, DMA_END_ADDR);
	bcmgenet_rdma_ring_writel(priv, index,
				  (DMA_FC_THRESH_LO <<
				   DMA_XOFF_THRESHOLD_SHIFT) |
				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);

	return ret;
}
/* init multi xmit queues, only available for GENET2+
 * the queue is partitioned as follows:
 *
 * queue 0 - 3 is priority based, each one has 32 descriptors,
 * with queue 0 being the highest priority queue.
 *
 * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT
 * descriptors: 256 - (number of tx queues * bds per queues) = 128
 * descriptors.
 *
 * The transmit control block pool is then partitioned as follows
 * (matching the write pointers programmed below):
 * - tx_ring_cbs[0] points to tx_cbs[0..31]
 * - tx_ring_cbs[1] points to tx_cbs[32..63]
 * - tx_ring_cbs[2] points to tx_cbs[64..95]
 * - tx_ring_cbs[3] points to tx_cbs[96..127]
 * - tx_cbs[128...255] are for queue 16
 */
static void bcmgenet_init_multiq(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i, dma_enable;
	u32 reg, dma_ctrl, ring_cfg = 0;
	u32 dma_priority[3] = {0, 0, 0};

	if (!netif_is_multiqueue(dev)) {
		netdev_warn(dev, "called with non multi queue aware HW\n");
		return;
	}

	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);

	/* Enable strict priority arbiter mode */
	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);

	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		/* ring i takes tx_cbs[i * bds_cnt..(i + 1) * bds_cnt - 1],
		 * the default tx queue (ring 16) uses the remainder
		 */
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
				      i * priv->hw_params->bds_cnt,
				      (i + 1) * priv->hw_params->bds_cnt);

		/* Configure ring as descriptor ring and setup priority */
		ring_cfg |= 1 << i;
		dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT);

		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
	}

	/* Set ring 16 priority and program the hardware registers */
	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
		 DMA_PRIO_REG_SHIFT(DESC_INDEX));
	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);

	/* Enable rings */
	reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG);
	reg |= ring_cfg;
	bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG);

	/* Configure ring as descriptor ring and re-enable DMA if enabled */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	if (dma_enable)
		reg |= DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
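
/* With the default partitioning described above (4 queues of 32 BDs each)
 * this leaves GENET_DEFAULT_BD_CNT = 256 - 4 * 32 = 128 descriptors for
 * ring 16, which bcmgenet_init_dma() programs right after calling this
 * function.
 */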
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
	int timeout = 0;
	u32 reg;

	/* Disable TDMA to stop adding more frames to TX DMA */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	/* Check TDMA status register to confirm TDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout > DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
		return -ETIMEDOUT;
	}

	/* Wait 10ms for packet drain in both tx and rx dma */
	usleep_range(10000, 20000);

	/* Disable RDMA */
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	timeout = 0;
	/* Check RDMA status register to confirm RDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout > DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
	int i;

	/* disable DMA */
	bcmgenet_dma_teardown(priv);

	for (i = 0; i < priv->num_tx_bds; i++) {
		if (priv->tx_cbs[i].skb != NULL) {
			dev_kfree_skb(priv->tx_cbs[i].skb);
			priv->tx_cbs[i].skb = NULL;
		}
	}

	bcmgenet_free_rx_buffers(priv);
	kfree(priv->rx_cbs);
	kfree(priv->tx_cbs);
}
/* init_edma: Initialize DMA control register */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
	int ret;

	netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n");

	/* by default, enable ring 16 (descriptor based) */
	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC);
	if (ret) {
		netdev_err(priv->dev, "failed to initialize RX ring\n");
		return ret;
	}

	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize common TX ring structures */
	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
	priv->num_tx_bds = TOTAL_DESC;
	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->tx_cbs) {
		bcmgenet_fini_dma(priv);
		return -ENOMEM;
	}

	/* initialize multi xmit queue */
	bcmgenet_init_multiq(priv->dev);

	/* initialize special ring 16 */
	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
			      priv->hw_params->tx_queues *
			      priv->hw_params->bds_cnt,
			      TOTAL_DESC);

	return 0;
}
/* NAPI polling method */
static int bcmgenet_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_priv *priv = container_of(napi,
			struct bcmgenet_priv, napi);
	unsigned int work_done;

	/* tx reclaim */
	bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);

	work_done = bcmgenet_desc_rx(priv, budget);

	/* Advancing our consumer index */
	priv->rx_c_index += work_done;
	priv->rx_c_index &= DMA_C_INDEX_MASK;
	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
				  priv->rx_c_index, RDMA_CONS_INDEX);
	if (work_done < budget) {
		napi_complete(napi);
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
					 INTRL2_CPU_MASK_CLEAR);
	}

	return work_done;
}
/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
	struct bcmgenet_priv *priv = container_of(
			work, struct bcmgenet_priv, bcmgenet_irq_work);

	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

	if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
		priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
		netif_dbg(priv, wol, priv->dev,
			  "magic packet detected, waking up\n");
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
	}

	/* Link UP/DOWN event */
	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    (priv->irq0_stat & (UMAC_IRQ_LINK_UP | UMAC_IRQ_LINK_DOWN))) {
		phy_mac_interrupt(priv->phydev,
				  priv->irq0_stat & UMAC_IRQ_LINK_UP);
		priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP | UMAC_IRQ_LINK_DOWN);
	}
}
/* bcmgenet_isr1: interrupt handler for ring buffer. */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	unsigned int index;

	/* Save irq status for bottom-half processing. */
	priv->irq1_stat =
		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
		~priv->int1_mask;
	/* clear interrupts */
	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
	/* Check the MBDONE interrupts.
	 * packet is done, reclaim descriptors
	 */
	if (priv->irq1_stat & 0x0000ffff) {
		for (index = 0; index < 16; index++) {
			if (priv->irq1_stat & (1 << index))
				bcmgenet_tx_reclaim(priv->dev,
						    &priv->tx_rings[index]);
		}
	}

	return IRQ_HANDLED;
}
/* bcmgenet_isr0: Handle various interrupts. */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	/* Save irq status for bottom-half processing. */
	priv->irq0_stat =
		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	/* clear interrupts */
	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "IRQ=0x%x\n", priv->irq0_stat);

	if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
		/* We use NAPI (software interrupt throttling), if
		 * Rx Descriptor throttling is not used.
		 * Disable interrupt, will be enabled in the poll method.
		 */
		if (likely(napi_schedule_prep(&priv->napi))) {
			bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
						 INTRL2_CPU_MASK_SET);
			__napi_schedule(&priv->napi);
		}
	}
	if (priv->irq0_stat &
	    (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
		/* Tx reclaim */
		bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
	}
	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
			       UMAC_IRQ_PHY_DET_F |
			       UMAC_IRQ_LINK_UP |
			       UMAC_IRQ_LINK_DOWN |
			       UMAC_IRQ_MPD_R)) {
		/* all other interested interrupts handled in bottom half */
		schedule_work(&priv->bcmgenet_irq_work);
	}

	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
		wake_up(&priv->wq);
	}

	return IRQ_HANDLED;
}
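
/* Interrupt routing summary: isr0 covers ring 16 plus the slow-path events
 * (link, MDIO, wake-on-LAN), isr1 only the per-ring TX completions; both
 * just ack and record status, the heavier work runs in NAPI or in the
 * bcmgenet_irq_work bottom half.
 */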
static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
	u32 reg;

	reg = bcmgenet_rbuf_ctrl_get(priv);
	reg |= BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);

	reg &= ~BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);
}

static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
				 unsigned char *addr)
{
	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			     (addr[2] << 8) | addr[3], UMAC_MAC0);
	bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
	u32 reg;
	u32 dma_ctrl;

	/* disable DMA */
	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
	udelay(10);
	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);

	return dma_ctrl;
}

static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
{
	u32 reg;

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
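
/* bcmgenet_open() saves the value returned by bcmgenet_dma_disable() and
 * hands it back to bcmgenet_enable_dma() once the rings have been
 * reprogrammed, so only the rings that were running (plus DMA_EN) get
 * re-enabled.
 */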
static void bcmgenet_netif_start(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Start the network engine */
	napi_enable(&priv->napi);

	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);

	if (phy_is_internal(priv->phydev))
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	netif_tx_start_all_queues(dev);

	phy_start(priv->phydev);
}
static int bcmgenet_open(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	u32 reg;
	int ret;

	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");

	/* Turn on the clock */
	if (!IS_ERR(priv->clk))
		clk_prepare_enable(priv->clk);

	/* take MAC out of reset */
	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto err_clk_disable;

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	/* Make sure we reflect the value of CRC_CMD_FWD */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (phy_is_internal(priv->phydev)) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto err_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
		goto err_fini_dma;
	}

	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
		goto err_irq0;
	}

	/* Re-configure the port multiplexer towards the PHY device */
	bcmgenet_mii_config(priv->dev, false);

	phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
			   priv->phy_interface);

	bcmgenet_netif_start(dev);

	return 0;

err_irq0:
	free_irq(priv->irq0, priv);
err_fini_dma:
	bcmgenet_fini_dma(priv);
err_clk_disable:
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);
	return ret;
}
static void bcmgenet_netif_stop(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	phy_stop(priv->phydev);

	bcmgenet_intr_disable(priv);

	/* Wait for pending work items to complete. Since interrupts are
	 * disabled no new work will be scheduled.
	 */
	cancel_work_sync(&priv->bcmgenet_irq_work);

	priv->old_link = -1;
	priv->old_speed = -1;
	priv->old_duplex = -1;
	priv->old_pause = -1;
}
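
/* ndo_stop handler, the reverse of bcmgenet_open(). TX DMA is torn down
 * before the MAC transmitter is disabled so no descriptor is left in
 * flight.
 */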
static int bcmgenet_close(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");

	bcmgenet_netif_stop(dev);

	/* Really kill the PHY state machine and disconnect from it */
	phy_disconnect(priv->phydev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA must be disabled before this */
	umac_enable_set(priv, CMD_TX_EN, false);

	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	free_irq(priv->irq0, priv);
	free_irq(priv->irq1, priv);

	if (phy_is_internal(priv->phydev))
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	return 0;
}
static void bcmgenet_timeout(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");

	dev->trans_start = jiffies;

	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}
#define MAX_MC_COUNT	16
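
/* Load one 48-bit address into a pair of MDF (MAC destination filter)
 * address words and set the matching enable bit; *i and *mc track the
 * next free register word and the number of filters in use.
 */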
static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
					 unsigned char *addr,
					 int *i,
					 int *mc)
{
	u32 reg;

	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
			     UMAC_MDF_ADDR + (*i * 4));
	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
			     addr[4] << 8 | addr[5],
			     UMAC_MDF_ADDR + ((*i + 1) * 4));
	reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
	reg |= (1 << (MAX_MC_COUNT - *mc));
	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
	*i += 2;
	(*mc)++;
}
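
/* ndo_set_rx_mode handler: either go promiscuous or rebuild the MDF
 * filter list from broadcast, own address, unicast and multicast entries.
 */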
static void bcmgenet_set_rx_mode(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, mc;
	u32 reg;

	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);

	/* Promiscuous mode */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC) {
		reg |= CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
		return;
	} else {
		reg &= ~CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	}

	/* UniMac doesn't support ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		netdev_warn(dev, "ALLMULTI is not supported\n");
		return;
	}

	/* update MDF filter */
	i = 0;
	mc = 0;
	/* Broadcast */
	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
	/* my own address */
	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
	/* Unicast list */
	if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
		return;

	if (!netdev_uc_empty(dev))
		netdev_for_each_uc_addr(ha, dev)
			bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);

	/* Multicast */
	if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
		return;

	netdev_for_each_mc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
}
/* Set the hardware MAC address. */
static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	/* Setting the MAC address at the hardware level is not possible
	 * without disabling the UniMAC RX/TX enable bits.
	 */
	if (netif_running(dev))
		return -EBUSY;

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}
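
/* net_device callbacks wired into the networking core */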
static const struct net_device_ops bcmgenet_netdev_ops = {
	.ndo_open		= bcmgenet_open,
	.ndo_stop		= bcmgenet_close,
	.ndo_start_xmit		= bcmgenet_xmit,
	.ndo_tx_timeout		= bcmgenet_timeout,
	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
	.ndo_do_ioctl		= bcmgenet_ioctl,
	.ndo_set_features	= bcmgenet_set_features,
};
/* Array of GENET hardware parameters/characteristics */
static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
	[GENET_V1] = {
		.tx_queues = 0,
		.rx_queues = 0,
		.bds_cnt = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.hfb_offset = 0x1000,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x3000,
		.words_per_bd = 2,
	},
	[GENET_V2] = {
		.tx_queues = 4,
		.rx_queues = 4,
		.bds_cnt = 32,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x1000,
		.hfb_reg_offset = 0x2000,
		.rdma_offset = 0x3000,
		.tdma_offset = 0x4000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT,
	},
	[GENET_V3] = {
		.tx_queues = 4,
		.rx_queues = 4,
		.bds_cnt = 32,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x10000,
		.tdma_offset = 0x11000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
	},
	[GENET_V4] = {
		.tx_queues = 4,
		.rx_queues = 4,
		.bds_cnt = 32,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
	},
};
/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
	struct bcmgenet_hw_params *params;
	u32 reg;
	u8 major;

	if (GENET_IS_V4(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v4;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V4;
	} else if (GENET_IS_V3(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V3;
	} else if (GENET_IS_V2(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V2;
	} else if (GENET_IS_V1(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V1;
	}

	/* enum genet_version starts at 1 */
	priv->hw_params = &bcmgenet_hw_params[priv->version];
	params = priv->hw_params;

	/* Read GENET HW version */
	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
	major = (reg >> 24 & 0x0f);
	if (major == 5)
		major = 4;
	else if (major == 0)
		major = 1;
	if (major != priv->version) {
		dev_err(&priv->pdev->dev,
			"GENET version mismatch, got: %d, configured for: %d\n",
			major, priv->version);
	}

	/* Print the GENET core version */
	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
		 major, (reg >> 16) & 0x0f, reg & 0xffff);

	/* Store the integrated PHY revision for the MDIO probing function
	 * to pass this information to the PHY driver. The PHY driver expects
	 * to find the PHY major revision in bits 15:8 while the GENET register
	 * stores that information in bits 7:0, account for that.
	 */
	priv->gphy_rev = (reg & 0xffff) << 8;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!(params->flags & GENET_HAS_40BITS))
		pr_warn("GENET does not support 40-bits PA\n");
#endif

	pr_debug("Configuration for version: %d\n"
		 "TXq: %1d, RXq: %1d, BDs: %1d\n"
		 "BP << en: %2d, BP msk: 0x%05x\n"
		 "HFB count: %2d, QTAQ msk: 0x%05x\n"
		 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
		 "RDMA: 0x%05x, TDMA: 0x%05x\n"
		 "Words/BD: %d\n",
		 priv->version,
		 params->tx_queues, params->rx_queues, params->bds_cnt,
		 params->bp_in_en_shift, params->bp_in_mask,
		 params->hfb_filter_cnt, params->qtag_mask,
		 params->tbuf_offset, params->hfb_offset,
		 params->hfb_reg_offset,
		 params->rdma_offset, params->tdma_offset,
		 params->words_per_bd);
}
static const struct of_device_id bcmgenet_match[] = {
	{ .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
	{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
	{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
	{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
	{ },
};
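
/* For illustration, a hypothetical device tree node this table would
 * match; register ranges, interrupts and the MAC address are
 * board-specific:
 *
 *	ethernet@f0b60000 {
 *		compatible = "brcm,genet-v4";
 *		reg = <0xf0b60000 0x11c00>;
 *		interrupts = <0x0 0x16 0x0>, <0x0 0x17 0x0>;
 *		local-mac-address = [ 00 10 18 36 22 1e ];
 *		phy-mode = "rgmii-txid";
 *	};
 */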
static int bcmgenet_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id;
	struct bcmgenet_priv *priv;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	int err = -EIO;

	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */
	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1);
	if (!dev) {
		dev_err(&pdev->dev, "can't allocate net device\n");
		return -ENOMEM;
	}

	of_id = of_match_node(bcmgenet_match, dn);
	if (!of_id) {
		err = -EINVAL;
		goto err;
	}

	priv = netdev_priv(dev);
	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	priv->wol_irq = platform_get_irq(pdev, 2);
	if (!priv->irq0 || !priv->irq1) {
		dev_err(&pdev->dev, "can't find IRQs\n");
		err = -EINVAL;
		goto err;
	}

	macaddr = of_get_mac_address(dn);
	if (!macaddr) {
		dev_err(&pdev->dev, "can't find MAC address\n");
		err = -EINVAL;
		goto err;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		err = PTR_ERR(priv->base);
		goto err;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	ether_addr_copy(dev->dev_addr, macaddr);
	dev->watchdog_timeo = 2 * HZ;
	dev->ethtool_ops = &bcmgenet_ethtool_ops;
	dev->netdev_ops = &bcmgenet_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);

	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);

	/* Set hardware features */
	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = true;
	err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
			       dev->name, priv);
	if (!err)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom to account for any possible
	 * features enabling/disabling at runtime
	 */
	dev->needed_headroom += 64;

	netdev_boot_setup_check(dev);

	priv->dev = dev;
	priv->pdev = pdev;
	priv->version = (enum bcmgenet_version)of_id->data;

	priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
	if (IS_ERR(priv->clk))
		dev_warn(&priv->pdev->dev, "failed to get enet clock\n");

	if (!IS_ERR(priv->clk))
		clk_prepare_enable(priv->clk);

	bcmgenet_set_hw_params(priv);

	/* Mii wait queue */
	init_waitqueue_head(&priv->wq);
	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
	priv->rx_buf_len = RX_BUF_LENGTH;
	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);

	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
	if (IS_ERR(priv->clk_wol))
		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");

	priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
	if (IS_ERR(priv->clk_eee)) {
		dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
		priv->clk_eee = NULL;
	}

	err = reset_umac(priv);
	if (err)
		goto err_clk_disable;

	err = bcmgenet_mii_init(dev);
	if (err)
		goto err_clk_disable;

	/* setup number of real queues + 1 (GENET_V1 has 0 hardware queues,
	 * just the ring 16 descriptor based TX)
	 */
	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);

	/* libphy will determine the link state */
	netif_carrier_off(dev);

	/* Turn off the main clock, WOL clock is handled separately */
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	err = register_netdev(dev);
	if (err)
		goto err;

	return err;

err_clk_disable:
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);
err:
	free_netdev(dev);
	return err;
}
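
/* Undo bcmgenet_probe(): unregister the netdev and tear down the MDIO
 * bus; devm-managed clocks, IRQs and MMIO mappings are released by the
 * driver core.
 */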
static int bcmgenet_remove(struct platform_device *pdev)
{
	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);
	unregister_netdev(priv->dev);
	bcmgenet_mii_exit(priv->dev);
	free_netdev(priv->dev);

	return 0;
}
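
/* System sleep support: suspend mirrors bcmgenet_close() but keeps the
 * device registered and, when Wake-on-LAN is armed, leaves the slow
 * WOL clock running.
 */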
#ifdef CONFIG_PM_SLEEP
static int bcmgenet_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	if (!netif_running(dev))
		return 0;

	bcmgenet_netif_stop(dev);

	phy_suspend(priv->phydev);

	netif_device_detach(dev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA must be disabled before this */
	umac_enable_set(priv, CMD_TX_EN, false);

	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
	if (device_may_wakeup(d) && priv->wolopts) {
		bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
		clk_prepare_enable(priv->clk_wol);
	}

	/* Turn off the clocks */
	clk_disable_unprepare(priv->clk);

	return 0;
}
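
/* Resume reverses bcmgenet_suspend(): re-clock and re-initialize the
 * UniMAC, restore the PHY and DMA state, then restart the interface.
 */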
static int bcmgenet_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	int ret;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	/* Turn on the clock */
	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto out_clk_disable;

	/* From WOL-enabled suspend, switch to regular clock */
	if (priv->wolopts)
		clk_disable_unprepare(priv->clk_wol);

	phy_init_hw(priv->phydev);
	/* Speed settings must be restored */
	bcmgenet_mii_config(priv->dev, false);

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (phy_is_internal(priv->phydev)) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	if (priv->wolopts)
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto out_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	netif_device_attach(dev);

	phy_resume(priv->phydev);

	if (priv->eee.eee_enabled)
		bcmgenet_eee_enable_set(dev, true);

	bcmgenet_netif_start(dev);

	return 0;

out_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
static struct platform_driver bcmgenet_driver = {
	.probe	= bcmgenet_probe,
	.remove	= bcmgenet_remove,
	.driver	= {
		.name	= "bcmgenet",
		.owner	= THIS_MODULE,
		.of_match_table = bcmgenet_match,
		.pm	= &bcmgenet_pm_ops,
	},
};
module_platform_driver(bcmgenet_driver);
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
MODULE_ALIAS("platform:bcmgenet");
MODULE_LICENSE("GPL");