/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt)				"bcmgenet: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"
/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_DEFAULT_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt)

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32
/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)
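
/* Worked example of the layout above (illustrative only): with
 * words_per_bd == 2, as on GENET v1-v3, the 256 descriptors occupy
 * 256 * 2 * sizeof(u32) = 0x800 bytes at the start of each DMA block,
 * so for a GENET v2 part with tdma_offset == 0x4000 the TDMA control
 * registers begin at 0x4000 + 0x800 = 0x4800.
 */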
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}
static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, save these expensive
	 * writes unless the platform is explicitly configured for
	 * 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}
/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_length_status(priv, d, val);
	dmadesc_set_addr(priv, d, addr);
}
static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register reads from the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, save these expensive
	 * reads unless the platform is explicitly configured for
	 * 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}
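
/* Illustrative sketch of the 40-bit reassembly done above (GENET v4 only,
 * assuming a buffer mapped at DMA address 0x1234567890):
 *
 *	DMA_DESC_ADDRESS_LO holds 0x34567890
 *	DMA_DESC_ADDRESS_HI holds 0x00000012
 *	addr = 0x34567890 | ((u64)0x12 << 32);	// 0x1234567890
 */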
#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)
static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}
/* These macros are defined to deal with the register map change
 * between GENET1.1 and GENET2. Only those currently being used
 * by the driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
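
/* Example of the dispatch above (illustrative): on GENET v1 the TBUF
 * registers live inside the RBUF block, so bcmgenet_tbuf_ctrl_get() reads
 * RBUF + TBUF_CTRL_V1, while on v2 and later the same logical register is
 * read from a dedicated TBUF block at hw_params->tbuf_offset + TBUF_CTRL
 * (0x0600 + TBUF_CTRL with the parameters further down in this file).
 */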
/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
};
static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
};
static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
};
/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;

static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
/* RDMA/TDMA ring registers and accessors
 * we merge the common fields and just prefix with T/D the registers
 * having different meaning depending on the direction
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI,
};
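
/* Example of the aliasing above (illustrative): hardware produces on the
 * RX side and consumes on the TX side, so the register the TX code calls
 * TDMA_READ_PTR sits at the same per-ring offset as the one the RX code
 * calls RDMA_WRITE_PTR, and RDMA_PROD_INDEX shares its slot with
 * TDMA_CONS_INDEX. Both names index the same entry in the tables below.
 */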
/* GENET v4 supports 40-bits pointer addressing
 * for obvious reasons the LO and HI word parts
 * are contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_READ_PTR_HI]	= 0x04,
	[TDMA_CONS_INDEX]	= 0x08,
	[TDMA_PROD_INDEX]	= 0x0C,
	[DMA_RING_BUF_SIZE]	= 0x10,
	[DMA_START_ADDR]	= 0x14,
	[DMA_START_ADDR_HI]	= 0x18,
	[DMA_END_ADDR]		= 0x1C,
	[DMA_END_ADDR_HI]	= 0x20,
	[DMA_MBUF_DONE_THRESH]	= 0x24,
	[TDMA_FLOW_PERIOD]	= 0x28,
	[TDMA_WRITE_PTR]	= 0x2C,
	[TDMA_WRITE_PTR_HI]	= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_CONS_INDEX]	= 0x04,
	[TDMA_PROD_INDEX]	= 0x08,
	[DMA_RING_BUF_SIZE]	= 0x0C,
	[DMA_START_ADDR]	= 0x10,
	[DMA_END_ADDR]		= 0x14,
	[DMA_MBUF_DONE_THRESH]	= 0x18,
	[TDMA_FLOW_PERIOD]	= 0x1C,
	[TDMA_WRITE_PTR]	= 0x20,
};
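
/* Worked comparison of the two maps above (illustrative): on v1-v3 each
 * pointer is a single 32-bit word, so TDMA_CONS_INDEX directly follows
 * TDMA_READ_PTR at 0x04. On v4 the *_HI word is interleaved right after
 * its LO counterpart, pushing TDMA_CONS_INDEX to 0x08 and shifting every
 * later register by one extra word per preceding pointer pair.
 */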
/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;

static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}
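
/* Worked example of the ring addressing above (illustrative, assuming
 * DMA_RING_SIZE is 0x40 as defined in bcmgenet.h): reading
 * TDMA_PROD_INDEX for ring 16 on a v3plus part resolves to
 *
 *	priv->base + GENET_TDMA_REG_OFF + (0x40 * 16) + 0x0C
 *
 * i.e. one 0x40-byte register window per ring, laid out back to back,
 * with the common DMA registers following at DMA_RINGS_SIZE.
 */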
static int bcmgenet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcmgenet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}
static int bcmgenet_set_rx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 rbuf_chk_ctrl;
	bool rx_csum_en;

	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
	if (rx_csum_en)
		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
	else
		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
	priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (rx_csum_en && priv->crc_fwd_en)
		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
	else
		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);

	return 0;
}
static int bcmgenet_set_tx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	bool desc_64b_en;
	u32 tbuf_ctrl, rbuf_ctrl;

	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

	/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
	if (desc_64b_en) {
		tbuf_ctrl |= RBUF_64B_EN;
		rbuf_ctrl |= RBUF_64B_EN;
	} else {
		tbuf_ctrl &= ~RBUF_64B_EN;
		rbuf_ctrl &= ~RBUF_64B_EN;
	}
	priv->desc_64b_en = desc_64b_en;

	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);

	return 0;
}
static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcmgenet_set_tx_csum(dev, wanted);
	if (changed & (NETIF_F_RXCSUM))
		ret = bcmgenet_set_rx_csum(dev, wanted);

	return ret;
}
static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};
#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}
#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
 * between the end of TX stats and the beginning of the RX RUNT
 */
#define BCMGENET_STAT_OFFSET	0xc
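
/* Worked example (illustrative): bcmgenet_update_mib_counters() walks the
 * table below keeping a running byte offset j. An RX MIB counter is read
 * at UMAC_MIB_START + j; once the walk crosses into the TX counters the
 * 0xC hole is skipped by adding BCMGENET_STAT_OFFSET, so the first TX
 * counter lives at UMAC_MIB_START + (total size of the RX stats) + 0xC.
 */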
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
};
#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
	info->n_stats = BCMGENET_STATS_LEN;
}
static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
			continue;
		case BCMGENET_STAT_MIB_RX:
		case BCMGENET_STAT_MIB_TX:
		case BCMGENET_STAT_RUNT:
			if (s->type != BCMGENET_STAT_MIB_RX)
				offset = BCMGENET_STAT_OFFSET;
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			break;
		case BCMGENET_STAT_MISC:
			val = bcmgenet_umac_readl(priv, s->reg_offset);
			/* clear if overflowed */
			if (val == ~0)
				bcmgenet_umac_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}
static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u32 *)p;
	}
}
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_settings		= bcmgenet_get_settings,
	.set_settings		= bcmgenet_set_settings,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
};
/* Power down the unimac, based on mode. */
static void bcmgenet_power_down(struct bcmgenet_priv *priv,
				enum bcmgenet_power_mode mode)
{
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			reg |= (EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		}
		break;
	default:
		break;
	}
}
static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_BIAS);
		/* fallthrough */
	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		reg |= EXT_PWR_DN_EN_LD;
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}

	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

	if (mode == GENET_POWER_PASSIVE)
		bcmgenet_mii_reset(priv->dev);
}
/* ioctl handles special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int val = 0;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			val = -ENODEV;
		else
			val = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;

	default:
		val = -EINVAL;
		break;
	}

	return val;
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
	tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE;
	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}
/* Simple helper to free a control block's resources */
static void bcmgenet_free_cb(struct enet_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}
static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
						  struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(priv,
				 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
						 struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(priv,
				 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
				 INTRL2_CPU_MASK_CLEAR);
}
static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
					       struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
				 INTRL2_CPU_MASK_CLEAR);
	priv->int1_mask &= ~(1 << ring->index);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
						struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
				 INTRL2_CPU_MASK_SET);
	priv->int1_mask |= (1 << ring->index);
}
/* Unlocked version of the reclaim routine */
static void __bcmgenet_tx_reclaim(struct net_device *dev,
				  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int last_tx_cn, last_c_index, num_tx_bds;
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	unsigned int bds_compl;
	unsigned int c_index;

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	txq = netdev_get_tx_queue(dev, ring->queue);

	last_c_index = ring->c_index;
	num_tx_bds = ring->size;

	c_index &= (num_tx_bds - 1);

	if (c_index >= last_c_index)
		last_tx_cn = c_index - last_c_index;
	else
		last_tx_cn = num_tx_bds - last_c_index + c_index;

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
		  __func__, ring->index,
		  c_index, last_tx_cn, last_c_index);

	/* Reclaim transmitted buffers */
	while (last_tx_cn-- > 0) {
		tx_cb_ptr = ring->cbs + last_c_index;
		bds_compl = 0;
		if (tx_cb_ptr->skb) {
			bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
			dma_unmap_single(&dev->dev,
					 dma_unmap_addr(tx_cb_ptr, dma_addr),
					 tx_cb_ptr->skb->len,
					 DMA_TO_DEVICE);
			bcmgenet_free_cb(tx_cb_ptr);
		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
			dev->stats.tx_bytes +=
				dma_unmap_len(tx_cb_ptr, dma_len);
			dma_unmap_page(&dev->dev,
				       dma_unmap_addr(tx_cb_ptr, dma_addr),
				       dma_unmap_len(tx_cb_ptr, dma_len),
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
		}
		dev->stats.tx_packets++;
		ring->free_bds += bds_compl;

		last_c_index++;
		last_c_index &= (num_tx_bds - 1);
	}

	if (ring->free_bds > (MAX_SKB_FRAGS + 1))
		ring->int_disable(priv, ring);

	if (netif_tx_queue_stopped(txq))
		netif_tx_wake_queue(txq);

	ring->c_index = c_index;
}
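
/* Worked example of the wrap-around arithmetic above (illustrative): for
 * the default ring of size 128, with ring->c_index == 120 and a hardware
 * consumer index that wrapped to 5, last_tx_cn = 128 - 120 + 5 = 13
 * buffers are reclaimed, starting at control block 120 and wrapping
 * around to 4.
 */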
static void bcmgenet_tx_reclaim(struct net_device *dev,
				struct bcmgenet_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}
/* Transmits a single SKB (either head of a fragment or a single SKB)
 * caller must hold priv->lock
 */
static int bcmgenet_xmit_single(struct net_device *dev,
				struct sk_buff *skb,
				u16 dma_desc_flags,
				struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int skb_len;
	dma_addr_t mapping;
	u32 length_status;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = skb;

	skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
		dev_kfree_skb(skb);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
			DMA_TX_APPEND_CRC;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		length_status |= DMA_TX_DO_CSUM;

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= 1;
	ring->prod_index += 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	return 0;
}
/* Transmit a SKB fragment */
static int bcmgenet_xmit_frag(struct net_device *dev,
			      skb_frag_t *frag,
			      u16 dma_desc_flags,
			      struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	dma_addr_t mapping;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();
	tx_cb_ptr->skb = NULL;

	mapping = skb_frag_dma_map(kdev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
			  __func__);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
		    (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));

	ring->free_bds -= 1;
	ring->prod_index += 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	return 0;
}
/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
					    struct sk_buff *skb)
{
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	u16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		dev_kfree_skb(skb);
		if (!new_skb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = new_skb;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = ntohs(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
				(offset + skb->csum_offset);

		/* Set the length valid bit for TCP and UDP and just set
		 * the special UDP flag for IPv4, else just set to 0.
		 */
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			tx_csum_info |= STATUS_TX_CSUM_LV;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
		} else {
			tx_csum_info = 0;
		}

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}
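
/* Worked example of the tx_csum_info encoding above (illustrative): for
 * an IPv4/TCP frame the checksum starts at byte 34 (14 Ethernet + 20 IP),
 * so after subtracting the 64-byte status block pushed in front,
 * offset == 34 and, with TCP's csum_offset of 16,
 *
 *	tx_csum_info = (34 << STATUS_TX_CSUM_START_SHIFT) | (34 + 16)
 *		       | STATUS_TX_CSUM_LV;
 */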
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_tx_ring *ring = NULL;
	struct netdev_queue *txq;
	unsigned long flags = 0;
	int nr_frags, index;
	u16 dma_desc_flags;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet transmitted through ring 16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;
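
	/* Example of the mapping above (illustrative): a packet with
	 * skb->queue_mapping == 2 gets index 1, is queued on
	 * priv->tx_rings[1], and completions are reported against netdev
	 * queue ring->queue == 2; queue_mapping == 0 maps to the default
	 * descriptor ring at DESC_INDEX (16).
	 */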
	nr_frags = skb_shinfo(skb)->nr_frags;
	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->free_bds <= nr_frags + 1) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
			   __func__, index, ring->queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* set the SKB transmit checksum */
	if (priv->desc_64b_en) {
		skb = bcmgenet_put_tx_csum(dev, skb);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	dma_desc_flags = DMA_SOP;
	if (nr_frags == 0)
		dma_desc_flags |= DMA_EOP;

	/* Transmit single SKB or head of fragment list */
	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
	if (ret) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* xmit fragment */
	for (i = 0; i < nr_frags; i++) {
		ret = bcmgenet_xmit_frag(dev,
					 &skb_shinfo(skb)->frags[i],
					 (i == nr_frags - 1) ? DMA_EOP : 0,
					 ring);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_tx_timestamp(skb);

	/* we kept a software copy of how much we should advance the TDMA
	 * producer index, now write it down to the hardware
	 */
	bcmgenet_tdma_ring_writel(priv, ring->index,
				  ring->prod_index, TDMA_PROD_INDEX);

	if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
		netif_tx_stop_queue(txq);
		ring->int_enable(priv, ring);
	}

out:
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}
static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret;

	skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
	if (!skb)
		return -ENOMEM;

	/* a caller did not release this control block */
	WARN_ON(cb->skb != NULL);
	cb->skb = skb;
	mapping = dma_map_single(kdev, skb->data,
				 priv->rx_buf_len, DMA_FROM_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		bcmgenet_free_cb(cb);
		netif_err(priv, rx_err, priv->dev,
			  "%s DMA map failed\n", __func__);
		return ret;
	}

	dma_unmap_addr_set(cb, dma_addr, mapping);
	/* assign packet, prepare descriptor, and advance pointer */
	dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);

	/* turn on the newly assigned BD for DMA to use */
	priv->rx_bd_assign_index++;
	priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);

	priv->rx_bd_assign_ptr = priv->rx_bds +
		(priv->rx_bd_assign_index * DMA_DESC_SIZE);

	return 0;
}
/* bcmgenet_desc_rx - descriptor based rx process.
 * this could be called from bottom half, or from NAPI polling method.
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
				     unsigned int budget)
{
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len, err;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int p_index;
	unsigned int chksum_ok = 0;

	p_index = bcmgenet_rdma_ring_readl(priv, DESC_INDEX, RDMA_PROD_INDEX);
	p_index &= DMA_P_INDEX_MASK;

	if (p_index < priv->rx_c_index)
		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) -
			priv->rx_c_index + p_index;
	else
		rxpkttoprocess = p_index - priv->rx_c_index;

	netif_dbg(priv, rx_status, dev,
		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

	while ((rxpktprocessed < rxpkttoprocess) &&
	       (rxpktprocessed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = cb->skb;

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcmgenet_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			goto refill;
		}

		/* Unmap the packet contents such that we can use the
		 * RSV from the 64 bytes descriptor when enabled and save
		 * a 32-bits register read
		 */
		dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
				 priv->rx_buf_len, DMA_FROM_DEVICE);

		if (!priv->desc_64b_en) {
			dma_length_status =
				dmadesc_get_length_status(priv,
							  priv->rx_bds +
							  (priv->rx_read_ptr *
							   DMA_DESC_SIZE));
		} else {
			struct status_64 *status;

			status = (struct status_64 *)skb->data;
			dma_length_status = status->length_status;
		}

		/* DMA flags and length are still valid no matter how
		 * we got the Receive Status Vector (64B RSB or register)
		 */
		dma_flag = dma_length_status & 0xffff;
		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

		netif_dbg(priv, rx_status, dev,
			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			  __func__, p_index, priv->rx_c_index,
			  priv->rx_read_ptr, dma_length_status);

		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
			netif_err(priv, rx_status, dev,
				  "dropping fragmented packet!\n");
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			goto refill;
		}

		/* report errors */
		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
					 DMA_RX_OV |
					 DMA_RX_NO |
					 DMA_RX_LG |
					 DMA_RX_RXER))) {
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
				  (unsigned int)dma_flag);
			if (dma_flag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dma_flag & DMA_RX_OV)
				dev->stats.rx_over_errors++;
			if (dma_flag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dma_flag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;

			/* discard the packet and advance consumer index.*/
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			goto refill;
		} /* error packet */

		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
			    priv->desc_rxchk_en;

		skb_put(skb, len);
		if (priv->desc_64b_en) {
			skb_pull(skb, 64);
			len -= 64;
		}

		if (likely(chksum_ok))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* remove hardware 2bytes added for IP alignment */
		skb_pull(skb, 2);
		len -= 2;

		if (priv->crc_fwd_en) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		/* Finish setting up the received SKB and send it to the
		 * kernel
		 */
		skb->protocol = eth_type_trans(skb, priv->dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		if (dma_flag & DMA_RX_MULT)
			dev->stats.multicast++;

		/* Notify kernel */
		napi_gro_receive(&priv->napi, skb);
		cb->skb = NULL;
		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");

		/* refill RX path on the current control block */
refill:
		err = bcmgenet_rx_refill(priv, cb);
		if (err)
			netif_err(priv, rx_err, dev, "Rx refill failed\n");

		rxpktprocessed++;
		priv->rx_read_ptr++;
		priv->rx_read_ptr &= (priv->num_rx_bds - 1);
	}

	return rxpktprocessed;
}
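
/* Worked example of the length fixups above (illustrative): the hardware
 * length includes the 2-byte pad inserted for IP alignment and, when
 * crc_fwd_en is set, the 4-byte FCS; the code therefore pulls 2 bytes
 * (len -= 2) and trims ETH_FCS_LEN so that dev->stats.rx_bytes counts
 * only the bytes actually delivered to the stack.
 */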
/* Assign skb to RX DMA descriptor. */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int ret = 0;
	int i;

	netif_dbg(priv, hw, priv->dev, "%s:\n", __func__);

	/* loop here for each buffer needing assign */
	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[priv->rx_bd_assign_index];
		if (cb->skb)
			continue;

		ret = bcmgenet_rx_refill(priv, cb);
		if (ret)
			break;
	}

	return ret;
}
static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];

		if (dma_unmap_addr(cb, dma_addr)) {
			dma_unmap_single(&priv->dev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 priv->rx_buf_len, DMA_FROM_DEVICE);
			dma_unmap_addr_set(cb, dma_addr, 0);
		}

		if (cb->skb)
			bcmgenet_free_cb(cb);
	}
}
static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
	u32 reg;

	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-size packet
	 * to be processed
	 */
	if (!enable)
		usleep_range(1000, 2000);
}
static int reset_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	unsigned int timeout = 0;
	u32 reg;

	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
	bcmgenet_rbuf_ctrl_set(priv, 0);
	udelay(10);

	/* disable MAC while updating its registers */
	bcmgenet_umac_writel(priv, 0, UMAC_CMD);

	/* issue soft reset, wait for it to complete */
	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
	while (timeout++ < 1000) {
		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
		if (!(reg & CMD_SW_RESET))
			return 0;

		udelay(1);
	}

	/* the loop above returns early on success, so reaching this point
	 * means the reset bit never cleared (a "timeout == 1000" check here
	 * would never fire, since timeout has been post-incremented past it)
	 */
	dev_err(kdev,
		"timeout waiting for MAC to come out of reset\n");
	return -ETIMEDOUT;
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
{
	/* Mask all interrupts. */
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
static int init_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	int ret;
	u32 reg, cpu_mask_clear;

	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

	ret = reset_umac(priv);
	if (ret)
		return ret;

	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
	/* clear tx/rx counter */
	bcmgenet_umac_writel(priv,
			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
			     UMAC_MIB_CTRL);
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);

	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* init rx registers, enable ip header optimization */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
	reg |= RBUF_ALIGN_2B;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);

	bcmgenet_intr_disable(priv);

	cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;

	dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);

	/* Monitor cable plug/unplugged event for internal PHY */
	if (phy_is_internal(priv->phydev)) {
		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
	} else if (priv->ext_phy) {
		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		reg = bcmgenet_bp_mc_get(priv);
		reg |= BIT(priv->hw_params->bp_in_en_shift);

		/* bp_mask: back pressure mask */
		if (netif_is_multiqueue(priv->dev))
			reg |= priv->hw_params->bp_in_mask;
		else
			reg &= ~priv->hw_params->bp_in_mask;
		bcmgenet_bp_mc_set(priv, reg);
	}

	/* Enable MDIO interrupts on GENET v3+ */
	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
		cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;

	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);

	/* Enable rx/tx engine.*/
	dev_dbg(kdev, "done init umac\n");

	return 0;
}
/* Initialize all house-keeping variables for a TX ring, along
 * with corresponding hardware registers
 */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index, unsigned int size,
				  unsigned int write_ptr, unsigned int end_ptr)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	u32 flow_period_val = 0;
	unsigned int first_bd;

	spin_lock_init(&ring->lock);
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->queue = 0;
		ring->int_enable = bcmgenet_tx_ring16_int_enable;
		ring->int_disable = bcmgenet_tx_ring16_int_disable;
	} else {
		ring->queue = index + 1;
		ring->int_enable = bcmgenet_tx_ring_int_enable;
		ring->int_disable = bcmgenet_tx_ring_int_disable;
	}
	ring->cbs = priv->tx_cbs + write_ptr;
	ring->size = size;
	ring->c_index = 0;
	ring->free_bds = size;
	ring->write_ptr = write_ptr;
	ring->cb_ptr = write_ptr;
	ring->end_ptr = end_ptr - 1;
	ring->prod_index = 0;

	/* Set flow period for ring != 16 */
	if (index != DESC_INDEX)
		flow_period_val = ENET_MAX_MTU_SIZE << 16;

	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	/* Disable rate control for now */
	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
				  TDMA_FLOW_PERIOD);
	/* Unclassified traffic goes to ring 16 */
	bcmgenet_tdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);

	first_bd = write_ptr;

	/* Set start and end address, read and write pointers */
	bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
				  TDMA_READ_PTR);
	bcmgenet_tdma_ring_writel(priv, index, first_bd,
				  TDMA_WRITE_PTR);
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);
}
/* Initialize a RDMA ring */
static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
				 unsigned int index, unsigned int size)
{
	u32 words_per_bd = WORDS_PER_BD(priv);
	int ret;

	priv->num_rx_bds = TOTAL_DESC;
	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
	priv->rx_bd_assign_ptr = priv->rx_bds;
	priv->rx_bd_assign_index = 0;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs)
		return -ENOMEM;

	ret = bcmgenet_alloc_rx_buffers(priv);
	if (ret) {
		kfree(priv->rx_cbs);
		return ret;
	}

	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
	bcmgenet_rdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
	bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
	bcmgenet_rdma_ring_writel(priv, index,
				  words_per_bd * size - 1, DMA_END_ADDR);
	bcmgenet_rdma_ring_writel(priv, index,
				  (DMA_FC_THRESH_LO <<
				   DMA_XOFF_THRESHOLD_SHIFT) |
				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);

	return ret;
}
/* init multi xmit queues, only available for GENET2+
 * the queue is partitioned as follows:
 *
 * queue 0 - 3 is priority based, each one has 32 descriptors,
 * with queue 0 being the highest priority queue.
 *
 * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT
 * descriptors: 256 - (number of tx queues * bds per queues) = 128
 * descriptors.
 *
 * The transmit control block pool is then partitioned as follows
 * (matching the write_ptr arguments passed to bcmgenet_init_tx_ring()
 * below and in bcmgenet_init_dma()):
 * - tx_rings[0] uses tx_cbs[0..31]
 * - tx_rings[1] uses tx_cbs[32..63]
 * - tx_rings[2] uses tx_cbs[64..95]
 * - tx_rings[3] uses tx_cbs[96..127]
 * - queue 16 uses tx_cbs[128..255]
 */
static void bcmgenet_init_multiq(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i, dma_enable;
	u32 reg, dma_ctrl, ring_cfg = 0;
	u32 dma_priority[3] = {0, 0, 0};

	if (!netif_is_multiqueue(dev)) {
		netdev_warn(dev, "called with non multi queue aware HW\n");
		return;
	}

	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);

	/* Enable strict priority arbiter mode */
	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);

	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		/* ring i claims the i-th block of bds_cnt control blocks;
		 * the default tx queue (ring 16) gets the remainder in
		 * bcmgenet_init_dma()
		 */
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
				      i * priv->hw_params->bds_cnt,
				      (i + 1) * priv->hw_params->bds_cnt);

		/* Configure ring as descriptor ring and setup priority */
		ring_cfg |= 1 << i;
		dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT);

		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
	}

	/* Set ring 16 priority and program the hardware registers */
	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
		 DMA_PRIO_REG_SHIFT(DESC_INDEX));
	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);

	/* Enable rings */
	reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG);
	reg |= ring_cfg;
	bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG);

	/* Configure ring as descriptor ring and re-enable DMA if enabled */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	if (dma_enable)
		reg |= DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
	int ret = 0;
	int timeout = 0;
	u32 reg;

	/* Disable TDMA to stop adding more frames in TX DMA */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	/* Check TDMA status register to confirm TDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	/* timeout is post-incremented past DMA_TIMEOUT_VAL only when the
	 * loop ran to completion without seeing DMA_DISABLED
	 */
	if (timeout > DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
		ret = -ETIMEDOUT;
	}

	/* Wait 10ms for packet drain in both tx and rx dma */
	usleep_range(10000, 20000);

	/* Disable RDMA */
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	timeout = 0;
	/* Check RDMA status register to confirm RDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout > DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
	int i;

	/* disable DMA */
	bcmgenet_dma_teardown(priv);

	for (i = 0; i < priv->num_tx_bds; i++) {
		if (priv->tx_cbs[i].skb != NULL) {
			dev_kfree_skb(priv->tx_cbs[i].skb);
			priv->tx_cbs[i].skb = NULL;
		}
	}

	bcmgenet_free_rx_buffers(priv);
	kfree(priv->rx_cbs);
	kfree(priv->tx_cbs);
}
/* init_edma: Initialize DMA control register */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
	int ret;

	netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n");

	/* by default, enable ring 16 (descriptor based) */
	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC);
	if (ret) {
		netdev_err(priv->dev, "failed to initialize RX ring\n");
		return ret;
	}

	/* init rDma */
	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* init tDma */
	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize common TX ring structures */
	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
	priv->num_tx_bds = TOTAL_DESC;
	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->tx_cbs) {
		bcmgenet_fini_dma(priv);
		return -ENOMEM;
	}

	/* initialize multi xmit queue */
	bcmgenet_init_multiq(priv->dev);

	/* initialize special ring 16 */
	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
			      priv->hw_params->tx_queues *
			      priv->hw_params->bds_cnt,
			      TOTAL_DESC);

	return 0;
}
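
/* Resulting descriptor/control-block layout (illustrative, with the
 * default hw_params of 4 tx queues x 32 BDs): rings 0-3 own TX BDs 0-127,
 * ring 16 owns TX BDs 128-255 (GENET_DEFAULT_BD_CNT == 128), and the
 * single RX ring owns all TOTAL_DESC RX descriptors.
 */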
/* NAPI polling method */
static int bcmgenet_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_priv *priv = container_of(napi,
			struct bcmgenet_priv, napi);
	unsigned int work_done;

	/* tx reclaim */
	bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);

	work_done = bcmgenet_desc_rx(priv, budget);

	/* Advancing our consumer index */
	priv->rx_c_index += work_done;
	priv->rx_c_index &= DMA_C_INDEX_MASK;
	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
				  priv->rx_c_index, RDMA_CONS_INDEX);
	if (work_done < budget) {
		napi_complete(napi);
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
					 INTRL2_CPU_MASK_CLEAR);
	}

	return work_done;
}
/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
	struct bcmgenet_priv *priv = container_of(
			work, struct bcmgenet_priv, bcmgenet_irq_work);

	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

	if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
		priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
		netif_dbg(priv, wol, priv->dev,
			  "magic packet detected, waking up\n");
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
	}

	/* Link UP/DOWN event */
	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    (priv->irq0_stat & (UMAC_IRQ_LINK_UP | UMAC_IRQ_LINK_DOWN))) {
		phy_mac_interrupt(priv->phydev,
				  priv->irq0_stat & UMAC_IRQ_LINK_UP);
		priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP | UMAC_IRQ_LINK_DOWN);
	}
}
/* bcmgenet_isr1: interrupt handler for ring buffer. */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	unsigned int index;

	/* Save irq status for bottom-half processing. */
	priv->irq1_stat =
		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
		~priv->int1_mask;
	/* clear interrupts */
	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
	/* Check the MBDONE interrupts.
	 * packet is done, reclaim descriptors
	 */
	if (priv->irq1_stat & 0x0000ffff) {
		for (index = 0; index < 16; index++) {
			if (priv->irq1_stat & (1 << index))
				bcmgenet_tx_reclaim(priv->dev,
						    &priv->tx_rings[index]);
		}
	}

	return IRQ_HANDLED;
}
/* bcmgenet_isr0: Handle various interrupts. */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	/* Save irq status for bottom-half processing. */
	priv->irq0_stat =
		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	/* clear interrupts */
	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "IRQ=0x%x\n", priv->irq0_stat);

	if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
		/* We use NAPI (software interrupt throttling), if
		 * Rx Descriptor throttling is not used.
		 * Disable interrupt, will be enabled in the poll method.
		 */
		if (likely(napi_schedule_prep(&priv->napi))) {
			bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
						 INTRL2_CPU_MASK_SET);
			__napi_schedule(&priv->napi);
		}
	}
	if (priv->irq0_stat &
	    (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
		/* Tx reclaim */
		bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
	}
	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
			       UMAC_IRQ_PHY_DET_F |
			       UMAC_IRQ_LINK_UP |
			       UMAC_IRQ_LINK_DOWN |
			       UMAC_IRQ_HFB_SM |
			       UMAC_IRQ_HFB_MM |
			       UMAC_IRQ_MPD_R)) {
		/* all other interested interrupts handled in bottom half */
		schedule_work(&priv->bcmgenet_irq_work);
	}

	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
		wake_up(&priv->wq);
	}

	return IRQ_HANDLED;
}
static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}
static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
	u32 reg;

	reg = bcmgenet_rbuf_ctrl_get(priv);
	reg |= BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);

	reg &= ~BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);
}
static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
				 unsigned char *addr)
{
	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | addr[3], UMAC_MAC0);
	bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
	u32 reg;
	u32 dma_ctrl;

	/* disable DMA */
	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
	udelay(10);
	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);

	return dma_ctrl;
}
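
/* Usage note (illustrative): the value returned above carries exactly the
 * bits that were cleared (ring 16 buffer enable plus DMA_EN), so a caller
 * can hand it straight back to bcmgenet_enable_dma() to restore the
 * previous state once the rings have been reprogrammed, as
 * bcmgenet_open() does.
 */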
static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
{
	u32 reg;

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
static void bcmgenet_netif_start(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Start the network engine */
	napi_enable(&priv->napi);

	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);

	if (phy_is_internal(priv->phydev))
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	netif_tx_start_all_queues(dev);

	phy_start(priv->phydev);
}
static int bcmgenet_open(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	u32 reg;
	int ret;

	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");

	/* Turn on the clock */
	if (!IS_ERR(priv->clk))
		clk_prepare_enable(priv->clk);

	/* take MAC out of reset */
	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto err_clk_disable;

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	/* Make sure we reflect the value of CRC_CMD_FWD */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (phy_is_internal(priv->phydev)) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto err_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
		goto err_fini_dma;
	}

	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
		goto err_irq0;
	}

	/* Re-configure the port multiplexer towards the PHY device */
	bcmgenet_mii_config(priv->dev, false);

	phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
			   priv->phy_interface);

	bcmgenet_netif_start(dev);

	return 0;

err_irq0:
	/* free with the same dev_id the IRQ was requested with */
	free_irq(priv->irq0, priv);
err_fini_dma:
	bcmgenet_fini_dma(priv);
err_clk_disable:
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);
	return ret;
}
static void bcmgenet_netif_stop(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	phy_stop(priv->phydev);

	bcmgenet_intr_disable(priv);

	/* Wait for pending work items to complete. Since interrupts are
	 * disabled no new work will be scheduled.
	 */
	cancel_work_sync(&priv->bcmgenet_irq_work);

	priv->old_link = -1;
	priv->old_speed = -1;
	priv->old_duplex = -1;
	priv->old_pause = -1;
}
static int bcmgenet_close(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");

	bcmgenet_netif_stop(dev);

	/* Really kill the PHY state machine and disconnect from it */
	phy_disconnect(priv->phydev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA must be disabled before this */
	umac_enable_set(priv, CMD_TX_EN, false);

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	free_irq(priv->irq0, priv);
	free_irq(priv->irq1, priv);

	if (phy_is_internal(priv->phydev))
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	return 0;
}
static void bcmgenet_timeout(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");

	dev->trans_start = jiffies;

	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}
#define MAX_MC_COUNT	16

static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
					 unsigned char *addr,
					 int *i,
					 int *mc)
{
	u32 reg;

	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
			     UMAC_MDF_ADDR + (*i * 4));
	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
			     addr[4] << 8 | addr[5],
			     UMAC_MDF_ADDR + ((*i + 1) * 4));
	reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
	reg |= (1 << (MAX_MC_COUNT - *mc));
	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
	*i += 2;
	(*mc)++;
}
static void bcmgenet_set_rx_mode(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, mc;
	u32 reg;

	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);

	/* Promiscuous mode */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC) {
		reg |= CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
		return;
	} else {
		reg &= ~CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	}

	/* UniMac doesn't support ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		netdev_warn(dev, "ALLMULTI is not supported\n");
		return;
	}

	/* update MDF filter */
	i = 0;
	mc = 0;
	/* Broadcast */
	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
	/* my own address.*/
	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
	/* Unicast list */
	if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
		return;

	if (!netdev_uc_empty(dev))
		netdev_for_each_uc_addr(ha, dev)
			bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
	/* Multicast */
	if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
		return;

	netdev_for_each_mc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
}
/* Set the hardware MAC address. */
static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	/* Setting the MAC address at the hardware level is not possible
	 * without disabling the UniMAC RX/TX enable bits.
	 */
	if (netif_running(dev))
		return -EBUSY;

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}
static const struct net_device_ops bcmgenet_netdev_ops = {
	.ndo_open		= bcmgenet_open,
	.ndo_stop		= bcmgenet_close,
	.ndo_start_xmit		= bcmgenet_xmit,
	.ndo_tx_timeout		= bcmgenet_timeout,
	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
	.ndo_do_ioctl		= bcmgenet_ioctl,
	.ndo_set_features	= bcmgenet_set_features,
};
/* Array of GENET hardware parameters/characteristics, indexed by
 * enum bcmgenet_version (which starts at GENET_V1 == 1)
 */
static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
	[GENET_V1] = {
		.tx_queues = 0,
		.rx_queues = 0,
		.bds_cnt = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.hfb_offset = 0x1000,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x3000,
		.words_per_bd = 2,
	},
	[GENET_V2] = {
		.tx_queues = 4,
		.rx_queues = 4,
		.bds_cnt = 32,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x1000,
		.hfb_reg_offset = 0x2000,
		.rdma_offset = 0x3000,
		.tdma_offset = 0x4000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT,
	},
	[GENET_V3] = {
		.tx_queues = 4,
		.rx_queues = 4,
		.bds_cnt = 32,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x10000,
		.tdma_offset = 0x11000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
	},
	[GENET_V4] = {
		.tx_queues = 4,
		.rx_queues = 4,
		.bds_cnt = 32,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		/* 40-bit addressing needs a third descriptor word */
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
	},
};
/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
	struct bcmgenet_hw_params *params;
	u32 reg;
	u8 major;

	if (GENET_IS_V4(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v4;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V4;
	} else if (GENET_IS_V3(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V3;
	} else if (GENET_IS_V2(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V2;
	} else if (GENET_IS_V1(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V1;
	}

	/* enum genet_version starts at 1 */
	priv->hw_params = &bcmgenet_hw_params[priv->version];
	params = priv->hw_params;

	/* Read GENET HW version */
	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
	major = (reg >> 24 & 0x0f);
	if (major == 5)
		major = 4;
	else if (major == 0)
		major = 1;
	if (major != priv->version) {
		dev_err(&priv->pdev->dev,
			"GENET version mismatch, got: %d, configured for: %d\n",
			major, priv->version);
	}

	/* Print the GENET core version */
	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
		 major, (reg >> 16) & 0x0f, reg & 0xffff);

	/* Store the integrated PHY revision for the MDIO probing function
	 * to pass this information to the PHY driver. The PHY driver expects
	 * to find the PHY major revision in bits 15:8 while the GENET register
	 * stores that information in bits 7:0, account for that.
	 */
	priv->gphy_rev = (reg & 0xffff) << 8;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!(params->flags & GENET_HAS_40BITS))
		pr_warn("GENET does not support 40-bits PA\n");
#endif

	pr_debug("Configuration for version: %d\n"
		 "TXq: %1d, RXq: %1d, BDs: %1d\n"
		 "BP << en: %2d, BP msk: 0x%05x\n"
		 "HFB count: %2d, QTAQ msk: 0x%05x\n"
		 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
		 "RDMA: 0x%05x, TDMA: 0x%05x\n"
		 "Words/BD: %d\n",
		 priv->version,
		 params->tx_queues, params->rx_queues, params->bds_cnt,
		 params->bp_in_en_shift, params->bp_in_mask,
		 params->hfb_filter_cnt, params->qtag_mask,
		 params->tbuf_offset, params->hfb_offset,
		 params->hfb_reg_offset,
		 params->rdma_offset, params->tdma_offset,
		 params->words_per_bd);
}
static const struct of_device_id bcmgenet_match[] = {
	{ .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
	{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
	{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
	{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
	{ /* sentinel */ },
};
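
/* A hypothetical device tree node this table would match; the unit
 * address, register window and interrupt specifiers below are purely
 * illustrative and the real values are SoC-specific:
 *
 *	ethernet@f0b60000 {
 *		compatible = "brcm,genet-v4";
 *		reg = <0xf0b60000 0x11c00>;
 *		interrupts = <0x0 0x18 0x0>, <0x0 0x19 0x0>;
 *		local-mac-address = [ 00 10 18 00 00 01 ];
 *	};
 */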
static int bcmgenet_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id;
	struct bcmgenet_priv *priv;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	int err = -EIO;

	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */
	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1);
	if (!dev) {
		dev_err(&pdev->dev, "can't allocate net device\n");
		return -ENOMEM;
	}

	of_id = of_match_node(bcmgenet_match, dn);
	if (!of_id) {
		err = -EINVAL;
		goto err;
	}

	priv = netdev_priv(dev);
	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	priv->wol_irq = platform_get_irq(pdev, 2);
	if (!priv->irq0 || !priv->irq1) {
		dev_err(&pdev->dev, "can't find IRQs\n");
		err = -EINVAL;
		goto err;
	}

	macaddr = of_get_mac_address(dn);
	if (!macaddr) {
		dev_err(&pdev->dev, "can't find MAC address\n");
		err = -EINVAL;
		goto err;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		err = PTR_ERR(priv->base);
		goto err;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	ether_addr_copy(dev->dev_addr, macaddr);
	dev->watchdog_timeo = 2 * HZ;
	dev->ethtool_ops = &bcmgenet_ethtool_ops;
	dev->netdev_ops = &bcmgenet_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);

	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);

	/* Set hardware features */
	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = true;
	err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
			       dev->name, priv);
	if (!err)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom to account for any possible
	 * features enabling/disabling at runtime
	 */
	dev->needed_headroom += 64;

	netdev_boot_setup_check(dev);
	priv->dev = dev;
	priv->pdev = pdev;
	priv->version = (enum bcmgenet_version)of_id->data;

	priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
	if (IS_ERR(priv->clk))
		dev_warn(&priv->pdev->dev, "failed to get enet clock\n");

	if (!IS_ERR(priv->clk))
		clk_prepare_enable(priv->clk);

	bcmgenet_set_hw_params(priv);

	/* Mii wait queue */
	init_waitqueue_head(&priv->wq);
	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
	priv->rx_buf_len = RX_BUF_LENGTH;
	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);

	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
	if (IS_ERR(priv->clk_wol))
		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");

	err = reset_umac(priv);
	if (err)
		goto err_clk_disable;

	err = bcmgenet_mii_init(dev);
	if (err)
		goto err_clk_disable;

	/* Set up the number of real queues + 1 (GENET_V1 has 0 hardware
	 * queues, just the ring 16 descriptor-based TX queue).
	 */
	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);

	/* libphy will determine the link state */
	netif_carrier_off(dev);

	/* Turn off the main clock, WOL clock is handled separately */
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	err = register_netdev(dev);
	if (err)
		goto err;

	return err;

err_clk_disable:
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);
err:
	free_netdev(dev);
	return err;
}
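
/* Undo bcmgenet_probe(); resources obtained through devm_* helpers
 * (clocks, the register mapping and the WOL interrupt) are released
 * automatically by the driver core.
 */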
static int bcmgenet_remove(struct platform_device *pdev)
{
	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);
	unregister_netdev(priv->dev);
	bcmgenet_mii_exit(priv->dev);
	free_netdev(priv->dev);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
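/* System sleep: mirrors bcmgenet_close(), except that the PHY stays
 * attached and is merely suspended. If Wake-on-LAN is armed, the block
 * is switched to magic-packet mode on the slow WOL clock instead of
 * being powered down completely.
 */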
static int bcmgenet_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	if (!netif_running(dev))
		return 0;

	bcmgenet_netif_stop(dev);

	phy_suspend(priv->phydev);

	netif_device_detach(dev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA must be disabled before doing so */
	umac_enable_set(priv, CMD_TX_EN, false);

	/* Reclaim any pending TX buffers before freeing the rings */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
	if (device_may_wakeup(d) && priv->wolopts) {
		bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
		clk_prepare_enable(priv->clk_wol);
	}

	/* Turn off the clocks */
	clk_disable_unprepare(priv->clk);

	return 0;
}
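
/* System wakeup: the reverse of bcmgenet_suspend(). Reset and reprogram
 * the UMAC and PHY, rebuild the DMA rings, then reattach the netdev and
 * restart the interface.
 */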
static int bcmgenet_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	int ret;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	/* Turn on the clock */
	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto out_clk_disable;

	/* From WOL-enabled suspend, switch to regular clock */
	if (priv->wolopts)
		clk_disable_unprepare(priv->clk_wol);

	phy_init_hw(priv->phydev);
	/* Speed settings must be restored */
	bcmgenet_mii_config(priv->dev, false);

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (phy_is_internal(priv->phydev)) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	if (priv->wolopts)
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto out_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	netif_device_attach(dev);

	phy_resume(priv->phydev);

	bcmgenet_netif_start(dev);

	return 0;

out_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */
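
/* Bind the sleep callbacks to the driver core. With CONFIG_PM_SLEEP
 * disabled, SIMPLE_DEV_PM_OPS emits a table without system sleep
 * callbacks and the functions above are compiled out entirely.
 */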
static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);

static struct platform_driver bcmgenet_driver = {
	.probe	= bcmgenet_probe,
	.remove	= bcmgenet_remove,
	.driver	= {
		.name	= "bcmgenet",
		.owner	= THIS_MODULE,
		.of_match_table = bcmgenet_match,
		.pm	= &bcmgenet_pm_ops,
	},
};
module_platform_driver(bcmgenet_driver);
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
MODULE_ALIAS("platform:bcmgenet");
MODULE_LICENSE("GPL");