/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009 Cavium Networks
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/capability.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/if_vlan.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/io.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
	"Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/*
 * Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128

/* Allow 8 bytes for vlan and FCS. */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

union mgmt_port_ring_entry {
	u64 d64;
	struct {
		u64 reserved_62_63:2;
		/* Length of the buffer/packet in bytes */
		u64 len:14;
		/* For TX, signals that the packet should be timestamped */
		u64 tstamp:1;
		/* The RX error code */
		u64 code:7;
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
		/* Physical address of the buffer */
		u64 addr:40;
	} s;
};
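
/*
 * Both the RX and TX rings are arrays of these 64-bit descriptors,
 * read and written in place by the MIX DMA engine; that is why the
 * ring memory is mapped DMA_BIDIRECTIONAL and dma_sync'd around every
 * access below.
 */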

#define MIX_ORING1	0x0
#define MIX_ORING2	0x8
#define MIX_IRING1	0x10
#define MIX_IRING2	0x18
#define MIX_CTL		0x20
#define MIX_IRHWM	0x28
#define MIX_IRCNT	0x30
#define MIX_ORHWM	0x38
#define MIX_ORCNT	0x40
#define MIX_ISR		0x48
#define MIX_INTENA	0x50
#define MIX_REMCNT	0x58
#define MIX_BIST	0x78
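
/*
 * The MIX and AGL offsets above and below are applied to the per-port
 * base addresses held in p->mix and p->agl (ioremap'd in
 * octeon_mgmt_probe() from the two 'reg' resources).
 */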

#define AGL_GMX_PRT_CFG			0x10
#define AGL_GMX_RX_FRM_CTL		0x18
#define AGL_GMX_RX_FRM_MAX		0x30
#define AGL_GMX_RX_JABBER		0x38
#define AGL_GMX_RX_STATS_CTL		0x50

#define AGL_GMX_RX_STATS_PKTS_DRP	0xb0
#define AGL_GMX_RX_STATS_OCTS_DRP	0xb8
#define AGL_GMX_RX_STATS_PKTS_BAD	0xc0

#define AGL_GMX_RX_ADR_CTL		0x100
#define AGL_GMX_RX_ADR_CAM_EN		0x108
#define AGL_GMX_RX_ADR_CAM0		0x180
#define AGL_GMX_RX_ADR_CAM1		0x188
#define AGL_GMX_RX_ADR_CAM2		0x190
#define AGL_GMX_RX_ADR_CAM3		0x198
#define AGL_GMX_RX_ADR_CAM4		0x1a0
#define AGL_GMX_RX_ADR_CAM5		0x1a8

#define AGL_GMX_TX_STATS_CTL		0x268
#define AGL_GMX_TX_CTL			0x270
#define AGL_GMX_TX_STAT0		0x280
#define AGL_GMX_TX_STAT1		0x288
#define AGL_GMX_TX_STAT2		0x290
#define AGL_GMX_TX_STAT3		0x298
#define AGL_GMX_TX_STAT4		0x2a0
#define AGL_GMX_TX_STAT5		0x2a8
#define AGL_GMX_TX_STAT6		0x2b0
#define AGL_GMX_TX_STAT7		0x2b8
#define AGL_GMX_TX_STAT8		0x2c0
#define AGL_GMX_TX_STAT9		0x2c8

struct octeon_mgmt {
	struct net_device *netdev;
	u64 mix;
	u64 agl;
	int port;
	int irq;
	u64 *tx_ring;
	dma_addr_t tx_ring_handle;
	unsigned int tx_next;
	unsigned int tx_next_clean;
	unsigned int tx_current_fill;
	/* The tx_list lock also protects the ring related variables */
	struct sk_buff_head tx_list;

	/* RX variables only touched in napi_poll.  No locking necessary. */
	u64 *rx_ring;
	dma_addr_t rx_ring_handle;
	unsigned int rx_next;
	unsigned int rx_next_fill;
	unsigned int rx_current_fill;
	struct sk_buff_head rx_list;

	spinlock_t lock;
	unsigned int last_duplex;
	unsigned int last_link;
	struct device *dev;
	struct napi_struct napi;
	struct tasklet_struct tx_clean_tasklet;
	struct phy_device *phydev;
	struct device_node *phy_np;
	resource_size_t mix_phys;
	resource_size_t mix_size;
	resource_size_t agl_phys;
	resource_size_t agl_size;
};

static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.ithena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.othena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 1);
}

static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 0);
}

static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 1);
}

static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 0);
}
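
/*
 * The rings are never filled completely: ring_max_fill() leaves 8
 * entries of slack, presumably so the hardware's ring pointers can
 * never catch up with the software fill index and alias a full ring
 * with an empty one.
 */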
static unsigned int ring_max_fill(unsigned int ring_size)
{
	return ring_size - 8;
}

static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
	return ring_size * sizeof(union mgmt_port_ring_entry);
}

static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
		unsigned int size;
		union mgmt_port_ring_entry re;
		struct sk_buff *skb;

		/* CN56XX pass 1 needs 8 bytes of padding.  */
		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

		skb = netdev_alloc_skb(netdev, size);
		if (!skb)
			break;
		skb_reserve(skb, NET_IP_ALIGN);
		__skb_queue_tail(&p->rx_list, skb);

		re.d64 = 0;
		re.s.len = size;
		re.s.addr = dma_map_single(p->dev, skb->data,
					   size,
					   DMA_FROM_DEVICE);

		/* Put it in the ring.  */
		p->rx_ring[p->rx_next_fill] = re.d64;
		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
					   DMA_BIDIRECTIONAL);
		p->rx_next_fill =
			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
		p->rx_current_fill++;
		/* Ring the bell.  */
		cvmx_write_csr(p->mix + MIX_IRING2, 1);
	}
}
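
/*
 * Each buffer posted above is sized for the MTU at the time it is
 * queued.  After an MTU increase, buffers already in the ring may be
 * too small, producing the RING_ENTRY_CODE_MORE chains handled in
 * octeon_mgmt_receive_one().
 */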

static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
	union cvmx_mixx_orcnt mix_orcnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	int cleaned = 0;
	unsigned long flags;

	mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	while (mix_orcnt.s.orcnt) {
		spin_lock_irqsave(&p->tx_list.lock, flags);

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);

		if (mix_orcnt.s.orcnt == 0) {
			spin_unlock_irqrestore(&p->tx_list.lock, flags);
			break;
		}

		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
					DMA_BIDIRECTIONAL);

		re.d64 = p->tx_ring[p->tx_next_clean];
		p->tx_next_clean =
			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
		skb = __skb_dequeue(&p->tx_list);

		mix_orcnt.u64 = 0;
		mix_orcnt.s.orcnt = 1;

		/* Acknowledge to hardware that we have the buffer.  */
		cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
		p->tx_current_fill--;

		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
		cleaned++;

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	}

	if (cleaned && netif_queue_stopped(p->netdev))
		netif_wake_queue(p->netdev);
}
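
/*
 * MIX_ORCNT counts descriptors the hardware has finished
 * transmitting; writing a count back appears to subtract it, which
 * is how the loop above acknowledges each cleaned buffer one at a
 * time.
 */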

static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
	struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
	octeon_mgmt_clean_tx_buffers(p);
	octeon_mgmt_enable_tx_irq(p);
}

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;
	u64 drop, bad;

	/* These reads also clear the count registers.  */
	drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
	bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);

	if (drop || bad) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.rx_errors += bad;
		netdev->stats.rx_dropped += drop;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;
	union cvmx_agl_gmx_txx_stat0 s0;
	union cvmx_agl_gmx_txx_stat1 s1;

	/* These reads also clear the count registers.  */
	s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
	s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);

	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

/*
 * Dequeue a receive skb and its corresponding ring entry.  The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
					 struct sk_buff **pskb)
{
	union mgmt_port_ring_entry re;

	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
				DMA_BIDIRECTIONAL);

	re.d64 = p->rx_ring[p->rx_next];
	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
	p->rx_current_fill--;
	*pskb = __skb_dequeue(&p->rx_list);

	dma_unmap_single(p->dev, re.s.addr,
			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
			 DMA_FROM_DEVICE);

	return re.d64;
}
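
/*
 * Note that the unmap length above is a fixed worst-case size, not
 * necessarily the size the buffer was mapped with in
 * octeon_mgmt_rx_fill_ring(); dma_unmap_single() is a nop on Octeon
 * (see octeon_mgmt_stop()), so the mismatch is harmless here.
 */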

static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
	struct net_device *netdev = p->netdev;
	union cvmx_mixx_ircnt mix_ircnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *skb_new;
	union mgmt_port_ring_entry re2;
	int rc = 1;

	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
		/* A good packet, send it up. */
		skb_put(skb, re.s.len);
good:
		skb->protocol = eth_type_trans(skb, netdev);
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);
		rc = 0;
	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
		/*
		 * Packet split across skbs.  This can happen if we
		 * increase the MTU.  Buffers that are already in the
		 * rx ring can then end up being too small.  As the rx
		 * ring is refilled, buffers sized for the new MTU
		 * will be used and we should go back to the normal
		 * state.
		 */
		skb_put(skb, re.s.len);
		do {
			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
			if (re2.s.code != RING_ENTRY_CODE_MORE
				&& re2.s.code != RING_ENTRY_CODE_DONE)
				goto split_error;
			skb_put(skb2, re2.s.len);
			skb_new = skb_copy_expand(skb, 0, skb2->len,
						  GFP_ATOMIC);
			if (!skb_new)
				goto split_error;
			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
					  skb2->len))
				goto split_error;
			skb_put(skb_new, skb2->len);
			dev_kfree_skb_any(skb);
			dev_kfree_skb_any(skb2);
			skb = skb_new;
		} while (re2.s.code == RING_ENTRY_CODE_MORE);
		goto good;
	} else {
		/* Some other error, discard it. */
		dev_kfree_skb_any(skb);
		/*
		 * Error statistics are accumulated in
		 * octeon_mgmt_update_rx_stats.
		 */
	}
	goto done;
split_error:
	/* Discard the whole mess. */
	dev_kfree_skb_any(skb);
	dev_kfree_skb_any(skb2);
	while (re2.s.code == RING_ENTRY_CODE_MORE) {
		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
		dev_kfree_skb_any(skb2);
	}
	netdev->stats.rx_errors++;

done:
	/* Tell the hardware we processed a packet.  */
	mix_ircnt.u64 = 0;
	mix_ircnt.s.ircnt = 1;
	cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
	return rc;
}

static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
	unsigned int work_done = 0;
	union cvmx_mixx_ircnt mix_ircnt;
	int rc;

	mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	while (work_done < budget && mix_ircnt.s.ircnt) {

		rc = octeon_mgmt_receive_one(p);
		if (!rc)
			work_done++;

		/* Check for more packets. */
		mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	}

	octeon_mgmt_rx_fill_ring(p->netdev);

	return work_done;
}

static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
	struct net_device *netdev = p->netdev;
	unsigned int work_done = 0;

	work_done = octeon_mgmt_receive_packets(p, budget);

	if (work_done < budget) {
		/* We stopped because no more packets were available. */
		napi_complete(napi);
		octeon_mgmt_enable_rx_irq(p);
	}
	octeon_mgmt_update_rx_stats(netdev);

	return work_done;
}
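
/*
 * This is the standard NAPI pattern: the RX interrupt stays masked
 * while polling and is re-enabled only after napi_complete(), so a
 * new packet arrival can schedule the next poll.
 */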

/* Reset the hardware to clean state.  */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_mixx_bist mix_bist;
	union cvmx_agl_gmx_bist agl_gmx_bist;

	mix_ctl.u64 = 0;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	do {
		mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
	} while (mix_ctl.s.busy);
	mix_ctl.s.reset = 1;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	cvmx_read_csr(p->mix + MIX_CTL);

	mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
	if (mix_bist.u64)
		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
			 (unsigned long long)mix_bist.u64);

	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (agl_gmx_bist.u64)
		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
			 (unsigned long long)agl_gmx_bist.u64);
}

struct octeon_mgmt_cam_state {
	u64 cam[6];
	u64 cam_mask;
	int cam_index;
};

static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
				      unsigned char *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
	cs->cam_mask |= (1ULL << cs->cam_index);
	cs->cam_index++;
}
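
/*
 * Each of the six CAM registers holds one byte lane for up to eight
 * addresses: byte i of CAM entry n lands in bits [8n+7:8n] of cam[i],
 * and cam_mask enables the entries that were actually filled.
 */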

static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
	unsigned long flags;
	unsigned int prev_packet_enable;
	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
	unsigned int multicast_mode = 1; /* 1 - Reject all multicast.  */
	struct octeon_mgmt_cam_state cam_state;
	struct netdev_hw_addr *ha;
	int available_cam_entries;

	memset(&cam_state, 0, sizeof(cam_state));

	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
		cam_mode = 0;
		available_cam_entries = 8;
	} else {
		/*
		 * One CAM entry for the primary address, leaves seven
		 * for the secondary addresses.
		 */
		available_cam_entries = 7 - netdev->uc.count;
	}

	if (netdev->flags & IFF_MULTICAST) {
		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
		    netdev_mc_count(netdev) > available_cam_entries)
			multicast_mode = 2; /* 2 - Accept all multicast.  */
		else
			multicast_mode = 0; /* 0 - Use CAM.  */
	}

	if (cam_mode == 1) {
		/* Add primary address. */
		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
		netdev_for_each_uc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}
	if (multicast_mode == 0) {
		netdev_for_each_mc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}

	spin_lock_irqsave(&p->lock, flags);

	/* Disable packet I/O. */
	agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prev_packet_enable = agl_gmx_prtx.s.en;
	agl_gmx_prtx.s.en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	adr_ctl.u64 = 0;
	adr_ctl.s.cam_mode = cam_mode;
	adr_ctl.s.mcst = multicast_mode;
	adr_ctl.s.bcst = 1;	/* Allow broadcast */

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);

	/* Restore packet I/O. */
	agl_gmx_prtx.s.en = prev_packet_enable;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	spin_unlock_irqrestore(&p->lock, flags);
}

static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
	struct sockaddr *sa = addr;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);

	octeon_mgmt_set_rx_filtering(netdev);

	return 0;
}

static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 16383 bytes.
	 */
	if (size_without_fcs < 64 || size_without_fcs > 16383) {
		dev_warn(p->dev, "MTU must be between %d and %d.\n",
			 64 - OCTEON_MGMT_RX_HEADROOM,
			 16383 - OCTEON_MGMT_RX_HEADROOM);
		return -EINVAL;
	}

	netdev->mtu = new_mtu;

	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
	cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
		       (size_without_fcs + 7) & 0xfff8);

	return 0;
}
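
/*
 * The jabber threshold above is rounded up to the next multiple of 8
 * bytes by the (size + 7) & 0xfff8 expression.
 */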

static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_isr mixx_isr;

	mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
	cvmx_read_csr(p->mix + MIX_ISR);

	if (mixx_isr.s.irthresh) {
		octeon_mgmt_disable_rx_irq(p);
		napi_schedule(&p->napi);
	}
	if (mixx_isr.s.orthresh) {
		octeon_mgmt_disable_tx_irq(p);
		tasklet_schedule(&p->tx_clean_tasklet);
	}

	return IRQ_HANDLED;
}
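
/*
 * Both interrupt sources are masked here and re-enabled only after
 * the deferred work runs: RX from octeon_mgmt_napi_poll() and TX from
 * octeon_mgmt_clean_tx_tasklet().
 */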

static int octeon_mgmt_ioctl(struct net_device *netdev,
			     struct ifreq *rq, int cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	if (p->phydev == NULL)
		return -EINVAL;

	return phy_mii_ioctl(p->phydev, rq, cmd);
}

static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	unsigned long flags;
	int link_changed = 0;

	spin_lock_irqsave(&p->lock, flags);
	if (p->phydev->link) {
		if (!p->last_link)
			link_changed = 1;
		if (p->last_duplex != p->phydev->duplex) {
			p->last_duplex = p->phydev->duplex;
			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
			prtx_cfg.s.duplex = p->phydev->duplex;
			cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
		}
	} else {
		if (p->last_link)
			link_changed = -1;
	}
	p->last_link = p->phydev->link;
	spin_unlock_irqrestore(&p->lock, flags);

	if (link_changed != 0) {
		if (link_changed > 0) {
			netif_carrier_on(netdev);
			pr_info("%s: Link is up - %d/%s\n", netdev->name,
				p->phydev->speed,
				DUPLEX_FULL == p->phydev->duplex ?
				"Full" : "Half");
		} else {
			netif_carrier_off(netdev);
			pr_info("%s: Link is down\n", netdev->name);
		}
	}
}

static int octeon_mgmt_init_phy(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (octeon_is_simulation() || p->phy_np == NULL) {
		/* No PHYs in the simulator. */
		netif_carrier_on(netdev);
		return 0;
	}

	p->phydev = of_phy_connect(netdev, p->phy_np,
				   octeon_mgmt_adjust_link, 0,
				   PHY_INTERFACE_MODE_MII);

	if (p->phydev == NULL)
		return -ENODEV;

	phy_start_aneg(p->phydev);

	return 0;
}

static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers.  */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;

	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);
	}

	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
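
	/*
	 * Next, program the ring base/size registers.  The obase/ibase
	 * fields take the physical descriptor address shifted right by
	 * 3, since ring entries are 8-byte aligned.
	 */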
	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);

	/* Disable packet I/O. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/*
	 * Enable the port HW.  Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;	/* Strip the ending CRC */
	mix_ctl.s.en = 1;		/* Enable the port */
	mix_ctl.s.nbtarb = 0;		/* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/*
		 * Force compensation values, as they are not
		 * determined properly by HW.
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics. */
	/* Clear on read. */
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);

	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean.  */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 1;
	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);

	/* Enable packet I/O. */

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.pre_align = 1;
	/*
	 * When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assigned Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/*
	 * This port is configured to send PREAMBLE+SFD to begin every
	 * frame.  GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);

	/* Enable the AGL block */
	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	/* Configure the port duplex and enables */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	prtx_cfg.s.en = 1;
	p->last_duplex = 1;
	prtx_cfg.s.duplex = p->last_duplex;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	p->last_link = 0;
	netif_carrier_off(netdev);

	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY.\n");
		goto err_noirq;
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}

static int octeon_mgmt_stop(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	napi_disable(&p->napi);
	netif_stop_queue(netdev);

	if (p->phydev)
		phy_disconnect(p->phydev);

	netif_carrier_off(netdev);

	octeon_mgmt_reset_hw(p);

	free_irq(p->irq, netdev);

	/* dma_unmap is a nop on Octeon, so just free everything.  */
	skb_queue_purge(&p->tx_list);
	skb_queue_purge(&p->rx_list);

	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);

	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);

	return 0;
}

static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union mgmt_port_ring_entry re;
	unsigned long flags;
	int rv = NETDEV_TX_BUSY;

	re.d64 = 0;
	re.s.len = skb->len;
	re.s.addr = dma_map_single(p->dev, skb->data,
				   skb->len,
				   DMA_TO_DEVICE);

	spin_lock_irqsave(&p->tx_list.lock, flags);

	if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		netif_stop_queue(netdev);
		spin_lock_irqsave(&p->tx_list.lock, flags);
	}

	if (unlikely(p->tx_current_fill >=
		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);
		goto out;
	}

	__skb_queue_tail(&p->tx_list, skb);

	/* Put it in the ring.  */
	p->tx_ring[p->tx_next] = re.d64;
	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
	p->tx_current_fill++;

	spin_unlock_irqrestore(&p->tx_list.lock, flags);

	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
				   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
				   DMA_BIDIRECTIONAL);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	/* Ring the bell.  */
	cvmx_write_csr(p->mix + MIX_ORING2, 1);

	rv = NETDEV_TX_OK;
out:
	octeon_mgmt_update_tx_stats(netdev);
	return rv;
}
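
/*
 * Note the two-step flow-control check above: the queue is stopped
 * when only one usable slot remains, then the fill level is
 * re-checked under the lock.  If the clean tasklet freed entries in
 * between, the frame is still accepted and the queue is re-woken
 * later from octeon_mgmt_clean_tx_buffers().
 */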

#ifdef CONFIG_NET_POLL_CONTROLLER
static void octeon_mgmt_poll_controller(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	octeon_mgmt_receive_packets(p, 16);
	octeon_mgmt_update_rx_stats(netdev);
}
#endif

static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
				    struct ethtool_drvinfo *info)
{
	strncpy(info->driver, DRV_NAME, sizeof(info->driver));
	strncpy(info->version, DRV_VERSION, sizeof(info->version));
	strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
	info->n_stats = 0;
	info->testinfo_len = 0;
	info->regdump_len = 0;
	info->eedump_len = 0;
}

static int octeon_mgmt_get_settings(struct net_device *netdev,
				    struct ethtool_cmd *cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (p->phydev)
		return phy_ethtool_gset(p->phydev, cmd);

	return -EINVAL;
}

static int octeon_mgmt_set_settings(struct net_device *netdev,
				    struct ethtool_cmd *cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (p->phydev)
		return phy_ethtool_sset(p->phydev, cmd);

	return -EINVAL;
}

static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
	.get_drvinfo = octeon_mgmt_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_settings = octeon_mgmt_get_settings,
	.set_settings = octeon_mgmt_set_settings
};

static const struct net_device_ops octeon_mgmt_ops = {
	.ndo_open =			octeon_mgmt_open,
	.ndo_stop =			octeon_mgmt_stop,
	.ndo_start_xmit =		octeon_mgmt_xmit,
	.ndo_set_rx_mode =		octeon_mgmt_set_rx_filtering,
	.ndo_set_mac_address =		octeon_mgmt_set_mac_address,
	.ndo_do_ioctl =			octeon_mgmt_ioctl,
	.ndo_change_mtu =		octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		octeon_mgmt_poll_controller,
#endif
};

static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct octeon_mgmt *p;
	const __be32 *data;
	const u8 *mac;
	struct resource *res_mix;
	struct resource *res_agl;
	int len;
	int result;

	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
	if (netdev == NULL)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, netdev);
	p = netdev_priv(netdev);
	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
		       OCTEON_MGMT_NAPI_WEIGHT);

	p->netdev = netdev;
	p->dev = &pdev->dev;

	data = of_get_property(pdev->dev.of_node, "cell-index", &len);
	if (data && len == sizeof(*data)) {
		p->port = be32_to_cpup(data);
	} else {
		dev_err(&pdev->dev, "no 'cell-index' property\n");
		result = -ENXIO;
		goto err;
	}

	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

	result = platform_get_irq(pdev, 0);
	if (result < 0)
		goto err;

	p->irq = result;

	res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res_mix == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res_agl == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	p->mix_phys = res_mix->start;
	p->mix_size = resource_size(res_mix);
	p->agl_phys = res_agl->start;
	p->agl_size = resource_size(res_agl);

	if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
				     res_mix->name)) {
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_mix->name);
		result = -ENXIO;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
				     res_agl->name)) {
		result = -ENXIO;
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_agl->name);
		goto err;
	}

	p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
	p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
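
	/*
	 * p->mix and p->agl are kept as u64 CSR addresses rather than
	 * void __iomem pointers because all accesses go through
	 * cvmx_read_csr()/cvmx_write_csr(), hence the casts above.
	 */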

	spin_lock_init(&p->lock);

	skb_queue_head_init(&p->tx_list);
	skb_queue_head_init(&p->rx_list);
	tasklet_init(&p->tx_clean_tasklet,
		     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->netdev_ops = &octeon_mgmt_ops;
	netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;

	mac = of_get_mac_address(pdev->dev.of_node);

	if (mac)
		memcpy(netdev->dev_addr, mac, 6);

	p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	result = register_netdev(netdev);
	if (result)
		goto err;

	dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
	return 0;

err:
	free_netdev(netdev);
	return result;
}

static int __devexit octeon_mgmt_remove(struct platform_device *pdev)
{
	struct net_device *netdev = dev_get_drvdata(&pdev->dev);

	unregister_netdev(netdev);
	free_netdev(netdev);
	return 0;
}

static struct of_device_id octeon_mgmt_match[] = {
	{
		.compatible = "cavium,octeon-5750-mix",
	},
	{},
};
MODULE_DEVICE_TABLE(of, octeon_mgmt_match);

static struct platform_driver octeon_mgmt_driver = {
	.driver = {
		.name		= "octeon_mgmt",
		.owner		= THIS_MODULE,
		.of_match_table = octeon_mgmt_match,
	},
	.probe		= octeon_mgmt_probe,
	.remove		= __devexit_p(octeon_mgmt_remove),
};

extern void octeon_mdiobus_force_mod_depencency(void);

static int __init octeon_mgmt_mod_init(void)
{
	/* Force our mdiobus driver module to be loaded first. */
	octeon_mdiobus_force_mod_depencency();
	return platform_driver_register(&octeon_mgmt_driver);
}

static void __exit octeon_mgmt_mod_exit(void)
{
	platform_driver_unregister(&octeon_mgmt_driver);
}

module_init(octeon_mgmt_mod_init);
module_exit(octeon_mgmt_mod_exit);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);