2 * ADM5120 built-in ethernet switch driver
4 * Copyright (C) 2007 Gabor Juhos <juhosg at openwrt.org>
6 * This code was based on a driver for Linux 2.6.xx by Jeroen Vreeken.
7 * Copyright Jeroen Vreeken (pe1rxq@amsat.org), 2005
 * NAPI extension for Jeroen's driver
9 * Copyright Thomas Langer (Thomas.Langer@infineon.com), 2007
10 * Copyright Friedrich Beckmann (Friedrich.Beckmann@infineon.com), 2007
 * Inspiration for Jeroen's driver came from the ADMtek 2.4 driver.
12 * Copyright ADMtek Inc.
14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License version 2 as published
16 * by the Free Software Foundation.
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/errno.h>
23 #include <linux/interrupt.h>
24 #include <linux/ioport.h>
25 #include <linux/spinlock.h>
26 #include <linux/platform_device.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/skbuff.h>
33 #include <linux/irq.h>
35 #include <asm/mipsregs.h>
37 #include <adm5120_info.h>
38 #include <adm5120_defs.h>
39 #include <adm5120_irq.h>
40 #include <adm5120_switch.h>
42 #include "adm5120sw.h"
#define DRV_NAME	"adm5120-switch"
#define DRV_DESC	"ADM5120 built-in ethernet switch driver"
#define DRV_VERSION	"0.1.0"

/* Build-time switches: NAPI enabled, verbose debugging disabled. */
#define CONFIG_ADM5120_SWITCH_NAPI	1
#undef CONFIG_ADM5120_SWITCH_DEBUG

/* ------------------------------------------------------------------------ */

#ifdef CONFIG_ADM5120_SWITCH_DEBUG
#define SW_DBG(f, a...)		printk(KERN_DBG "%s: " f, DRV_NAME , ## a)
#else
#define SW_DBG(f, a...)		do {} while (0)
#endif
#define SW_ERR(f, a...)		printk(KERN_ERR "%s: " f, DRV_NAME , ## a)
#define SW_INFO(f, a...)	printk(KERN_INFO "%s: " f, DRV_NAME , ## a)

#define SWITCH_NUM_PORTS	6
#define ETH_CSUM_LEN		4	/* trailing checksum bytes stripped from RX frames */

#define RX_MAX_PKTLEN		1550
#define RX_RING_SIZE		64

#define TX_RING_SIZE		32
#define TX_QUEUE_LEN		28	/* Limit ring entries actually used. */
/* parenthesized: the original bare HZ*400 expands unsafely inside larger
 * expressions.  NOTE(review): 400*HZ (~400s) is a very long watchdog —
 * confirm the intended timeout. */
#define TX_TIMEOUT		(HZ * 400)

/* Descriptor rings hold struct dma_desc ELEMENTS, not pointers; the
 * original sizeof(struct dma_desc *) under-sized the dma_alloc_coherent()
 * blocks used for txl_descs/rxl_descs. */
#define RX_DESCS_SIZE	(RX_RING_SIZE * sizeof(struct dma_desc))
#define RX_SKBS_SIZE	(RX_RING_SIZE * sizeof(struct sk_buff *))
#define TX_DESCS_SIZE	(TX_RING_SIZE * sizeof(struct dma_desc))
#define TX_SKBS_SIZE	(TX_RING_SIZE * sizeof(struct sk_buff *))

#define SKB_ALLOC_LEN		(RX_MAX_PKTLEN + 32)
#define SKB_RESERVE_LEN		(NET_IP_ALIGN + NET_SKB_PAD)

/* Interrupt groupings: high/low priority ring events, errors, per-port
 * queue-full bits. */
#define SWITCH_INTS_HIGH (SWITCH_INT_SHD | SWITCH_INT_RHD | SWITCH_INT_HDF)
#define SWITCH_INTS_LOW (SWITCH_INT_SLD | SWITCH_INT_RLD | SWITCH_INT_LDF)
#define SWITCH_INTS_ERR (SWITCH_INT_RDE | SWITCH_INT_SDE | SWITCH_INT_CPUH)
#define SWITCH_INTS_Q (SWITCH_INT_P0QF | SWITCH_INT_P1QF | SWITCH_INT_P2QF | \
			SWITCH_INT_P3QF | SWITCH_INT_P4QF | SWITCH_INT_P5QF | \
			SWITCH_INT_CPQF | SWITCH_INT_GQF)

#define SWITCH_INTS_ALL (SWITCH_INTS_HIGH | SWITCH_INTS_LOW | \
			SWITCH_INTS_ERR | SWITCH_INTS_Q | \
			SWITCH_INT_MD | SWITCH_INT_PSC)

#define SWITCH_INTS_USED (SWITCH_INTS_LOW | SWITCH_INT_PSC)
#define SWITCH_INTS_POLL (SWITCH_INT_RLD | SWITCH_INT_LDF | SWITCH_INT_SLD)
93 /* ------------------------------------------------------------------------ */
95 struct adm5120_if_priv {
97 unsigned int port_mask;
102 #define DESC_OWN (1UL << 31) /* Owned by the switch */
103 #define DESC_EOR (1UL << 28) /* End of Ring */
104 #define DESC_ADDR_MASK 0x1FFFFFF
105 #define DESC_ADDR(x) ((__u32)(x) & DESC_ADDR_MASK)
107 #define DESC_BUF2_EN (1UL << 31) /* Buffer 2 enable */
110 /* definitions for tx/rx descriptors */
111 #define DESC_PKTLEN_SHIFT 16
112 #define DESC_PKTLEN_MASK 0x7FF
113 /* tx descriptor specific part */
114 #define DESC_CSUM (1UL << 31) /* Append checksum */
115 #define DESC_DSTPORT_SHIFT 8
116 #define DESC_DSTPORT_MASK 0x3F
117 #define DESC_VLAN_MASK 0x3F
118 /* rx descriptor specific part */
119 #define DESC_SRCPORT_SHIFT 12
120 #define DESC_SRCPORT_MASK 0x7
121 #define DESC_DA_MASK 0x3
122 #define DESC_DA_SHIFT 4
123 #define DESC_IPCSUM_FAIL (1UL << 3) /* IP checksum fail */
124 #define DESC_VLAN_TAG (1UL << 2) /* VLAN tag present */
125 #define DESC_TYPE_MASK 0x3 /* mask for Packet type */
126 #define DESC_TYPE_IP 0x0 /* IP packet */
127 #define DESC_TYPE_PPPoE 0x1 /* PPPoE packet */
128 } __attribute__ ((aligned(16)));
130 /* ------------------------------------------------------------------------ */
132 static int adm5120_nrdevs;
134 static struct net_device *adm5120_devs[SWITCH_NUM_PORTS];
135 /* Lookup table port -> device */
136 static struct net_device *adm5120_port[SWITCH_NUM_PORTS];
138 static struct dma_desc *txl_descs;
139 static struct dma_desc *rxl_descs;
141 static dma_addr_t txl_descs_dma;
142 static dma_addr_t rxl_descs_dma;
144 static struct sk_buff **txl_skbuff;
145 static struct sk_buff **rxl_skbuff;
147 static unsigned int cur_rxl, dirty_rxl; /* producer/consumer ring indices */
148 static unsigned int cur_txl, dirty_txl;
150 static unsigned int sw_used;
152 static spinlock_t tx_lock = SPIN_LOCK_UNLOCKED;
154 /* ------------------------------------------------------------------------ */
156 static inline u32 sw_read_reg(u32 reg)
158 return __raw_readl((void __iomem *)KSEG1ADDR(ADM5120_SWITCH_BASE)+reg);
161 static inline void sw_write_reg(u32 reg, u32 val)
163 __raw_writel(val, (void __iomem *)KSEG1ADDR(ADM5120_SWITCH_BASE)+reg);
166 static inline void sw_int_mask(u32 mask)
170 t = sw_read_reg(SWITCH_REG_INT_MASK);
172 sw_write_reg(SWITCH_REG_INT_MASK, t);
175 static inline void sw_int_unmask(u32 mask)
179 t = sw_read_reg(SWITCH_REG_INT_MASK);
181 sw_write_reg(SWITCH_REG_INT_MASK, t);
184 static inline void sw_int_ack(u32 mask)
186 sw_write_reg(SWITCH_REG_INT_STATUS, mask);
189 static inline u32 sw_int_status(void)
193 t = sw_read_reg(SWITCH_REG_INT_STATUS);
194 t &= ~sw_read_reg(SWITCH_REG_INT_MASK);
198 static inline u32 desc_get_srcport(struct dma_desc *desc)
200 return (desc->misc >> DESC_SRCPORT_SHIFT) & DESC_SRCPORT_MASK;
203 static inline u32 desc_get_pktlen(struct dma_desc *desc)
205 return (desc->misc >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK;
208 static inline int desc_ipcsum_fail(struct dma_desc *desc)
210 return ((desc->misc & DESC_IPCSUM_FAIL) != 0);
213 /* ------------------------------------------------------------------------ */
/*
 * Pretty-print one DMA descriptor (tx or rx flavour) through SW_DBG.
 * Compiles to no-ops unless CONFIG_ADM5120_SWITCH_DEBUG is defined.
 * NOTE(review): this extract is missing lines (braces, the 't = ...'
 * loads, the tx/rx branch keywords) — comments only added here.
 */
static void sw_dump_desc(char *label, struct dma_desc *desc, int tx)
	SW_DBG("%s %s desc/%p\n", label, tx ? "tx" : "rx", desc);

	/* buf1 word: ownership, end-of-ring marker, buffer address */
	SW_DBG(" buf1 %08X addr=%08X; len=%08X %s%s\n", t,
		(t & DESC_OWN) ? "SWITCH" : "CPU",
		(t & DESC_EOR) ? " RE" : "");

	/* buf2 word: secondary buffer (enable flag in bit 31) */
	SW_DBG(" buf2 %08X addr=%08X%s\n", desc->buf2,
		(t & DESC_BUF2_EN) ? " EN" : "" );

	/* misc word, tx layout: csum flag, packet length, port/vlan bitmaps */
	SW_DBG(" misc %08X%s pktlen=%04X ports=%02X vlan=%02X\n", t,
		(t & DESC_CSUM) ? " CSUM" : "",
		(t >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK,
		(t >> DESC_DSTPORT_SHIFT) & DESC_DSTPORT_MASK,

	/* misc word, rx layout: length, source port, DA class, status flags */
	SW_DBG(" misc %08X pktlen=%04X port=%d DA=%d%s%s type=%d\n",
		(t >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK,
		(t >> DESC_SRCPORT_SHIFT) & DESC_SRCPORT_MASK,
		(t >> DESC_DA_SHIFT) & DESC_DA_MASK,
		(t & DESC_IPCSUM_FAIL) ? " IPCF" : "",
		(t & DESC_VLAN_TAG) ? " VLAN" : "",
		(t & DESC_TYPE_MASK));
/*
 * Decode an interrupt status/mask word into a human-readable bit list
 * (debug only).
 * NOTE(review): the 'label' and 'mask' value arguments for the leading
 * "%s %08X" specifiers are missing from this extract — dropped lines.
 */
static void sw_dump_intr_mask(char *label, u32 mask)
	SW_DBG("%s %08X%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		(mask & SWITCH_INT_SHD) ? " SHD" : "",
		(mask & SWITCH_INT_SLD) ? " SLD" : "",
		(mask & SWITCH_INT_RHD) ? " RHD" : "",
		(mask & SWITCH_INT_RLD) ? " RLD" : "",
		(mask & SWITCH_INT_HDF) ? " HDF" : "",
		(mask & SWITCH_INT_LDF) ? " LDF" : "",
		(mask & SWITCH_INT_P0QF) ? " P0QF" : "",
		(mask & SWITCH_INT_P1QF) ? " P1QF" : "",
		(mask & SWITCH_INT_P2QF) ? " P2QF" : "",
		(mask & SWITCH_INT_P3QF) ? " P3QF" : "",
		(mask & SWITCH_INT_P4QF) ? " P4QF" : "",
		(mask & SWITCH_INT_CPQF) ? " CPQF" : "",
		(mask & SWITCH_INT_GQF) ? " GQF" : "",
		(mask & SWITCH_INT_MD) ? " MD" : "",
		(mask & SWITCH_INT_BCS) ? " BCS" : "",
		(mask & SWITCH_INT_PSC) ? " PSC" : "",
		(mask & SWITCH_INT_ID) ? " ID" : "",
		(mask & SWITCH_INT_W0TE) ? " W0TE" : "",
		(mask & SWITCH_INT_W1TE) ? " W1TE" : "",
		(mask & SWITCH_INT_RDE) ? " RDE" : "",
		(mask & SWITCH_INT_SDE) ? " SDE" : "",
		(mask & SWITCH_INT_CPUH) ? " CPUH" : "");
/*
 * Dump the interesting switch registers (debug only).
 * NOTE(review): reads here use SW_READ_REG(SHORT_NAME) while the rest of
 * this file uses sw_read_reg(SWITCH_REG_...) — confirm which accessor
 * macro the full file defines; likely an extraction artifact.
 */
static void sw_dump_regs(void)
	t = SW_READ_REG(PHY_STATUS);
	SW_DBG("phy_status: %08X\n", t);

	t = SW_READ_REG(CPUP_CONF);
	SW_DBG("cpup_conf: %08X%s%s%s\n", t,
		(t & CPUP_CONF_DCPUP) ? " DCPUP" : "",
		(t & CPUP_CONF_CRCP) ? " CRCP" : "",
		(t & CPUP_CONF_BTM) ? " BTM" : "");

	t = SW_READ_REG(PORT_CONF0);
	SW_DBG("port_conf0: %08X\n", t);
	t = SW_READ_REG(PORT_CONF1);
	SW_DBG("port_conf1: %08X\n", t);
	t = SW_READ_REG(PORT_CONF2);
	SW_DBG("port_conf2: %08X\n", t);

	t = SW_READ_REG(VLAN_G1);
	SW_DBG("vlan g1: %08X\n", t);
	t = SW_READ_REG(VLAN_G2);
	SW_DBG("vlan g2: %08X\n", t);

	t = SW_READ_REG(BW_CNTL0);
	SW_DBG("bw_cntl0: %08X\n", t);
	t = SW_READ_REG(BW_CNTL1);
	SW_DBG("bw_cntl1: %08X\n", t);

	t = SW_READ_REG(PHY_CNTL0);
	SW_DBG("phy_cntl0: %08X\n", t);
	t = SW_READ_REG(PHY_CNTL1);
	SW_DBG("phy_cntl1: %08X\n", t);
	t = SW_READ_REG(PHY_CNTL2);
	SW_DBG("phy_cntl2: %08X\n", t);
	t = SW_READ_REG(PHY_CNTL3);
	SW_DBG("phy_cntl3: %08X\n", t);
	t = SW_READ_REG(PHY_CNTL4);
	SW_DBG("phy_cntl4: %08X\n", t);

	/* pending interrupts and current mask, decoded bit-by-bit */
	t = SW_READ_REG(INT_STATUS);
	sw_dump_intr_mask("int_status: ", t);
	t = SW_READ_REG(INT_MASK);
	sw_dump_intr_mask("int_mask: ", t);

	/* high/low priority send/receive descriptor base addresses */
	t = SW_READ_REG(SHDA);
	SW_DBG("shda: %08X\n", t);
	t = SW_READ_REG(SLDA);
	SW_DBG("slda: %08X\n", t);
	t = SW_READ_REG(RHDA);
	SW_DBG("rhda: %08X\n", t);
	t = SW_READ_REG(RLDA);
	SW_DBG("rlda: %08X\n", t);
337 /* ------------------------------------------------------------------------ */
/*
 * (Re)arm one RX descriptor with a fresh skb buffer and hand it back to
 * the switch (DESC_OWN); 'end' marks the last ring entry (DESC_EOR) so
 * the DMA engine wraps.
 */
static inline void adm5120_rx_dma_update(struct dma_desc *desc,
	struct sk_buff *skb, int end)
	desc->buflen = RX_MAX_PKTLEN;
	/* DESC_ADDR() keeps only the low address bits the switch DMAs from */
	desc->buf1 = DESC_ADDR(skb->data) |
		DESC_OWN | (end ? DESC_EOR : 0);
/*
 * Refill RX descriptors consumed by adm5120_switch_rx(): for each dirty
 * entry, make sure an skb is attached (allocating one if the previous
 * skb was handed up the stack) and return the descriptor to the switch
 * by setting DESC_OWN, preserving the DESC_EOR ring-wrap marker.
 * NOTE(review): extract is missing lines (if/else structure around the
 * allocation, declarations, break/continue) — comments only added here.
 */
static void adm5120_switch_rx_refill(void)
	for (; cur_rxl - dirty_rxl > 0; dirty_rxl++) {
		struct dma_desc *desc;

		entry = dirty_rxl % RX_RING_SIZE;
		desc = &rxl_descs[entry];

		skb = rxl_skbuff[entry];
			/* previous skb was consumed: allocate a fresh one */
			skb = alloc_skb(SKB_ALLOC_LEN, GFP_ATOMIC);
				skb_reserve(skb, SKB_RESERVE_LEN);
				rxl_skbuff[entry] = skb;
				SW_ERR("no memory for skb\n");
				/* no buffer: hand the descriptor back as-is */
				desc->buf1 = (desc->buf1 & DESC_EOR) | DESC_OWN;

		desc->buflen = RX_MAX_PKTLEN;
		/* re-arm with the (possibly new) buffer address */
		desc->buf1 = (desc->buf1 & DESC_EOR) | DESC_OWN |
			DESC_ADDR(skb->data);
/*
 * RX fast path: process up to 'limit' completed low-priority RX
 * descriptors, delivering good frames to the owning net_device and
 * recycling error frames.  Returns the number of packets processed
 * (used as 'done' by the NAPI poll loop).
 * NOTE(review): extract is missing lines (declarations, break
 * statements, several braces and else-branches) — comments only added.
 */
static int adm5120_switch_rx(int limit)
	unsigned int done = 0;

	SW_DBG("rx start, limit=%d, cur_rxl=%u, dirty_rxl=%u\n",
		limit, cur_rxl, dirty_rxl);

	while (done < limit) {
		int entry = cur_rxl % RX_RING_SIZE;
		struct dma_desc *desc = &rxl_descs[entry];
		struct net_device *rdev;

		/* still owned by the switch: no more completed frames */
		if (desc->buf1 & DESC_OWN)

		/* whole ring consumed and nothing refilled yet */
		if (dirty_rxl + RX_RING_SIZE == cur_rxl)

		port = desc_get_srcport(desc);
		rdev = adm5120_port[port];	/* port -> net_device lookup */

		SW_DBG("rx descriptor %u, desc=%p, skb=%p\n", entry, desc,

		if ((rdev) && netif_running(rdev)) {
			struct sk_buff *skb = rxl_skbuff[entry];

			pktlen = desc_get_pktlen(desc);
			pktlen -= ETH_CSUM_LEN;	/* strip trailing checksum bytes */

			if ((pktlen == 0) || desc_ipcsum_fail(desc)) {
				rdev->stats.rx_errors++;
				rdev->stats.rx_length_errors++;
				if (desc_ipcsum_fail(desc))
					rdev->stats.rx_crc_errors++;
				SW_DBG("rx error, recycling skb %u\n", entry);
				skb_put(skb, pktlen);

				skb->protocol = eth_type_trans(skb, rdev);
				/* hardware verified the IP checksum already */
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				dma_cache_wback_inv((unsigned long)skb->data,

/* NOTE(review): this macro name differs from CONFIG_ADM5120_SWITCH_NAPI
 * defined at the top of this file — as written, this branch can never be
 * compiled in; confirm which name is intended. */
#ifdef CONFIG_ADM5120_SWITCH_USE_NAPI
				netif_receive_skb(skb);

				rdev->last_rx = jiffies;
				rdev->stats.rx_packets++;
				rdev->stats.rx_bytes += pktlen;

				/* skb ownership passed to the stack */
				rxl_skbuff[entry] = NULL;

			SW_DBG("no rx device, recycling skb %u\n", entry);

		/* opportunistic refill once a quarter of the ring is dirty */
		if (cur_rxl - dirty_rxl > RX_RING_SIZE / 4)
			adm5120_switch_rx_refill();

	adm5120_switch_rx_refill();

	SW_DBG("rx finished, cur_rxl=%u, dirty_rxl=%u, processed %d\n",
		cur_rxl, dirty_rxl, done);
/*
 * Reclaim completed TX descriptors: free transmitted skbs, account them
 * to the owning device's stats, then wake all queues once enough ring
 * space is available again.  Runs in interrupt context.
 * NOTE(review): extract is missing lines — in particular the matching
 * spin_lock() for the spin_unlock(&tx_lock) below, declarations and a
 * break statement; comments only added here.
 */
static void adm5120_switch_tx(void)
	entry = dirty_txl % TX_RING_SIZE;
	while (dirty_txl != cur_txl) {
		struct dma_desc *desc = &txl_descs[entry];
		struct sk_buff *skb = txl_skbuff[entry];

		/* still owned by the DMA engine: stop reclaiming */
		if (desc->buf1 & DESC_OWN)

		if (netif_running(skb->dev)) {
			skb->dev->stats.tx_bytes += skb->len;
			skb->dev->stats.tx_packets++;

		dev_kfree_skb_irq(skb);	/* IRQ-safe free */
		txl_skbuff[entry] = NULL;
		entry = (++dirty_txl) % TX_RING_SIZE;

	/* wake every interface once the ring drains below the threshold */
	if ((cur_txl - dirty_txl) < TX_QUEUE_LEN - 4) {
		for (i = 0; i < SWITCH_NUM_PORTS; i++) {
			if (!adm5120_devs[i])
			netif_wake_queue(adm5120_devs[i]);

	spin_unlock(&tx_lock);
#ifdef CONFIG_ADM5120_SWITCH_NAPI
/*
 * Old-style (pre-2.6.24) NAPI poll callback: reclaim TX, receive up to
 * the budget, then leave polling mode when the ring is drained and no
 * poll-relevant interrupt is pending.
 * NOTE(review): extract is missing lines (declarations, budget/quota
 * bookkeeping, return statements) — comments only added here.
 */
static int adm5120_if_poll(struct net_device *dev, int *budget)
	int limit = min(dev->quota, *budget);

	sw_int_ack(SWITCH_INTS_POLL);

	SW_DBG("%s: processing TX ring\n", dev->name);

	SW_DBG("%s: processing RX ring\n", dev->name);
	done = adm5120_switch_rx(limit);

	/* re-check: a new event may have arrived while polling */
	status = sw_int_status() & SWITCH_INTS_POLL;
	if ((done < limit) && (!status)) {
		SW_DBG("disable polling mode for %s\n", dev->name);
		netif_rx_complete(dev);
		/* re-enable the interrupts masked by the IRQ handler */
		sw_int_unmask(SWITCH_INTS_POLL);

	SW_DBG("%s still in polling mode, done=%d, status=%x\n",
		dev->name, done, status);
#endif /* CONFIG_ADM5120_SWITCH_NAPI */
/*
 * Switch interrupt handler (IRQ is shared between all interfaces).  In
 * NAPI mode, poll-relevant interrupts are masked and deferred to
 * adm5120_if_poll(); other sources are acknowledged directly.
 * NOTE(review): extract is missing lines (status==0 early-out, the
 * non-NAPI #else path, closing braces, the IRQ_HANDLED return) —
 * comments only added here.
 */
static irqreturn_t adm5120_switch_irq(int irq, void *dev_id)
	status = sw_int_status();
	status &= SWITCH_INTS_ALL;

#ifdef CONFIG_ADM5120_SWITCH_NAPI
	/* ack everything except what the poll loop will handle */
	sw_int_ack(status & ~SWITCH_INTS_POLL);

	if (status & SWITCH_INTS_POLL) {
		struct net_device *dev = dev_id;
		sw_dump_intr_mask("poll ints", status);
		SW_DBG("enable polling mode for %s\n", dev->name);
		/* masked until netif_rx_complete() unmasks in the poll loop */
		sw_int_mask(SWITCH_INTS_POLL);
		netif_rx_schedule(dev);

	/* non-NAPI path: service the rings directly from the IRQ */
	if (status & (SWITCH_INT_RLD | SWITCH_INT_LDF)) {
		adm5120_switch_rx(RX_RING_SIZE);

	if (status & SWITCH_INT_SLD) {
564 static void adm5120_set_bw(char *matrix)
568 /* Port 0 to 3 are set using the bandwidth control 0 register */
569 val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
570 sw_write_reg(SWITCH_REG_BW_CNTL0, val);
572 /* Port 4 and 5 are set using the bandwidth control 1 register */
575 sw_write_reg(SWITCH_REG_BW_CNTL1, val | 0x80000000);
577 sw_write_reg(SWITCH_REG_BW_CNTL1, val & ~0x8000000);
579 SW_DBG("D: ctl0 0x%ux, ctl1 0x%ux\n", sw_read_reg(SWITCH_REG_BW_CNTL0),
580 sw_read_reg(SWITCH_REG_BW_CNTL1));
583 static void adm5120_switch_tx_ring_reset(struct dma_desc *desc,
584 struct sk_buff **skbl, int num)
586 memset(desc, 0, num * sizeof(*desc));
587 desc[num-1].buf1 |= DESC_EOR;
588 memset(skbl, 0, sizeof(struct skb*)*num);
/*
 * Reset the RX ring: zero all descriptors, allocate one receive skb per
 * entry and arm each descriptor via adm5120_rx_dma_update() (the last
 * entry gets DESC_EOR).
 * NOTE(review): extract is missing lines (allocation-failure handling,
 * braces, ring-index resets) — comments only added here.
 */
static void adm5120_switch_rx_ring_reset(struct dma_desc *desc,
	struct sk_buff **skbl, int num)
	memset(desc, 0, num * sizeof(*desc));
	for (i = 0; i < num; i++) {
		skbl[i] = dev_alloc_skb(SKB_ALLOC_LEN);
		skb_reserve(skbl[i], SKB_RESERVE_LEN);
		adm5120_rx_dma_update(&desc[i], skbl[i], (num-1==i));
/*
 * Allocate the TX descriptor ring (DMA-coherent) and its skb shadow
 * array.  Returns 0 on success, negative errno otherwise.
 * NOTE(review): extract is missing the error paths and return;
 * dma_alloc_coherent(NULL, ...) relies on the old NULL-device behaviour
 * — confirm against the target kernel version.
 */
static int adm5120_switch_tx_ring_alloc(void)
	txl_descs = dma_alloc_coherent(NULL, TX_DESCS_SIZE, &txl_descs_dma,

	txl_skbuff = kzalloc(TX_SKBS_SIZE, GFP_KERNEL);
/*
 * Free all TX-side resources: any skbs still attached to ring slots,
 * then the shadow array and the coherent descriptor block.
 * NOTE(review): extract is missing lines (NULL checks, kfree of the
 * shadow array) — comments only added here.
 */
static void adm5120_switch_tx_ring_free(void)
	for (i = 0; i < TX_RING_SIZE; i++)
		kfree_skb(txl_skbuff[i]);

	dma_free_coherent(NULL, TX_DESCS_SIZE, txl_descs,
/*
 * Allocate the RX descriptor ring (DMA-coherent), its skb shadow array
 * and one receive skb per ring entry.  Returns 0 on success.
 * NOTE(review): extract is missing error handling and returns; the
 * GFP_ATOMIC here (vs GFP_KERNEL above) looks inconsistent for probe
 * context — confirm.
 */
static int adm5120_switch_rx_ring_alloc(void)
	rxl_descs = dma_alloc_coherent(NULL, RX_DESCS_SIZE, &rxl_descs_dma,

	rxl_skbuff = kzalloc(RX_SKBS_SIZE, GFP_KERNEL);

	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = alloc_skb(SKB_ALLOC_LEN, GFP_ATOMIC);
		skb_reserve(skb, SKB_RESERVE_LEN);
/*
 * Free all RX-side resources: per-slot skbs, then the shadow array and
 * the coherent descriptor block.
 * NOTE(review): extract is missing lines (NULL checks, kfree of the
 * shadow array) — comments only added here.
 */
static void adm5120_switch_rx_ring_free(void)
	for (i = 0; i < RX_RING_SIZE; i++)
		kfree_skb(rxl_skbuff[i]);

	dma_free_coherent(NULL, RX_DESCS_SIZE, rxl_descs,
705 static void adm5120_write_mac(struct net_device *dev)
707 struct adm5120_if_priv *priv = netdev_priv(dev);
708 unsigned char *mac = dev->dev_addr;
711 t = mac[2] | (mac[3] << MAC_WT1_MAC3_SHIFT) |
712 (mac[4] << MAC_WT1_MAC4_SHIFT) | (mac[5] << MAC_WT1_MAC4_SHIFT);
713 sw_write_reg(SWITCH_REG_MAC_WT1, t);
715 t = (mac[0] << MAC_WT0_MAC0_SHIFT) | (mac[1] << MAC_WT0_MAC1_SHIFT) |
716 MAC_WT0_MAWC | MAC_WT0_WVE | (priv->vlan_no<<3);
718 sw_write_reg(SWITCH_REG_MAC_WT0, t);
720 while (!(sw_read_reg(SWITCH_REG_MAC_WT0) & MAC_WT0_MWD));
/*
 * Program the VLAN membership matrix into VLAN_G1/G2 and rebuild the
 * port -> net_device lookup table used by the RX path.
 *
 * @matrix: 6 bytes; matrix[v] is the port bitmap of VLAN v.
 * NOTE(review): extract is missing lines (declarations, the 'else'
 * before the NULL assignment) — comments only added here.
 */
static void adm5120_set_vlan(char *matrix)
	val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
	sw_write_reg(SWITCH_REG_VLAN_G1, val);
	val = matrix[4] + (matrix[5]<<8);
	sw_write_reg(SWITCH_REG_VLAN_G2, val);

	/* Now set/update the port vs. device lookup table */
	for (port=0; port<SWITCH_NUM_PORTS; port++) {
		/* find the first VLAN containing this port (empty loop body) */
		for (vlan_port=0; vlan_port<SWITCH_NUM_PORTS && !(matrix[vlan_port] & (0x00000001 << port)); vlan_port++);
		if (vlan_port <SWITCH_NUM_PORTS)
			adm5120_port[port] = adm5120_devs[vlan_port];
			adm5120_port[port] = NULL;
743 static void adm5120_switch_set_vlan_mac(unsigned int vlan, unsigned char *mac)
747 t = mac[2] | (mac[3] << MAC_WT1_MAC3_SHIFT)
748 | (mac[4] << MAC_WT1_MAC4_SHIFT)
749 | (mac[5] << MAC_WT1_MAC4_SHIFT);
750 sw_write_reg(SWITCH_REG_MAC_WT1, t);
752 t = (mac[0] << MAC_WT0_MAC0_SHIFT) | (mac[1] << MAC_WT0_MAC1_SHIFT) |
753 MAC_WT0_MAWC | MAC_WT0_WVE | (vlan << MAC_WT0_WVN_SHIFT) |
754 (MAC_WT0_WAF_STATIC << MAC_WT0_WAF_SHIFT);
755 sw_write_reg(SWITCH_REG_MAC_WT0, t);
758 t = sw_read_reg(SWITCH_REG_MAC_WT0);
759 } while ((t & MAC_WT0_MWD) == 0);
/*
 * Update the 8-bit port bitmap of one VLAN with a read-modify-write of
 * VLAN_G1 (VLANs 0-3) or VLAN_G2 (VLANs 4-5).
 * NOTE(review): extract is missing the vlan<4 selection branch and the
 * index adjustment for the G2 case — comments only added here.
 */
static void adm5120_switch_set_vlan_ports(unsigned int vlan, u32 ports)
	reg = SWITCH_REG_VLAN_G1;

	reg = SWITCH_REG_VLAN_G2;

	t = sw_read_reg(reg);
	t &= ~(0xFF << (vlan*8));	/* clear this VLAN's byte */
	t |= (ports << (vlan*8));	/* insert the new port bitmap */
	sw_write_reg(reg, t);
780 /* ------------------------------------------------------------------------ */
/*
 * net_device open: request the shared switch IRQ, unmask the interrupts
 * this driver uses, enable the interface's ports in PORT_CONF0 and
 * start the TX queue.
 * NOTE(review): extract is missing lines (error return after a failed
 * request_irq, braces, the final return) — comments only added here.
 */
static int adm5120_if_open(struct net_device *dev)
	err = request_irq(dev->irq, adm5120_switch_irq,
		(IRQF_SHARED | IRQF_DISABLED), dev->name, dev);
		SW_ERR("unable to get irq for %s\n", dev->name);

	/* enable interrupts on first open */
	sw_int_unmask(SWITCH_INTS_USED);

	/* enable (additional) port */
	t = sw_read_reg(SWITCH_REG_PORT_CONF0);
	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		if (dev == adm5120_devs[i])
			/* clearing a port's bits enables it */
			t &= ~adm5120_eth_vlans[i];
	sw_write_reg(SWITCH_REG_PORT_CONF0, t);

	netif_start_queue(dev);
/*
 * net_device stop: stop the queue, disable this interface's ports in
 * PORT_CONF0 unless another running interface still uses them, mask the
 * switch interrupts and release the IRQ.
 * NOTE(review): adm5120_devs[i] could be NULL if a sibling device failed
 * to register; netif_running(adm5120_devs[i]) would then oops — confirm
 * all slots are guaranteed populated before any interface can open.
 */
static int adm5120_if_stop(struct net_device *dev)
	netif_stop_queue(dev);

	/* disable port if not assigned to other devices */
	t = sw_read_reg(SWITCH_REG_PORT_CONF0);
	t |= SWITCH_PORTS_NOCPU;	/* start from "all disabled" */
	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		if ((dev != adm5120_devs[i]) && netif_running(adm5120_devs[i]))
			t &= ~adm5120_eth_vlans[i];
	sw_write_reg(SWITCH_REG_PORT_CONF0, t);

	sw_int_mask(SWITCH_INTS_USED);

	free_irq(dev->irq, dev);
/*
 * hard_start_xmit: place one skb on the low-priority TX ring and kick
 * the switch.  Serialized against the IRQ-context reclaim path by
 * tx_lock with local interrupts disabled.
 * NOTE(review): extract is missing lines (declarations, the early
 * unlock+return on the OWN-bit collision, the 'desc->misc =' lvalue,
 * return value) — comments only added here.
 */
static int adm5120_if_hard_start_xmit(struct sk_buff *skb,
		struct net_device *dev)
	struct dma_desc *desc;
	struct adm5120_if_priv *priv = netdev_priv(dev);

	/* lock switch irq */
	spin_lock_irq(&tx_lock);

	/* calculate the next TX descriptor entry. */
	entry = cur_txl % TX_RING_SIZE;

	desc = &txl_descs[entry];
	if (desc->buf1 & DESC_OWN) {
		/* We want to write a packet but the TX queue is still
		 * occupied by the DMA. We are faster than the DMA... */
		SW_DBG("%s unable to transmit, packet dopped\n", dev->name);
		dev->stats.tx_dropped++;

	txl_skbuff[entry] = skb;
	data = (desc->buf1 & DESC_EOR);	/* keep the ring-wrap marker */
	data |= DESC_ADDR(skb->data);

	/* pad short frames to ETH_ZLEN; destination port bitmap comes from
	 * the interface's VLAN number */
		((skb->len<ETH_ZLEN?ETH_ZLEN:skb->len) << DESC_PKTLEN_SHIFT) |
		(0x1 << priv->vlan_no);

	desc->buflen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

	desc->buf1 = data | DESC_OWN;	/* hand the descriptor to the switch */
	sw_write_reg(SWITCH_REG_SEND_TRIG, SEND_TRIG_STL);

	if (cur_txl == dirty_txl + TX_QUEUE_LEN) {
		/* FIXME: stop queue for all devices */
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	spin_unlock_irq(&tx_lock);
/* Watchdog callback: the stack saw no TX progress within TX_TIMEOUT.
 * Only logs; no recovery action is visible in this extract. */
static void adm5120_if_tx_timeout(struct net_device *dev)
	SW_INFO("TX timeout on %s\n",dev->name);
/*
 * Configure unknown-unicast/multicast forwarding to the CPU for this
 * interface's ports, plus Bridge Test Mode globally, from the device's
 * promisc/allmulti flags.
 * NOTE(review): extract is missing lines ('else' keywords, the tail of
 * the multicast condition, braces) — comments only added here.
 */
static void adm5120_if_set_multicast_list(struct net_device *dev)
	struct adm5120_if_priv *priv = netdev_priv(dev);

	/* ports belonging to this interface's VLAN, CPU port excluded */
	ports = adm5120_eth_vlans[priv->vlan_no] & SWITCH_PORTS_NOCPU;

	t = sw_read_reg(SWITCH_REG_CPUP_CONF);
	if (dev->flags & IFF_PROMISC)
		/* enable unknown packets */
		t &= ~(ports << CPUP_CONF_DUNP_SHIFT);
		/* disable unknown packets */
		t |= (ports << CPUP_CONF_DUNP_SHIFT);

	if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
		/* enable multicast packets */
		t &= ~(ports << CPUP_CONF_DMCP_SHIFT);
		/* disable multicast packets */
		t |= (ports << CPUP_CONF_DMCP_SHIFT);

	/* If there is any port configured to be in promiscuous mode, then the */
	/* Bridge Test Mode has to be activated. This will result in */
	/* transporting also packets learned in another VLAN to be forwarded */

	/* The difficult scenario is when we want to build a bridge on the CPU.*/
	/* Assume we have port0 and the CPU port in VLAN0 and port1 and the */
	/* CPU port in VLAN1. Now we build a bridge on the CPU between */
	/* VLAN0 and VLAN1. Both ports of the VLANs are set in promisc mode. */
	/* Now assume a packet with ethernet source address 99 enters port 0 */
	/* It will be forwarded to the CPU because it is unknown. Then the */
	/* bridge in the CPU will send it to VLAN1 and it goes out at port 1. */
	/* When now a packet with ethernet destination address 99 comes in at */
	/* port 1 in VLAN1, then the switch has learned that this address is */
	/* located at port 0 in VLAN0. Therefore the switch will drop */
	/* this packet. In order to avoid this and to send the packet still */
	/* to the CPU, the Bridge Test Mode has to be activated. */

	/* Check if there is any vlan in promisc mode. */
	if (t & (SWITCH_PORTS_NOCPU << CPUP_CONF_DUNP_SHIFT))
		t &= ~CPUP_CONF_BTM; /* Disable Bridge Testing Mode */
		t |= CPUP_CONF_BTM; /* Enable Bridge Testing Mode */

	sw_write_reg(SWITCH_REG_CPUP_CONF, t);
945 static int adm5120_if_set_mac_address(struct net_device *dev, void *p)
947 struct sockaddr *addr = p;
949 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
950 adm5120_write_mac(dev);
/*
 * Private ioctl handler: exposes switch port/VLAN configuration (get
 * info, get/set the VLAN matrix).  Setting requires CAP_NET_ADMIN.
 * NOTE(review): extract is missing the switch(cmd) skeleton and the
 * -EFAULT/-EPERM returns around the copy_*_user results — comments only
 * added here.
 */
static int adm5120_if_do_ioctl(struct net_device *dev, struct ifreq *rq,
	struct adm5120_sw_info info;
	struct adm5120_if_priv *priv = netdev_priv(dev);

	/* "get info": report port count and this interface's VLAN */
	info.ports = adm5120_nrdevs;
	info.vlan = priv->vlan_no;
	err = copy_to_user(rq->ifr_data, &info, sizeof(info));

	/* "set VLAN matrix": privileged */
	if (!capable(CAP_NET_ADMIN))
	err = copy_from_user(adm5120_eth_vlans, rq->ifr_data,
		sizeof(adm5120_eth_vlans));

	adm5120_set_vlan(adm5120_eth_vlans);

	/* "get VLAN matrix" */
	err = copy_to_user(rq->ifr_data, adm5120_eth_vlans,
		sizeof(adm5120_eth_vlans));
/*
 * Allocate and pre-configure one ethernet net_device for a switch VLAN,
 * wiring up the old-style (pre-net_device_ops) callbacks.
 * NOTE(review): extract is missing the NULL check after alloc_etherdev
 * and the final return — comments only added here.
 */
static struct net_device *adm5120_if_alloc(void)
	struct net_device *dev;
	struct adm5120_if_priv *priv;

	dev = alloc_etherdev(sizeof(*priv));

	dev->irq = ADM5120_IRQ_SWITCH;	/* all interfaces share the switch IRQ */
	dev->open = adm5120_if_open;
	dev->hard_start_xmit = adm5120_if_hard_start_xmit;
	dev->stop = adm5120_if_stop;
	dev->set_multicast_list = adm5120_if_set_multicast_list;
	dev->do_ioctl = adm5120_if_do_ioctl;
	dev->tx_timeout = adm5120_if_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->set_mac_address = adm5120_if_set_mac_address;
#ifdef CONFIG_ADM5120_SWITCH_NAPI
	dev->poll = adm5120_if_poll;

	SET_MODULE_OWNER(dev);
1019 /* ------------------------------------------------------------------------ */
/*
 * Tear down everything adm5120_switch_probe() built: mask all switch
 * interrupts, unregister/free the net_devices and release both rings.
 * NOTE(review): extract is missing the per-device NULL check and
 * free_netdev() around unregister_netdev() — comments only added here.
 */
static void adm5120_switch_cleanup(void)
	/* disable interrupts */
	sw_int_mask(SWITCH_INTS_ALL);

	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		struct net_device *dev = adm5120_devs[i];
		unregister_netdev(dev);

	adm5120_switch_tx_ring_free();
	adm5120_switch_rx_ring_free();
/*
 * Probe: bring the switch to a sane default state (CPU port disabled,
 * unknown/multicast traffic dropped, PHYs on autoneg/auto-MDIX),
 * allocate both DMA rings, create one net_device per configured VLAN
 * and finally enable the CPU port.
 * NOTE(review): extract is missing lines (declarations, goto-based
 * error-path labels, loop/if braces) — comments only added here.
 */
static int __init adm5120_switch_probe(struct platform_device *pdev)
	adm5120_nrdevs = adm5120_eth_num_ports;

	/* start with CPU port disabled, unknown and multicast dropped */
	t = CPUP_CONF_DCPUP | CPUP_CONF_CRCP |
		SWITCH_PORTS_NOCPU << CPUP_CONF_DUNP_SHIFT |
		SWITCH_PORTS_NOCPU << CPUP_CONF_DMCP_SHIFT ;
	sw_write_reg(SWITCH_REG_CPUP_CONF, t);

	t = (SWITCH_PORTS_NOCPU << PORT_CONF0_EMCP_SHIFT) |
		(SWITCH_PORTS_NOCPU << PORT_CONF0_BP_SHIFT) |
		(SWITCH_PORTS_NOCPU);
	sw_write_reg(SWITCH_REG_PORT_CONF0, t);

	/* setup ports to Autoneg/100M/Full duplex/Auto MDIX */
	t = SWITCH_PORTS_PHY |
		(SWITCH_PORTS_PHY << PHY_CNTL2_SC_SHIFT) |
		(SWITCH_PORTS_PHY << PHY_CNTL2_DC_SHIFT) |
		(SWITCH_PORTS_PHY << PHY_CNTL2_PHYR_SHIFT) |
		(SWITCH_PORTS_PHY << PHY_CNTL2_AMDIX_SHIFT) |
	/* NOTE(review): SW_WRITE_REG(PHY_CNTL2, t) is spelled differently
	 * from the sw_write_reg(SWITCH_REG_...) accessor used elsewhere in
	 * this file — confirm which macro the full file defines. */
	SW_WRITE_REG(PHY_CNTL2, t);

	t = sw_read_reg(SWITCH_REG_PHY_CNTL3);
	sw_write_reg(SWITCH_REG_PHY_CNTL3, t);

	/* Force all the packets from all ports are low priority */
	sw_write_reg(SWITCH_REG_PRI_CNTL, 0);

	/* begin with every interrupt masked and acknowledged */
	sw_int_mask(SWITCH_INTS_ALL);
	sw_int_ack(SWITCH_INTS_ALL);

	err = adm5120_switch_rx_ring_alloc();

	err = adm5120_switch_tx_ring_alloc();

	adm5120_switch_tx_ring_reset(txl_descs, txl_skbuff, TX_RING_SIZE);
	adm5120_switch_rx_ring_reset(rxl_descs, rxl_skbuff, RX_RING_SIZE);

	/* point the switch at the low-priority rings; high-priority rings
	 * are unused */
	sw_write_reg(SWITCH_REG_SHDA, 0);
	sw_write_reg(SWITCH_REG_SLDA, KSEG1ADDR(txl_descs));
	sw_write_reg(SWITCH_REG_RHDA, 0);
	sw_write_reg(SWITCH_REG_RLDA, KSEG1ADDR(rxl_descs));

	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		struct net_device *dev;
		struct adm5120_if_priv *priv;

		dev = adm5120_if_alloc();

		adm5120_devs[i] = dev;
		priv = netdev_priv(dev);

		priv->port_mask = adm5120_eth_vlans[i];

		memcpy(dev->dev_addr, adm5120_eth_macs[i], 6);
		adm5120_write_mac(dev);

		err = register_netdev(dev);
			SW_INFO("%s register failed, error=%d\n",

	/* setup vlan/port mapping after devs are filled up */
	adm5120_set_vlan(adm5120_eth_vlans);

	/* enable CPU port */
	t = sw_read_reg(SWITCH_REG_CPUP_CONF);
	t &= ~CPUP_CONF_DCPUP;
	sw_write_reg(SWITCH_REG_CPUP_CONF, t);

	/* error path: undo everything built above */
	adm5120_switch_cleanup();

	SW_ERR("init failed\n");
/* platform_driver remove: full teardown of devices and rings. */
static int adm5120_switch_remove(struct platform_device *dev)
	adm5120_switch_cleanup();

/* NOTE(review): extract is missing the .driver.name initializer and the
 * closing brace of this struct. */
static struct platform_driver adm5120_switch_driver = {
	.probe = adm5120_switch_probe,
	.remove = adm5120_switch_remove,
1150 /* -------------------------------------------------------------------------- */
/* Module entry point: announce the driver, register on the platform bus. */
static int __init adm5120_switch_mod_init(void)
	pr_info(DRV_DESC " version " DRV_VERSION "\n");
	err = platform_driver_register(&adm5120_switch_driver);

/* Module exit point: unregister from the platform bus. */
static void __exit adm5120_switch_mod_exit(void)
	platform_driver_unregister(&adm5120_switch_driver);

module_init(adm5120_switch_mod_init);
module_exit(adm5120_switch_mod_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_VERSION(DRV_VERSION);