/*
2 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
3 * Copyright (c) 2008 Marvell Semiconductor
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
 */
11 #include <linux/delay.h>
12 #include <linux/etherdevice.h>
13 #include <linux/if_bridge.h>
14 #include <linux/jiffies.h>
15 #include <linux/list.h>
16 #include <linux/module.h>
17 #include <linux/netdevice.h>
18 #include <linux/phy.h>
20 #include "mv88e6xxx.h"
22 /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
23 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
24 * will be directly accessible on some {device address,register address}
25 * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
26 * will only respond to SMI transactions to that specific address, and
27 * an indirect addressing mechanism needs to be used to access its
 * registers.
 */
/* SMI register read path.
 * NOTE(review): this extract is line-sampled; statements (declarations,
 * braces, error checks, returns) are elided between the numbered lines.
 */
/* Poll the SMI command register until its BUSY bit clears, giving up
 * after 16 reads. */
30 static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
35 for (i = 0; i < 16; i++) {
36 ret = mdiobus_read(bus, sw_addr, SMI_CMD);
40 if ((ret & SMI_CMD_BUSY) == 0)
/* Read one switch register. The direct mdiobus_read path is taken for
 * directly-addressable switches; otherwise the indirect SMI_CMD /
 * SMI_DATA mechanism is used.
 * NOTE(review): the guard selecting the direct path (presumably
 * sw_addr == 0) is elided from this extract -- confirm in full file. */
47 int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
52 return mdiobus_read(bus, addr, reg);
54 /* Wait for the bus to become free. */
55 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
59 /* Transmit the read command. */
60 ret = mdiobus_write(bus, sw_addr, SMI_CMD,
61 SMI_CMD_OP_22_READ | (addr << 5) | reg);
65 /* Wait for the read command to complete. */
66 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
71 ret = mdiobus_read(bus, sw_addr, SMI_DATA);
78 /* Must be called with SMI mutex held */
79 static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
81 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
87 ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
91 dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
/* Public read entry point: serializes register access via smi_mutex. */
97 int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
99 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
102 mutex_lock(&ps->smi_mutex);
103 ret = _mv88e6xxx_reg_read(ds, addr, reg);
104 mutex_unlock(&ps->smi_mutex);
/* SMI register write path: data register first, then the write command,
 * bracketed by busy-waits. Mirrors the read path above in structure.
 * NOTE(review): line-sampled extract; intermediate statements elided.
 */
109 int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
115 return mdiobus_write(bus, addr, reg, val);
117 /* Wait for the bus to become free. */
118 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
122 /* Transmit the data to write. */
123 ret = mdiobus_write(bus, sw_addr, SMI_DATA, val);
127 /* Transmit the write command. */
128 ret = mdiobus_write(bus, sw_addr, SMI_CMD,
129 SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
133 /* Wait for the write command to complete. */
134 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
141 /* Must be called with SMI mutex held */
142 static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
145 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
150 dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
153 return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
/* Public write entry point: serializes register access via smi_mutex. */
156 int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
158 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
161 mutex_lock(&ps->smi_mutex);
162 ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
163 mutex_unlock(&ps->smi_mutex);
/* Priority mapping setup and switch MAC address programming.
 * NOTE(review): line-sampled extract; intermediate statements elided.
 */
/* Program the IP ToS -> priority and 802.1p -> priority map registers
 * with fixed values (four priority levels spread over the ToS range). */
168 int mv88e6xxx_config_prio(struct dsa_switch *ds)
170 /* Configure the IP ToS mapping registers. */
171 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
172 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
173 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
174 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
175 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
176 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
177 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
178 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
180 /* Configure the IEEE 802.1p priority mapping register. */
181 REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
/* Write the switch MAC address into three 16-bit global registers,
 * two bytes at a time, big-endian within each register. */
186 int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
188 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
189 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
190 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
/* Program the MAC address one byte at a time through the GLOBAL2
 * switch-MAC register, polling the BUSY bit (up to 16 reads) after
 * each byte. */
195 int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
200 for (i = 0; i < 6; i++) {
203 /* Write the MAC address byte. */
204 REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
205 GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);
207 /* Wait for the write to complete. */
208 for (j = 0; j < 16; j++) {
209 ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
210 if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
/* Thin PHY access helpers: a PHY register access is just a switch
 * register access at the PHY's address on this hardware. */
220 /* Must be called with phy mutex held */
221 static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
224 return mv88e6xxx_reg_read(ds, addr, regnum);
228 /* Must be called with phy mutex held */
229 static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
233 return mv88e6xxx_reg_write(ds, addr, regnum, val);
/* PHY Polling Unit (PPU) management. On chips that need it, the PPU
 * must be stopped before the CPU can touch PHY registers directly, and
 * re-enabled afterwards. Re-enabling is deferred via a timer + work
 * item so that back-to-back PHY accesses don't thrash the PPU.
 * NOTE(review): line-sampled extract; intermediate statements elided.
 */
237 #ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
/* Clear the PPU-enable bit, then poll (up to ~1s, sleeping 1-2ms per
 * iteration) until global status no longer reports "polling". */
238 static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
241 unsigned long timeout;
243 ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
244 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
245 ret & ~GLOBAL_CONTROL_PPU_ENABLE);
247 timeout = jiffies + 1 * HZ;
248 while (time_before(jiffies, timeout)) {
249 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
250 usleep_range(1000, 2000);
251 if ((ret & GLOBAL_STATUS_PPU_MASK) !=
252 GLOBAL_STATUS_PPU_POLLING)
/* Inverse of ppu_disable: set the enable bit and wait for the status
 * field to report "polling" again. */
259 static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
262 unsigned long timeout;
264 ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
265 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);
267 timeout = jiffies + 1 * HZ;
268 while (time_before(jiffies, timeout)) {
269 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
270 usleep_range(1000, 2000);
271 if ((ret & GLOBAL_STATUS_PPU_MASK) ==
272 GLOBAL_STATUS_PPU_POLLING)
/* Deferred re-enable. trylock: if someone currently holds ppu_mutex
 * they are mid-access and will reschedule the timer themselves. */
279 static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
281 struct mv88e6xxx_priv_state *ps;
283 ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
284 if (mutex_trylock(&ps->ppu_mutex)) {
/* NOTE(review): recovers the dsa_switch by assuming the priv state
 * is allocated immediately after struct dsa_switch -- confirm this
 * matches the allocator in dsa core. */
285 struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
287 if (mv88e6xxx_ppu_enable(ds) == 0)
288 ps->ppu_disabled = 0;
289 mutex_unlock(&ps->ppu_mutex);
/* Timer callback: can't sleep here, so punt to the workqueue. */
293 static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
295 struct mv88e6xxx_priv_state *ps = (void *)_ps;
297 schedule_work(&ps->ppu_work);
/* Acquire exclusive PHY access: take ppu_mutex and make sure the PPU
 * is stopped. Caller must pair with mv88e6xxx_ppu_access_put(). */
300 static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
302 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
305 mutex_lock(&ps->ppu_mutex);
307 /* If the PHY polling unit is enabled, disable it so that
308 * we can access the PHY registers. If it was already
309 * disabled, cancel the timer that is going to re-enable
312 if (!ps->ppu_disabled) {
313 ret = mv88e6xxx_ppu_disable(ds);
315 mutex_unlock(&ps->ppu_mutex);
318 ps->ppu_disabled = 1;
320 del_timer(&ps->ppu_timer);
/* Release PHY access: arm a 10ms timer to re-enable the PPU rather
 * than re-enabling synchronously. */
327 static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
329 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
331 /* Schedule a timer to re-enable the PHY polling unit. */
332 mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
333 mutex_unlock(&ps->ppu_mutex);
/* One-time init of the PPU mutex, work item and (old-style) timer. */
336 void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
338 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
340 mutex_init(&ps->ppu_mutex);
341 INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
342 init_timer(&ps->ppu_timer);
343 ps->ppu_timer.data = (unsigned long)ps;
344 ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
/* PHY read/write wrappers that bracket the register access with PPU
 * get/put. */
347 int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
351 ret = mv88e6xxx_ppu_access_get(ds);
353 ret = mv88e6xxx_reg_read(ds, addr, regnum);
354 mv88e6xxx_ppu_access_put(ds);
360 int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
365 ret = mv88e6xxx_ppu_access_get(ds);
367 ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
368 mv88e6xxx_ppu_access_put(ds);
/* Periodic link poller: for each port with an up netdev, read the port
 * status register and sync the netdev carrier state, logging speed /
 * duplex / flow-control on link-up transitions.
 * NOTE(review): line-sampled extract; intermediate statements elided.
 */
375 void mv88e6xxx_poll_link(struct dsa_switch *ds)
379 for (i = 0; i < DSA_MAX_PORTS; i++) {
380 struct net_device *dev;
381 int uninitialized_var(port_status);
392 if (dev->flags & IFF_UP) {
393 port_status = mv88e6xxx_reg_read(ds, REG_PORT(i),
398 link = !!(port_status & PORT_STATUS_LINK);
/* Link lost: turn the carrier off once (guarded by current state). */
402 if (netif_carrier_ok(dev)) {
403 netdev_info(dev, "link down\n");
404 netif_carrier_off(dev);
/* Decode speed from the status register's speed field. */
409 switch (port_status & PORT_STATUS_SPEED_MASK) {
410 case PORT_STATUS_SPEED_10:
413 case PORT_STATUS_SPEED_100:
416 case PORT_STATUS_SPEED_1000:
423 duplex = (port_status & PORT_STATUS_DUPLEX) ? 1 : 0;
424 fc = (port_status & PORT_STATUS_PAUSE_EN) ? 1 : 0;
426 if (!netif_carrier_ok(dev)) {
428 "link up, %d Mb/s, %s duplex, flow control %sabled\n",
430 duplex ? "full" : "half",
432 netif_carrier_on(dev);
/* Hardware statistics plumbing: family check, busy-wait, snapshot, and
 * 32/16-bit counter readout.
 * NOTE(review): line-sampled extract; intermediate statements elided.
 */
/* True for switch IDs in the 6352 family (6352/6172/6176). */
437 static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
439 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
442 case PORT_SWITCH_ID_6352:
443 case PORT_SWITCH_ID_6172:
444 case PORT_SWITCH_ID_6176:
/* Poll (up to 10 reads) for the stats unit's BUSY bit to clear. */
450 static int mv88e6xxx_stats_wait(struct dsa_switch *ds)
455 for (i = 0; i < 10; i++) {
456 ret = REG_READ(REG_GLOBAL, GLOBAL_STATS_OP);
457 if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
/* Latch this port's counters into the capture registers. The 6352
 * family encodes the port differently ((port + 1) << 5). */
464 static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
468 if (mv88e6xxx_6352_family(ds))
469 port = (port + 1) << 5;
471 /* Snapshot the hardware statistics counters for this port. */
472 REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP,
473 GLOBAL_STATS_OP_CAPTURE_PORT |
474 GLOBAL_STATS_OP_HIST_RX_TX | port);
476 /* Wait for the snapshotting to complete. */
477 ret = mv88e6xxx_stats_wait(ds);
/* Read one captured 32-bit counter: issue the read-captured op, wait,
 * then combine the high (bits 32) and low (bits 01) data registers. */
484 static void mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
491 ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
492 GLOBAL_STATS_OP_READ_CAPTURED |
493 GLOBAL_STATS_OP_HIST_RX_TX | stat);
497 ret = mv88e6xxx_stats_wait(ds);
501 ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
507 ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
/* Table of exported hardware counters: { ethtool name, width in bytes,
 * register offset }. Offsets >= 0x100 are read from per-port registers
 * rather than the captured stats unit (see the >= 0x100 branch in the
 * stats readout code). The last three entries are device-dependent and
 * are trimmed from the set when the device lacks them. */
514 static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
515 { "in_good_octets", 8, 0x00, },
516 { "in_bad_octets", 4, 0x02, },
517 { "in_unicast", 4, 0x04, },
518 { "in_broadcasts", 4, 0x06, },
519 { "in_multicasts", 4, 0x07, },
520 { "in_pause", 4, 0x16, },
521 { "in_undersize", 4, 0x18, },
522 { "in_fragments", 4, 0x19, },
523 { "in_oversize", 4, 0x1a, },
524 { "in_jabber", 4, 0x1b, },
525 { "in_rx_error", 4, 0x1c, },
526 { "in_fcs_error", 4, 0x1d, },
527 { "out_octets", 8, 0x0e, },
528 { "out_unicast", 4, 0x10, },
529 { "out_broadcasts", 4, 0x13, },
530 { "out_multicasts", 4, 0x12, },
531 { "out_pause", 4, 0x15, },
532 { "excessive", 4, 0x11, },
533 { "collisions", 4, 0x1e, },
534 { "deferred", 4, 0x05, },
535 { "single", 4, 0x14, },
536 { "multiple", 4, 0x17, },
537 { "out_fcs_error", 4, 0x03, },
538 { "late", 4, 0x1f, },
539 { "hist_64bytes", 4, 0x08, },
540 { "hist_65_127bytes", 4, 0x09, },
541 { "hist_128_255bytes", 4, 0x0a, },
542 { "hist_256_511bytes", 4, 0x0b, },
543 { "hist_512_1023bytes", 4, 0x0c, },
544 { "hist_1024_max_bytes", 4, 0x0d, },
545 /* Not all devices have the following counters */
546 { "sw_in_discards", 4, 0x110, },
547 { "sw_in_filtered", 2, 0x112, },
548 { "sw_out_filtered", 2, 0x113, },
/* ethtool statistics entry points. Devices listed in
 * have_sw_in_discards() export the full table; others export the table
 * minus its last three (device-dependent) entries.
 * NOTE(review): line-sampled extract; intermediate statements elided.
 */
552 static bool have_sw_in_discards(struct dsa_switch *ds)
554 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
557 case PORT_SWITCH_ID_6095: case PORT_SWITCH_ID_6161:
558 case PORT_SWITCH_ID_6165: case PORT_SWITCH_ID_6171:
559 case PORT_SWITCH_ID_6172: case PORT_SWITCH_ID_6176:
560 case PORT_SWITCH_ID_6182: case PORT_SWITCH_ID_6185:
561 case PORT_SWITCH_ID_6352:
/* Copy the first nr_stats counter names into the ethtool strings
 * buffer, ETH_GSTRING_LEN bytes apart. */
568 static void _mv88e6xxx_get_strings(struct dsa_switch *ds,
570 struct mv88e6xxx_hw_stat *stats,
571 int port, uint8_t *data)
575 for (i = 0; i < nr_stats; i++) {
576 memcpy(data + i * ETH_GSTRING_LEN,
577 stats[i].string, ETH_GSTRING_LEN);
/* Snapshot the port's counters under stats_mutex, then read each one:
 * per-port registers for offsets >= 0x100 (16-bit, with a second read
 * for 4-byte stats), captured stats unit otherwise (second 32-bit word
 * for 8-byte stats). */
581 static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
583 struct mv88e6xxx_hw_stat *stats,
584 int port, uint64_t *data)
586 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
590 mutex_lock(&ps->stats_mutex);
592 ret = mv88e6xxx_stats_snapshot(ds, port);
594 mutex_unlock(&ps->stats_mutex);
598 /* Read each of the counters. */
599 for (i = 0; i < nr_stats; i++) {
600 struct mv88e6xxx_hw_stat *s = stats + i;
604 if (s->reg >= 0x100) {
605 ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
610 if (s->sizeof_stat == 4) {
611 ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
617 data[i] = (((u64)high) << 16) | low;
620 mv88e6xxx_stats_read(ds, s->reg, &low);
621 if (s->sizeof_stat == 8)
622 mv88e6xxx_stats_read(ds, s->reg + 1, &high);
624 data[i] = (((u64)high) << 32) | low;
627 mutex_unlock(&ps->stats_mutex);
630 /* All the statistics in the table */
632 mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
634 if (have_sw_in_discards(ds))
635 _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
636 mv88e6xxx_hw_stats, port, data);
638 _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
639 mv88e6xxx_hw_stats, port, data);
/* Number of exported stats: full table, or table minus the three
 * device-dependent trailing entries. */
642 int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
644 if (have_sw_in_discards(ds))
645 return ARRAY_SIZE(mv88e6xxx_hw_stats);
646 return ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
650 mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
651 int port, uint64_t *data)
653 if (have_sw_in_discards(ds))
654 _mv88e6xxx_get_ethtool_stats(
655 ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
656 mv88e6xxx_hw_stats, port, data);
658 _mv88e6xxx_get_ethtool_stats(
659 ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
660 mv88e6xxx_hw_stats, port, data);
/* ethtool register dump: a port's register space is exported as 32
 * 16-bit registers; unreadable entries are left as 0xffff. */
663 int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
665 return 32 * sizeof(u16);
668 void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
669 struct ethtool_regs *regs, void *_p)
676 memset(p, 0xff, 32 * sizeof(u16));
678 for (i = 0; i < 32; i++) {
681 ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
687 #ifdef CONFIG_NET_DSA_HWMON
/* HWMON temperature read via PHY 0's page-6 temperature sensor:
 * select page 6, set the sensor-enable bit (reg 0x1a bit 5), let the
 * reading settle, sample, disable the sensor, and restore page 0.
 * The raw 5-bit value is converted as ((val & 0x1f) - 5) * 5 degrees C.
 * NOTE(review): line-sampled extract; error-handling lines elided. */
689 int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
691 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
697 mutex_lock(&ps->phy_mutex);
699 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
703 /* Enable temperature sensor */
704 ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
708 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
712 /* Wait for temperature to stabilize */
713 usleep_range(10000, 12000);
715 val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
721 /* Disable temperature sensor */
722 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
726 *temp = ((val & 0x1f) - 5) * 5;
729 _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
730 mutex_unlock(&ps->phy_mutex);
733 #endif /* CONFIG_NET_DSA_HWMON */
/* Generic bounded busy-waits (~100ms, 1-2ms sleeps per poll) on a
 * register's mask bits, in locked (leading-underscore) and unlocked
 * flavors, plus named wrappers for the SMI, EEPROM and ATU units.
 * NOTE(review): line-sampled extract; the mask test and return lines
 * are elided. */
735 static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
737 unsigned long timeout = jiffies + HZ / 10;
739 while (time_before(jiffies, timeout)) {
742 ret = REG_READ(reg, offset);
746 usleep_range(1000, 2000);
751 int mv88e6xxx_phy_wait(struct dsa_switch *ds)
753 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
754 GLOBAL2_SMI_OP_BUSY);
757 int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
759 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
760 GLOBAL2_EEPROM_OP_LOAD);
763 int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
765 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
766 GLOBAL2_EEPROM_OP_BUSY);
769 /* Must be called with SMI lock held */
770 static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
772 unsigned long timeout = jiffies + HZ / 10;
774 while (time_before(jiffies, timeout)) {
777 ret = _mv88e6xxx_reg_read(ds, reg, offset);
783 usleep_range(1000, 2000);
788 /* Must be called with SMI lock held */
789 static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
791 return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
/* Indirect PHY access through the GLOBAL2 SMI_OP/SMI_DATA registers:
 * issue the clause-22 op with the PHY address in bits 9:5 and the
 * register number in bits 4:0, wait for completion, then move data. */
795 /* Must be called with phy mutex held */
796 static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
801 REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_OP,
802 GLOBAL2_SMI_OP_22_READ | (addr << 5) | regnum);
804 ret = mv88e6xxx_phy_wait(ds);
808 return REG_READ(REG_GLOBAL2, GLOBAL2_SMI_DATA);
811 /* Must be called with phy mutex held */
812 static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
/* Data goes in first, then the write command. */
815 REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
816 REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_OP,
817 GLOBAL2_SMI_OP_22_WRITE | (addr << 5) | regnum);
819 return mv88e6xxx_phy_wait(ds);
/* Energy-Efficient Ethernet get/set, backed by indirect PHY register 16
 * (bit 0x0200 = EEE enabled, bit 0x0100 = tx LPI) and the port status
 * register's EEE bit for the "active" state.
 * NOTE(review): line-sampled extract; error paths and the bit-setting
 * statements in set_eee are elided. */
822 int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
824 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
827 mutex_lock(&ps->phy_mutex);
829 reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
833 e->eee_enabled = !!(reg & 0x0200);
834 e->tx_lpi_enabled = !!(reg & 0x0100);
836 reg = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
840 e->eee_active = !!(reg & PORT_STATUS_EEE);
844 mutex_unlock(&ps->phy_mutex);
/* Read-modify-write of the same PHY register under phy_mutex. */
848 int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
849 struct phy_device *phydev, struct ethtool_eee *e)
851 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
855 mutex_lock(&ps->phy_mutex);
857 ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
864 if (e->tx_lpi_enabled)
867 ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
869 mutex_unlock(&ps->phy_mutex);
/* Issue an ATU operation against a given FID: write the FID register
 * (global reg 0x01), then the op, then wait for completion. SMI lock
 * must be held (leading-underscore convention in this file). */
874 static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd)
878 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x01, fid);
882 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
886 return _mv88e6xxx_atu_wait(ds);
/* Flush all non-static entries in one FID's address database. */
889 static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
893 ret = _mv88e6xxx_atu_wait(ds);
897 return _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_FLUSH_NON_STATIC_DB)<br></br>;
/* Move a port to a new STP state via a read-modify-write of the port
 * control register, flushing the port's FID when transitioning out of
 * learning/forwarding into a non-learning state.
 * NOTE(review): line-sampled extract; error paths elided. */
900 static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
902 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
906 mutex_lock(&ps->smi_mutex);
908 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
914 oldstate = reg & PORT_CONTROL_STATE_MASK;
915 if (oldstate != state) {
916 /* Flush forwarding database if we're moving a port
917 * from Learning or Forwarding state to Disabled or
918 * Blocking or Listening state.
920 if (oldstate >= PORT_CONTROL_STATE_LEARNING &&
921 state <= PORT_CONTROL_STATE_BLOCKING) {
922 ret = _mv88e6xxx_flush_fid(ds, ps->fid[port]);
926 reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
927 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
932 mutex_unlock(&ps->smi_mutex);
/* Recompute port-based VLAN maps. The CPU port may reach every port;
 * a normal port may reach its bridge peers (same FID) plus the
 * upstream port, never itself.
 * NOTE(review): line-sampled extract; loop header and error paths in
 * _update_bridge_config are elided. */
936 /* Must be called with smi lock held */
937 static int _mv88e6xxx_update_port_config(struct dsa_switch *ds, int port)
939 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
940 u8 fid = ps->fid[port];
943 if (dsa_is_cpu_port(ds, port))
944 reg |= ds->phys_port_mask;
946 reg |= (ps->bridge_mask[fid] |
947 (1 << dsa_upstream_port(ds))) & ~(1 << port);
949 return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
952 /* Must be called with smi lock held */
/* Re-apply the port config for every port in the given FID, then flush
 * that FID's learned entries. */
953 static int _mv88e6xxx_update_bridge_config(struct dsa_switch *ds, int fid)
955 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
960 mask = ds->phys_port_mask;
963 mask &= ~(1 << port);
964 if (ps->fid[port] != fid)
967 ret = _mv88e6xxx_update_port_config(ds, port);
972 return _mv88e6xxx_flush_fid(ds, fid);
975 /* Bridge handling functions */
/* Join a port to a bridge group: adopt the FID of an existing member
 * (or keep a fresh one), verify the kernel's port mask matches our
 * book-keeping, then rewrite the affected VLAN maps under smi_mutex.
 * NOTE(review): line-sampled extract; intermediate statements elided. */
977 int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
979 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
984 /* If the bridge group is not empty, join that group.
985 * Otherwise create a new group.
988 nmask = br_port_mask & ~(1 << port);
990 fid = ps->fid[__ffs(nmask)];
992 nmask = ps->bridge_mask[fid] | (1 << port);
993 if (nmask != br_port_mask) {
994 netdev_err(ds->ports[port],
995 "join: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
996 fid, br_port_mask, nmask);
1000 mutex_lock(&ps->smi_mutex);
1002 ps->bridge_mask[fid] = br_port_mask;
/* If the port changes FID, return its old FID to the free pool. */
1004 if (fid != ps->fid[port]) {
1005 ps->fid_mask |= 1 << ps->fid[port];
1006 ps->fid[port] = fid;
1007 ret = _mv88e6xxx_update_bridge_config(ds, fid);
1010 mutex_unlock(&ps->smi_mutex);
/* Remove a port from its bridge group: give it a fresh FID from
 * fid_mask and rebuild both the old and new groups' configs.
 * NOTE(review): `ps->fid_mask &= (1 << newfid)` (line 1041) looks like
 * it should be `&= ~(1 << newfid)` to *clear* the allocated bit; as
 * written it discards all other free FIDs. Can't fix here (extract is
 * incomplete) -- verify against upstream history. */
1015 int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
1017 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1021 fid = ps->fid[port];
1023 if (ps->bridge_mask[fid] != br_port_mask) {
1024 netdev_err(ds->ports[port],
1025 "leave: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
1026 fid, br_port_mask, ps->bridge_mask[fid]);
1030 /* If the port was the last port of a bridge, we are done.
1031 * Otherwise assign a new fid to the port, and fix up
1032 * the bridge configuration.
1034 if (br_port_mask == (1 << port))
1037 mutex_lock(&ps->smi_mutex);
1039 newfid = __ffs(ps->fid_mask);
1040 ps->fid[port] = newfid;
1041 ps->fid_mask &= (1 << newfid);
1042 ps->bridge_mask[fid] &= ~(1 << port);
1043 ps->bridge_mask[newfid] = 1 << port;
1045 ret = _mv88e6xxx_update_bridge_config(ds, fid);
1047 ret = _mv88e6xxx_update_bridge_config(ds, newfid);
1049 mutex_unlock(&ps->smi_mutex);
/* Map bridge STP states onto the hardware port-control states, then
 * defer the register write to the bridge work item because this hook
 * may run with softirqs disabled (see comment at line 1078). */
1054 int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
1056 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1060 case BR_STATE_DISABLED:
1061 stp_state = PORT_CONTROL_STATE_DISABLED;
1063 case BR_STATE_BLOCKING:
1064 case BR_STATE_LISTENING:
1065 stp_state = PORT_CONTROL_STATE_BLOCKING;
1067 case BR_STATE_LEARNING:
1068 stp_state = PORT_CONTROL_STATE_LEARNING;
1070 case BR_STATE_FORWARDING:
1072 stp_state = PORT_CONTROL_STATE_FORWARDING;
1076 netdev_dbg(ds->ports[port], "port state %d [%d]\n", state, stp_state);
1078 /* mv88e6xxx_port_stp_update may be called with softirqs disabled,
1079 * so we can not update the port state directly but need to schedule it.
1081 ps->port_state[port] = stp_state;
1082 set_bit(port, &ps->port_state_update_mask);
1083 schedule_work(&ps->bridge_work);
/* ATU MAC-address staging: the 6-byte MAC is moved to/from three
 * consecutive 16-bit global registers (GLOBAL_ATU_MAC_01..45), two
 * bytes per register, high byte first. SMI lock must be held. */
1088 static int __mv88e6xxx_write_addr(struct dsa_switch *ds,
1089 const unsigned char *addr)
1093 for (i = 0; i < 3; i++) {
1094 ret = _mv88e6xxx_reg_write(
1095 ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
1096 (addr[i * 2] << 8) | addr[i * 2 + 1]);
1104 static int __mv88e6xxx_read_addr(struct dsa_switch *ds, unsigned char *addr)
1108 for (i = 0; i < 3; i++) {
1109 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1110 GLOBAL_ATU_MAC_01 + i);
1113 addr[i * 2] = ret >> 8;
1114 addr[i * 2 + 1] = ret & 0xff;
/* Load one FDB entry for a port into its FID's database: stage the MAC,
 * write the port vector (bit port in the 0x10-shifted field) and entry
 * state, then issue the LOAD_DB op. SMI lock must be held. */
1120 static int __mv88e6xxx_port_fdb_cmd(struct dsa_switch *ds, int port,
1121 const unsigned char *addr, int state)
1123 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1124 u8 fid = ps->fid[port];
1127 ret = _mv88e6xxx_atu_wait(ds);
1131 ret = __mv88e6xxx_write_addr(ds, addr);
1135 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA,
1136 (0x10 << port) | state);
1140 ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_LOAD_DB);
/* Public FDB operations, each taking smi_mutex around the low-level
 * __port_fdb_cmd / __port_getnext helpers.
 * NOTE(review): the vid parameter is accepted but not used by the
 * visible code -- per-VLAN databases are presumably not supported at
 * this point. Line-sampled extract; intermediate statements elided. */
/* Add a static entry; multicast addresses use the MC_STATIC state. */
1145 int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
1146 const unsigned char *addr, u16 vid)
1148 int state = is_multicast_ether_addr(addr) ?
1149 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1150 GLOBAL_ATU_DATA_STATE_UC_STATIC;
1151 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1154 mutex_lock(&ps->smi_mutex);
1155 ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr, state);
1156 mutex_unlock(&ps->smi_mutex);
/* Delete = load the entry with the UNUSED state. */
1161 int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
1162 const unsigned char *addr, u16 vid)
1164 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1167 mutex_lock(&ps->smi_mutex);
1168 ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr,
1169 GLOBAL_ATU_DATA_STATE_UNUSED);
1170 mutex_unlock(&ps->smi_mutex);
/* Iterate GET_NEXT_DB from the staged address until an in-use entry
 * whose port vector includes this port is found; addr is updated in
 * place and *is_static reports whether the entry state is the static
 * state for its address class. */
1175 static int __mv88e6xxx_port_getnext(struct dsa_switch *ds, int port,
1176 unsigned char *addr, bool *is_static)
1178 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1179 u8 fid = ps->fid[port];
1182 ret = _mv88e6xxx_atu_wait(ds);
1186 ret = __mv88e6xxx_write_addr(ds, addr);
1191 ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
1195 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
1198 state = ret & GLOBAL_ATU_DATA_STATE_MASK;
1199 if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
1201 } while (!(((ret >> 4) & 0xff) & (1 << port)));
1203 ret = __mv88e6xxx_read_addr(ds, addr);
1207 *is_static = state == (is_multicast_ether_addr(addr) ?
1208 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1209 GLOBAL_ATU_DATA_STATE_UC_STATIC);
1214 /* get next entry for port */
1215 int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
1216 unsigned char *addr, bool *is_static)
1218 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1221 mutex_lock(&ps->smi_mutex);
1222 ret = __mv88e6xxx_port_getnext(ds, port, addr, is_static);
1223 mutex_unlock(&ps->smi_mutex);
/* Work item that drains port_state_update_mask, applying each deferred
 * STP state change queued by mv88e6xxx_port_stp_update().
 * NOTE(review): like the PPU work item, this recovers the dsa_switch
 * by assuming priv storage immediately follows struct dsa_switch --
 * confirm with the dsa core allocator. */
1228 static void mv88e6xxx_bridge_work(struct work_struct *work)
1230 struct mv88e6xxx_priv_state *ps;
1231 struct dsa_switch *ds;
1234 ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
1235 ds = ((struct dsa_switch *)ps) - 1;
1237 while (ps->port_state_update_mask) {
1238 port = __ffs(ps->port_state_update_mask);
1239 clear_bit(port, &ps->port_state_update_mask);
1240 mv88e6xxx_set_port_state(ds, port, ps->port_state[port]);
/* Common per-port and per-switch setup shared by all chip drivers.
 * NOTE(review): line-sampled extract; intermediate statements elided.
 */
/* Per-port defaults: no trunking / learning messages, a private FID
 * allocated from fid_mask, a port-based VLAN map, and no default VLAN
 * ID with packet priority zero. */
1244 int mv88e6xxx_setup_port_common(struct dsa_switch *ds, int port)
1246 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1249 mutex_lock(&ps->smi_mutex);
1251 /* Port Control 1: disable trunking, disable sending
1252 * learning messages to this port.
1254 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
1258 /* Port based VLAN map: give each port its own address
1259 * database, allow the CPU port to talk to each of the 'real'
1260 * ports, and allow each of the 'real' ports to only talk to
1261 * the upstream port.
1263 fid = __ffs(ps->fid_mask);
1264 ps->fid[port] = fid;
1265 ps->fid_mask &= ~(1 << fid);
1267 if (!dsa_is_cpu_port(ds, port))
1268 ps->bridge_mask[fid] = 1 << port;
1270 ret = _mv88e6xxx_update_port_config(ds, port);
1274 /* Default VLAN ID and priority: don't set a default VLAN
1275 * ID, and set the default packet priority to zero.
1277 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
1280 mutex_unlock(&ps->smi_mutex);
/* Per-switch init: mutexes, cached switch ID (masked to the family
 * bits), the free-FID pool, and the deferred bridge work item. */
1284 int mv88e6xxx_setup_common(struct dsa_switch *ds)
1286 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1288 mutex_init(&ps->smi_mutex);
1289 mutex_init(&ps->stats_mutex);
1290 mutex_init(&ps->phy_mutex);
1292 ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
1294 ps->fid_mask = (1 << DSA_MAX_PORTS) - 1;
1296 INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
/* Full switch reset: disable all ports, let the TX queues drain, issue
 * the software reset (0xc000 keeps the PPU active for indirect PHY
 * access via global regs 0x18/0x19; 0xc400 resets it too), then poll
 * up to 1s for the reset-complete bits. The expected completion mask
 * differs depending on whether the PPU was kept active. */
1301 int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
1303 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1304 u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
1305 unsigned long timeout;
1309 /* Set all ports to the disabled state. */
1310 for (i = 0; i < ps->num_ports; i++) {
1311 ret = REG_READ(REG_PORT(i), PORT_CONTROL);
1312 REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
1315 /* Wait for transmit queues to drain. */
1316 usleep_range(2000, 4000);
1318 /* Reset the switch. Keep the PPU active if requested. The PPU
1319 * needs to be active to support indirect phy register access
1320 * through global registers 0x18 and 0x19.
1323 REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
1325 REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
1327 /* Wait up to one second for reset to complete. */
1328 timeout = jiffies + 1 * HZ;
1329 while (time_before(jiffies, timeout)) {
1330 ret = REG_READ(REG_GLOBAL, 0x00);
1331 if ((ret & is_reset) == is_reset)
1333 usleep_range(1000, 2000);
1335 if (time_after(jiffies, timeout))
/* Paged PHY register access: select the page via PHY register 0x16,
 * do the access, then always restore page 0 -- all under phy_mutex. */
1341 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
1343 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1346 mutex_lock(&ps->phy_mutex);
1347 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
1350 ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
1352 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
1353 mutex_unlock(&ps->phy_mutex);
1357 int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
1360 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1363 mutex_lock(&ps->phy_mutex);
1364 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
1368 ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
1370 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
1371 mutex_unlock(&ps->phy_mutex);
/* Port -> PHY address mapping and the mutex-protected public PHY
 * accessors (direct and indirect flavors).
 * NOTE(review): line-sampled extract; the valid-range return value and
 * the invalid-addr early-return paths are elided. */
1375 static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
1377 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1379 if (port >= 0 && port < ps->num_ports)
1385 mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
1387 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1388 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
1394 mutex_lock(&ps->phy_mutex);
1395 ret = _mv88e6xxx_phy_read(ds, addr, regnum);
1396 mutex_unlock(&ps->phy_mutex);
1401 mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
1403 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1404 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
1410 mutex_lock(&ps->phy_mutex);
1411 ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
1412 mutex_unlock(&ps->phy_mutex);
1417 mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
1419 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1420 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
1426 mutex_lock(&ps->phy_mutex);
1427 ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
1428 mutex_unlock(&ps->phy_mutex);
1433 mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
1436 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1437 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
1443 mutex_lock(&ps->phy_mutex);
1444 ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
1445 mutex_unlock(&ps->phy_mutex);
/* Module entry/exit: register each config-enabled per-chip switch
 * driver; unregister in reverse order on exit. */
1449 static int __init mv88e6xxx_init(void)
1451 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
1452 register_switch_driver(&mv88e6131_switch_driver);
1454 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
1455 register_switch_driver(&mv88e6123_61_65_switch_driver);
1457 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
1458 register_switch_driver(&mv88e6352_switch_driver);
1460 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
1461 register_switch_driver(&mv88e6171_switch_driver);
1465 module_init(mv88e6xxx_init);
1467 static void __exit mv88e6xxx_cleanup(void)
1469 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
1470 unregister_switch_driver(&mv88e6171_switch_driver);
1472 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
1473 unregister_switch_driver(&mv88e6123_61_65_switch_driver);
1475 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
1476 unregister_switch_driver(&mv88e6131_switch_driver);
1479 module_exit(mv88e6xxx_cleanup);
1481 MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
1482 MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
1483 MODULE_LICENSE("GPL");