/*
 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
 * Copyright (c) 2008 Marvell Semiconductor
 *
 * Copyright (c) 2015 CMC Electronics, Inc.
 *	Added support for VLAN Table Unit operations
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/seq_file.h>

#include "mv88e6xxx.h"
28 /* MDIO bus access can be nested in the case of PHYs connected to the
29 * internal MDIO bus of the switch, which is accessed via MDIO bus of
30 * the Ethernet interface. Avoid lockdep false positives by using
31 * mutex_lock_nested().
33 static int mv88e6xxx_mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
37 mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
38 ret = bus->read(bus, addr, regnum);
39 mutex_unlock(&bus->mdio_lock);
44 static int mv88e6xxx_mdiobus_write(struct mii_bus *bus, int addr, u32 regnum,
49 mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
50 ret = bus->write(bus, addr, regnum, val);
51 mutex_unlock(&bus->mdio_lock);
56 /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
57 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
58 * will be directly accessible on some {device address,register address}
59 * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
60 * will only respond to SMI transactions to that specific address, and
61 * an indirect addressing mechanism needs to be used to access its
64 static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
69 for (i = 0; i < 16; i++) {
70 ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_CMD);
74 if ((ret & SMI_CMD_BUSY) == 0)
81 int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
86 return mv88e6xxx_mdiobus_read(bus, addr, reg);
88 /* Wait for the bus to become free. */
89 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
93 /* Transmit the read command. */
94 ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
95 SMI_CMD_OP_22_READ | (addr << 5) | reg);
99 /* Wait for the read command to complete. */
100 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
105 ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_DATA);
112 /* Must be called with SMI mutex held */
113 static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
115 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
121 ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
125 dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
131 int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
133 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
136 mutex_lock(&ps->smi_mutex);
137 ret = _mv88e6xxx_reg_read(ds, addr, reg);
138 mutex_unlock(&ps->smi_mutex);
143 int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
149 return mv88e6xxx_mdiobus_write(bus, addr, reg, val);
151 /* Wait for the bus to become free. */
152 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
156 /* Transmit the data to write. */
157 ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_DATA, val);
161 /* Transmit the write command. */
162 ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
163 SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
167 /* Wait for the write command to complete. */
168 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
175 /* Must be called with SMI mutex held */
176 static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
179 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
184 dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
187 return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
190 int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
192 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
195 mutex_lock(&ps->smi_mutex);
196 ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
197 mutex_unlock(&ps->smi_mutex);
202 int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
204 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
205 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
206 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
211 int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
216 for (i = 0; i < 6; i++) {
219 /* Write the MAC address byte. */
220 REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
221 GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);
223 /* Wait for the write to complete. */
224 for (j = 0; j < 16; j++) {
225 ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
226 if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
{
	/* addr < 0 means no PHY at this port; mimic an absent PHY. */
	if (addr >= 0)
		return _mv88e6xxx_reg_read(ds, addr, regnum);
	return 0xffff;
}
244 /* Must be called with SMI mutex held */
245 static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
249 return _mv88e6xxx_reg_write(ds, addr, regnum, val);
253 #ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
254 static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
257 unsigned long timeout;
259 ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
260 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
261 ret & ~GLOBAL_CONTROL_PPU_ENABLE);
263 timeout = jiffies + 1 * HZ;
264 while (time_before(jiffies, timeout)) {
265 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
266 usleep_range(1000, 2000);
267 if ((ret & GLOBAL_STATUS_PPU_MASK) !=
268 GLOBAL_STATUS_PPU_POLLING)
275 static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
278 unsigned long timeout;
280 ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
281 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);
283 timeout = jiffies + 1 * HZ;
284 while (time_before(jiffies, timeout)) {
285 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
286 usleep_range(1000, 2000);
287 if ((ret & GLOBAL_STATUS_PPU_MASK) ==
288 GLOBAL_STATUS_PPU_POLLING)
295 static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
297 struct mv88e6xxx_priv_state *ps;
299 ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
300 if (mutex_trylock(&ps->ppu_mutex)) {
301 struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
303 if (mv88e6xxx_ppu_enable(ds) == 0)
304 ps->ppu_disabled = 0;
305 mutex_unlock(&ps->ppu_mutex);
309 static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
311 struct mv88e6xxx_priv_state *ps = (void *)_ps;
313 schedule_work(&ps->ppu_work);
316 static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
318 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
321 mutex_lock(&ps->ppu_mutex);
323 /* If the PHY polling unit is enabled, disable it so that
324 * we can access the PHY registers. If it was already
325 * disabled, cancel the timer that is going to re-enable
328 if (!ps->ppu_disabled) {
329 ret = mv88e6xxx_ppu_disable(ds);
331 mutex_unlock(&ps->ppu_mutex);
334 ps->ppu_disabled = 1;
336 del_timer(&ps->ppu_timer);
343 static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
345 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
347 /* Schedule a timer to re-enable the PHY polling unit. */
348 mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
349 mutex_unlock(&ps->ppu_mutex);
352 void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
354 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
356 mutex_init(&ps->ppu_mutex);
357 INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
358 init_timer(&ps->ppu_timer);
359 ps->ppu_timer.data = (unsigned long)ps;
360 ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
/* Read a PHY register with the PPU temporarily disabled. */
int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
{
	int ret;

	ret = mv88e6xxx_ppu_access_get(ds);
	if (ret >= 0) {
		ret = mv88e6xxx_reg_read(ds, addr, regnum);
		mv88e6xxx_ppu_access_put(ds);
	}

	return ret;
}
376 int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
381 ret = mv88e6xxx_ppu_access_get(ds);
383 ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
384 mv88e6xxx_ppu_access_put(ds);
391 static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
393 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
396 case PORT_SWITCH_ID_6031:
397 case PORT_SWITCH_ID_6061:
398 case PORT_SWITCH_ID_6035:
399 case PORT_SWITCH_ID_6065:
405 static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
407 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
410 case PORT_SWITCH_ID_6092:
411 case PORT_SWITCH_ID_6095:
417 static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
419 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
422 case PORT_SWITCH_ID_6046:
423 case PORT_SWITCH_ID_6085:
424 case PORT_SWITCH_ID_6096:
425 case PORT_SWITCH_ID_6097:
431 static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
433 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
436 case PORT_SWITCH_ID_6123:
437 case PORT_SWITCH_ID_6161:
438 case PORT_SWITCH_ID_6165:
444 static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
446 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
449 case PORT_SWITCH_ID_6121:
450 case PORT_SWITCH_ID_6122:
451 case PORT_SWITCH_ID_6152:
452 case PORT_SWITCH_ID_6155:
453 case PORT_SWITCH_ID_6182:
454 case PORT_SWITCH_ID_6185:
455 case PORT_SWITCH_ID_6108:
456 case PORT_SWITCH_ID_6131:
462 static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
464 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
467 case PORT_SWITCH_ID_6320:
468 case PORT_SWITCH_ID_6321:
474 static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
476 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
479 case PORT_SWITCH_ID_6171:
480 case PORT_SWITCH_ID_6175:
481 case PORT_SWITCH_ID_6350:
482 case PORT_SWITCH_ID_6351:
488 static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
490 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
493 case PORT_SWITCH_ID_6172:
494 case PORT_SWITCH_ID_6176:
495 case PORT_SWITCH_ID_6240:
496 case PORT_SWITCH_ID_6352:
502 /* We expect the switch to perform auto negotiation if there is a real
503 * phy. However, in the case of a fixed link phy, we force the port
504 * settings from the fixed link settings.
506 void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
507 struct phy_device *phydev)
509 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
513 if (!phy_is_pseudo_fixed_link(phydev))
516 mutex_lock(&ps->smi_mutex);
518 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
522 reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
523 PORT_PCS_CTRL_FORCE_LINK |
524 PORT_PCS_CTRL_DUPLEX_FULL |
525 PORT_PCS_CTRL_FORCE_DUPLEX |
526 PORT_PCS_CTRL_UNFORCED);
528 reg |= PORT_PCS_CTRL_FORCE_LINK;
530 reg |= PORT_PCS_CTRL_LINK_UP;
532 if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100)
535 switch (phydev->speed) {
537 reg |= PORT_PCS_CTRL_1000;
540 reg |= PORT_PCS_CTRL_100;
543 reg |= PORT_PCS_CTRL_10;
546 pr_info("Unknown speed");
550 reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
551 if (phydev->duplex == DUPLEX_FULL)
552 reg |= PORT_PCS_CTRL_DUPLEX_FULL;
554 if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) &&
555 (port >= ps->num_ports - 2)) {
556 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
557 reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
558 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
559 reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
560 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
561 reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
562 PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
564 _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg);
567 mutex_unlock(&ps->smi_mutex);
570 /* Must be called with SMI mutex held */
571 static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
576 for (i = 0; i < 10; i++) {
577 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
578 if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
585 /* Must be called with SMI mutex held */
586 static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
590 if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
591 port = (port + 1) << 5;
593 /* Snapshot the hardware statistics counters for this port. */
594 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
595 GLOBAL_STATS_OP_CAPTURE_PORT |
596 GLOBAL_STATS_OP_HIST_RX_TX | port);
600 /* Wait for the snapshotting to complete. */
601 ret = _mv88e6xxx_stats_wait(ds);
608 /* Must be called with SMI mutex held */
609 static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
616 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
617 GLOBAL_STATS_OP_READ_CAPTURED |
618 GLOBAL_STATS_OP_HIST_RX_TX | stat);
622 ret = _mv88e6xxx_stats_wait(ds);
626 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
632 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
639 static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
640 { "in_good_octets", 8, 0x00, },
641 { "in_bad_octets", 4, 0x02, },
642 { "in_unicast", 4, 0x04, },
643 { "in_broadcasts", 4, 0x06, },
644 { "in_multicasts", 4, 0x07, },
645 { "in_pause", 4, 0x16, },
646 { "in_undersize", 4, 0x18, },
647 { "in_fragments", 4, 0x19, },
648 { "in_oversize", 4, 0x1a, },
649 { "in_jabber", 4, 0x1b, },
650 { "in_rx_error", 4, 0x1c, },
651 { "in_fcs_error", 4, 0x1d, },
652 { "out_octets", 8, 0x0e, },
653 { "out_unicast", 4, 0x10, },
654 { "out_broadcasts", 4, 0x13, },
655 { "out_multicasts", 4, 0x12, },
656 { "out_pause", 4, 0x15, },
657 { "excessive", 4, 0x11, },
658 { "collisions", 4, 0x1e, },
659 { "deferred", 4, 0x05, },
660 { "single", 4, 0x14, },
661 { "multiple", 4, 0x17, },
662 { "out_fcs_error", 4, 0x03, },
663 { "late", 4, 0x1f, },
664 { "hist_64bytes", 4, 0x08, },
665 { "hist_65_127bytes", 4, 0x09, },
666 { "hist_128_255bytes", 4, 0x0a, },
667 { "hist_256_511bytes", 4, 0x0b, },
668 { "hist_512_1023bytes", 4, 0x0c, },
669 { "hist_1024_max_bytes", 4, 0x0d, },
670 /* Not all devices have the following counters */
671 { "sw_in_discards", 4, 0x110, },
672 { "sw_in_filtered", 2, 0x112, },
673 { "sw_out_filtered", 2, 0x113, },
677 static bool have_sw_in_discards(struct dsa_switch *ds)
679 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
682 case PORT_SWITCH_ID_6095: case PORT_SWITCH_ID_6161:
683 case PORT_SWITCH_ID_6165: case PORT_SWITCH_ID_6171:
684 case PORT_SWITCH_ID_6172: case PORT_SWITCH_ID_6176:
685 case PORT_SWITCH_ID_6182: case PORT_SWITCH_ID_6185:
686 case PORT_SWITCH_ID_6352:
693 static void _mv88e6xxx_get_strings(struct dsa_switch *ds,
695 struct mv88e6xxx_hw_stat *stats,
696 int port, uint8_t *data)
700 for (i = 0; i < nr_stats; i++) {
701 memcpy(data + i * ETH_GSTRING_LEN,
702 stats[i].string, ETH_GSTRING_LEN);
706 static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
708 struct mv88e6xxx_hw_stat *stats,
711 struct mv88e6xxx_hw_stat *s = stats + stat;
717 if (s->reg >= 0x100) {
718 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
724 if (s->sizeof_stat == 4) {
725 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
732 _mv88e6xxx_stats_read(ds, s->reg, &low);
733 if (s->sizeof_stat == 8)
734 _mv88e6xxx_stats_read(ds, s->reg + 1, &high);
736 value = (((u64)high) << 16) | low;
740 static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
742 struct mv88e6xxx_hw_stat *stats,
743 int port, uint64_t *data)
745 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
749 mutex_lock(&ps->smi_mutex);
751 ret = _mv88e6xxx_stats_snapshot(ds, port);
753 mutex_unlock(&ps->smi_mutex);
757 /* Read each of the counters. */
758 for (i = 0; i < nr_stats; i++)
759 data[i] = _mv88e6xxx_get_ethtool_stat(ds, i, stats, port);
761 mutex_unlock(&ps->smi_mutex);
764 /* All the statistics in the table */
766 mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
768 if (have_sw_in_discards(ds))
769 _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
770 mv88e6xxx_hw_stats, port, data);
772 _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
773 mv88e6xxx_hw_stats, port, data);
776 int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
778 if (have_sw_in_discards(ds))
779 return ARRAY_SIZE(mv88e6xxx_hw_stats);
780 return ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
784 mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
785 int port, uint64_t *data)
787 if (have_sw_in_discards(ds))
788 _mv88e6xxx_get_ethtool_stats(
789 ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
790 mv88e6xxx_hw_stats, port, data);
792 _mv88e6xxx_get_ethtool_stats(
793 ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
794 mv88e6xxx_hw_stats, port, data);
797 int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
799 return 32 * sizeof(u16);
802 void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
803 struct ethtool_regs *regs, void *_p)
810 memset(p, 0xff, 32 * sizeof(u16));
812 for (i = 0; i < 32; i++) {
815 ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
821 /* Must be called with SMI lock held */
822 static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
825 unsigned long timeout = jiffies + HZ / 10;
827 while (time_before(jiffies, timeout)) {
830 ret = _mv88e6xxx_reg_read(ds, reg, offset);
836 usleep_range(1000, 2000);
841 static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
843 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
846 mutex_lock(&ps->smi_mutex);
847 ret = _mv88e6xxx_wait(ds, reg, offset, mask);
848 mutex_unlock(&ps->smi_mutex);
853 static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
855 return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
856 GLOBAL2_SMI_OP_BUSY);
859 int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
861 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
862 GLOBAL2_EEPROM_OP_LOAD);
865 int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
867 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
868 GLOBAL2_EEPROM_OP_BUSY);
871 /* Must be called with SMI lock held */
872 static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
874 return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
878 /* Must be called with SMI lock held */
879 static int _mv88e6xxx_scratch_wait(struct dsa_switch *ds)
881 return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
882 GLOBAL2_SCRATCH_BUSY);
885 /* Must be called with SMI mutex held */
886 static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
891 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
892 GLOBAL2_SMI_OP_22_READ | (addr << 5) |
897 ret = _mv88e6xxx_phy_wait(ds);
901 return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
904 /* Must be called with SMI mutex held */
905 static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
910 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
914 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
915 GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
918 return _mv88e6xxx_phy_wait(ds);
921 int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
923 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
926 mutex_lock(&ps->smi_mutex);
928 reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
932 e->eee_enabled = !!(reg & 0x0200);
933 e->tx_lpi_enabled = !!(reg & 0x0100);
935 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
939 e->eee_active = !!(reg & PORT_STATUS_EEE);
943 mutex_unlock(&ps->smi_mutex);
947 int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
948 struct phy_device *phydev, struct ethtool_eee *e)
950 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
954 mutex_lock(&ps->smi_mutex);
956 ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
963 if (e->tx_lpi_enabled)
966 ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
968 mutex_unlock(&ps->smi_mutex);
973 static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 cmd)
977 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
981 return _mv88e6xxx_atu_wait(ds);
984 static int _mv88e6xxx_atu_data_write(struct dsa_switch *ds,
985 struct mv88e6xxx_atu_entry *entry)
987 u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
989 if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
990 unsigned int mask, shift;
993 data |= GLOBAL_ATU_DATA_TRUNK;
994 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
995 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
997 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
998 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
1001 data |= (entry->portv_trunkid << shift) & mask;
1004 return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, data);
1007 static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds,
1008 struct mv88e6xxx_atu_entry *entry,
1014 err = _mv88e6xxx_atu_wait(ds);
1018 err = _mv88e6xxx_atu_data_write(ds, entry);
1023 err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID,
1028 op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
1029 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
1031 op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL :
1032 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
1035 return _mv88e6xxx_atu_cmd(ds, op);
1038 static int _mv88e6xxx_atu_flush(struct dsa_switch *ds, u16 fid, bool static_too)
1040 struct mv88e6xxx_atu_entry entry = {
1042 .state = 0, /* EntryState bits must be 0 */
1045 return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
1048 static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
1050 return _mv88e6xxx_atu_flush(ds, fid, false);
1053 static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port,
1054 int to_port, bool static_too)
1056 struct mv88e6xxx_atu_entry entry = {
1061 /* EntryState bits must be 0xF */
1062 entry.state = GLOBAL_ATU_DATA_STATE_MASK;
1064 /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
1065 entry.portv_trunkid = (to_port & 0x0f) << 4;
1066 entry.portv_trunkid |= from_port & 0x0f;
1068 return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
1071 static int _mv88e6xxx_atu_remove(struct dsa_switch *ds, u16 fid, int port,
1074 /* Destination port 0xF means remove the entries */
1075 return _mv88e6xxx_atu_move(ds, fid, port, 0x0f, static_too);
1078 static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
1080 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1084 mutex_lock(&ps->smi_mutex);
1086 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
1092 oldstate = reg & PORT_CONTROL_STATE_MASK;
1093 if (oldstate != state) {
1094 /* Flush forwarding database if we're moving a port
1095 * from Learning or Forwarding state to Disabled or
1096 * Blocking or Listening state.
1098 if (oldstate >= PORT_CONTROL_STATE_LEARNING &&
1099 state <= PORT_CONTROL_STATE_BLOCKING) {
1100 ret = _mv88e6xxx_atu_remove(ds, 0, port, false);
1104 reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
1105 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
1110 mutex_unlock(&ps->smi_mutex);
1114 /* Must be called with smi lock held */
1115 static int _mv88e6xxx_update_port_config(struct dsa_switch *ds, int port)
1117 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1118 u8 fid = ps->fid[port];
1119 u16 reg = fid << 12;
1121 if (dsa_is_cpu_port(ds, port))
1122 reg |= ds->phys_port_mask;
1124 reg |= (ps->bridge_mask[fid] |
1125 (1 << dsa_upstream_port(ds))) & ~(1 << port);
1127 return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
1130 /* Must be called with smi lock held */
1131 static int _mv88e6xxx_update_bridge_config(struct dsa_switch *ds, int fid)
1133 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1138 mask = ds->phys_port_mask;
1141 mask &= ~(1 << port);
1142 if (ps->fid[port] != fid)
1145 ret = _mv88e6xxx_update_port_config(ds, port);
1150 return _mv88e6xxx_flush_fid(ds, fid);
1153 /* Bridge handling functions */
1155 int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
1157 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1162 /* If the bridge group is not empty, join that group.
1163 * Otherwise create a new group.
1165 fid = ps->fid[port];
1166 nmask = br_port_mask & ~(1 << port);
1168 fid = ps->fid[__ffs(nmask)];
1170 nmask = ps->bridge_mask[fid] | (1 << port);
1171 if (nmask != br_port_mask) {
1172 netdev_err(ds->ports[port],
1173 "join: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
1174 fid, br_port_mask, nmask);
1178 mutex_lock(&ps->smi_mutex);
1180 ps->bridge_mask[fid] = br_port_mask;
1182 if (fid != ps->fid[port]) {
1183 clear_bit(ps->fid[port], ps->fid_bitmap);
1184 ps->fid[port] = fid;
1185 ret = _mv88e6xxx_update_bridge_config(ds, fid);
1188 mutex_unlock(&ps->smi_mutex);
1193 int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
1195 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1199 fid = ps->fid[port];
1201 if (ps->bridge_mask[fid] != br_port_mask) {
1202 netdev_err(ds->ports[port],
1203 "leave: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
1204 fid, br_port_mask, ps->bridge_mask[fid]);
1208 /* If the port was the last port of a bridge, we are done.
1209 * Otherwise assign a new fid to the port, and fix up
1210 * the bridge configuration.
1212 if (br_port_mask == (1 << port))
1215 mutex_lock(&ps->smi_mutex);
1217 newfid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, 1);
1218 if (unlikely(newfid > ps->num_ports)) {
1219 netdev_err(ds->ports[port], "all first %d FIDs are used\n",
1225 ps->fid[port] = newfid;
1226 set_bit(newfid, ps->fid_bitmap);
1227 ps->bridge_mask[fid] &= ~(1 << port);
1228 ps->bridge_mask[newfid] = 1 << port;
1230 ret = _mv88e6xxx_update_bridge_config(ds, fid);
1232 ret = _mv88e6xxx_update_bridge_config(ds, newfid);
1235 mutex_unlock(&ps->smi_mutex);
1240 int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
1242 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1246 case BR_STATE_DISABLED:
1247 stp_state = PORT_CONTROL_STATE_DISABLED;
1249 case BR_STATE_BLOCKING:
1250 case BR_STATE_LISTENING:
1251 stp_state = PORT_CONTROL_STATE_BLOCKING;
1253 case BR_STATE_LEARNING:
1254 stp_state = PORT_CONTROL_STATE_LEARNING;
1256 case BR_STATE_FORWARDING:
1258 stp_state = PORT_CONTROL_STATE_FORWARDING;
1262 netdev_dbg(ds->ports[port], "port state %d [%d]\n", state, stp_state);
1264 /* mv88e6xxx_port_stp_update may be called with softirqs disabled,
1265 * so we can not update the port state directly but need to schedule it.
1267 ps->port_state[port] = stp_state;
1268 set_bit(port, &ps->port_state_update_mask);
1269 schedule_work(&ps->bridge_work);
1274 int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
1278 ret = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
1282 *pvid = ret & PORT_DEFAULT_VLAN_MASK;
1287 int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
1289 return mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
1290 pvid & PORT_DEFAULT_VLAN_MASK);
1293 static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
1295 return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
1296 GLOBAL_VTU_OP_BUSY);
1299 static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
1303 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
1307 return _mv88e6xxx_vtu_wait(ds);
1310 static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
1314 ret = _mv88e6xxx_vtu_wait(ds);
1318 return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
1321 static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
1322 struct mv88e6xxx_vtu_stu_entry *entry,
1323 unsigned int nibble_offset)
1325 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1330 for (i = 0; i < 3; ++i) {
1331 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1332 GLOBAL_VTU_DATA_0_3 + i);
1339 for (i = 0; i < ps->num_ports; ++i) {
1340 unsigned int shift = (i % 4) * 4 + nibble_offset;
1341 u16 reg = regs[i / 4];
1343 entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
1349 static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
1350 struct mv88e6xxx_vtu_stu_entry *entry,
1351 unsigned int nibble_offset)
1353 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1354 u16 regs[3] = { 0 };
1358 for (i = 0; i < ps->num_ports; ++i) {
1359 unsigned int shift = (i % 4) * 4 + nibble_offset;
1360 u8 data = entry->data[i];
1362 regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
1365 for (i = 0; i < 3; ++i) {
1366 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
1367 GLOBAL_VTU_DATA_0_3 + i, regs[i]);
1375 static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid,
1376 struct mv88e6xxx_vtu_stu_entry *entry)
1378 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1381 ret = _mv88e6xxx_vtu_wait(ds);
1385 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
1386 vid & GLOBAL_VTU_VID_MASK);
1390 ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
1394 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
1398 next.vid = ret & GLOBAL_VTU_VID_MASK;
1399 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1402 ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
1406 if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
1407 mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
1408 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1413 next.fid = ret & GLOBAL_VTU_FID_MASK;
1415 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1420 next.sid = ret & GLOBAL_VTU_SID_MASK;
1428 static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
1429 struct mv88e6xxx_vtu_stu_entry *entry)
1434 ret = _mv88e6xxx_vtu_wait(ds);
1441 /* Write port member tags */
1442 ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
1446 if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
1447 mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
1448 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1449 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1453 reg = entry->fid & GLOBAL_VTU_FID_MASK;
1454 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
1459 reg = GLOBAL_VTU_VID_VALID;
1461 reg |= entry->vid & GLOBAL_VTU_VID_MASK;
1462 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1466 return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
1469 static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
1470 struct mv88e6xxx_vtu_stu_entry *entry)
1472 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1475 ret = _mv88e6xxx_vtu_wait(ds);
1479 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
1480 sid & GLOBAL_VTU_SID_MASK);
1484 ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
1488 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
1492 next.sid = ret & GLOBAL_VTU_SID_MASK;
1494 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
1498 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1501 ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
1510 static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
1511 struct mv88e6xxx_vtu_stu_entry *entry)
1516 ret = _mv88e6xxx_vtu_wait(ds);
1523 /* Write port states */
1524 ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
1528 reg = GLOBAL_VTU_VID_VALID;
1530 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1534 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1535 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1539 return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
/* Initialize a new VTU entry for @vid in @entry: membership is CPU-port
 * only, a required STU entry is validated/created on families that need
 * one, and a free FID is allocated from ps->fid_bitmap and flushed of
 * stale ATU entries before being claimed.
 */
1542 static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
1543 struct mv88e6xxx_vtu_stu_entry *entry)
1545 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1546 struct mv88e6xxx_vtu_stu_entry vlan = {
1552 /* exclude all ports except the CPU, which egresses tagged */
1553 for (i = 0; i < ps->num_ports; ++i)
1554 vlan.data[i] = dsa_is_cpu_port(ds, i) ?
1555 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED :
1556 GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
/* Families with an STU need a valid STU entry behind every VTU entry. */
1558 if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
1559 mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
1560 struct mv88e6xxx_vtu_stu_entry vstp;
1563 /* Adding a VTU entry requires a valid STU entry. As VSTP is not
1564 * implemented, only one STU entry is needed to cover all VTU
1565 * entries. Thus, validate the SID 0.
*/
/* Starting the get-next walk at the max SID wraps around to SID 0. */
1568 err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
1572 if (vstp.sid != vlan.sid || !vstp.valid) {
1573 memset(&vstp, 0, sizeof(vstp));
1575 vstp.sid = vlan.sid;
1577 err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
1582 /* Non-bridged ports and bridge groups use FIDs from 1 to
1583 * num_ports; VLANs use FIDs from num_ports+1 to 4095.
*/
1585 vlan.fid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID,
1587 if (unlikely(vlan.fid == VLAN_N_VID)) {
1588 pr_err("no more FID available for VLAN %d\n", vid);
1592 /* Clear all MAC addresses from the new database */
1593 err = _mv88e6xxx_atu_flush(ds, vlan.fid, true);
/* Claim the FID only after the flush succeeded. */
1597 set_bit(vlan.fid, ps->fid_bitmap);
/* DSA op: add @port to VLAN @vid, egressing untagged or tagged per
 * @untagged.  Looks up the VTU entry (get-next from vid - 1 lands on
 * vid if it exists), creates it via _mv88e6xxx_vlan_init() if absent,
 * then loads the updated membership.  Serialized by ps->smi_mutex.
 */
1604 int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
1607 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1608 struct mv88e6xxx_vtu_stu_entry vlan;
1611 mutex_lock(&ps->smi_mutex);
1612 err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
/* Entry missing or invalid: initialize a fresh VTU entry for vid. */
1616 if (vlan.vid != vid || !vlan.valid) {
1617 err = _mv88e6xxx_vlan_init(ds, vid, &vlan);
/* Set this port's egress mode within the VLAN. */
1622 vlan.data[port] = untagged ?
1623 GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
1624 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
1626 err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
1628 mutex_unlock(&ps->smi_mutex);
/* DSA op: remove @port from VLAN @vid.  Marks the port non-member,
 * keeps the VTU entry while any non-CPU port is still a member
 * (otherwise the loadpurge deletes it), removes this port's ATU entries
 * from the VLAN's FID, and releases the FID bit when the VLAN is gone.
 */
1633 int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
1635 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1636 struct mv88e6xxx_vtu_stu_entry vlan;
1640 mutex_lock(&ps->smi_mutex);
1642 err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
/* Nothing to do if the VLAN doesn't exist or the port isn't in it. */
1646 if (vlan.vid != vid || !vlan.valid ||
1647 vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
1652 vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1654 /* keep the VLAN unless all ports are excluded */
1655 for (i = 0; i < ps->num_ports; ++i) {
/* CPU port membership alone does not keep the VLAN alive. */
1656 if (dsa_is_cpu_port(ds, i))
1659 if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
1666 err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
/* Drop this port's addresses from the VLAN's address database. */
1670 err = _mv88e6xxx_atu_remove(ds, vlan.fid, port, false);
1675 clear_bit(vlan.fid, ps->fid_bitmap);
1678 mutex_unlock(&ps->smi_mutex);
/* Iterate VTU entries starting after @vid until one is found in which
 * @port is a member (tagged or untagged), storing it in @entry.
 * Caller presumably holds ps->smi_mutex, as with other _-helpers.
 */
1683 static int _mv88e6xxx_port_vtu_getnext(struct dsa_switch *ds, int port, u16 vid,
1684 struct mv88e6xxx_vtu_stu_entry *entry)
1692 err = _mv88e6xxx_vtu_getnext(ds, vid, entry);
/* Keep walking while the port is not a member of the entry found. */
1700 } while (entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED &&
1701 entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED);
/* DSA op: fetch the next VLAN after *vid, reporting member ports in
 * @ports and the untagged subset in @untagged (both are bitmaps sized
 * for ps->num_ports).  The CPU port is deliberately excluded from the
 * reported bitmaps.
 */
1706 int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
1707 unsigned long *ports, unsigned long *untagged)
1709 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1710 struct mv88e6xxx_vtu_stu_entry next;
1717 mutex_lock(&ps->smi_mutex);
1718 err = _mv88e6xxx_vtu_getnext(ds, *vid, &next);
1719 mutex_unlock(&ps->smi_mutex);
/* Translate the VTU member-tag codes into the two output bitmaps. */
1729 for (port = 0; port < ps->num_ports; ++port) {
1730 clear_bit(port, ports);
1731 clear_bit(port, untagged);
1733 if (dsa_is_cpu_port(ds, port))
1736 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED ||
1737 next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
1738 set_bit(port, ports);
1740 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
1741 set_bit(port, untagged)
1747 static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
1748 const unsigned char *addr)
1752 for (i = 0; i < 3; i++) {
1753 ret = _mv88e6xxx_reg_write(
1754 ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
1755 (addr[i * 2] << 8) | addr[i * 2 + 1]);
/* Read the 6-byte MAC address back out of the three 16-bit ATU MAC
 * registers; inverse of _mv88e6xxx_atu_mac_write().
 */
1763 static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
1767 for (i = 0; i < 3; i++) {
1768 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1769 GLOBAL_ATU_MAC_01 + i);
1772 addr[i * 2] = ret >> 8;
1773 addr[i * 2 + 1] = ret & 0xff;
/* Program one ATU entry: wait for the ATU to go idle, stage the MAC,
 * data word and FID, then issue the load-to-database command.
 */
1779 static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
1780 struct mv88e6xxx_atu_entry *entry)
1784 ret = _mv88e6xxx_atu_wait(ds);
1788 ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
1792 ret = _mv88e6xxx_atu_data_write(ds, entry);
1796 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, entry->fid);
1800 return _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_LOAD_DB);
/* Map (@port, @vid) to the FID of the corresponding address database.
 * Falls back to the port's own FID (ps->fid[port]); otherwise walks the
 * VTU for a VLAN containing the port and returns its FID when the VID
 * matches exactly.  The elided branch conditions are not visible here.
 */
1803 static int _mv88e6xxx_port_vid_to_fid(struct dsa_switch *ds, int port, u16 vid)
1805 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1806 struct mv88e6xxx_vtu_stu_entry vlan;
1810 return ps->fid[port];
/* get-next from vid - 1 finds vid itself when present */
1812 err = _mv88e6xxx_port_vtu_getnext(ds, port, vid - 1, &vlan);
1816 if (vlan.vid == vid)
/* Build and load an ATU entry binding @addr (within @vid's FID) to
 * @port with the given entry @state.  Loading with state UNUSED purges
 * the entry, which is how FDB deletion is implemented.
 */
1822 static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
1823 const unsigned char *addr, u16 vid,
1826 struct mv88e6xxx_atu_entry entry = { 0 };
/* Resolve the address database for this port/VLAN pair. */
1829 ret = _mv88e6xxx_port_vid_to_fid(ds, port, vid);
1834 entry.state = state;
1835 ether_addr_copy(entry.mac, addr);
/* Only a live entry needs a port vector; a purge does not. */
1836 if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1837 entry.trunk = false;
1838 entry.portv_trunkid = BIT(port);
1841 return _mv88e6xxx_atu_load(ds, &entry);
/* DSA op: add a static FDB entry for @addr/@vid on @port.  Multicast
 * addresses get the MC static state, unicast the UC static state.
 */
1844 int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
1845 const unsigned char *addr, u16 vid)
1847 int state = is_multicast_ether_addr(addr) ?
1848 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1849 GLOBAL_ATU_DATA_STATE_UC_STATIC;
1850 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1853 mutex_lock(&ps->smi_mutex);
1854 ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid, state);
1855 mutex_unlock(&ps->smi_mutex);
/* DSA op: delete the FDB entry for @addr/@vid on @port by re-loading it
 * with the UNUSED state, which purges it from the ATU.
 */
1860 int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
1861 const unsigned char *addr, u16 vid)
1863 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1866 mutex_lock(&ps->smi_mutex);
1867 ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid,
1868 GLOBAL_ATU_DATA_STATE_UNUSED);
1869 mutex_unlock(&ps->smi_mutex);
/* Fetch the ATU entry following @addr in database @fid into @entry.
 * Decodes the data register into state plus either a trunk ID or a
 * port vector, depending on the trunk bit.
 */
1874 static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
1875 const unsigned char *addr,
1876 struct mv88e6xxx_atu_entry *entry)
1878 struct mv88e6xxx_atu_entry next = { 0 };
1883 ret = _mv88e6xxx_atu_wait(ds);
/* Stage the starting MAC and the database to walk. */
1887 ret = _mv88e6xxx_atu_mac_write(ds, addr);
1891 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
1895 ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB);
/* Read back the entry the hardware landed on. */
1899 ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
1903 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
1907 next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
1908 if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1909 unsigned int mask, shift;
/* Trunk bit selects trunk-ID vs. port-vector field layout. */
1911 if (ret & GLOBAL_ATU_DATA_TRUNK) {
1913 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
1914 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
1917 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
1918 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
1921 next.portv_trunkid = (ret & mask) >> shift;
1928 /* get next entry for port */
/* DSA op: iterate the FDB for @port starting after @addr within the
 * database resolved from *vid.  A broadcast @addr restarts the walk and
 * (per the elided code) appears to re-resolve the VLAN first.  Skips
 * trunk entries and entries whose port vector excludes @port; reports
 * whether the returned entry is static.
 */
1929 int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
1930 unsigned char *addr, u16 *vid, bool *is_static)
1932 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1933 struct mv88e6xxx_atu_entry next;
1937 mutex_lock(&ps->smi_mutex);
1939 ret = _mv88e6xxx_port_vid_to_fid(ds, port, *vid);
/* All-ones MAC marks the start of a fresh walk. */
1945 if (is_broadcast_ether_addr(addr)) {
1946 struct mv88e6xxx_vtu_stu_entry vtu;
1948 ret = _mv88e6xxx_port_vtu_getnext(ds, port, *vid, &vtu);
1956 ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next);
1960 ether_addr_copy(addr, next.mac);
1962 if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
/* skip trunks and entries not destined to this port */
1964 } while (next.trunk || (next.portv_trunkid & BIT(port)) == 0);
1966 *is_static = next.state == (is_multicast_ether_addr(addr) ?
1967 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1968 GLOBAL_ATU_DATA_STATE_UC_STATIC);
1970 mutex_unlock(&ps->smi_mutex);
/* Deferred work: apply pending STP port-state changes recorded in
 * ps->port_state_update_mask / ps->port_state[].
 */
1975 static void mv88e6xxx_bridge_work(struct work_struct *work)
1977 struct mv88e6xxx_priv_state *ps;
1978 struct dsa_switch *ds;
1981 ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
/* Recover ds by stepping back one dsa_switch from the priv area;
 * relies on priv being allocated immediately after the dsa_switch
 * (inverse of ds_to_priv) — NOTE(review): confirm this layout holds.
 */
1982 ds = ((struct dsa_switch *)ps) - 1;
/* Drain the pending-update bitmask one port at a time. */
1984 while (ps->port_state_update_mask) {
1985 port = __ffs(ps->port_state_update_mask);
1986 clear_bit(port, &ps->port_state_update_mask);
1987 mv88e6xxx_set_port_state(ds, port, ps->port_state[port]);
/* One-time hardware setup for a single port: PCS forcing, port control
 * and tagging mode, frame-size/802.1Q policy, association vector, rate
 * control, priority mapping, FID assignment and default VLAN/priority.
 * Per-family register support is gated by the mv88e6xxx_6xxx_family()
 * predicates throughout.
 */
1991 static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
1993 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1997 mutex_lock(&ps->smi_mutex);
1999 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2000 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2001 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2002 mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
2003 /* MAC Forcing register: don't force link, speed,
2004 * duplex or flow control state to any particular
2005 * values on physical ports, but force the CPU port
2006 * and all DSA ports to their maximum bandwidth and
*/
2009 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
2010 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
2011 reg &= ~PORT_PCS_CTRL_UNFORCED;
2012 reg |= PORT_PCS_CTRL_FORCE_LINK |
2013 PORT_PCS_CTRL_LINK_UP |
2014 PORT_PCS_CTRL_DUPLEX_FULL |
2015 PORT_PCS_CTRL_FORCE_DUPLEX;
/* 6065 family maxes out at 100 Mb/s; others force 1000. */
2016 if (mv88e6xxx_6065_family(ds))
2017 reg |= PORT_PCS_CTRL_100;
2019 reg |= PORT_PCS_CTRL_1000;
2021 reg |= PORT_PCS_CTRL_UNFORCED;
2024 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2025 PORT_PCS_CTRL, reg);
2030 /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
2031 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
2032 * tunneling, determine priority by looking at 802.1p and IP
2033 * priority fields (IP prio has precedence), and set STP state
*/
2036 /* If this is the CPU link, use DSA or EDSA tagging depending
2037 * on which tagging mode was configured.
*/
2039 /* If this is a link to another switch, use DSA tagging mode.
*/
2041 /* If this is the upstream port for this switch, enable
2042 * forwarding of unknown unicasts and multicasts.
*/
2045 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2046 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2047 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
2048 mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
2049 reg = PORT_CONTROL_IGMP_MLD_SNOOP |
2050 PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
2051 PORT_CONTROL_STATE_FORWARDING;
2052 if (dsa_is_cpu_port(ds, port)) {
2053 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
2054 reg |= PORT_CONTROL_DSA_TAG;
2055 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2056 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2057 mv88e6xxx_6320_family(ds)) {
2058 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2059 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
2061 reg |= PORT_CONTROL_FRAME_MODE_DSA;
2062 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2063 PORT_CONTROL_FORWARD_UNKNOWN_MC;
2066 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2067 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2068 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
2069 mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
2070 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2071 reg |= PORT_CONTROL_EGRESS_ADD_TAG;
2074 if (dsa_is_dsa_port(ds, port)) {
2075 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
2076 reg |= PORT_CONTROL_DSA_TAG;
2077 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2078 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2079 mv88e6xxx_6320_family(ds)) {
2080 reg |= PORT_CONTROL_FRAME_MODE_DSA;
2083 if (port == dsa_upstream_port(ds))
2084 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2085 PORT_CONTROL_FORWARD_UNKNOWN_MC;
2088 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2094 /* Port Control 2: don't force a good FCS, set the maximum frame size to
2095 * 10240 bytes, enable secure 802.1q tags, don't discard tagged or
2096 * untagged frames on this port, do a destination address lookup on all
2097 * received packets as usual, disable ARP mirroring and don't send a
2098 * copy of all transmitted/received frames on this port to the CPU.
*/
2101 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2102 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2103 mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
2104 reg = PORT_CONTROL_2_MAP_DA;
2106 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2107 mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
2108 reg |= PORT_CONTROL_2_JUMBO_10240;
2110 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
2111 /* Set the upstream port this port should use */
2112 reg |= dsa_upstream_port(ds);
2113 /* enable forwarding of unknown multicast addresses to
*/
2116 if (port == dsa_upstream_port(ds))
2117 reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
2120 reg |= PORT_CONTROL_2_8021Q_FALLBACK;
2123 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2124 PORT_CONTROL_2, reg);
2129 /* Port Association Vector: when learning source addresses
2130 * of packets, add the address to the address database using
2131 * a port bitmap that has only the bit for this port set and
2132 * the other bits clear.
*/
2134 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR,
2139 /* Egress rate control 2: disable egress rate control. */
2140 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
2145 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2146 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2147 mv88e6xxx_6320_family(ds)) {
2148 /* Do not limit the period of time that this port can
2149 * be paused for by the remote end or the period of
2150 * time that this port can pause the remote end.
*/
2152 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2153 PORT_PAUSE_CTRL, 0x0000);
2157 /* Port ATU control: disable limiting the number of
2158 * address database entries that this port is allowed
*/
2161 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2162 PORT_ATU_CONTROL, 0x0000);
2163 /* Priority Override: disable DA, SA and VTU priority
*/
2166 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2167 PORT_PRI_OVERRIDE, 0x0000);
2171 /* Port Ethertype: use the Ethertype DSA Ethertype
*/
2174 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2175 PORT_ETH_TYPE, ETH_P_EDSA);
2178 /* Tag Remap: use an identity 802.1p prio -> switch
*/
2181 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2182 PORT_TAG_REGMAP_0123, 0x3210);
2186 /* Tag Remap 2: use an identity 802.1p prio -> switch
*/
2189 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2190 PORT_TAG_REGMAP_4567, 0x7654);
2195 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2196 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2197 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2198 mv88e6xxx_6320_family(ds)) {
2199 /* Rate Control: disable ingress rate limiting. */
2200 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2201 PORT_RATE_CONTROL, 0x0001);
2206 /* Port Control 1: disable trunking, disable sending
2207 * learning messages to this port.
*/
2209 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
2213 /* Port based VLAN map: give each port its own address
2214 * database, allow the CPU port to talk to each of the 'real'
2215 * ports, and allow each of the 'real' ports to only talk to
2216 * the upstream port.
*/
2219 ps->fid[port] = fid;
2220 set_bit(fid, ps->fid_bitmap);
2222 if (!dsa_is_cpu_port(ds, port))
2223 ps->bridge_mask[fid] = 1 << port;
2225 ret = _mv88e6xxx_update_port_config(ds, port);
2229 /* Default VLAN ID and priority: don't set a default VLAN
2230 * ID, and set the default packet priority to zero.
*/
2232 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
2235 mutex_unlock(&ps->smi_mutex);
/* Run mv88e6xxx_setup_port() on every port of the switch. */
2239 int mv88e6xxx_setup_ports(struct dsa_switch *ds)
2241 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2245 for (i = 0; i < ps->num_ports; i++) {
2246 ret = mv88e6xxx_setup_port(ds, i);
/* debugfs "regs": dump registers 0-31 of GLOBAL, GLOBAL2 and every
 * port, one row per register.  Uses the locked top-level reg accessors.
 */
2253 static int mv88e6xxx_regs_show(struct seq_file *s, void *p)
2255 struct dsa_switch *ds = s->private;
2257 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2260 seq_puts(s, " GLOBAL GLOBAL2 ");
2261 for (port = 0 ; port < ps->num_ports; port++)
2262 seq_printf(s, " %2d ", port);
2265 for (reg = 0; reg < 32; reg++) {
2266 seq_printf(s, "%2x: ", reg);
2267 seq_printf(s, " %4x %4x ",
2268 mv88e6xxx_reg_read(ds, REG_GLOBAL, reg),
2269 mv88e6xxx_reg_read(ds, REG_GLOBAL2, reg));
2271 for (port = 0 ; port < ps->num_ports; port++)
2272 seq_printf(s, "%4x ",
2273 mv88e6xxx_reg_read(ds, REG_PORT(port), reg));
/* debugfs open hook wiring mv88e6xxx_regs_show() to seq_file. */
2280 static int mv88e6xxx_regs_open(struct inode *inode, struct file *file)
2282 return single_open(file, mv88e6xxx_regs_show, inode->i_private);
/* File operations for the debugfs "regs" node. */
2285 static const struct file_operations mv88e6xxx_regs_fops = {
2286 .open = mv88e6xxx_regs_open,
2288 .llseek = no_llseek,
2289 .release = single_release,
2290 .owner = THIS_MODULE,
/* Print the column header for the debugfs ATU dump. */
2293 static void mv88e6xxx_atu_show_header(struct seq_file *s)
2295 seq_puts(s, "DB T/P Vec State Addr\n");
/* Decode one raw ATU data word and print it as a dump row: database
 * number, trunk/port flag, port vector bitmap, state and MAC address.
 */
2298 static void mv88e6xxx_atu_show_entry(struct seq_file *s, int dbnum,
2299 unsigned char *addr, int data)
2301 bool trunk = !!(data & GLOBAL_ATU_DATA_TRUNK);
2302 int portvec = ((data & GLOBAL_ATU_DATA_PORT_VECTOR_MASK) >>
2303 GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT);
2304 int state = data & GLOBAL_ATU_DATA_STATE_MASK;
2306 seq_printf(s, "%03x %5s %10pb %x %pM\n",
2307 dbnum, (trunk ? "Trunk" : "Port"), &portvec, state, addr);
/* Walk one ATU database with repeated get-next commands, printing each
 * in-use entry, until the walk wraps back to an unused entry.  Caller
 * holds ps->smi_mutex (taken in mv88e6xxx_atu_show()).
 */
2310 static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds,
2313 unsigned char bcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
2314 unsigned char addr[6];
2315 int ret, data, state;
/* Broadcast MAC restarts the get-next walk from the beginning. */
2317 ret = _mv88e6xxx_atu_mac_write(ds, bcast);
2322 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID,
2327 ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB);
2331 data = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
2335 state = data & GLOBAL_ATU_DATA_STATE_MASK;
2336 if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
2338 ret = _mv88e6xxx_atu_mac_read(ds, addr);
2341 mv88e6xxx_atu_show_entry(s, dbnum, addr, data);
2342 } while (state != GLOBAL_ATU_DATA_STATE_UNUSED);
/* debugfs "atu": dump every ATU database.
 * NOTE(review): the loop bound is 255, so database 255 is never dumped
 * (and larger FID spaces on newer families are not covered) — confirm
 * whether dbnum < 256 (or the chip's real DB count) was intended.
 */
2347 static int mv88e6xxx_atu_show(struct seq_file *s, void *p)
2349 struct dsa_switch *ds = s->private;
2350 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2353 mv88e6xxx_atu_show_header(s);
2355 for (dbnum = 0; dbnum < 255; dbnum++) {
/* Take the SMI lock per database so other users can interleave. */
2356 mutex_lock(&ps->smi_mutex);
2357 mv88e6xxx_atu_show_db(s, ds, dbnum);
2358 mutex_unlock(&ps->smi_mutex);
/* debugfs open hook wiring mv88e6xxx_atu_show() to seq_file. */
2364 static int mv88e6xxx_atu_open(struct inode *inode, struct file *file)
2366 return single_open(file, mv88e6xxx_atu_show, inode->i_private);
/* File operations for the debugfs "atu" node. */
2369 static const struct file_operations mv88e6xxx_atu_fops = {
2370 .open = mv88e6xxx_atu_open,
2372 .llseek = no_llseek,
2373 .release = single_release,
2374 .owner = THIS_MODULE,
/* Print the per-port column header for the debugfs stats dump. */
2377 static void mv88e6xxx_stats_show_header(struct seq_file *s,
2378 struct mv88e6xxx_priv_state *ps)
2382 seq_puts(s, " Statistic ");
2383 for (port = 0 ; port < ps->num_ports; port++)
2384 seq_printf(s, "Port %2d ", port);
/* debugfs "stats": one row per hardware counter, one column per port.
 * The last three counters in mv88e6xxx_hw_stats are only present on
 * chips with the sw-in-discards registers, hence the max_stats split.
 */
2388 static int mv88e6xxx_stats_show(struct seq_file *s, void *p)
2390 struct dsa_switch *ds = s->private;
2391 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2392 struct mv88e6xxx_hw_stat *stats = mv88e6xxx_hw_stats;
2393 int port, stat, max_stats;
2396 if (have_sw_in_discards(ds))
2397 max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats);
2399 max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
2401 mv88e6xxx_stats_show_header(s, ps);
2403 mutex_lock(&ps->smi_mutex);
2405 for (stat = 0; stat < max_stats; stat++) {
2406 seq_printf(s, "%19s: ", stats[stat].string);
2407 for (port = 0 ; port < ps->num_ports; port++) {
/* Snapshot latches the port's counters before reading them. */
2408 _mv88e6xxx_stats_snapshot(ds, port);
2409 value = _mv88e6xxx_get_ethtool_stat(ds, stat, stats,
2411 seq_printf(s, "%8llu ", value);
2415 mutex_unlock(&ps->smi_mutex);
/* debugfs open hook wiring mv88e6xxx_stats_show() to seq_file. */
2420 static int mv88e6xxx_stats_open(struct inode *inode, struct file *file)
2422 return single_open(file, mv88e6xxx_stats_show, inode->i_private);
/* File operations for the debugfs "stats" node. */
2425 static const struct file_operations mv88e6xxx_stats_fops = {
2426 .open = mv88e6xxx_stats_open,
2428 .llseek = no_llseek,
2429 .release = single_release,
2430 .owner = THIS_MODULE,
/* debugfs "device_map": read back the DSA device-mapping table by
 * selecting each of the 32 targets in GLOBAL2_DEVICE_MAPPING and
 * printing the routed port.
 */
2433 static int mv88e6xxx_device_map_show(struct seq_file *s, void *p)
2435 struct dsa_switch *ds = s->private;
2436 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2439 seq_puts(s, "Target Port\n");
2441 mutex_lock(&ps->smi_mutex);
2442 for (target = 0; target < 32; target++) {
/* Writing without the UPDATE bit selects the target for readback. */
2443 ret = _mv88e6xxx_reg_write(
2444 ds, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
2445 target << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT);
2448 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
2449 GLOBAL2_DEVICE_MAPPING);
2450 seq_printf(s, " %2d %2d\n", target,
2451 ret & GLOBAL2_DEVICE_MAPPING_PORT_MASK);
2454 mutex_unlock(&ps->smi_mutex);
/* debugfs open hook wiring mv88e6xxx_device_map_show() to seq_file. */
2459 static int mv88e6xxx_device_map_open(struct inode *inode, struct file *file)
2461 return single_open(file, mv88e6xxx_device_map_show, inode->i_private);
/* File operations for the debugfs "device_map" node. */
2464 static const struct file_operations mv88e6xxx_device_map_fops = {
2465 .open = mv88e6xxx_device_map_open,
2467 .llseek = no_llseek,
2468 .release = single_release,
2469 .owner = THIS_MODULE,
/* debugfs "scratch": dump all 0x80 scratch/misc registers by selecting
 * each register index in GLOBAL2_SCRATCH_MISC, waiting for the access
 * to complete, then reading the value back.
 */
2472 static int mv88e6xxx_scratch_show(struct seq_file *s, void *p)
2474 struct dsa_switch *ds = s->private;
2475 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2478 seq_puts(s, "Register Value\n");
2480 mutex_lock(&ps->smi_mutex);
2481 for (reg = 0; reg < 0x80; reg++) {
2482 ret = _mv88e6xxx_reg_write(
2483 ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
2484 reg << GLOBAL2_SCRATCH_REGISTER_SHIFT);
2488 ret = _mv88e6xxx_scratch_wait(ds);
2492 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
2493 GLOBAL2_SCRATCH_MISC);
2494 seq_printf(s, " %2x %2x\n", reg,
2495 ret & GLOBAL2_SCRATCH_VALUE_MASK);
2498 mutex_unlock(&ps->smi_mutex);
/* debugfs open hook wiring mv88e6xxx_scratch_show() to seq_file. */
2503 static int mv88e6xxx_scratch_open(struct inode *inode, struct file *file)
2505 return single_open(file, mv88e6xxx_scratch_show, inode->i_private);
/* File operations for the debugfs "scratch" node. */
2508 static const struct file_operations mv88e6xxx_scratch_fops = {
2509 .open = mv88e6xxx_scratch_open,
2511 .llseek = no_llseek,
2512 .release = single_release,
2513 .owner = THIS_MODULE,
/* Common init shared by all chip variants: the SMI mutex, the switch ID
 * (masked to drop the revision nibble), the deferred bridge worker, and
 * a per-switch debugfs directory "dsa<index>" with its five nodes.
 */
2516 int mv88e6xxx_setup_common(struct dsa_switch *ds)
2518 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2521 mutex_init(&ps->smi_mutex);
/* Low nibble of the switch ID register is the chip revision. */
2523 ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
2525 INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
2527 name = kasprintf(GFP_KERNEL, "dsa%d", ds->index);
2528 ps->dbgfs = debugfs_create_dir(name, NULL);
2531 debugfs_create_file("regs", S_IRUGO, ps->dbgfs, ds,
2532 &mv88e6xxx_regs_fops);
2534 debugfs_create_file("atu", S_IRUGO, ps->dbgfs, ds,
2535 &mv88e6xxx_atu_fops);
2537 debugfs_create_file("stats", S_IRUGO, ps->dbgfs, ds,
2538 &mv88e6xxx_stats_fops);
2540 debugfs_create_file("device_map", S_IRUGO, ps->dbgfs, ds,
2541 &mv88e6xxx_device_map_fops);
2543 debugfs_create_file("scratch", S_IRUGO, ps->dbgfs, ds,
2544 &mv88e6xxx_scratch_fops);
/* One-time global (non-per-port) switch setup: ATU aging, priority
 * maps, management-frame trapping, the DSA routing table, trunk tables,
 * cross-chip PVT, ingress rate limiters, and finally a full flush of
 * statistics, ATU and VTU/STU state.
 */
2548 int mv88e6xxx_setup_global(struct dsa_switch *ds)
2550 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2554 /* Set the default address aging time to 5 minutes, and
2555 * enable address learn messages to be sent to all message
*/
2558 REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
2559 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
2561 /* Configure the IP ToS mapping registers. */
2562 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
2563 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
2564 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
2565 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
2566 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
2567 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
2568 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
2569 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
2571 /* Configure the IEEE 802.1p priority mapping register. */
2572 REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
2574 /* Send all frames with destination addresses matching
2575 * 01:80:c2:00:00:0x to the CPU port.
*/
2577 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
2579 /* Ignore removed tag data on doubly tagged packets, disable
2580 * flow control messages, force flow control priority to the
2581 * highest, and send all special multicast frames to the CPU
2582 * port at the highest priority.
*/
2584 REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
2585 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
2586 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
2588 /* Program the DSA routing table. */
2589 for (i = 0; i < 32; i++) {
/* Route each known sibling chip via the platform routing table;
 * other targets get the (elided) default nexthop.
 */
2592 if (ds->pd->rtable &&
2593 i != ds->index && i < ds->dst->pd->nr_chips)
2594 nexthop = ds->pd->rtable[i] & 0x1f;
2596 REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
2597 GLOBAL2_DEVICE_MAPPING_UPDATE |
2598 (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
2602 /* Clear all trunk masks. */
2603 for (i = 0; i < 8; i++)
2604 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
2605 0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
2606 ((1 << ps->num_ports) - 1));
2608 /* Clear all trunk mappings. */
2609 for (i = 0; i < 16; i++)
2610 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
2611 GLOBAL2_TRUNK_MAPPING_UPDATE |
2612 (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
2614 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2615 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2616 mv88e6xxx_6320_family(ds)) {
2617 /* Send all frames with destination addresses matching
2618 * 01:80:c2:00:00:2x to the CPU port.
*/
2620 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);
2622 /* Initialise cross-chip port VLAN table to reset
*/
2625 REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);
2627 /* Clear the priority override table. */
2628 for (i = 0; i < 16; i++)
2629 REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
2633 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2634 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2635 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2636 mv88e6xxx_6320_family(ds)) {
2637 /* Disable ingress rate limiting by resetting all
2638 * ingress rate limit registers to their initial
*/
2641 for (i = 0; i < ps->num_ports; i++)
2642 REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
2646 /* Clear the statistics counters for all ports */
2647 REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
2649 /* Wait for the flush to complete. */
2650 mutex_lock(&ps->smi_mutex);
2651 ret = _mv88e6xxx_stats_wait(ds);
2655 /* Clear all ATU entries */
2656 ret = _mv88e6xxx_atu_flush(ds, 0, true);
2660 /* Clear all the VTU and STU entries */
2661 ret = _mv88e6xxx_vtu_stu_flush(ds);
2663 mutex_unlock(&ps->smi_mutex);
/* Soft-reset the switch: disable all ports, let the TX queues drain,
 * issue the reset command (keeping the PPU alive when @ppu_active so
 * indirect PHY access via global regs 0x18/0x19 keeps working), then
 * poll up to one second for the reset-done bits.
 */
2668 int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
2670 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
/* Expected "reset complete" bit pattern differs with PPU state. */
2671 u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
2672 unsigned long timeout;
2676 /* Set all ports to the disabled state. */
2677 for (i = 0; i < ps->num_ports; i++) {
2678 ret = REG_READ(REG_PORT(i), PORT_CONTROL);
2679 REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
2682 /* Wait for transmit queues to drain. */
2683 usleep_range(2000, 4000);
2685 /* Reset the switch. Keep the PPU active if requested. The PPU
2686 * needs to be active to support indirect phy register access
2687 * through global registers 0x18 and 0x19.
*/
2690 REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
2692 REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
2694 /* Wait up to one second for reset to complete. */
2695 timeout = jiffies + 1 * HZ;
2696 while (time_before(jiffies, timeout)) {
2697 ret = REG_READ(REG_GLOBAL, 0x00);
2698 if ((ret & is_reset) == is_reset)
2700 usleep_range(1000, 2000);
2702 if (time_after(jiffies, timeout))
/* Read @reg from PHY @page on @port: select the page via register 0x16,
 * read indirectly, then always restore page 0 before unlocking.
 */
2708 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
2710 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2713 mutex_lock(&ps->smi_mutex);
2714 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
2717 ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
/* Restore page 0 unconditionally so later accesses see it. */
2719 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
2720 mutex_unlock(&ps->smi_mutex);
/* Write @val to @reg on PHY @page of @port; page-select, write, then
 * restore page 0 — the write counterpart of mv88e6xxx_phy_page_read().
 */
2724 int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
2727 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2730 mutex_lock(&ps->smi_mutex);
2731 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
2735 ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
2737 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
2738 mutex_unlock(&ps->smi_mutex);
/* Validate @port and map it to its PHY address; the in-range return
 * value and the error path are elided in this fragment.
 */
2742 static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
2744 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2746 if (port >= 0 && port < ps->num_ports)
/* DSA PHY read op: resolve the port's PHY address and do a direct
 * (non-indirect) PHY read under the SMI lock.
 */
2752 mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
2754 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2755 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2761 mutex_lock(&ps->smi_mutex);
2762 ret = _mv88e6xxx_phy_read(ds, addr, regnum);
2763 mutex_unlock(&ps->smi_mutex);
/* DSA PHY write op: direct PHY write counterpart of
 * mv88e6xxx_phy_read(), under the SMI lock.
 */
2768 mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
2770 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2771 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2777 mutex_lock(&ps->smi_mutex);
2778 ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
2779 mutex_unlock(&ps->smi_mutex);
/* DSA PHY read op for chips whose PHYs are reached indirectly through
 * global registers rather than as plain SMI devices.
 */
2784 mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
2786 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2787 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2793 mutex_lock(&ps->smi_mutex);
2794 ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
2795 mutex_unlock(&ps->smi_mutex);
/* Indirect-access counterpart of mv88e6xxx_phy_write(). */
2800 mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
2803 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2804 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2810 mutex_lock(&ps->smi_mutex);
2811 ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
2812 mutex_unlock(&ps->smi_mutex);
2816 #ifdef CONFIG_NET_DSA_HWMON
/* HWMON: read the die temperature on 6185-style chips via PHY 0 page 6
 * (reg 0x16 = 6): enable the sensor (bit 5 of reg 0x1a), wait for it to
 * settle, read, disable it again, and convert the raw 5-bit value with
 * temp = (raw - 5) * 5 degrees C.
 */
2818 static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
2820 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2826 mutex_lock(&ps->smi_mutex);
/* Select PHY page 6 where the sensor registers live. */
2828 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
2832 /* Enable temperature sensor */
2833 ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2837 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
2841 /* Wait for temperature to stabilize */
2842 usleep_range(10000, 12000);
2844 val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2850 /* Disable temperature sensor */
2851 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
2855 *temp = ((val & 0x1f) - 5) * 5;
/* Always restore PHY page 0 before releasing the lock. */
2858 _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
2859 mutex_unlock(&ps->smi_mutex);
/* HWMON: read die temperature on 6352/6320 chips from PHY page 6 reg 27
 * (PHY 3 on the 6320 family, PHY 0 otherwise); raw value is offset by
 * 25 degrees C.
 */
2863 static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
2865 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2870 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
2874 *temp = (ret & 0xff) - 25;
/* HWMON dispatch: pick the family-appropriate temperature reader. */
2879 int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
2881 if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
2882 return mv88e63xx_get_temp(ds, temp);
2884 return mv88e61xx_get_temp(ds, temp);
/* HWMON: read the temperature alarm limit (6320/6352 only) from PHY
 * page 6 reg 26, bits [12:8], scaled by 5 with a -25 C offset.
 */
2887 int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
2889 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2892 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2897 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2901 *temp = (((ret >> 8) & 0x1f) * 5) - 25;
/* HWMON: program the temperature alarm limit (6320/6352 only): inverse
 * of the get_temp_limit scaling, clamped to the 5-bit field, merged
 * into bits [12:8] of PHY page 6 reg 26 without touching other bits.
 */
2906 int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
2908 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2911 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2914 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2917 temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
2918 return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
2919 (ret & 0xe0ff) | (temp << 8));
/* HWMON: report whether the over-temperature alarm bit (bit 6 of PHY
 * page 6 reg 26) is set; 6320/6352 families only.
 */
2922 int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
2924 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2927 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2932 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2936 *alarm = !!(ret & 0x40);
2940 #endif /* CONFIG_NET_DSA_HWMON */
/* Module init: register each chip-specific DSA switch driver that was
 * enabled in the kernel config.
 */
2942 static int __init mv88e6xxx_init(void)
2944 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2945 register_switch_driver(&mv88e6131_switch_driver);
2947 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2948 register_switch_driver(&mv88e6123_61_65_switch_driver);
2950 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2951 register_switch_driver(&mv88e6352_switch_driver);
2953 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2954 register_switch_driver(&mv88e6171_switch_driver);
2958 module_init(mv88e6xxx_init);
/* Module exit: unregister the drivers in reverse registration order. */
2960 static void __exit mv88e6xxx_cleanup(void)
2962 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2963 unregister_switch_driver(&mv88e6171_switch_driver);
2965 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2966 unregister_switch_driver(&mv88e6352_switch_driver);
2968 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2969 unregister_switch_driver(&mv88e6123_61_65_switch_driver);
2971 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2972 unregister_switch_driver(&mv88e6131_switch_driver);
2975 module_exit(mv88e6xxx_cleanup);
/* Standard module metadata. */
2977 MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
2978 MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
2979 MODULE_LICENSE("GPL");