2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; version 2 of the License
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
15 * Copyright (C) 2009 John Crispin <blogic@openwrt.org>
18 #include <linux/module.h>
19 #include <linux/kernel.h>
20 #include <linux/types.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/init.h>
23 #include <linux/skbuff.h>
24 #include <linux/etherdevice.h>
25 #include <linux/ethtool.h>
26 #include <linux/platform_device.h>
27 #include <linux/phy.h>
29 #include <ramips_eth_platform.h>
30 #include "ramips_eth.h"
/* netdev watchdog timeout: 20 * HZ / 100 == 200 ms. */
32 #define TX_TIMEOUT (20 * HZ / 100)
/* RX buffer size handed to the DMA engine (covers max frame + slack). */
33 #define MAX_RX_LENGTH 1600
/* RT305X SoCs have an embedded ethernet switch; pull its driver in
 * directly.  On other SoCs the init/exit hooks are no-op stubs. */
35 #ifdef CONFIG_RALINK_RT305X
36 #include "ramips_esw.c"
38 static inline int rt305x_esw_init(void) { return 0; }
39 static inline void rt305x_esw_exit(void) { }
/* Strip the KSEG/cache bits from a physical address to get the bus
 * address the DMA engine expects.
 * NOTE(review): argument 'a' is not parenthesized in the expansion —
 * passing a non-trivial expression would mis-evaluate; confirm callers. */
42 #define phys_to_bus(a) (a & 0x1FFFFFFF)
/* Debug trace macro: real printk when debugging is enabled, no-op otherwise. */
44 #ifdef CONFIG_RAMIPS_ETH_DEBUG
45 #define RADEBUG(fmt, args...) printk(KERN_DEBUG fmt, ## args)
47 #define RADEBUG(fmt, args...) do {} while (0)
/* Abstract register indices used by the typed accessors
 * (ramips_fe_twr/ramips_fe_trr); mapped to SoC register offsets via
 * ramips_reg_table below. */
51 RAETH_REG_PDMA_GLO_CFG = 0,
52 RAETH_REG_PDMA_RST_CFG,
53 RAETH_REG_DLY_INT_CFG,
54 RAETH_REG_TX_BASE_PTR0,
55 RAETH_REG_TX_MAX_CNT0,
56 RAETH_REG_TX_CTX_IDX0,
57 RAETH_REG_RX_BASE_PTR0,
58 RAETH_REG_RX_MAX_CNT0,
59 RAETH_REG_RX_CALC_IDX0,
60 RAETH_REG_FE_INT_ENABLE,
61 RAETH_REG_FE_INT_STATUS,
/* Translation table: abstract raeth_reg index -> hardware register
 * offset (RAMIPS_* constants come from ramips_eth.h). */
65 static const u32 ramips_reg_table[RAETH_REG_COUNT] = {
66 [RAETH_REG_PDMA_GLO_CFG] = RAMIPS_PDMA_GLO_CFG,
67 [RAETH_REG_PDMA_RST_CFG] = RAMIPS_PDMA_RST_CFG,
68 [RAETH_REG_DLY_INT_CFG] = RAMIPS_DLY_INT_CFG,
69 [RAETH_REG_TX_BASE_PTR0] = RAMIPS_TX_BASE_PTR0,
70 [RAETH_REG_TX_MAX_CNT0] = RAMIPS_TX_MAX_CNT0,
71 [RAETH_REG_TX_CTX_IDX0] = RAMIPS_TX_CTX_IDX0,
72 [RAETH_REG_RX_BASE_PTR0] = RAMIPS_RX_BASE_PTR0,
73 [RAETH_REG_RX_MAX_CNT0] = RAMIPS_RX_MAX_CNT0,
74 [RAETH_REG_RX_CALC_IDX0] = RAMIPS_RX_CALC_IDX0,
75 [RAETH_REG_FE_INT_ENABLE] = RAMIPS_FE_INT_ENABLE,
76 [RAETH_REG_FE_INT_STATUS] = RAMIPS_FE_INT_STATUS,
/* Single global netdev instance — this driver drives exactly one frame
 * engine (see ramips_eth_plat_probe/ramips_eth_plat_remove). */
79 static struct net_device *ramips_dev;
/* MMIO base of the frame engine, set by ioremap in ramips_eth_plat_probe.
 * Statics are zero-initialized; do not initialise to 0/NULL (checkpatch),
 * and a plain integer 0 as a null pointer triggers a sparse warning. */
80 static void __iomem *ramips_fe_base;
/* Map an abstract raeth_reg index to its hardware register offset. */
82 static inline u32 get_reg_offset(enum raeth_reg reg)
86 table = ramips_reg_table;
/* Raw 32-bit write to the frame engine at byte offset 'reg'. */
92 ramips_fe_wr(u32 val, unsigned reg)
94 __raw_writel(val, ramips_fe_base + reg);
/* Raw 32-bit read from the frame engine at byte offset 'reg'. */
98 ramips_fe_rr(unsigned reg)
100 return __raw_readl(ramips_fe_base + reg);
/* Typed write: translate the abstract register index, then write. */
104 ramips_fe_twr(u32 val, enum raeth_reg reg)
106 ramips_fe_wr(val, get_reg_offset(reg));
/* Typed read: translate the abstract register index, then read. */
110 ramips_fe_trr(enum raeth_reg reg)
112 return ramips_fe_rr(get_reg_offset(reg));
/* Clear 'mask' bits in the interrupt-enable register; the trailing
 * read-back flushes the posted write before returning. */
116 ramips_fe_int_disable(u32 mask)
118 ramips_fe_twr(ramips_fe_trr(RAETH_REG_FE_INT_ENABLE) & ~mask,
119 RAETH_REG_FE_INT_ENABLE);
121 ramips_fe_trr(RAETH_REG_FE_INT_ENABLE);
/* Set 'mask' bits in the interrupt-enable register; read-back flushes. */
125 ramips_fe_int_enable(u32 mask)
127 ramips_fe_twr(ramips_fe_trr(RAETH_REG_FE_INT_ENABLE) | mask,
128 RAETH_REG_FE_INT_ENABLE);
130 ramips_fe_trr(RAETH_REG_FE_INT_ENABLE);
/* Program the GDMA1 unicast MAC address registers: bytes 0-1 go in the
 * high register, bytes 2-5 in the low register (big-endian packing). */
134 ramips_hw_set_macaddr(unsigned char *mac)
136 ramips_fe_wr((mac[0] << 8) | mac[1], RAMIPS_GDMA1_MAC_ADRH);
137 ramips_fe_wr((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
138 RAMIPS_GDMA1_MAC_ADRL);
/* Allocate an RX skb sized for the DMA engine, with NET_IP_ALIGN
 * reserved so the IP header lands on an aligned boundary. */
141 static struct sk_buff *
142 ramips_alloc_skb(struct raeth_priv *re)
146 skb = netdev_alloc_skb(re->netdev, MAX_RX_LENGTH + NET_IP_ALIGN);
150 skb_reserve(skb, NET_IP_ALIGN);
/* (Re)initialize the TX and RX descriptor rings for the hardware:
 * mark every TX descriptor DONE (free for the driver), and map each
 * preallocated RX skb for DMA into its descriptor. */
156 ramips_ring_setup(struct raeth_priv *re)
161 memset(re->tx_info, 0, NUM_TX_DESC * sizeof(struct raeth_tx_info));
163 len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
164 memset(re->tx, 0, len);
166 for (i = 0; i < NUM_TX_DESC; i++) {
167 struct raeth_tx_info *txi;
168 struct ramips_tx_dma *txd;
/* Queue/port selection plus DONE so the descriptor is seen as free. */
171 txd->txd4 = TX_DMA_QN(3) | TX_DMA_PN(1);
172 txd->txd2 = TX_DMA_LSO | TX_DMA_DONE;
174 txi = &re->tx_info[i];
/* A leftover skb here means a previous teardown did not complete. */
176 if (txi->tx_skb != NULL) {
177 netdev_warn(re->netdev,
178 "dirty skb for TX desc %d\n", i);
183 len = NUM_RX_DESC * sizeof(struct ramips_rx_dma);
184 memset(re->rx, 0, len);
186 for (i = 0; i < NUM_RX_DESC; i++) {
187 struct raeth_rx_info *rxi;
188 struct ramips_rx_dma *rxd;
192 rxi = &re->rx_info[i];
/* RX skbs must have been allocated by ramips_ring_alloc() already. */
193 BUG_ON(rxi->rx_skb == NULL);
194 dma_addr = dma_map_single(&re->netdev->dev, rxi->rx_skb->data,
195 MAX_RX_LENGTH, DMA_FROM_DEVICE);
196 rxi->rx_dma = dma_addr;
/* Hand the buffer's bus address to the hardware descriptor. */
199 rxd->rxd1 = (unsigned int) dma_addr;
200 rxd->rxd2 = RX_DMA_LSO;
203 /* flush descriptors */
/* Undo ramips_ring_setup(): unmap all RX DMA buffers, free any skbs
 * still pending on TX descriptors, and reset the BQL queue state. */
208 ramips_ring_cleanup(struct raeth_priv *re)
212 for (i = 0; i < NUM_RX_DESC; i++) {
213 struct raeth_rx_info *rxi;
215 rxi = &re->rx_info[i];
217 dma_unmap_single(&re->netdev->dev, rxi->rx_dma,
218 MAX_RX_LENGTH, DMA_FROM_DEVICE);
221 for (i = 0; i < NUM_TX_DESC; i++) {
222 struct raeth_tx_info *txi;
224 txi = &re->tx_info[i];
/* Safe from any context; frees skbs the hardware never completed. */
226 dev_kfree_skb_any(txi->tx_skb);
/* Reset byte-queue-limit accounting to match the now-empty ring. */
231 netdev_reset_queue(re->netdev);
/* RT288X/RT3883 have an external PHY reachable over MDIO; everything up
 * to the matching #endif (MDIO bus + phylib glue) is only built there. */
234 #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT3883)
/* Poll budget for ramips_mdio_wait_ready(). */
236 #define RAMIPS_MDIO_RETRY 1000
/* Human-readable speed string for the link-up message below. */
238 static unsigned char *ramips_speed_str(struct raeth_priv *re)
/* Push the current link state (speed/duplex/flow control) into the
 * MDIO_CFG register and update the carrier state, logging the change. */
252 static void ramips_link_adjust(struct raeth_priv *re)
254 struct ramips_eth_platform_data *pdata;
257 pdata = re->parent->platform_data;
259 netif_carrier_off(re->netdev);
260 netdev_info(re->netdev, "link down\n");
/* BUGFIX: the original OR-ed RAMIPS_MDIO_CFG_TX_CLK_SKEW_200 in twice,
 * so the RX clock skew was never configured; the second term must be
 * the RX skew flag. */
264 mdio_cfg = RAMIPS_MDIO_CFG_TX_CLK_SKEW_200 |
265 RAMIPS_MDIO_CFG_RX_CLK_SKEW_200 |
266 RAMIPS_MDIO_CFG_GP1_FRC_EN;
268 if (re->duplex == DUPLEX_FULL)
269 mdio_cfg |= RAMIPS_MDIO_CFG_GP1_DUPLEX;
/* TX / RX pause-frame flow control, as negotiated or configured. */
272 mdio_cfg |= RAMIPS_MDIO_CFG_GP1_FC_TX;
275 mdio_cfg |= RAMIPS_MDIO_CFG_GP1_FC_RX;
/* Select the speed bits matching re->speed (10/100/1000). */
279 mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_10;
282 mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_100;
285 mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_1000;
291 ramips_fe_wr(mdio_cfg, RAMIPS_MDIO_CFG);
293 netif_carrier_on(re->netdev);
294 netdev_info(re->netdev, "link up (%sMbps/%s duplex)\n",
295 ramips_speed_str(re),
296 (DUPLEX_FULL == re->duplex) ? "Full" : "Half");
/* Busy-poll the MDIO access register until the busy bit (bit 31)
 * clears, giving up after RAMIPS_MDIO_RETRY iterations. */
300 ramips_mdio_wait_ready(struct raeth_priv *re)
304 retries = RAMIPS_MDIO_RETRY;
308 t = ramips_fe_rr(RAMIPS_MDIO_ACCESS);
/* NOTE(review): (0x1 << 31) shifts into the sign bit of a signed int;
 * BIT(31) or (1u << 31) would be the well-defined form. */
309 if ((t & (0x1 << 31)) == 0)
318 dev_err(re->parent, "MDIO operation timed out\n");
/* mii_bus read callback: start a read cycle for (phy_addr, phy_reg),
 * wait for completion, and return the low 16 data bits. */
323 ramips_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
325 struct raeth_priv *re = bus->priv;
/* Ensure no MDIO transaction is in flight before starting ours. */
329 err = ramips_mdio_wait_ready(re);
333 t = (phy_addr << 24) | (phy_reg << 16);
334 ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);
/* Second write triggers the access (with the trigger bit set). */
336 ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);
338 err = ramips_mdio_wait_ready(re);
342 RADEBUG("%s: addr=%04x, reg=%04x, value=%04x\n", __func__,
343 phy_addr, phy_reg, ramips_fe_rr(RAMIPS_MDIO_ACCESS) & 0xffff);
345 return ramips_fe_rr(RAMIPS_MDIO_ACCESS) & 0xffff;
/* mii_bus write callback: write 'val' to (phy_addr, phy_reg);
 * bit 30 marks the transaction as a write. */
349 ramips_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val)
351 struct raeth_priv *re = bus->priv;
355 RADEBUG("%s: addr=%04x, reg=%04x, value=%04x\n", __func__,
356 phy_addr, phy_reg, ramips_fe_rr(RAMIPS_MDIO_ACCESS) & 0xffff);
358 err = ramips_mdio_wait_ready(re);
362 t = (1 << 30) | (phy_addr << 24) | (phy_reg << 16) | val;
363 ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);
365 ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);
/* Completion of the wait doubles as the success/error return. */
367 return ramips_mdio_wait_ready(re);
/* mii_bus reset callback (body outside this view). */
371 ramips_mdio_reset(struct mii_bus *bus)
/* Allocate and register the MDIO bus, wiring in the read/write/reset
 * callbacks above; all PHY addresses use polling (no PHY IRQ lines). */
378 ramips_mdio_init(struct raeth_priv *re)
383 re->mii_bus = mdiobus_alloc()
384 if (re->mii_bus == NULL)
387 re->mii_bus->name = "ramips_mdio";
388 re->mii_bus->read = ramips_mdio_read;
389 re->mii_bus->write = ramips_mdio_write;
390 re->mii_bus->reset = ramips_mdio_reset;
391 re->mii_bus->irq = re->mii_irq;
392 re->mii_bus->priv = re;
393 re->mii_bus->parent = re->parent;
395 snprintf(re->mii_bus->id, MII_BUS_ID_SIZE, "%s", "ramips_mdio");
/* phy_mask 0: probe every PHY address on the bus. */
396 re->mii_bus->phy_mask = 0;
398 for (i = 0; i < PHY_MAX_ADDR; i++)
399 re->mii_irq[i] = PHY_POLL;
401 err = mdiobus_register(re->mii_bus);
/* Unregister the MDIO bus (allocation freed outside this view). */
413 ramips_mdio_cleanup(struct raeth_priv *re)
415 mdiobus_unregister(re->mii_bus);
/* phylib link-change callback: snapshot the PHY's link/speed/duplex
 * into the driver state and reprogram the hardware when it changed.
 * phy_lock serializes against ramips_phy_start/stop. */
420 ramips_phy_link_adjust(struct net_device *dev)
422 struct raeth_priv *re = netdev_priv(dev);
423 struct phy_device *phydev = re->phy_dev;
425 int status_change = 0;
427 spin_lock_irqsave(&re->phy_lock, flags);
430 if (re->duplex != phydev->duplex ||
431 re->speed != phydev->speed)
434 if (phydev->link != re->link)
437 re->link = phydev->link;
438 re->duplex = phydev->duplex;
439 re->speed = phydev->speed;
/* Only touch the hardware when something actually changed. */
442 ramips_link_adjust(re);
444 spin_unlock_irqrestore(&re->phy_lock, flags);
/* Scan the MDIO bus for the first PHY allowed by pdata->phy_mask and
 * connect it to the netdev via phylib, limited to gigabit features. */
448 ramips_phy_connect_multi(struct raeth_priv *re)
450 struct net_device *netdev = re->netdev;
451 struct ramips_eth_platform_data *pdata;
452 struct phy_device *phydev = NULL;
456 pdata = re->parent->platform_data;
457 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
/* Skip addresses not selected by the platform's phy_mask. */
458 if (!(pdata->phy_mask & (1 << phy_addr)))
461 if (re->mii_bus->phy_map[phy_addr] == NULL)
464 RADEBUG("%s: PHY found at %s, uid=%08x\n",
466 dev_name(&re->mii_bus->phy_map[phy_addr]->dev),
467 re->mii_bus->phy_map[phy_addr]->phy_id);
/* Use the first matching PHY. */
470 phydev = re->mii_bus->phy_map[phy_addr];
474 netdev_err(netdev, "no PHY found with phy_mask=%08x\n",
479 re->phy_dev = phy_connect(netdev, dev_name(&phydev->dev),
480 ramips_phy_link_adjust, 0,
483 if (IS_ERR(re->phy_dev)) {
484 netdev_err(netdev, "could not connect to PHY at %s\n",
485 dev_name(&phydev->dev));
486 return PTR_ERR(re->phy_dev);
/* Restrict advertisement to what a gigabit MAC supports. */
489 phydev->supported &= PHY_GBIT_FEATURES;
490 phydev->advertising = phydev->supported;
492 RADEBUG("%s: connected to PHY at %s [uid=%08x, driver=%s]\n",
493 netdev->name, dev_name(&phydev->dev),
494 phydev->phy_id, phydev->drv->name);
/* No PHY attached: take fixed link parameters (speed/duplex/flow
 * control) straight from the platform data after validating the speed. */
506 ramips_phy_connect_fixed(struct raeth_priv *re)
508 struct ramips_eth_platform_data *pdata;
510 pdata = re->parent->platform_data;
511 switch (pdata->speed) {
517 netdev_err(re->netdev, "invalid speed specified\n");
521 RADEBUG("%s: using fixed link parameters\n", re->netdev->name);
523 re->speed = pdata->speed;
524 re->duplex = pdata->duplex;
525 re->tx_fc = pdata->tx_fc;
/* BUGFIX: rx_fc was copied from pdata->tx_fc (copy-paste error); the
 * platform data carries a separate rx_fc field, as used elsewhere in
 * this driver (plat_probe does re->rx_fc = data->rx_fc). */
526 re->rx_fc = pdata->rx_fc;
/* Dispatch PHY attachment: MDIO scan when a phy_mask is given,
 * otherwise fall back to fixed platform-supplied link parameters. */
532 ramips_phy_connect(struct raeth_priv *re)
534 struct ramips_eth_platform_data *pdata;
536 pdata = re->parent->platform_data;
538 return ramips_phy_connect_multi(re);
540 return ramips_phy_connect_fixed(re);
/* Detach from the PHY if one was connected. */
544 ramips_phy_disconnect(struct raeth_priv *re)
547 phy_disconnect(re->phy_dev);
/* Start the PHY state machine (or, for fixed links, push the static
 * link parameters into the hardware under phy_lock). */
551 ramips_phy_start(struct raeth_priv *re)
556 phy_start(re->phy_dev);
558 spin_lock_irqsave(&re->phy_lock, flags);
560 ramips_link_adjust(re);
561 spin_unlock_irqrestore(&re->phy_lock, flags);
/* Stop the PHY state machine / force the link state down. */
566 ramips_phy_stop(struct raeth_priv *re)
571 phy_stop(re->phy_dev);
573 spin_lock_irqsave(&re->phy_lock, flags);
575 ramips_link_adjust(re);
576 spin_unlock_irqrestore(&re->phy_lock, flags);
/* Non-RT288X/RT3883 builds: MDIO/PHY handling compiles to stubs. */
580 ramips_mdio_init(struct raeth_priv *re)
586 ramips_mdio_cleanup(struct raeth_priv *re)
591 ramips_phy_connect(struct raeth_priv *re)
597 ramips_phy_disconnect(struct raeth_priv *re)
602 ramips_phy_start(struct raeth_priv *re)
607 ramips_phy_stop(struct raeth_priv *re)
610 #endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT3883 */
/* Free everything ramips_ring_alloc() created: RX skbs, both coherent
 * descriptor rings, and the bookkeeping arrays. */
613 ramips_ring_free(struct raeth_priv *re)
619 for (i = 0; i < NUM_RX_DESC; i++) {
620 struct raeth_rx_info *rxi;
622 rxi = &re->rx_info[i];
624 dev_kfree_skb_any(rxi->rx_skb);
630 len = NUM_RX_DESC * sizeof(struct ramips_rx_dma);
631 dma_free_coherent(&re->netdev->dev, len, re->rx,
636 len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
637 dma_free_coherent(&re->netdev->dev, len, re->tx,
/* Allocate bookkeeping arrays, coherent TX/RX descriptor rings, and
 * one RX skb per descriptor; on failure, tear down via ring_free. */
645 ramips_ring_alloc(struct raeth_priv *re)
651 re->tx_info = kzalloc(NUM_TX_DESC * sizeof(struct raeth_tx_info),
656 re->rx_info = kzalloc(NUM_RX_DESC * sizeof(struct raeth_rx_info),
661 /* allocate tx ring */
662 len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
663 re->tx = dma_alloc_coherent(&re->netdev->dev, len,
664 &re->tx_desc_dma, GFP_ATOMIC);
668 /* allocate rx ring */
669 len = NUM_RX_DESC * sizeof(struct ramips_rx_dma);
670 re->rx = dma_alloc_coherent(&re->netdev->dev, len,
671 &re->rx_desc_dma, GFP_ATOMIC);
675 for (i = 0; i < NUM_RX_DESC; i++) {
678 skb = ramips_alloc_skb(re);
682 re->rx_info[i].rx_skb = skb;
/* Error path: release whatever was allocated so far. */
688 ramips_ring_free(re);
/* Tell the PDMA engine where the rings live: base addresses, sizes,
 * and initial indices, then pulse the per-ring reset bits. */
693 ramips_setup_dma(struct raeth_priv *re)
695 ramips_fe_twr(re->tx_desc_dma, RAETH_REG_TX_BASE_PTR0);
696 ramips_fe_twr(NUM_TX_DESC, RAETH_REG_TX_MAX_CNT0);
697 ramips_fe_twr(0, RAETH_REG_TX_CTX_IDX0);
698 ramips_fe_twr(RAMIPS_PST_DTX_IDX0, RAETH_REG_PDMA_RST_CFG);
700 ramips_fe_twr(re->rx_desc_dma, RAETH_REG_RX_BASE_PTR0);
701 ramips_fe_twr(NUM_RX_DESC, RAETH_REG_RX_MAX_CNT0);
/* RX CPU index starts one behind so the hardware owns the whole ring. */
702 ramips_fe_twr((NUM_RX_DESC - 1), RAETH_REG_RX_CALC_IDX0);
703 ramips_fe_twr(RAMIPS_PST_DRX_IDX0, RAETH_REG_PDMA_RST_CFG);
/* ndo_start_xmit: pad short frames if the platform requires it, map
 * the skb for DMA, claim the next TX descriptor under page_lock, and
 * kick the hardware by advancing TX_CTX_IDX0.  Drops the packet when
 * the ring is full (current and next descriptor both busy). */
707 ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
709 struct raeth_priv *re = netdev_priv(dev);
710 struct raeth_tx_info *txi, *txi_next;
711 struct ramips_tx_dma *txd, *txd_next;
713 unsigned int tx_next;
714 dma_addr_t mapped_addr;
716 if (re->plat->min_pkt_len) {
717 if (skb->len < re->plat->min_pkt_len) {
/* skb_padto frees the skb itself on failure. */
718 if (skb_padto(skb, re->plat->min_pkt_len)) {
720 "ramips_eth: skb_padto failed\n");
724 skb_put(skb, re->plat->min_pkt_len - skb->len);
728 dev->trans_start = jiffies;
729 mapped_addr = dma_map_single(&re->netdev->dev, skb->data, skb->len,
732 spin_lock(&re->page_lock);
733 tx = ramips_fe_trr(RAETH_REG_TX_CTX_IDX0);
734 tx_next = (tx + 1) % NUM_TX_DESC;
736 txi = &re->tx_info[tx];
738 txi_next = &re->tx_info[tx_next];
739 txd_next = txi_next->tx_desc;
/* Ring full: both the current and the following slot must be free. */
741 if ((txi->tx_skb) || (txi_next->tx_skb) ||
742 !(txd->txd2 & TX_DMA_DONE) ||
743 !(txd_next->txd2 & TX_DMA_DONE))
748 txd->txd1 = (unsigned int) mapped_addr;
/* Clearing TX_DMA_DONE hands the descriptor to the hardware. */
750 txd->txd2 = TX_DMA_LSO | TX_DMA_PLEN0(skb->len);
751 dev->stats.tx_packets++;
752 dev->stats.tx_bytes += skb->len;
753 ramips_fe_twr(tx_next, RAETH_REG_TX_CTX_IDX0);
/* BQL accounting; completed in ramips_eth_tx_housekeeping(). */
754 netdev_sent_queue(dev, skb->len);
755 spin_unlock(&re->page_lock);
759 spin_unlock(&re->page_lock);
760 dev->stats.tx_dropped++;
/* RX tasklet: walk completed descriptors after RX_CALC_IDX0, hand each
 * finished skb to the stack, refill the slot with a fresh skb (reusing
 * the old buffer when allocation fails), and return the descriptor to
 * the hardware.  Re-enables the RX delay interrupt when caught up. */
766 ramips_eth_rx_hw(unsigned long ptr)
768 struct net_device *dev = (struct net_device *) ptr;
769 struct raeth_priv *re = netdev_priv(dev);
773 rx = ramips_fe_trr(RAETH_REG_RX_CALC_IDX0);
776 struct raeth_rx_info *rxi;
777 struct ramips_rx_dma *rxd;
778 struct sk_buff *rx_skb, *new_skb;
781 rx = (rx + 1) % NUM_RX_DESC;
783 rxi = &re->rx_info[rx];
/* Hardware hasn't finished this descriptor yet: stop here. */
785 if (!(rxd->rxd2 & RX_DMA_DONE))
788 rx_skb = rxi->rx_skb;
789 pktlen = RX_DMA_PLEN0(rxd->rxd2);
791 new_skb = ramips_alloc_skb(re);
792 /* Reuse the buffer on allocation failures */
796 dma_unmap_single(&re->netdev->dev, rxi->rx_dma,
797 MAX_RX_LENGTH, DMA_FROM_DEVICE);
799 skb_put(rx_skb, pktlen);
801 rx_skb->protocol = eth_type_trans(rx_skb, dev);
/* No hardware checksum offload: let the stack verify. */
802 rx_skb->ip_summed = CHECKSUM_NONE;
803 dev->stats.rx_packets++;
804 dev->stats.rx_bytes += pktlen;
807 rxi->rx_skb = new_skb;
809 dma_addr = dma_map_single(&re->netdev->dev,
813 rxi->rx_dma = dma_addr;
814 rxd->rxd1 = (unsigned int) dma_addr;
/* Allocation failed: count a drop and recycle the old buffer. */
817 dev->stats.rx_dropped++;
/* Clear DONE so the hardware owns the descriptor again. */
820 rxd->rxd2 = RX_DMA_LSO;
821 ramips_fe_twr(rx, RAETH_REG_RX_CALC_IDX0);
/* Budget exhausted: reschedule ourselves instead of re-enabling IRQs. */
826 tasklet_schedule(&re->rx_tasklet);
828 ramips_fe_int_enable(RAMIPS_RX_DLY_INT);
/* TX completion tasklet: free skbs for descriptors the hardware has
 * marked DONE, starting at skb_free_idx, and report the completed
 * packets/bytes to BQL before re-enabling the TX delay interrupt. */
832 ramips_eth_tx_housekeeping(unsigned long ptr)
834 struct net_device *dev = (struct net_device*)ptr;
835 struct raeth_priv *re = netdev_priv(dev);
836 unsigned int bytes_compl = 0, pkts_compl = 0;
838 spin_lock(&re->page_lock);
840 struct raeth_tx_info *txi;
841 struct ramips_tx_dma *txd;
843 txi = &re->tx_info[re->skb_free_idx];
/* Stop at the first descriptor still owned by hardware (or empty). */
846 if (!(txd->txd2 & TX_DMA_DONE) || !(txi->tx_skb))
850 bytes_compl += txi->tx_skb->len;
852 dev_kfree_skb_irq(txi->tx_skb);
/* Wrap the free index around the ring. */
855 if (re->skb_free_idx >= NUM_TX_DESC)
856 re->skb_free_idx = 0;
858 netdev_completed_queue(dev, pkts_compl, bytes_compl);
859 spin_unlock(&re->page_lock);
861 ramips_fe_int_enable(RAMIPS_TX_DLY_INT);
/* ndo_tx_timeout: kick TX housekeeping to reap stuck descriptors. */
865 ramips_eth_timeout(struct net_device *dev)
867 struct raeth_priv *re = netdev_priv(dev);
869 tasklet_schedule(&re->tx_housekeeping_tasklet);
/* Interrupt handler: ack the pending (and enabled) status bits, then
 * mask each delay interrupt and defer the work to its tasklet. */
873 ramips_eth_irq(int irq, void *dev)
875 struct raeth_priv *re = netdev_priv(dev);
878 status = ramips_fe_trr(RAETH_REG_FE_INT_STATUS);
/* Only handle sources we actually enabled. */
879 status &= ramips_fe_trr(RAETH_REG_FE_INT_ENABLE);
/* Write-1-to-clear acknowledge. */
884 ramips_fe_twr(status, RAETH_REG_FE_INT_STATUS);
886 if (status & RAMIPS_RX_DLY_INT) {
887 ramips_fe_int_disable(RAMIPS_RX_DLY_INT);
888 tasklet_schedule(&re->rx_tasklet);
891 if (status & RAMIPS_TX_DLY_INT) {
892 ramips_fe_int_disable(RAMIPS_TX_DLY_INT);
893 tasklet_schedule(&re->tx_housekeeping_tasklet);
896 raeth_debugfs_update_int_stats(re, status);
/* ndo_open: request the IRQ, build the rings, program the MAC address
 * and DMA engine, disable hardware checksum offloads, init tasklets,
 * start the PHY, enable delay interrupts, and open the TX queue. */
902 ramips_eth_open(struct net_device *dev)
904 struct raeth_priv *re = netdev_priv(dev);
907 err = request_irq(dev->irq, ramips_eth_irq, IRQF_DISABLED,
912 err = ramips_ring_alloc(re);
916 ramips_ring_setup(re);
917 ramips_hw_set_macaddr(dev->dev_addr);
919 ramips_setup_dma(re);
/* Enable TX/RX DMA and write-back of the DONE bit, keeping the low
 * configuration byte of PDMA_GLO_CFG intact. */
920 ramips_fe_twr((ramips_fe_trr(RAETH_REG_PDMA_GLO_CFG) & 0xff) |
921 (RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN |
922 RAMIPS_TX_DMA_EN | RAMIPS_PDMA_SIZE_4DWORDS),
923 RAETH_REG_PDMA_GLO_CFG);
/* Program the microsecond tick counter from the system clock rate. */
924 ramips_fe_wr((ramips_fe_rr(RAMIPS_FE_GLO_CFG) &
925 ~(RAMIPS_US_CYC_CNT_MASK << RAMIPS_US_CYC_CNT_SHIFT)) |
926 ((re->plat->sys_freq / RAMIPS_US_CYC_CNT_DIVISOR) << RAMIPS_US_CYC_CNT_SHIFT),
929 tasklet_init(&re->tx_housekeeping_tasklet, ramips_eth_tx_housekeeping,
931 tasklet_init(&re->rx_tasklet, ramips_eth_rx_hw, (unsigned long)dev);
933 ramips_phy_start(re);
935 ramips_fe_twr(RAMIPS_DELAY_INIT, RAETH_REG_DLY_INT_CFG);
936 ramips_fe_twr(RAMIPS_TX_DLY_INT | RAMIPS_RX_DLY_INT, RAETH_REG_FE_INT_ENABLE);
/* Disable IP/TCP/UDP checksum generation and checking in hardware. */
937 ramips_fe_wr(ramips_fe_rr(RAMIPS_GDMA1_FWD_CFG) &
938 ~(RAMIPS_GDM1_ICS_EN | RAMIPS_GDM1_TCS_EN | RAMIPS_GDM1_UCS_EN | 0xffff),
939 RAMIPS_GDMA1_FWD_CFG);
940 ramips_fe_wr(ramips_fe_rr(RAMIPS_CDMA_CSG_CFG) &
941 ~(RAMIPS_ICS_GEN_EN | RAMIPS_TCS_GEN_EN | RAMIPS_UCS_GEN_EN),
942 RAMIPS_CDMA_CSG_CFG);
943 ramips_fe_wr(RAMIPS_PSE_FQFC_CFG_INIT, RAMIPS_PSE_FQ_CFG);
/* Pulse the frame-engine soft reset. */
944 ramips_fe_wr(1, RAMIPS_FE_RST_GL);
945 ramips_fe_wr(0, RAMIPS_FE_RST_GL);
947 netif_start_queue(dev);
/* Error path: release the IRQ taken above. */
951 free_irq(dev->irq, dev);
/* ndo_stop: halt DMA, mask all interrupts, release the IRQ, stop the
 * queue and tasklets, then unmap and free the rings. */
956 ramips_eth_stop(struct net_device *dev)
958 struct raeth_priv *re = netdev_priv(dev);
960 ramips_fe_twr(ramips_fe_trr(RAETH_REG_PDMA_GLO_CFG) &
961 ~(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN | RAMIPS_TX_DMA_EN),
962 RAETH_REG_PDMA_GLO_CFG);
964 /* disable all interrupts in the hw */
965 ramips_fe_twr(0, RAETH_REG_FE_INT_ENABLE);
968 free_irq(dev->irq, dev);
969 netif_stop_queue(dev);
/* tasklet_kill waits for any running tasklet to finish. */
970 tasklet_kill(&re->tx_housekeeping_tasklet);
971 tasklet_kill(&re->rx_tasklet);
972 ramips_ring_cleanup(re);
973 ramips_ring_free(re);
974 RADEBUG("ramips_eth: stopped\n");
/* ndo_init: reset the frame engine via the platform hook, set the MAC
 * address from platform data, init locks, and bring up MDIO, the PHY
 * connection and debugfs — unwinding each on failure. */
979 ramips_eth_probe(struct net_device *dev)
981 struct raeth_priv *re = netdev_priv(dev);
/* The platform must provide a frame-engine reset callback. */
984 BUG_ON(!re->plat->reset_fe);
985 re->plat->reset_fe();
986 net_srandom(jiffies);
987 memcpy(dev->dev_addr, re->plat->mac, ETH_ALEN);
991 dev->watchdog_timeo = TX_TIMEOUT;
992 spin_lock_init(&re->page_lock);
993 spin_lock_init(&re->phy_lock);
995 err = ramips_mdio_init(re);
999 err = ramips_phy_connect(re);
1001 goto err_mdio_cleanup;
1003 err = raeth_debugfs_init(re);
1005 goto err_phy_disconnect;
/* Error unwind: reverse order of the setup above. */
1010 ramips_phy_disconnect(re);
1012 ramips_mdio_cleanup(re);
/* ndo_uninit: tear down debugfs, PHY and MDIO in reverse order. */
1017 ramips_eth_uninit(struct net_device *dev)
1019 struct raeth_priv *re = netdev_priv(dev);
1021 raeth_debugfs_exit(re);
1022 ramips_phy_disconnect(re);
1023 ramips_mdio_cleanup(re);
/* net_device operations; MTU/MAC/addr-validation use the generic
 * ethernet helpers. */
1026 static const struct net_device_ops ramips_eth_netdev_ops = {
1027 .ndo_init = ramips_eth_probe,
1028 .ndo_uninit = ramips_eth_uninit,
1029 .ndo_open = ramips_eth_open,
1030 .ndo_stop = ramips_eth_stop,
1031 .ndo_start_xmit = ramips_eth_hard_start_xmit,
1032 .ndo_tx_timeout = ramips_eth_timeout,
1033 .ndo_change_mtu = eth_change_mtu,
1034 .ndo_set_mac_address = eth_mac_addr,
1035 .ndo_validate_addr = eth_validate_addr,
/* Platform probe: validate platform data, map the frame-engine MMIO
 * region, allocate and populate the single netdev, and register it. */
1039 ramips_eth_plat_probe(struct platform_device *plat)
1041 struct raeth_priv *re;
1042 struct ramips_eth_platform_data *data = plat->dev.platform_data;
1043 struct resource *res;
1047 dev_err(&plat->dev, "no platform data specified\n");
1051 res = platform_get_resource(plat, IORESOURCE_MEM, 0);
1053 dev_err(&plat->dev, "no memory resource found\n");
/* NOTE(review): resource_size(res) is the idiomatic spelling of
 * res->end - res->start + 1. */
1057 ramips_fe_base = ioremap_nocache(res->start, res->end - res->start + 1);
1058 if (!ramips_fe_base)
1061 ramips_dev = alloc_etherdev(sizeof(struct raeth_priv));
1063 dev_err(&plat->dev, "alloc_etherdev failed\n");
/* "eth%d": let register_netdev pick the next free interface number. */
1068 strcpy(ramips_dev->name, "eth%d");
1069 ramips_dev->irq = platform_get_irq(plat, 0);
1070 if (ramips_dev->irq < 0) {
1071 dev_err(&plat->dev, "no IRQ resource found\n");
1075 ramips_dev->addr_len = ETH_ALEN;
1076 ramips_dev->base_addr = (unsigned long)ramips_fe_base;
1077 ramips_dev->netdev_ops = &ramips_eth_netdev_ops;
1079 re = netdev_priv(ramips_dev);
1081 re->netdev = ramips_dev;
1082 re->parent = &plat->dev;
/* Seed link state from platform data; may be overridden by the PHY. */
1083 re->speed = data->speed;
1084 re->duplex = data->duplex;
1085 re->rx_fc = data->rx_fc;
1086 re->tx_fc = data->tx_fc;
1089 err = register_netdev(ramips_dev);
1091 dev_err(&plat->dev, "error bringing up device\n");
1095 RADEBUG("ramips_eth: loaded\n");
/* Error path: drop the MMIO mapping. */
1101 iounmap(ramips_fe_base);
/* Platform remove: unregister and free the single netdev. */
1106 ramips_eth_plat_remove(struct platform_device *plat)
1108 unregister_netdev(ramips_dev);
1109 free_netdev(ramips_dev);
1110 RADEBUG("ramips_eth: unloaded\n");
/* Platform driver glue; matches devices registered as "ramips_eth". */
1114 static struct platform_driver ramips_eth_driver = {
1115 .probe = ramips_eth_plat_probe,
1116 .remove = ramips_eth_plat_remove,
1118 .name = "ramips_eth",
1119 .owner = THIS_MODULE,
/* Module init: debugfs root, then the RT305X switch (no-op elsewhere),
 * then the platform driver — unwinding on failure. */
1124 ramips_eth_init(void)
1128 ret = raeth_debugfs_root_init();
1132 ret = rt305x_esw_init();
1134 goto err_debugfs_exit;
1136 ret = platform_driver_register(&ramips_eth_driver);
1139 "ramips_eth: Error registering platfom driver!\n");
1148 raeth_debugfs_root_exit();
/* Module exit: reverse of init. */
1154 ramips_eth_cleanup(void)
1156 platform_driver_unregister(&ramips_eth_driver);
1158 raeth_debugfs_root_exit();
1161 module_init(ramips_eth_init);
1162 module_exit(ramips_eth_cleanup);
1164 MODULE_LICENSE("GPL");
1165 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
1166 MODULE_DESCRIPTION("ethernet driver for ramips boards");