/*
 *  SuperH Ethernet device driver
 *
 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2012 Renesas Solutions Corp.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>

#include "sh_eth.h"
#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	| \
		NETIF_MSG_TIMER	| \
		NETIF_MSG_RX_ERR| \
		NETIF_MSG_TX_ERR)
#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
	defined(CONFIG_CPU_SUBTYPE_SH7763) || \
	defined(CONFIG_ARCH_R8A7740)
static void sh_eth_select_mii(struct net_device *ndev)
{
	u32 value = 0x0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		pr_warn("PHY interface mode was not set up. Using MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}
#endif
/* There is CPU dependent code */
#if defined(CONFIG_CPU_SUBTYPE_SH7724) || defined(CONFIG_ARCH_R8A7779)
#define SH_ETH_RESET_DEFAULT	1
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned int bits = ECMR_RTM;

#if defined(CONFIG_ARCH_R8A7779)
	bits |= ECMR_ELB;
#endif

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~bits, ECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | bits, ECMR);
		break;
	default:
		break;
	}
}
/* SH7724 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
#define SH_ETH_HAS_BOTH_MODULES	1
#define SH_ETH_HAS_TSU	1
static int sh_eth_check_reset(struct net_device *ndev);

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	default:
		break;
	}
}
/* SH7757 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.rmcr_value	= 0x00000001,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_ade		= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
};
#define SH_GIGA_ETH_BASE	0xfee00000
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	int i;
	unsigned long mahr[2], malr[2];

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	/* reset device */
	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
	mdelay(1);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}
static int sh_eth_is_gether(struct sh_eth_private *mdp);
static int sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

	if (sh_eth_is_gether(mdp)) {
		sh_eth_write(ndev, 0x03, EDSR);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
				EDMR);

		ret = sh_eth_check_reset(ndev);
		if (ret)
			goto out;

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);
	} else {
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
				EDMR);
		mdelay(3);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
				EDMR);
	}

out:
	return ret;
}
static void sh_eth_set_duplex_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	default:
		break;
	}
}
/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex_giga,
	.set_rate	= sh_eth_set_rate_giga,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
			  EESR_TFE,
	.fdr_value	= 0x0000072f,
	.rmcr_value	= 0x00000001,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
};
static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return &sh_eth_my_cpu_data_giga;
	else
		return &sh_eth_my_cpu_data;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU	1
static int sh_eth_check_reset(struct net_device *ndev);
static void sh_eth_reset_hw_crc(struct net_device *ndev);

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}
/* SH7734/SH7763 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
			  EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
#if defined(CONFIG_CPU_SUBTYPE_SH7734)
	.hw_crc		= 1,
	.select_mii	= 1,
#endif
};
static int sh_eth_reset(struct net_device *ndev)
{
	int ret = 0;

	sh_eth_write(ndev, EDSR_ENALL, EDSR);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);

	ret = sh_eth_check_reset(ndev);
	if (ret)
		goto out;

	/* Table Init */
	sh_eth_write(ndev, 0x0, TDLAR);
	sh_eth_write(ndev, 0x0, TDFAR);
	sh_eth_write(ndev, 0x0, TDFXR);
	sh_eth_write(ndev, 0x0, TDFFR);
	sh_eth_write(ndev, 0x0, RDLAR);
	sh_eth_write(ndev, 0x0, RDFAR);
	sh_eth_write(ndev, 0x0, RDFXR);
	sh_eth_write(ndev, 0x0, RDFFR);

	/* Reset HW CRC register */
	sh_eth_reset_hw_crc(ndev);

	/* Select MII mode */
	if (sh_eth_my_cpu_data.select_mii)
		sh_eth_select_mii(ndev);
out:
	return ret;
}

static void sh_eth_reset_hw_crc(struct net_device *ndev)
{
	if (sh_eth_my_cpu_data.hw_crc)
		sh_eth_write(ndev, 0x0, CSMR);
}
#elif defined(CONFIG_ARCH_R8A7740)
#define SH_ETH_HAS_TSU	1
static int sh_eth_check_reset(struct net_device *ndev);

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);

	sh_eth_select_mii(ndev);
}

static int sh_eth_reset(struct net_device *ndev)
{
	int ret = 0;

	sh_eth_write(ndev, EDSR_ENALL, EDSR);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);

	ret = sh_eth_check_reset(ndev);
	if (ret)
		goto out;

	/* Table Init */
	sh_eth_write(ndev, 0x0, TDLAR);
	sh_eth_write(ndev, 0x0, TDFAR);
	sh_eth_write(ndev, 0x0, TDFXR);
	sh_eth_write(ndev, 0x0, TDFFR);
	sh_eth_write(ndev, 0x0, RDLAR);
	sh_eth_write(ndev, 0x0, RDFAR);
	sh_eth_write(ndev, 0x0, RDFXR);
	sh_eth_write(ndev, 0x0, RDFFR);

out:
	return ret;
}
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}
/* R8A7740 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
			  EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.select_mii	= 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
#define SH_ETH_RESET_DEFAULT	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
#define SH_ETH_RESET_DEFAULT	1
#define SH_ETH_HAS_TSU	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.tsu		= 1,
};
#endif
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->rmcr_value)
		cd->rmcr_value = DEFAULT_RMCR_VALUE;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->tx_error_check)
		cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
}
#if defined(SH_ETH_RESET_DEFAULT)
/* Chip Reset */
static int sh_eth_reset(struct net_device *ndev)
{
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
	mdelay(3);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);

	return 0;
}
#else
static int sh_eth_check_reset(struct net_device *ndev)
{
	int ret = 0;
	int cnt = 100;

	while (cnt > 0) {
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt <= 0) {
		printk(KERN_ERR "Device reset failed\n");
		ret = -ETIMEDOUT;
	}
	return ret;
}
#endif
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	int reserve;

	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
	if (reserve)
		skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif
/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}
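/*
 * Editorial note (illustrative, not from the datasheet): the EDMAC reads
 * descriptor words in a fixed, board-selected byte order (pd->edmac_endian)
 * that is independent of the CPU endianness, hence the two helpers above.
 * For example, on a big-endian CPU with EDMAC_LITTLE_ENDIAN selected,
 * cpu_to_edmac(mdp, RD_RACT) byte-swaps the value so that its least
 * significant byte lands first in memory, which is the layout the DMA
 * engine expects when it tests the descriptor's active bit.
 */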
/*
 * Program the hardware MAC address from dev->dev_addr.
 */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		(ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		(ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}
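/*
 * Worked example of the register packing above: for the station address
 * 00:11:22:33:44:55 the two writes program MAHR = 0x00112233 (upper four
 * octets) and MALR = 0x00004455 (lower two octets).
 */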
/*
 * Get the MAC address from the SuperH MAC address registers.
 *
 * The SuperH Ethernet device has no ROM for its MAC address.
 * This driver picks up the MAC address programmed by the bootloader
 * (U-Boot or sh-ipl+g), so a MAC address must be set in the bootloader
 * before this device is used.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, 6);
	} else {
		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
	}
}
static int sh_eth_is_gether(struct sh_eth_private *mdp)
{
	if (mdp->reg_offset == sh_eth_offset_gigabit)
		return 1;
	else
		return 0;
}

static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}
struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
	u32 mmd_msk;/* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};
/* PHY bit set */
static void bb_set(void *addr, u32 msk)
{
	iowrite32(ioread32(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(void *addr, u32 msk)
{
	iowrite32((ioread32(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(void *addr, u32 msk)
{
	return (ioread32(addr) & msk) != 0;
}
/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}
/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};
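/*
 * Note on the glue above: these four callbacks are everything the generic
 * mdio-bitbang layer needs. It toggles MDC via set_mdc, switches the data
 * pin direction via set_mdio_dir, and shifts frame bits in and out via
 * set_mdio_data/get_mdio_data, all against the single PIR register.
 * alloc_mdio_bitbang() (called from sh_mdio_init() below) wraps this
 * struct into a full struct mii_bus whose read/write methods clock MDIO
 * frames out bit by bit.
 */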
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++) {
			if (mdp->rx_skbuff[i])
				dev_kfree_skb(mdp->rx_skbuff[i]);
		}
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < mdp->num_tx_ring; i++) {
			if (mdp->tx_skbuff[i])
				dev_kfree_skb(mdp->tx_skbuff[i]);
		}
	}
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}
/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;

	mdp->cur_rx = mdp->cur_tx = 0;
	mdp->dirty_rx = mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
				DMA_FROM_DEVICE);
		sh_eth_set_receive_align(skb);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The buffer size is rounded up to a 16-byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}
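/*
 * Ring-ownership note: a Rx descriptor is handed to the hardware by
 * setting RD_RACT; the EDMAC clears that bit once it has filled the
 * buffer, which is how sh_eth_rx() below distinguishes hardware-owned
 * entries from completed ones. RD_RDEL and TD_TDLE on the final entries
 * mark the end of each descriptor list so the controller wraps back to
 * the first descriptor instead of running off the ring.
 */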
/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/*
	 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;
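	/*
	 * Worked example of the formula above: with an MTU of 1500 this
	 * gives ((1500 + 26 + 7) & ~7) + 2 + 16 = 1528 + 2 + 16 = 1546
	 * bytes, plus NET_IP_ALIGN (normally 2) when Rx padding (rpadir)
	 * is in use, i.e. 1548 bytes per receive buffer before the 16-byte
	 * rounding applied at descriptor setup time.
	 */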
	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
				       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
				       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->rx_ring) {
		dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
			rx_ringsize);
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->tx_ring) {
		dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
			tx_ringsize);
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);
	mdp->tx_ring = NULL;
	mdp->rx_ring = NULL;

	return ret;
}
static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
{
	int ringsize;

	if (mdp->rx_ring) {
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	if (mdp->tx_ring) {
		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}
}
static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 val;

	/* Soft Reset */
	ret = sh_eth_reset(ndev);
	if (ret)
		goto out;

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control */
	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);

	sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	if (start)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	if (start)
		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	if (start) {
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);

		netif_start_queue(ndev);
	}

out:
	return ret;
}
/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int freeNum = 0;
	int entry = 0;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % mdp->num_tx_ring;
		txdesc = &mdp->tx_ring[entry];
		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
			break;
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&ndev->dev, txdesc->addr,
					 txdesc->buffer_length, DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			freeNum++;
		}
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		if (entry >= mdp->num_tx_ring - 1)
			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += txdesc->buffer_length;
	}
	return freeNum;
}
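/*
 * Accounting note: cur_tx counts descriptors ever queued and dirty_tx
 * those already reclaimed, so "cur_tx - dirty_tx" is the number of frames
 * still in flight and "dirty_tx % num_tx_ring" the oldest outstanding
 * slot. Illustrative example: with a 64-entry ring, cur_tx = 70 and
 * dirty_tx = 67 means slots 3, 4 and 5 are still owned by the hardware.
 */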
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % mdp->num_rx_ring;
	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
	struct sk_buff *skb;
	u16 pkt_len = 0;
	u32 desc_status;

	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = rxdesc->frame_length;

#if defined(CONFIG_ARCH_R8A7740)
		desc_status >>= 16;
#endif

		if (--boguscnt < 0)
			break;

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;
		} else {
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
		}
		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
		/* The buffer size is rounded up to a 16-byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
				       DMA_FROM_DEVICE);
			sh_eth_set_receive_align(skb);

			skb_checksum_none_assert(skb);
			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		}
		if (entry >= mdp->num_rx_ring - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
		/* fix the values for the next receiving if RDE is set */
		if (intr_status & EESR_RDE)
			mdp->cur_rx = mdp->dirty_rx =
				(sh_eth_read(ndev, RDFAR) -
				 sh_eth_read(ndev, RDLAR)) >> 4;
		sh_eth_write(ndev, EDRRR_R, EDRRR);
	}

	return 0;
}
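/*
 * Recovery note on the RDE path above: RDFAR points at the descriptor the
 * DMA engine stopped on and RDLAR at the ring base; since each Rx
 * descriptor occupies 16 bytes, (RDFAR - RDLAR) >> 4 recomputes the ring
 * index after a Receive Descriptor Empty condition, resynchronising
 * cur_rx/dirty_rx with the hardware before reception is restarted via
 * EDRRR.
 */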
static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
		~(ECMR_RE | ECMR_TE), ECMR);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
		(ECMR_RE | ECMR_TE), ECMR);
}
/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			ndev->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				goto ignore_link;
			} else {
				link_stat = (sh_eth_read(ndev, PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK)) {
				sh_eth_rcv_snd_disable(ndev);
			} else {
				/* Link Up */
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
					  ~DMAC_M_ECI, EESIPR);
				/* clear int */
				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
					  ECSR);
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
					  DMAC_M_ECI, EESIPR);
				/* enable tx and rx */
				sh_eth_rcv_snd_enable(ndev);
			}
		}
	}

ignore_link:
	if (intr_status & EESR_TWB) {
		/* Unused write-back end interrupt */
		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
			if (netif_msg_tx_err(mdp))
				dev_err(&ndev->dev, "Transmit Abort\n");
		}
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
			if (netif_msg_rx_err(mdp))
				dev_err(&ndev->dev, "Receive Abort\n");
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;

		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);
		/* dmesg */
		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
				intr_status, mdp->cur_tx);
		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
				mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			/* tx dma start */
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 intr_status = 0;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Clear interrupt */
	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
			EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
			cd->tx_check | cd->eesr_err_check)) {
		sh_eth_write(ndev, intr_status, EESR);
		ret = IRQ_HANDLED;
	} else
		goto other_irq;

	if (intr_status & (EESR_FRC | /* Frame recv */
			EESR_RMAF | /* Multicast address recv */
			EESR_RRF  | /* Bit frame recv */
			EESR_RTLF | /* Long frame recv */
			EESR_RTSF | /* Short frame recv */
			EESR_PRE  | /* PHY-LSI recv error */
			EESR_CERF)) { /* Recv frame CRC error */
		sh_eth_rx(ndev, intr_status);
	}

	/* Tx Check */
	if (intr_status & cd->tx_check) {
		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check)
		sh_eth_error(ndev, intr_status);

other_irq:
	spin_unlock(&mdp->lock);

	return ret;
}
/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	int new_state = 0;

	if (phydev->link != PHY_DOWN) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (mdp->link == PHY_DOWN) {
			sh_eth_write(ndev,
				(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
			new_state = 1;
			mdp->link = phydev->link;
			if (mdp->cd->no_psr || mdp->no_ether_link)
				sh_eth_rcv_snd_enable(ndev);
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = PHY_DOWN;
		mdp->speed = 0;
		mdp->duplex = -1;
		if (mdp->cd->no_psr || mdp->no_ether_link)
			sh_eth_rcv_snd_disable(ndev);
	}

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
}
/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phydev = NULL;

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
		mdp->mii_bus->id, mdp->phy_id);

	mdp->link = PHY_DOWN;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try connect to PHY */
	phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
			     mdp->phy_interface);
	if (IS_ERR(phydev)) {
		dev_err(&ndev->dev, "phy_connect failed\n");
		return PTR_ERR(phydev);
	}

	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
		 phydev->addr, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}
/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	/* reset phy - this also wakes it from PDOWN */
	phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
	phy_start(mdp->phydev);

	return 0;
}
static int sh_eth_get_settings(struct net_device *ndev,
			struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_ethtool_gset(mdp->phydev, ecmd);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}
static int sh_eth_set_settings(struct net_device *ndev,
		struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);

	/* disable tx and rx */
	sh_eth_rcv_snd_disable(ndev);

	ret = phy_ethtool_sset(mdp->phydev, ecmd);
	if (ret)
		goto error_exit;

	if (ecmd->duplex == DUPLEX_FULL)
		mdp->duplex = 1;
	else
		mdp->duplex = 0;

	if (mdp->cd->set_duplex)
		mdp->cd->set_duplex(ndev);

error_exit:
	mdelay(1);

	/* enable tx and rx */
	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}
static int sh_eth_nway_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_start_aneg(mdp->phydev);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	return mdp->msg_enable;
}

static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	mdp->msg_enable = value;
}
static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)

static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return SH_ETH_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void sh_eth_get_ethtool_stats(struct net_device *ndev,
			struct ethtool_stats *stats, u64 *data)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i = 0;

	/* device-specific stats */
	data[i++] = mdp->cur_rx;
	data[i++] = mdp->cur_tx;
	data[i++] = mdp->dirty_rx;
	data[i++] = mdp->dirty_tx;
}

static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *sh_eth_gstrings_stats,
					sizeof(sh_eth_gstrings_stats));
		break;
	}
}
static void sh_eth_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ring->rx_max_pending = RX_RING_MAX;
	ring->tx_max_pending = TX_RING_MAX;
	ring->rx_pending = mdp->num_rx_ring;
	ring->tx_pending = mdp->num_tx_ring;
}
static int sh_eth_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	if (ring->tx_pending > TX_RING_MAX ||
	    ring->rx_pending > RX_RING_MAX ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_pending < RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_tx_disable(ndev);
		/* Disable interrupts by clearing the interrupt mask. */
		sh_eth_write(ndev, 0x0000, EESIPR);
		/* Stop the chip's Tx and Rx processes. */
		sh_eth_write(ndev, 0, EDTRR);
		sh_eth_write(ndev, 0, EDRRR);
		synchronize_irq(ndev->irq);
	}

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);
	/* Free DMA buffer */
	sh_eth_free_dma_buffer(mdp);

	/* Set new parameters */
	mdp->num_rx_ring = ring->rx_pending;
	mdp->num_tx_ring = ring->tx_pending;

	ret = sh_eth_ring_init(ndev);
	if (ret < 0) {
		dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
		return ret;
	}
	ret = sh_eth_dev_init(ndev, false);
	if (ret < 0) {
		dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
		return ret;
	}

	if (netif_running(ndev)) {
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);
		netif_wake_queue(ndev);
	}

	return 0;
}
static const struct ethtool_ops sh_eth_ethtool_ops = {
	.get_settings	= sh_eth_get_settings,
	.set_settings	= sh_eth_set_settings,
	.nway_reset	= sh_eth_nway_reset,
	.get_msglevel	= sh_eth_get_msglevel,
	.set_msglevel	= sh_eth_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_strings	= sh_eth_get_strings,
	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
	.get_sset_count     = sh_eth_get_sset_count,
	.get_ringparam	= sh_eth_get_ringparam,
	.set_ringparam	= sh_eth_set_ringparam,
};
/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ret = request_irq(ndev->irq, sh_eth_interrupt,
#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
	defined(CONFIG_CPU_SUBTYPE_SH7764) || \
	defined(CONFIG_CPU_SUBTYPE_SH7757)
				IRQF_SHARED,
#else
				0,
#endif
				ndev->name, ndev);
	if (ret) {
		dev_err(&ndev->dev, "Cannot assign IRQ number\n");
		return ret;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev, true);
	if (ret)
		goto out_free_irq;

	/* PHY control start */
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
	pm_runtime_put_sync(&mdp->pdev->dev);
	return ret;
}
/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	if (netif_msg_timer(mdp))
		dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
			" resetting...\n", ndev->name,
			(int)sh_eth_read(ndev, EESR));

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = 0;
		rxdesc->addr = 0xBADF00D0;
		if (mdp->rx_skbuff[i])
			dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < mdp->num_tx_ring; i++) {
		if (mdp->tx_skbuff[i])
			dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev, true);
}
/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
		if (!sh_eth_txfree(ndev)) {
			if (netif_msg_tx_queued(mdp))
				dev_warn(&ndev->dev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % mdp->num_tx_ring;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	if (entry >= mdp->num_tx_ring - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}
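/*
 * Note on the EDTRR write above: it is the actual "kick". Setting the
 * transmit request bit (EDTRR_TRNS_ETHER or EDTRR_TRNS_GETHER, depending
 * on the core) makes the EDMAC walk the Tx descriptor list until it hits
 * a descriptor without TD_TACT set; the preceding read-back guard avoids
 * re-kicking a transmitter that is already running.
 */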
/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	sh_eth_write(ndev, 0x0000, EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	sh_eth_write(ndev, 0, EDTRR);
	sh_eth_write(ndev, 0, EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free DMA buffer */
	sh_eth_free_dma_buffer(mdp);

	pm_runtime_put_sync(&mdp->pdev->dev);

	return 0;
}
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
	if (sh_eth_is_gether(mdp)) {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
	} else {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
	}
	pm_runtime_put_sync(&mdp->pdev->dev);

	return &ndev->stats;
}
/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
				int cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}
#if defined(SH_ETH_HAS_TSU)
/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
					    int entry)
{
	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
}

static u32 sh_eth_tsu_get_post_mask(int entry)
{
	return 0x0f << (28 - ((entry % 8) * 4));
}

static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
{
	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
}
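/*
 * Worked example of the POST helpers above: each TSU_POSTn register holds
 * eight 4-bit fields, entry 0 in bits 31:28. For entry 10,
 * sh_eth_tsu_get_post_reg_offset() picks the second POST register
 * (10 / 8 == 1), the mask is 0x0f << (28 - 2 * 4) = 0x0f << 20, and the
 * per-port enable bit is 0x08 << 20 for port 0 or 0x02 << 20 for port 1.
 */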
static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
					     int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	tmp = ioread32(reg_offset);
	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
}

static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 post_mask, ref_mask, tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	post_mask = sh_eth_tsu_get_post_mask(entry);
	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;

	tmp = ioread32(reg_offset);
	iowrite32(tmp & ~post_mask, reg_offset);

	/* If the other port enables this entry, the function returns "true" */
	return tmp & ref_mask;
}
static int sh_eth_tsu_busy(struct net_device *ndev)
{
	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
		udelay(10);
		timeout--;
		if (timeout <= 0) {
			dev_err(&ndev->dev, "%s: timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
				  const u8 *addr)
{
	u32 val;

	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
	iowrite32(val, reg);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	val = addr[4] << 8 | addr[5];
	iowrite32(val, reg + 4);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	return 0;
}
static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
{
	u32 val;

	val = ioread32(reg);
	addr[0] = (val >> 24) & 0xff;
	addr[1] = (val >> 16) & 0xff;
	addr[2] = (val >> 8) & 0xff;
	addr[3] = val & 0xff;
	val = ioread32(reg + 4);
	addr[4] = (val >> 8) & 0xff;
	addr[5] = val & 0xff;
}
static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;
	u8 c_addr[ETH_ALEN];

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, c_addr);
		if (memcmp(addr, c_addr, ETH_ALEN) == 0)
			return i;
	}

	return -ENOENT;
}

static int sh_eth_tsu_find_empty(struct net_device *ndev)
{
	u8 blank[ETH_ALEN];
	int entry;

	memset(blank, 0, sizeof(blank));
	entry = sh_eth_tsu_find_entry(ndev, blank);
	return (entry < 0) ? -ENOMEM : entry;
}
static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int ret;
	u8 blank[ETH_ALEN];

	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
			 ~(1 << (31 - entry)), TSU_TEN);

	memset(blank, 0, sizeof(blank));
	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
	if (ret < 0)
		return ret;
	return 0;
}
static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i, ret;

	if (unlikely(!mdp->cd->tsu))
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0) {
		/* No entry found, create one */
		i = sh_eth_tsu_find_empty(ndev);
		if (i < 0)
			return -ENOMEM;
		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
		if (ret < 0)
			return ret;

		/* Enable the entry */
		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
				 (1 << (31 - i)), TSU_TEN);
	}

	/* Entry found or created, enable POST */
	sh_eth_tsu_enable_cam_entry_post(ndev, i);

	return 0;
}
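/*
 * Note on the two-level scheme used here: a CAM entry (TSU_ADRH/L pair
 * plus its TSU_TEN bit) is shared between the controller's two ports,
 * while the POST field records per-port interest in that entry. An entry
 * is therefore only cleared from the table, via
 * sh_eth_tsu_disable_cam_entry_table(), once neither port's POST bits
 * reference it any more.
 */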
static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (unlikely(!mdp->cd->tsu))
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0 ||
	    sh_eth_tsu_disable_cam_entry_post(ndev, i))
		goto done;

	/* Disable the entry if both ports were disabled */
	ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
	if (ret < 0)
		return ret;

done:
	return 0;
}
static int sh_eth_tsu_purge_all(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (unlikely(!mdp->cd->tsu))
		return 0;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			continue;

		/* Disable the entry if both ports were disabled */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}
static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u8 addr[ETH_ALEN];
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;

	if (unlikely(!mdp->cd->tsu))
		return;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, addr);
		if (is_multicast_ether_addr(addr))
			sh_eth_tsu_del_entry(ndev, addr);
	}
}
/* Multicast reception directions set */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ecmr_bits;
	int mcast_all = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/*
	 * Initial condition is MCT = 1, PRM = 0.
	 * Depending on ndev->flags, set PRM or clear MCT
	 */
	ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;

	if (!(ndev->flags & IFF_MULTICAST)) {
		sh_eth_tsu_purge_mcast(ndev);
		mcast_all = 1;
	}
	if (ndev->flags & IFF_ALLMULTI) {
		sh_eth_tsu_purge_mcast(ndev);
		ecmr_bits &= ~ECMR_MCT;
		mcast_all = 1;
	}

	if (ndev->flags & IFF_PROMISC) {
		sh_eth_tsu_purge_all(ndev);
		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
	} else if (mdp->cd->tsu) {
		struct netdev_hw_addr *ha;
		netdev_for_each_mc_addr(ha, ndev) {
			if (mcast_all && is_multicast_ether_addr(ha->addr))
				continue;

			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
				if (!mcast_all) {
					sh_eth_tsu_purge_mcast(ndev);
					ecmr_bits &= ~ECMR_MCT;
					mcast_all = 1;
				}
			}
		}
	} else {
		/* Normal, unicast/broadcast-only mode. */
		ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
	}

	/* update the ethernet mode */
	sh_eth_write(ndev, ecmr_bits, ECMR);

	spin_unlock_irqrestore(&mdp->lock, flags);
}
static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
{
	if (!mdp->port)
		return TSU_VTAG0;
	else
		return TSU_VTAG1;
}
static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids++;

	/*
	 * The controller has one VLAN tag HW filter. So, if the filter is
	 * already enabled, the driver disables it and lets all VLAN-tagged
	 * frames through instead of filtering on a single ID.
	 */
	if (mdp->vlan_num_ids > 1) {
		/* disable VLAN filter */
		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
		return 0;
	}

	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
			 vtag_reg_index);

	return 0;
}
static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids--;
	sh_eth_tsu_write(mdp, 0, vtag_reg_index);

	return 0;
}
#endif /* SH_ETH_HAS_TSU */
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	if (sh_eth_is_gether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
	} else {
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
	}
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
}
/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);

	/* unregister mdio bus */
	mdiobus_unregister(bus);

	/* remove mdio bus info from net_device */
	dev_set_drvdata(&ndev->dev, NULL);

	/* free interrupts memory */
	kfree(bus->irq);

	/* free bitbang info */
	free_mdio_bitbang(bus);

	/* free bitbang memory */
	kfree(mdp->bitbang);

	return 0;
}
/* MDIO bus init function */
static int sh_mdio_init(struct net_device *ndev, int id,
			struct sh_eth_plat_data *pd)
{
	int ret, i;
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
	if (!bitbang) {
		ret = -ENOMEM;
		goto out;
	}

	/* bitbang init */
	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
	bitbang->set_gate = pd->set_mdio_gate;
	bitbang->mdi_msk = 0x08;
	bitbang->mdo_msk = 0x04;
	bitbang->mmd_msk = 0x02;/* MMD */
	bitbang->mdc_msk = 0x01;
	bitbang->ctrl.ops = &bb_ops;
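	/*
	 * PIR bit layout implied by the masks above (LSB first): bit 0
	 * drives MDC, bit 1 the MDIO pin direction (MMD), bit 2 the MDO
	 * output level, and bit 3 samples MDI. set_mdio_gate is an
	 * optional board-specific hook run before each pin access; on
	 * some boards it selects which controller the shared MDIO pins
	 * are routed to.
	 */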
	/* MII controller setting */
	mdp->bitbang = bitbang;
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {
		ret = -ENOMEM;
		goto out_free_bitbang;
	}

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = &ndev->dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		mdp->pdev->name, id);

	/* PHY IRQ */
	mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;

	/* register mdio bus */
	ret = mdiobus_register(mdp->mii_bus);
	if (ret)
		goto out_free_irq;

	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_irq:
	kfree(mdp->mii_bus->irq);

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);

out_free_bitbang:
	kfree(bitbang);

out:
	return ret;
}
static const u16 *sh_eth_get_register_offset(int register_type)
{
	const u16 *reg_offset = NULL;

	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		reg_offset = sh_eth_offset_gigabit;
		break;
	case SH_ETH_REG_FAST_SH4:
		reg_offset = sh_eth_offset_fast_sh4;
		break;
	case SH_ETH_REG_FAST_SH3_SH2:
		reg_offset = sh_eth_offset_fast_sh3_sh2;
		break;
	default:
		printk(KERN_ERR "Unknown register type (%d)\n", register_type);
		break;
	}

	return reg_offset;
}
static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
#if defined(SH_ETH_HAS_TSU)
	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
#endif
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		ret = -EINVAL;
		goto out;
	}

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev) {
		ret = -ENOMEM;
		goto out;
	}

	/* The sh Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;
	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ndev->dma = -1;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	mdp = netdev_priv(ndev);
	mdp->num_tx_ring = TX_RING_SIZE;
	mdp->num_rx_ring = RX_RING_SIZE;
	mdp->addr = ioremap(res->start, resource_size(res));
	if (mdp->addr == NULL) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "ioremap failed.\n");
		goto out_release;
	}

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;
	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;
	mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);

	/* set cpu data */
#if defined(SH_ETH_HAS_BOTH_MODULES)
	mdp->cd = sh_eth_get_cpu_data(mdp);
#else
	mdp->cd = &sh_eth_my_cpu_data;
#endif
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	ndev->netdev_ops = &sh_eth_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);

	/* ioremap the TSU registers */
	if (mdp->cd->tsu) {
		struct resource *rtsu;
		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!rtsu) {
			dev_err(&pdev->dev, "No TSU resource found\n");
			ret = -ENODEV;
			goto out_release;
		}
		mdp->tsu_addr = ioremap(rtsu->start,
					resource_size(rtsu));
		if (mdp->tsu_addr == NULL) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "TSU ioremap failed.\n");
			goto out_release;
		}
		mdp->port = devno % 2;
		ndev->features = NETIF_F_HW_VLAN_FILTER;
	}

	/* initialize first or needed device */
	if (!devno || pd->needs_init) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (Init only) */
			sh_eth_tsu_init(mdp);
		}
	}

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_release;

	/* mdio bus init */
	ret = sh_mdio_init(ndev, pdev->id, pd);
	if (ret)
		goto out_unregister;

	/* print device information */
	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
		(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return ret;

out_unregister:
	unregister_netdev(ndev);

out_release:
	/* net_dev free */
	if (mdp && mdp->addr)
		iounmap(mdp->addr);
	if (mdp && mdp->tsu_addr)
		iounmap(mdp->tsu_addr);
	if (ndev)
		free_netdev(ndev);

out:
	return ret;
}
static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->cd->tsu)
		iounmap(mdp->tsu_addr);
	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	pm_runtime_disable(&pdev->dev);
	iounmap(mdp->addr);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
static int sh_eth_runtime_nop(struct device *dev)
{
	/*
	 * Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,
};
static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.driver = {
		   .name = CARDNAME,
		   .pm = &sh_eth_dev_pm_ops,
	},
};

module_platform_driver(sh_eth_driver);
MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");