2 * Copyright (C) 2015 Microchip Technology
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/mii.h>
23 #include <linux/usb.h>
24 #include <linux/crc32.h>
25 #include <linux/signal.h>
26 #include <linux/slab.h>
27 #include <linux/if_vlan.h>
28 #include <linux/uaccess.h>
29 #include <linux/list.h>
31 #include <linux/ipv6.h>
32 #include <linux/mdio.h>
33 #include <net/ip6_checksum.h>
36 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
37 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
38 #define DRIVER_NAME "lan78xx"
39 #define DRIVER_VERSION "1.0.0"
41 #define TX_TIMEOUT_JIFFIES (5 * HZ)
42 #define THROTTLE_JIFFIES (HZ / 8)
43 #define UNLINK_TIMEOUT_MS 3
45 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
47 #define SS_USB_PKT_SIZE (1024)
48 #define HS_USB_PKT_SIZE (512)
49 #define FS_USB_PKT_SIZE (64)
51 #define MAX_RX_FIFO_SIZE (12 * 1024)
52 #define MAX_TX_FIFO_SIZE (12 * 1024)
53 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
54 #define DEFAULT_BULK_IN_DELAY (0x0800)
55 #define MAX_SINGLE_PACKET_SIZE (9000)
56 #define DEFAULT_TX_CSUM_ENABLE (true)
57 #define DEFAULT_RX_CSUM_ENABLE (true)
58 #define DEFAULT_TSO_CSUM_ENABLE (true)
59 #define DEFAULT_VLAN_FILTER_ENABLE (true)
60 #define INTERNAL_PHY_ID (2) /* 2: GMII */
61 #define TX_OVERHEAD (8)
64 #define LAN78XX_USB_VENDOR_ID (0x0424)
65 #define LAN7800_USB_PRODUCT_ID (0x7800)
66 #define LAN7850_USB_PRODUCT_ID (0x7850)
67 #define LAN78XX_EEPROM_MAGIC (0x78A5)
68 #define LAN78XX_OTP_MAGIC (0x78F3)
73 #define EEPROM_INDICATOR (0xA5)
74 #define EEPROM_MAC_OFFSET (0x01)
75 #define MAX_EEPROM_SIZE 512
76 #define OTP_INDICATOR_1 (0xF3)
77 #define OTP_INDICATOR_2 (0xF7)
79 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
80 WAKE_MCAST | WAKE_BCAST | \
81 WAKE_ARP | WAKE_MAGIC)
83 /* USB related defines */
84 #define BULK_IN_PIPE 1
85 #define BULK_OUT_PIPE 2
87 /* default autosuspend delay (mSec)*/
88 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
/* ethtool -S statistic names. Order must match the u32 layout of
 * struct lan78xx_statstage below (lan78xx_get_stats copies that struct
 * out word-by-word in this order). Some entries are elided in this chunk.
 */
90 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
92 "RX Alignment Errors",
95 "RX Undersize Frame Errors",
96 "RX Oversize Frame Errors",
98 "RX Unicast Byte Count",
99 "RX Broadcast Byte Count",
100 "RX Multicast Byte Count",
102 "RX Broadcast Frames",
103 "RX Multicast Frames",
106 "RX 65 - 127 Byte Frames",
107 "RX 128 - 255 Byte Frames",
108 "RX 256 - 511 Bytes Frames",
109 "RX 512 - 1023 Byte Frames",
110 "RX 1024 - 1518 Byte Frames",
111 "RX Greater 1518 Byte Frames",
112 "EEE RX LPI Transitions",
115 "TX Excess Deferral Errors",
118 "TX Single Collisions",
119 "TX Multiple Collisions",
120 "TX Excessive Collision",
121 "TX Late Collisions",
122 "TX Unicast Byte Count",
123 "TX Broadcast Byte Count",
124 "TX Multicast Byte Count",
126 "TX Broadcast Frames",
127 "TX Multicast Frames",
130 "TX 65 - 127 Byte Frames",
131 "TX 128 - 255 Byte Frames",
132 "TX 256 - 511 Bytes Frames",
133 "TX 512 - 1023 Byte Frames",
134 "TX 1024 - 1518 Byte Frames",
135 "TX Greater 1518 Byte Frames",
136 "EEE TX LPI Transitions",
/* Hardware statistics block as fetched from the device by
 * lan78xx_read_stats() (USB_VENDOR_REQUEST_GET_STATS). All fields are
 * little-endian u32 on the wire; lan78xx_read_stats() converts them
 * with le32_to_cpus(). Field order must match lan78xx_gstrings.
 */
140 struct lan78xx_statstage {
142 u32 rx_alignment_errors;
143 u32 rx_fragment_errors;
144 u32 rx_jabber_errors;
145 u32 rx_undersize_frame_errors;
146 u32 rx_oversize_frame_errors;
147 u32 rx_dropped_frames;
148 u32 rx_unicast_byte_count;
149 u32 rx_broadcast_byte_count;
150 u32 rx_multicast_byte_count;
151 u32 rx_unicast_frames;
152 u32 rx_broadcast_frames;
153 u32 rx_multicast_frames;
155 u32 rx_64_byte_frames;
156 u32 rx_65_127_byte_frames;
157 u32 rx_128_255_byte_frames;
158 u32 rx_256_511_bytes_frames;
159 u32 rx_512_1023_byte_frames;
160 u32 rx_1024_1518_byte_frames;
161 u32 rx_greater_1518_byte_frames;
162 u32 eee_rx_lpi_transitions;
165 u32 tx_excess_deferral_errors;
166 u32 tx_carrier_errors;
167 u32 tx_bad_byte_count;
168 u32 tx_single_collisions;
169 u32 tx_multiple_collisions;
170 u32 tx_excessive_collision;
171 u32 tx_late_collisions;
172 u32 tx_unicast_byte_count;
173 u32 tx_broadcast_byte_count;
174 u32 tx_multicast_byte_count;
175 u32 tx_unicast_frames;
176 u32 tx_broadcast_frames;
177 u32 tx_multicast_frames;
179 u32 tx_64_byte_frames;
180 u32 tx_65_127_byte_frames;
181 u32 tx_128_255_byte_frames;
182 u32 tx_256_511_bytes_frames;
183 u32 tx_512_1023_byte_frames;
184 u32 tx_1024_1518_byte_frames;
185 u32 tx_greater_1518_byte_frames;
186 u32 eee_tx_lpi_transitions;
/* Driver-private state hung off dev->data[0]; holds shadow copies of the
 * multicast hash, perfect-filter (MAF) and VLAN tables plus the locks and
 * deferred-work items that write them to the chip from sleepable context.
 */
192 struct lan78xx_priv {
193 struct lan78xx_net *dev;
195 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
196 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
197 u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
198 struct mutex dataport_mutex; /* for dataport access */
199 spinlock_t rfe_ctl_lock; /* for rfe register access */
200 struct work_struct set_multicast;
201 struct work_struct set_vlan;
/* Per-skb bookkeeping stored in skb->cb for URBs in flight. */
215 struct skb_data { /* skb->cb is one of these */
217 struct lan78xx_net *dev;
218 enum skb_state state;
/* NOTE(review): the two fields below appear to belong to a separate
 * deferred-control-request struct whose declaration is elided here.
 */
223 struct usb_ctrlrequest req;
224 struct lan78xx_net *dev;
227 #define EVENT_TX_HALT 0
228 #define EVENT_RX_HALT 1
229 #define EVENT_RX_MEMORY 2
230 #define EVENT_STS_SPLIT 3
231 #define EVENT_LINK_RESET 4
232 #define EVENT_RX_PAUSED 5
233 #define EVENT_DEV_WAKING 6
234 #define EVENT_DEV_ASLEEP 7
235 #define EVENT_DEV_OPEN 8
/* Fields of the per-device state (the enclosing struct declaration —
 * presumably struct lan78xx_net — is not visible in this chunk).
 */
238 struct net_device *net;
239 struct usb_device *udev;
240 struct usb_interface *intf;
/* skb queues for the rx/tx pipelines and their paused/pending overflow */
245 struct sk_buff_head rxq;
246 struct sk_buff_head txq;
247 struct sk_buff_head done;
248 struct sk_buff_head rxq_pause;
249 struct sk_buff_head txq_pend;
251 struct tasklet_struct bh;
252 struct delayed_work wq;
254 struct usb_host_endpoint *ep_blkin;
255 struct usb_host_endpoint *ep_blkout;
256 struct usb_host_endpoint *ep_intr;
260 struct urb *urb_intr;
261 struct usb_anchor deferred;
263 struct mutex phy_mutex; /* for phy access */
264 unsigned pipe_in, pipe_out, pipe_intr;
266 u32 hard_mtu; /* count any extra framing */
267 size_t rx_urb_size; /* size for rx urbs */
271 wait_queue_head_t *wait;
272 unsigned char suspend_count;
275 struct timer_list delay;
/* data[0] holds struct lan78xx_priv *; flags bits are the EVENT_* defines */
277 unsigned long data[5];
278 struct mii_if_info mii;
284 /* use ethtool to change the level for any given device */
285 static int msg_level = -1;
286 module_param(msg_level, int, 0);
287 MODULE_PARM_DESC(msg_level, "Override default message level");
/* Read a 32-bit device register via a USB vendor control-in request.
 * The transfer buffer is kmalloc'd (not stack) because USB DMA requires
 * it. On success *data receives the value; returns negative on failure.
 */
289 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
291 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
297 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
298 USB_VENDOR_REQUEST_READ_REGISTER,
299 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
300 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
301 if (likely(ret >= 0)) {
/* error path: log and fall through (success path elided in this chunk) */
305 netdev_warn(dev->net,
306 "Failed to read register index 0x%08x. ret = %d",
/* Write a 32-bit device register via a USB vendor control-out request.
 * Mirrors lan78xx_read_reg(); the value is staged in a kmalloc'd buffer
 * for USB DMA. Returns negative on control-transfer failure.
 */
315 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
317 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
326 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
327 USB_VENDOR_REQUEST_WRITE_REGISTER,
328 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
329 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
330 if (unlikely(ret < 0)) {
331 netdev_warn(dev->net,
332 "Failed to write register index 0x%08x. ret = %d",
/* Fetch the full hardware statistics block in one vendor control-in
 * request, then convert each u32 from device (little-endian) to CPU
 * byte order before copying into *data.
 */
341 static int lan78xx_read_stats(struct lan78xx_net *dev,
342 struct lan78xx_statstage *data)
346 struct lan78xx_statstage *stats;
350 stats = kmalloc(sizeof(*stats), GFP_KERNEL)
354 ret = usb_control_msg(dev->udev,
355 usb_rcvctrlpipe(dev->udev, 0),
356 USB_VENDOR_REQUEST_GET_STATS,
357 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
/* NOTE(review): a GET uses USB_CTRL_SET_TIMEOUT here — likely benign
 * (same value), but confirm against USB_CTRL_GET_TIMEOUT usage above.
 */
362 USB_CTRL_SET_TIMEOUT);
363 if (likely(ret >= 0)) {
366 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
367 le32_to_cpus(&src[i]);
371 netdev_warn(dev->net,
372 "Failed to read stat ret = 0x%x", ret);
380 /* Loop until the read is completed with timeout called with phy_mutex held */
381 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
383 unsigned long start_time = jiffies;
/* Poll MII_ACC until MII_ACC_MII_BUSY_ clears, giving up after ~1s (HZ) */
388 ret = lan78xx_read_reg(dev, MII_ACC, &val);
389 if (unlikely(ret < 0))
392 if (!(val & MII_ACC_MII_BUSY_))
394 } while (!time_after(jiffies, start_time + HZ));
/* Compose a MII_ACC register value: PHY address + register index +
 * read/write direction, with the BUSY bit set to kick off the access.
 */
399 static inline u32 mii_access(int id, int index, int read)
403 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
404 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
406 ret |= MII_ACC_MII_READ_;
408 ret |= MII_ACC_MII_WRITE_;
409 ret |= MII_ACC_MII_BUSY_;
/* mii_if_info .mdio_read hook: read one 16-bit PHY register through the
 * chip's MII_ACC/MII_DATA bridge. Takes autopm + phy_mutex; waits for
 * the MII engine to go idle before and after issuing the access.
 */
414 static int lan78xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
416 struct lan78xx_net *dev = netdev_priv(netdev);
420 ret = usb_autopm_get_interface(dev->intf);
424 mutex_lock(&dev->phy_mutex);
426 /* confirm MII not busy */
427 ret = lan78xx_phy_wait_not_busy(dev);
431 /* set the address, index & direction (read from PHY) */
432 phy_id &= dev->mii.phy_id_mask;
433 idx &= dev->mii.reg_num_mask;
434 addr = mii_access(phy_id, idx, MII_READ);
435 ret = lan78xx_write_reg(dev, MII_ACC, addr);
437 ret = lan78xx_phy_wait_not_busy(dev);
/* result is the low 16 bits of MII_DATA */
441 ret = lan78xx_read_reg(dev, MII_DATA, &val);
443 ret = (int)(val & 0xFFFF);
446 mutex_unlock(&dev->phy_mutex);
447 usb_autopm_put_interface(dev->intf);
/* mii_if_info .mdio_write hook: write one 16-bit PHY register. Stages
 * the value in MII_DATA, then triggers the write via MII_ACC. Void
 * return per the mii API, so register-write failures are not reported.
 */
451 static void lan78xx_mdio_write(struct net_device *netdev, int phy_id,
454 struct lan78xx_net *dev = netdev_priv(netdev);
458 if (usb_autopm_get_interface(dev->intf) < 0)
461 mutex_lock(&dev->phy_mutex);
463 /* confirm MII not busy */
464 ret = lan78xx_phy_wait_not_busy(dev);
469 ret = lan78xx_write_reg(dev, MII_DATA, val);
471 /* set the address, index & direction (write to PHY) */
472 phy_id &= dev->mii.phy_id_mask;
473 idx &= dev->mii.reg_num_mask;
474 addr = mii_access(phy_id, idx, MII_WRITE);
475 ret = lan78xx_write_reg(dev, MII_ACC, addr);
477 ret = lan78xx_phy_wait_not_busy(dev);
482 mutex_unlock(&dev->phy_mutex);
483 usb_autopm_put_interface(dev->intf);
/* Write a clause-45 MMD register via the clause-22 indirect mechanism
 * (MMD access-control + address-data register pair): select the MMD
 * device, write the register index, switch to data-no-increment mode,
 * then write the value. Each step waits for the MII engine to go idle.
 */
486 static void lan78xx_mmd_write(struct net_device *netdev, int phy_id,
487 int mmddev, int mmdidx, int regval)
489 struct lan78xx_net *dev = netdev_priv(netdev);
493 if (usb_autopm_get_interface(dev->intf) < 0)
496 mutex_lock(&dev->phy_mutex);
498 /* confirm MII not busy */
499 ret = lan78xx_phy_wait_not_busy(dev);
505 /* set up device address for MMD */
506 ret = lan78xx_write_reg(dev, MII_DATA, mmddev);
508 phy_id &= dev->mii.phy_id_mask;
509 addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
510 ret = lan78xx_write_reg(dev, MII_ACC, addr);
512 ret = lan78xx_phy_wait_not_busy(dev);
516 /* select register of MMD */
518 ret = lan78xx_write_reg(dev, MII_DATA, val);
520 phy_id &= dev->mii.phy_id_mask;
521 addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
522 ret = lan78xx_write_reg(dev, MII_ACC, addr);
524 ret = lan78xx_phy_wait_not_busy(dev);
528 /* select register data for MMD */
529 val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
530 ret = lan78xx_write_reg(dev, MII_DATA, val);
532 phy_id &= dev->mii.phy_id_mask;
533 addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
534 ret = lan78xx_write_reg(dev, MII_ACC, addr);
536 ret = lan78xx_phy_wait_not_busy(dev);
/* finally write the data value itself */
542 ret = lan78xx_write_reg(dev, MII_DATA, val);
544 phy_id &= dev->mii.phy_id_mask;
545 addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
546 ret = lan78xx_write_reg(dev, MII_ACC, addr);
548 ret = lan78xx_phy_wait_not_busy(dev);
553 mutex_unlock(&dev->phy_mutex);
554 usb_autopm_put_interface(dev->intf);
/* Read a clause-45 MMD register via the clause-22 indirect mechanism —
 * the read-side counterpart of lan78xx_mmd_write(). Returns the low
 * 16 bits of MII_DATA on success.
 */
557 static int lan78xx_mmd_read(struct net_device *netdev, int phy_id,
558 int mmddev, int mmdidx)
560 struct lan78xx_net *dev = netdev_priv(netdev);
564 ret = usb_autopm_get_interface(dev->intf);
568 mutex_lock(&dev->phy_mutex);
570 /* confirm MII not busy */
571 ret = lan78xx_phy_wait_not_busy(dev);
575 /* set up device address for MMD */
576 ret = lan78xx_write_reg(dev, MII_DATA, mmddev);
578 phy_id &= dev->mii.phy_id_mask;
579 addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
580 ret = lan78xx_write_reg(dev, MII_ACC, addr);
582 ret = lan78xx_phy_wait_not_busy(dev);
586 /* select register of MMD */
588 ret = lan78xx_write_reg(dev, MII_DATA, val);
590 phy_id &= dev->mii.phy_id_mask;
591 addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
592 ret = lan78xx_write_reg(dev, MII_ACC, addr);
594 ret = lan78xx_phy_wait_not_busy(dev);
598 /* select register data for MMD */
599 val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
600 ret = lan78xx_write_reg(dev, MII_DATA, val);
602 phy_id &= dev->mii.phy_id_mask;
603 addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
604 ret = lan78xx_write_reg(dev, MII_ACC, addr);
606 ret = lan78xx_phy_wait_not_busy(dev);
610 /* set the address, index & direction (read from PHY) */
611 phy_id &= dev->mii.phy_id_mask;
612 addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_READ);
613 ret = lan78xx_write_reg(dev, MII_ACC, addr);
615 ret = lan78xx_phy_wait_not_busy(dev);
620 ret = lan78xx_read_reg(dev, MII_DATA, &val);
622 ret = (int)(val & 0xFFFF);
625 mutex_unlock(&dev->phy_mutex);
626 usb_autopm_put_interface(dev->intf);
/* Poll E2P_CMD until the EEPROM controller clears BUSY or flags a
 * TIMEOUT, sleeping 40-100us between polls, bounded by ~1s (HZ).
 * Logs and fails if BUSY/TIMEOUT is still set afterwards.
 */
630 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
632 unsigned long start_time = jiffies;
637 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
638 if (unlikely(ret < 0))
641 if (!(val & E2P_CMD_EPC_BUSY_) ||
642 (val & E2P_CMD_EPC_TIMEOUT_))
644 usleep_range(40, 100);
645 } while (!time_after(jiffies, start_time + HZ));
647 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
648 netdev_warn(dev->net, "EEPROM read operation timeout");
/* Wait (up to ~1s) for the EEPROM controller to be idle before a new
 * command is issued; warns and fails if it stays busy.
 */
655 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
657 unsigned long start_time = jiffies;
662 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
663 if (unlikely(ret < 0))
666 if (!(val & E2P_CMD_EPC_BUSY_))
669 usleep_range(40, 100);
670 } while (!time_after(jiffies, start_time + HZ));
672 netdev_warn(dev->net, "EEPROM is busy");
/* Read `length` bytes from the EEPROM starting at `offset`, one byte
 * per READ command: issue command, wait for completion, then pull the
 * low byte of E2P_DATA.
 */
676 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
677 u32 length, u8 *data)
682 ret = lan78xx_eeprom_confirm_not_busy(dev);
686 for (i = 0; i < length; i++) {
687 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
/* NOTE(review): the address appears to come from `offset` alone here;
 * whether `i` is folded in on an elided line should be confirmed.
 */
688 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
689 ret = lan78xx_write_reg(dev, E2P_CMD, val);
690 if (unlikely(ret < 0))
693 ret = lan78xx_wait_eeprom(dev);
697 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
698 if (unlikely(ret < 0))
701 data[i] = val & 0xFF;
/* Read from the EEPROM only if its first byte carries the
 * EEPROM_INDICATOR signature (0xA5); otherwise the read is skipped.
 */
708 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
709 u32 length, u8 *data)
714 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
715 if ((ret == 0) && (sig == EEPROM_INDICATOR))
716 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
/* Write `length` bytes to the EEPROM at `offset`: first issue the
 * write/erase-enable (EWEN) command once, then for each byte stage it
 * in E2P_DATA and issue a WRITE command, waiting for completion each
 * time.
 */
723 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
724 u32 length, u8 *data)
729 ret = lan78xx_eeprom_confirm_not_busy(dev);
733 /* Issue write/erase enable command */
734 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
735 ret = lan78xx_write_reg(dev, E2P_CMD, val);
736 if (unlikely(ret < 0))
739 ret = lan78xx_wait_eeprom(dev);
743 for (i = 0; i < length; i++) {
744 /* Fill data register */
746 ret = lan78xx_write_reg(dev, E2P_DATA, val);
750 /* Send "write" command */
751 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
752 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
753 ret = lan78xx_write_reg(dev, E2P_CMD, val);
757 ret = lan78xx_wait_eeprom(dev);
/* Read `length` bytes of OTP memory: power up the OTP block if it is
 * powered down (poll OTP_PWR_DN with ~1s timeout), then per byte set
 * the split address (ADDR1 = high bits, ADDR2 = low bits), issue a
 * READ + GO, poll OTP_STATUS busy, and collect the byte from
 * OTP_RD_DATA.
 */
767 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
768 u32 length, u8 *data)
773 unsigned long timeout;
775 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
777 if (buf & OTP_PWR_DN_PWRDN_N_) {
778 /* clear it and wait to be cleared */
779 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
781 timeout = jiffies + HZ;
784 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
785 if (time_after(jiffies, timeout)) {
786 netdev_warn(dev->net,
787 "timeout on OTP_PWR_DN");
790 } while (buf & OTP_PWR_DN_PWRDN_N_);
793 for (i = 0; i < length; i++) {
794 ret = lan78xx_write_reg(dev, OTP_ADDR1,
795 ((offset + i) >> 8) & OTP_ADDR1_15_11);
796 ret = lan78xx_write_reg(dev, OTP_ADDR2,
797 ((offset + i) & OTP_ADDR2_10_3));
799 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
800 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
802 timeout = jiffies + HZ;
805 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
806 if (time_after(jiffies, timeout)) {
807 netdev_warn(dev->net,
808 "timeout on OTP_STATUS");
811 } while (buf & OTP_STATUS_BUSY_);
813 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
815 data[i] = (u8)(buf & 0xFF);
/* Read OTP data after validating the signature byte at offset 0;
 * OTP_INDICATOR_1 vs OTP_INDICATOR_2 select different handling (the
 * offset adjustment lines are elided in this chunk).
 */
821 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
822 u32 length, u8 *data)
827 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
830 if (sig == OTP_INDICATOR_1)
832 else if (sig == OTP_INDICATOR_2)
836 ret = lan78xx_read_raw_otp(dev, offset, length, data);
/* Poll DP_SEL up to 100 times (40-100us apart) until the dataport
 * reports ready (DP_SEL_DPRDY_ set); warns and fails on timeout.
 */
842 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
846 for (i = 0; i < 100; i++) {
849 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
850 if (unlikely(ret < 0))
853 if (dp_sel & DP_SEL_DPRDY_)
856 usleep_range(40, 100);
859 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
/* Write a buffer into one of the chip's internal RAMs through the
 * dataport: select the RAM bank in DP_SEL, then for each word set
 * DP_ADDR/DP_DATA and issue DP_CMD_WRITE_, waiting for ready between
 * words. Serialized by pdata->dataport_mutex; must run in sleepable
 * context (autopm + usleep in the wait helper).
 */
864 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
865 u32 addr, u32 length, u32 *buf)
867 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
871 if (usb_autopm_get_interface(dev->intf) < 0)
874 mutex_lock(&pdata->dataport_mutex);
876 ret = lan78xx_dataport_wait_not_busy(dev);
880 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
882 dp_sel &= ~DP_SEL_RSEL_MASK_;
883 dp_sel |= ram_select;
884 ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
886 for (i = 0; i < length; i++) {
887 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
889 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
891 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
893 ret = lan78xx_dataport_wait_not_busy(dev);
899 mutex_unlock(&pdata->dataport_mutex);
900 usb_autopm_put_interface(dev->intf);
/* Pack a MAC address into the shadow perfect-filter table entry:
 * [1] = low 32 bits (addr[3..0]), [0] = high 16 bits (addr[5..4]) plus
 * VALID and TYPE_DST flags. Index 0 is reserved (own HW address), so
 * only 0 < index < NUM_OF_MAF is accepted.
 */
905 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
906 int index, u8 addr[ETH_ALEN])
910 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
912 temp = addr[2] | (temp << 8);
913 temp = addr[1] | (temp << 8);
914 temp = addr[0] | (temp << 8);
915 pdata->pfilter_table[index][1] = temp;
917 temp = addr[4] | (temp << 8);
918 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
919 pdata->pfilter_table[index][0] = temp;
923 /* returns hash bit number for given MAC address */
924 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
/* top 9 bits of the Ethernet CRC -> bit index 0..511 in mchash_table */
926 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
/* Workqueue handler (pdata->set_multicast): push the shadow multicast
 * hash table and perfect-filter table to the chip, then write RFE_CTL.
 * Runs in sleepable context so the USB register writes are legal.
 */
929 static void lan78xx_deferred_multicast_write(struct work_struct *param)
931 struct lan78xx_priv *pdata =
932 container_of(param, struct lan78xx_priv, set_multicast);
933 struct lan78xx_net *dev = pdata->dev;
937 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
940 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
941 DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
943 for (i = 1; i < NUM_OF_MAF; i++) {
/* MAF_HI is cleared first and written last — appears intended to keep
 * the filter entry invalid while its halves are updated; confirm.
 */
944 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
945 ret = lan78xx_write_reg(dev, MAF_LO(i),
946 pdata->pfilter_table[i][1]);
947 ret = lan78xx_write_reg(dev, MAF_HI(i),
948 pdata->pfilter_table[i][0]);
951 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
/* ndo_set_rx_mode handler. Rebuilds the shadow RFE control word,
 * multicast hash and perfect-filter tables under rfe_ctl_lock (this
 * path cannot sleep), then defers the actual register writes to
 * lan78xx_deferred_multicast_write() via schedule_work().
 */
954 static void lan78xx_set_multicast(struct net_device *netdev)
956 struct lan78xx_net *dev = netdev_priv(netdev);
957 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
961 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
963 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
964 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
966 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
967 pdata->mchash_table[i] = 0;
968 /* pfilter_table[0] has own HW address */
969 for (i = 1; i < NUM_OF_MAF; i++) {
970 pdata->pfilter_table[i][0] =
971 pdata->pfilter_table[i][1] = 0;
/* broadcast is always accepted */
974 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
976 if (dev->net->flags & IFF_PROMISC) {
977 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
978 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
980 if (dev->net->flags & IFF_ALLMULTI) {
981 netif_dbg(dev, drv, dev->net,
982 "receive all multicast enabled");
983 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
987 if (netdev_mc_count(dev->net)) {
988 struct netdev_hw_addr *ha;
991 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
993 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
996 netdev_for_each_mc_addr(ha, netdev) {
997 /* set first 32 into Perfect Filter */
999 lan78xx_set_addr_filter(pdata, i, ha->addr);
/* overflow beyond the perfect filter falls back to the 512-bit hash */
1001 u32 bitnum = lan78xx_hash(ha->addr);
1003 pdata->mchash_table[bitnum / 32] |=
1004 (1 << (bitnum % 32));
1005 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1011 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1013 /* defer register writes to a sleepable context */
1014 schedule_work(&pdata->set_multicast);
/* Program MAC flow control from the resolved pause capabilities:
 * FLOW gets TX/RX pause enables (TX side with max pause time 0xFFFF);
 * FCT_FLOW gets speed-dependent FIFO thresholds (set on elided lines
 * for SuperSpeed/HighSpeed). Thresholds are written before enabling
 * flow, per the comment below.
 */
1017 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1018 u16 lcladv, u16 rmtadv)
1020 u32 flow = 0, fct_flow = 0;
1023 u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1025 if (cap & FLOW_CTRL_TX)
1026 flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);
1028 if (cap & FLOW_CTRL_RX)
1029 flow |= FLOW_CR_RX_FCEN_;
1031 if (dev->udev->speed == USB_SPEED_SUPER)
1033 else if (dev->udev->speed == USB_SPEED_HIGH)
1036 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1037 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1038 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1040 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1042 /* threshold value should be set before enabling flow */
1043 ret = lan78xx_write_reg(dev, FLOW, flow);
/* Handle a PHY link-change event (deferred from the interrupt URB via
 * EVENT_LINK_RESET): clear PHY and chip interrupt status, then on
 * link-down drop the carrier and update MAC_CR; on link-up resolve
 * speed/duplex via mii, tune USB U1/U2 power states for SuperSpeed,
 * program flow control and raise the carrier.
 */
1048 static int lan78xx_link_reset(struct lan78xx_net *dev)
1050 struct mii_if_info *mii = &dev->mii;
1051 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1052 int ladv, radv, ret;
1055 /* clear PHY interrupt status */
1057 ret = lan78xx_mdio_read(dev->net, mii->phy_id, PHY_VTSE_INT_STS);
1058 if (unlikely(ret < 0))
1061 /* clear LAN78xx interrupt status */
1062 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1063 if (unlikely(ret < 0))
1066 if (!mii_link_ok(mii) && dev->link_on) {
1067 dev->link_on = false;
1068 netif_carrier_off(dev->net);
1071 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1072 if (unlikely(ret < 0))
1075 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1076 if (unlikely(ret < 0))
1078 } else if (mii_link_ok(mii) && !dev->link_on) {
1079 dev->link_on = true;
1081 mii_check_media(mii, 1, 1);
1082 mii_ethtool_gset(&dev->mii, &ecmd);
/* read-to-clear the PHY interrupt status again after renegotiation */
1084 mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
1086 if (dev->udev->speed == USB_SPEED_SUPER) {
1087 if (ethtool_cmd_speed(&ecmd) == 1000) {
/* at gigabit: disable U2, enable U1 (branch partially elided) */
1089 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1090 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1091 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1093 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1094 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1095 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1097 /* enable U1 & U2 */
1098 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1099 buf |= USB_CFG1_DEV_U2_INIT_EN_;
1100 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1101 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1105 ladv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
1109 radv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
1113 netif_dbg(dev, link, dev->net,
1114 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1115 ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
1117 ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
1118 netif_carrier_on(dev->net);
1124 /* some work can't be done in tasklets, so we use keventd
1126 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1127 * but tasklet_schedule() doesn't. hope the failure is rare.
1129 void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
/* mark the EVENT_* bit in dev->flags, then kick the deferred-work queue;
 * the bit survives even if the work item was already queued
 */
1131 set_bit(work, &dev->flags);
1132 if (!schedule_delayed_work(&dev->wq, 0))
1133 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
/* Interrupt-endpoint URB completion: the device posts a 4-byte
 * little-endian status word. A PHY interrupt defers link handling to
 * lan78xx_link_reset() via EVENT_LINK_RESET; anything else is logged.
 */
1136 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1140 if (urb->actual_length != 4) {
1141 netdev_warn(dev->net,
1142 "unexpected urb length %d", urb->actual_length);
1146 memcpy(&intdata, urb->transfer_buffer, 4);
1147 le32_to_cpus(&intdata);
1149 if (intdata & INT_ENP_PHY_INT) {
1150 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1151 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1153 netdev_warn(dev->net,
1154 "unexpected interrupt: 0x%08x\n", intdata);
/* ethtool .get_eeprom_len: advertised EEPROM size is fixed (512). */
1157 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1159 return MAX_EEPROM_SIZE;
/* ethtool .get_eeprom: raw EEPROM dump (no signature check) tagged
 * with the driver's EEPROM magic.
 */
1162 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1163 struct ethtool_eeprom *ee, u8 *data)
1165 struct lan78xx_net *dev = netdev_priv(netdev);
1167 ee->magic = LAN78XX_EEPROM_MAGIC;
1169 return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
/* ethtool .set_eeprom: only whole-image updates from offset 0 are
 * allowed, gated on the magic and the image's leading signature byte
 * (EEPROM_INDICATOR or OTP_INDICATOR_1). Both paths write via
 * lan78xx_write_raw_eeprom().
 */
1172 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1173 struct ethtool_eeprom *ee, u8 *data)
1175 struct lan78xx_net *dev = netdev_priv(netdev);
1177 /* Allow entire eeprom update only */
1178 if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1179 (ee->offset == 0) &&
1181 (data[0] == EEPROM_INDICATOR))
1182 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1183 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1184 (ee->offset == 0) &&
/* NOTE(review): OTP magic also lands in lan78xx_write_raw_eeprom() —
 * verify an OTP write path was intended here rather than EEPROM.
 */
1186 (data[0] == OTP_INDICATOR_1))
1187 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
/* ethtool .get_strings: copy out the statistic names for ETH_SS_STATS */
1192 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1195 if (stringset == ETH_SS_STATS)
1196 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
/* ethtool .get_sset_count: number of stat strings for ETH_SS_STATS */
1199 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1201 if (sset == ETH_SS_STATS)
1202 return ARRAY_SIZE(lan78xx_gstrings);
/* ethtool .get_ethtool_stats: pull the hardware stats block and widen
 * each u32 counter into the u64 output array, under autopm.
 */
1207 static void lan78xx_get_stats(struct net_device *netdev,
1208 struct ethtool_stats *stats, u64 *data)
1210 struct lan78xx_net *dev = netdev_priv(netdev);
1211 struct lan78xx_statstage lan78xx_stat;
1215 if (usb_autopm_get_interface(dev->intf) < 0)
1218 if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
1219 p = (u32 *)&lan78xx_stat;
1220 for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
1224 usb_autopm_put_interface(dev->intf);
/* ethtool .get_wol: report Wake-on-LAN support/state. Support is
 * conditional on remote wakeup being enabled in USB_CFG0; the current
 * selection comes from the pdata->wol shadow.
 */
1227 static void lan78xx_get_wol(struct net_device *netdev,
1228 struct ethtool_wolinfo *wol)
1230 struct lan78xx_net *dev = netdev_priv(netdev);
1233 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1235 if (usb_autopm_get_interface(dev->intf) < 0)
1238 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1239 if (unlikely(ret < 0)) {
1243 if (buf & USB_CFG_RMT_WKP_) {
1244 wol->supported = WAKE_ALL;
1245 wol->wolopts = pdata->wol;
1252 usb_autopm_put_interface(dev->intf);
/* ethtool .set_wol: accumulate the requested wake sources into the
 * pdata->wol shadow (applied to hardware at suspend time) and tell the
 * USB core whether the device may wake the system.
 */
1255 static int lan78xx_set_wol(struct net_device *netdev,
1256 struct ethtool_wolinfo *wol)
1258 struct lan78xx_net *dev = netdev_priv(netdev);
1259 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1262 ret = usb_autopm_get_interface(dev->intf);
1267 if (wol->wolopts & WAKE_UCAST)
1268 pdata->wol |= WAKE_UCAST;
1269 if (wol->wolopts & WAKE_MCAST)
1270 pdata->wol |= WAKE_MCAST;
1271 if (wol->wolopts & WAKE_BCAST)
1272 pdata->wol |= WAKE_BCAST;
1273 if (wol->wolopts & WAKE_MAGIC)
1274 pdata->wol |= WAKE_MAGIC;
1275 if (wol->wolopts & WAKE_PHY)
1276 pdata->wol |= WAKE_PHY;
1277 if (wol->wolopts & WAKE_ARP)
1278 pdata->wol |= WAKE_ARP;
1280 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1282 usb_autopm_put_interface(dev->intf);
/* ethtool .get_eee: report Energy-Efficient Ethernet state. When
 * MAC_CR_EEE_EN_ is set, read local/link-partner EEE advertisement
 * from PHY MMD device 7 and the TX LPI delay from EEE_TX_LPI_REQ_DLY;
 * otherwise report EEE disabled (still exposing LP advertisement).
 */
1287 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1289 struct lan78xx_net *dev = netdev_priv(net);
1294 ret = usb_autopm_get_interface(dev->intf);
1298 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1299 if (buf & MAC_CR_EEE_EN_) {
1300 buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
1301 PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT);
1302 adv = mmd_eee_adv_to_ethtool_adv_t(buf);
1303 buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
1304 PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
1305 lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
1307 edata->eee_enabled = true;
1308 edata->supported = true;
1309 edata->eee_active = !!(adv & lpadv);
1310 edata->advertised = adv;
1311 edata->lp_advertised = lpadv;
1312 edata->tx_lpi_enabled = true;
1313 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1314 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1315 edata->tx_lpi_timer = buf;
1317 buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
1318 PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
/* NOTE(review): lpadv already holds a converted value here, yet it is
 * converted again on line 1325 — the conversion appears doubled in the
 * disabled branch; confirm which variable holds the raw MMD value.
 */
1319 lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
1321 edata->eee_enabled = false;
1322 edata->eee_active = false;
1323 edata->supported = false;
1324 edata->advertised = 0;
1325 edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(lpadv);
1326 edata->tx_lpi_enabled = false;
1327 edata->tx_lpi_timer = 0;
1330 usb_autopm_put_interface(dev->intf);
/* ethtool .set_eee: toggle MAC_CR_EEE_EN_ and, when enabling, push the
 * requested advertisement into PHY MMD device 7.
 */
1335 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1337 struct lan78xx_net *dev = netdev_priv(net);
1341 ret = usb_autopm_get_interface(dev->intf);
1345 if (edata->eee_enabled) {
1346 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1347 buf |= MAC_CR_EEE_EN_;
1348 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1350 buf = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
1351 lan78xx_mmd_write(dev->net, dev->mii.phy_id,
1352 PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT, buf);
1354 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1355 buf &= ~MAC_CR_EEE_EN_;
1356 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1359 usb_autopm_put_interface(dev->intf);
/* ethtool .get_link: link state straight from the mii library */
1364 static u32 lan78xx_get_link(struct net_device *net)
1366 struct lan78xx_net *dev = netdev_priv(net);
1368 return mii_link_ok(&dev->mii);
/* ethtool .nway_reset: restart autonegotiation; requires working mdio
 * accessors.
 */
1371 int lan78xx_nway_reset(struct net_device *net)
1373 struct lan78xx_net *dev = netdev_priv(net);
1375 if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
1378 return mii_nway_restart(&dev->mii);
/* ethtool .get_drvinfo: driver name/version and the USB bus path.
 * NOTE(review): strncpy does not guarantee NUL-termination if the
 * source fills the buffer; DRIVER_NAME/DRIVER_VERSION are short here,
 * but strlcpy/strscpy would be the safer idiom.
 */
1381 static void lan78xx_get_drvinfo(struct net_device *net,
1382 struct ethtool_drvinfo *info)
1384 struct lan78xx_net *dev = netdev_priv(net);
1386 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1387 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1388 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
/* ethtool .get_msglevel / .set_msglevel: netif message-level accessor
 * pair backed by dev->msg_enable.
 */
1391 static u32 lan78xx_get_msglevel(struct net_device *net)
1393 struct lan78xx_net *dev = netdev_priv(net);
1395 return dev->msg_enable;
1398 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1400 struct lan78xx_net *dev = netdev_priv(net);
1402 dev->msg_enable = level;
/* ethtool .get_settings: base link settings via mii_ethtool_gset(),
 * plus the MDI/MDI-X state read from the PHY's extended-mode control
 * register (reached by switching to extended GPIO page 1 and back).
 */
1405 static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
1407 struct lan78xx_net *dev = netdev_priv(net);
1408 struct mii_if_info *mii = &dev->mii;
1412 if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
1415 ret = usb_autopm_get_interface(dev->intf);
1419 ret = mii_ethtool_gset(&dev->mii, cmd);
1421 mii->mdio_write(mii->dev, mii->phy_id,
1422 PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
1423 buf = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
1424 mii->mdio_write(mii->dev, mii->phy_id,
1425 PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
1427 buf &= PHY_EXT_MODE_CTRL_MDIX_MASK_;
1428 if (buf == PHY_EXT_MODE_CTRL_AUTO_MDIX_) {
1429 cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
1430 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
1431 } else if (buf == PHY_EXT_MODE_CTRL_MDI_) {
1432 cmd->eth_tp_mdix = ETH_TP_MDI;
1433 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
1434 } else if (buf == PHY_EXT_MODE_CTRL_MDI_X_) {
1435 cmd->eth_tp_mdix = ETH_TP_MDI_X;
1436 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
1439 usb_autopm_put_interface(dev->intf);
/* ethtool .set_settings: if the requested MDI/MDI-X control differs
 * from the cached dev->mdix_ctrl, rewrite the PHY's extended-mode
 * control bits (page 1, read-modify-write, back to page 0), then apply
 * speed/duplex via mii_ethtool_sset(). With autoneg off the link is
 * forced down/up so the new forced mode takes effect.
 */
1444 static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
1446 struct lan78xx_net *dev = netdev_priv(net);
1447 struct mii_if_info *mii = &dev->mii;
1451 if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
1454 ret = usb_autopm_get_interface(dev->intf);
1458 if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
1459 if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI) {
1460 mii->mdio_write(mii->dev, mii->phy_id,
1462 PHY_EXT_GPIO_PAGE_SPACE_1);
1463 temp = mii->mdio_read(mii->dev, mii->phy_id,
1465 temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
1466 mii->mdio_write(mii->dev, mii->phy_id,
1468 temp | PHY_EXT_MODE_CTRL_MDI_);
1469 mii->mdio_write(mii->dev, mii->phy_id,
1471 PHY_EXT_GPIO_PAGE_SPACE_0);
1472 } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_X) {
1473 mii->mdio_write(mii->dev, mii->phy_id,
1475 PHY_EXT_GPIO_PAGE_SPACE_1);
1476 temp = mii->mdio_read(mii->dev, mii->phy_id,
1478 temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
1479 mii->mdio_write(mii->dev, mii->phy_id,
1481 temp | PHY_EXT_MODE_CTRL_MDI_X_);
1482 mii->mdio_write(mii->dev, mii->phy_id,
1484 PHY_EXT_GPIO_PAGE_SPACE_0);
1485 } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) {
1486 mii->mdio_write(mii->dev, mii->phy_id,
1488 PHY_EXT_GPIO_PAGE_SPACE_1);
1489 temp = mii->mdio_read(mii->dev, mii->phy_id,
1491 temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
1492 mii->mdio_write(mii->dev, mii->phy_id,
1494 temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
1495 mii->mdio_write(mii->dev, mii->phy_id,
1497 PHY_EXT_GPIO_PAGE_SPACE_0);
1501 /* change speed & duplex */
1502 ret = mii_ethtool_sset(&dev->mii, cmd);
1504 if (!cmd->autoneg) {
1505 /* force link down */
/* NOTE(review): link is "forced down" by setting BMCR_LOOPBACK, then
 * BMCR is restored — confirm loopback (vs. power-down) is the intended
 * mechanism for bouncing the link here.
 */
1506 temp = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
1507 mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR,
1508 temp | BMCR_LOOPBACK);
1510 mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, temp);
1513 usb_autopm_put_interface(dev->intf);
/* ethtool operations table: link state, driver info, message level,
 * link settings, EEPROM access, statistics, Wake-on-LAN and EEE.
 */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link = lan78xx_get_link,
	.nway_reset = lan78xx_nway_reset,
	.get_drvinfo = lan78xx_get_drvinfo,
	.get_msglevel = lan78xx_get_msglevel,
	.set_msglevel = lan78xx_set_msglevel,
	.get_settings = lan78xx_get_settings,
	.set_settings = lan78xx_set_settings,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom = lan78xx_ethtool_get_eeprom,
	.set_eeprom = lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings = lan78xx_get_strings,
	.get_wol = lan78xx_get_wol,
	.set_wol = lan78xx_set_wol,
	.get_eee = lan78xx_get_eee,
	.set_eee = lan78xx_set_eee,
/* ndo_do_ioctl: forward SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG to the
 * generic MII ioctl handler; only valid while the interface is up.
 */
static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
	struct lan78xx_net *dev = netdev_priv(netdev);

	if (!netif_running(netdev))

	return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
/* Determine and program the MAC address at init time.
 * Order of preference: address already latched in RX_ADDRL/H (e.g. set
 * by bootloader), then EEPROM/OTP, then a random locally-administered
 * address.  NOTE(review): the ret values from the register accessors
 * are assigned but never checked here.
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
	u32 addr_lo, addr_hi;

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	/* unpack the two little-endian registers into a 6-byte address */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		/* reading mac address from EEPROM or OTP */
		if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
		    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
			if (is_valid_ether_addr(addr)) {
				/* eeprom values are valid so use them */
				netif_dbg(dev, ifup, dev->net,
					  "MAC address read from EEPROM");
				/* generate random MAC */
				random_ether_addr(addr);
				netif_dbg(dev, ifup, dev->net,
					  "MAC address set to random addr");

			/* write the chosen address back to the chip */
			addr_lo = addr[0] | (addr[1] << 8) |
				  (addr[2] << 16) | (addr[3] << 24);
			addr_hi = addr[4] | (addr[5] << 8);

			ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
			ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

			/* EEPROM/OTP read failed: generate random MAC */
			random_ether_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");

	/* install as perfect-filter entry 0 so unicast RX works */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
/* Wire up the generic MII library glue for the internal GMII PHY. */
static void lan78xx_mii_init(struct lan78xx_net *dev)
	/* Initialize MII structure */
	dev->mii.dev = dev->net;
	dev->mii.mdio_read = lan78xx_mdio_read;
	dev->mii.mdio_write = lan78xx_mdio_write;
	dev->mii.phy_id_mask = 0x1f;
	dev->mii.reg_num_mask = 0x1f;
	/* internal PHY sits at a fixed MDIO address */
	dev->mii.phy_id = INTERNAL_PHY_ID;
	dev->mii.supports_gmii = true;
/* One-time PHY setup: advertise all speeds plus pause, force auto-MDIX,
 * drop the 1000HD advertisement (unsupported by the MAC), and arm the
 * PHY's link-change interrupt.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
	struct mii_if_info *mii = &dev->mii;

	if ((!mii->mdio_write) || (!mii->mdio_read))

	/* advertise all abilities plus CSMA and pause frames */
	temp = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
	temp |= ADVERTISE_ALL;
	mii->mdio_write(mii->dev, mii->phy_id, MII_ADVERTISE,
			temp | ADVERTISE_CSMA |
			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	/* set to AUTOMDIX (read-modify-write in extended page 1) */
	mii->mdio_write(mii->dev, mii->phy_id,
			PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
	temp = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
	temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
	mii->mdio_write(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL,
			temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
	mii->mdio_write(mii->dev, mii->phy_id,
			PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
	dev->mdix_ctrl = ETH_TP_MDI_AUTO;

	/* MAC doesn't support 1000HD */
	temp = mii->mdio_read(mii->dev, mii->phy_id, MII_CTRL1000);
	mii->mdio_write(mii->dev, mii->phy_id, MII_CTRL1000,
			temp & ~ADVERTISE_1000HALF);

	/* clear interrupt (read clears status) and unmask link changes */
	mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
	mii->mdio_write(mii->dev, mii->phy_id, PHY_VTSE_INT_MASK,
			PHY_VTSE_INT_MASK_MDINTPIN_EN_ |
			PHY_VTSE_INT_MASK_LINK_CHANGE_);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
/* Program the MAC's maximum RX frame size.  The receiver is disabled
 * around the change (if it was running) and re-enabled afterwards.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);

	/* remember whether RX was enabled so we can restore it */
	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);

		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* Asynchronously unlink every in-flight URB on queue @q.
 * The queue lock is dropped around usb_unlink_urb() because unlink may
 * invoke the completion handler, which takes the same lock (defer_bh).
 * Returns the number of URBs whose unlink was initiated.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;

		/* find the next entry not already being unlinked */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)

		entry->state = unlink_start;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		spin_lock_irqsave(&q->lock, flags);
	spin_unlock_irqrestore(&q->lock, flags);
/* ndo_change_mtu: validate the new MTU, program the hardware RX frame
 * limit, and grow the RX URB size if the hard MTU was driving it.  When
 * the URB size grows while running, in-flight RX URBs are unlinked so
 * they get resubmitted at the new size.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;

	if (new_mtu > MAX_SINGLE_PACKET_SIZE)

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
/* ndo_set_mac_address: validate and install a new MAC address while the
 * interface is down, then program it into RX_ADDRL/RX_ADDRH.
 */
int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 addr_lo, addr_hi;

	/* refuse to change the address while the device is running */
	if (netif_running(netdev))

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	/* pack the 6 bytes into the two little-endian registers */
	addr_lo = netdev->dev_addr[0] |
		  netdev->dev_addr[1] << 8 |
		  netdev->dev_addr[2] << 16 |
		  netdev->dev_addr[3] << 24;
	addr_hi = netdev->dev_addr[4] |
		  netdev->dev_addr[5] << 8;

	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
/* Enable or disable Rx checksum offload engine */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;

	/* rfe_ctl is shared state; compute the new value under the lock */
	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* write-back happens outside the lock (USB register write) */
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1822 static void lan78xx_deferred_vlan_write(struct work_struct *param)
1824 struct lan78xx_priv *pdata =
1825 container_of(param, struct lan78xx_priv, set_vlan);
1826 struct lan78xx_net *dev = pdata->dev;
1828 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
1829 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
1832 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
1833 __be16 proto, u16 vid)
1835 struct lan78xx_net *dev = netdev_priv(netdev);
1836 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1838 u16 vid_dword_index;
1840 vid_dword_index = (vid >> 5) & 0x7F;
1841 vid_bit_index = vid & 0x1F;
1843 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
1845 /* defer register writes to a sleepable context */
1846 schedule_work(&pdata->set_vlan);
1851 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
1852 __be16 proto, u16 vid)
1854 struct lan78xx_net *dev = netdev_priv(netdev);
1855 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1857 u16 vid_dword_index;
1859 vid_dword_index = (vid >> 5) & 0x7F;
1860 vid_bit_index = vid & 0x1F;
1862 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
1864 /* defer register writes to a sleepable context */
1865 schedule_work(&pdata->set_vlan);
/* Program the USB LTM (Latency Tolerance Messaging) registers.  If LTM
 * is enabled in USB_CFG1 the six values are loaded from EEPROM (or OTP
 * as fallback); otherwise the zero-initialised defaults are written.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			/* 24 == expected payload length marker */
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
/* Full device initialisation: lite-reset the chip, program MAC address,
 * USB/burst/FIFO configuration, receive filtering, PHY reset + init,
 * and finally enable the MAC/FCT TX and RX paths and kick autoneg.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long timeout;

	/* issue a Lite Reset and poll (1s timeout) for self-clear */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	lan78xx_init_ltm(dev);

	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* size the burst cap and queue lengths for the bus speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* enable bulk-in continuous engine */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear pending interrupts and disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset the PHY and poll (1s timeout) for self-clear */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
	} while (buf & PMT_CTL_PHY_RST_);

	lan78xx_mii_init(dev);

	ret = lan78xx_phy_init(dev);

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);

	buf |= MAC_CR_GMII_EN_;
	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;

	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* when EEE is on, tweak the PHY's MMD 7.0x3C register */
	if (buf & MAC_CR_EEE_EN_)
		lan78xx_mmd_write(dev->net, dev->mii.phy_id, 0x07, 0x3C, 0x06);

	/* enable PHY interrupts */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	buf |= INT_ENP_PHY_INT;
	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);

	/* enable the TX path: MAC transmitter then TX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable the RX path: MAC receiver then RX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	if (!mii_nway_restart(&dev->mii))
		netif_dbg(dev, link, dev->net, "autoneg initiated");
/* ndo_open: wake the device, run full init, start the interrupt URB for
 * link-change notification, and bring the TX queue up.  The link itself
 * comes up later via the deferred EVENT_LINK_RESET work.
 */
static int lan78xx_open(struct net_device *net)
	struct lan78xx_net *dev = netdev_priv(net);

	ret = usb_autopm_get_interface(dev->intf);

	ret = lan78xx_reset(dev);

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

	usb_autopm_put_interface(dev->intf);
/* Unlink all TX/RX URBs and wait (uninterruptibly, in short timeouts)
 * for their completions to drain.  NOTE(review): the wait condition uses
 * && across rxq/txq/done, so the loop exits as soon as ANY of the three
 * queues is empty — confirm whether || was intended.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&unlink_wakeup, &wait);
/* ndo_stop: stop the queue, drain all URBs, kill the interrupt URB and
 * deferred work, then release the autopm reference taken in open.
 */
int lan78xx_stop(struct net_device *net)
	struct lan78xx_net *dev = netdev_priv(net);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);
/* Flatten a possibly-fragmented skb into contiguous linear memory so the
 * TX path can submit it as a single bulk buffer.
 */
static int lan78xx_linearize(struct sk_buff *skb)
	return skb_linearize(skb);
/* Prepend the two 32-bit TX command words (length/FCS, checksum offload,
 * LSO/MSS and VLAN tag flags) in front of the frame data.  If the skb
 * lacks TX_OVERHEAD bytes of headroom it is copied into a new skb.
 * Command words are written little-endian, tx_cmd_b first.
 */
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
				       struct sk_buff *skb, gfp_t flags)
	u32 tx_cmd_a, tx_cmd_b;

	if (skb_headroom(skb) < TX_OVERHEAD) {
		struct sk_buff *skb2;

		/* reallocate with enough headroom; free the original */
		skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
		dev_kfree_skb_any(skb);

	if (lan78xx_linearize(skb) < 0)

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	/* request IP + TCP/UDP checksum insertion when offloaded */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	if (skb_is_gso(skb)) {
		/* clamp MSS to the hardware minimum */
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;

	/* push the command words in front of the payload, B then A */
	cpu_to_le32s(&tx_cmd_b);
	memcpy(skb->data, &tx_cmd_b, 4);

	cpu_to_le32s(&tx_cmd_a);
	memcpy(skb->data, &tx_cmd_a, 4);
/* Move @skb from its active queue to dev->done and hand processing off
 * to the tasklet.  The two queue locks are taken hand-over-hand: irqs
 * stay disabled (saved in @flags) across both critical sections.
 * Returns the skb's previous state so callers can detect races with
 * unlink_start.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* only the first enqueue needs to kick the tasklet */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);
/* URB completion handler for bulk-out transfers: update TX statistics,
 * handle error statuses (scheduling EVENT_TX_HALT on pipe errors),
 * release the async autopm reference, and defer cleanup to the tasklet.
 * Runs in interrupt context.
 */
static void tx_complete(struct urb *urb)
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += entry->length;
		dev->net->stats.tx_errors++;

		switch (urb->status) {
			/* endpoint stalled: clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			/* software-driven interface shutdown */
			netif_stop_queue(dev->net);
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
2260 static void lan78xx_queue_skb(struct sk_buff_head *list,
2261 struct sk_buff *newsk, enum skb_state state)
2263 struct skb_data *entry = (struct skb_data *)newsk->cb;
2265 __skb_queue_tail(list, newsk);
2266 entry->state = state;
/* ndo_start_xmit: prepend TX command words, queue the frame on the
 * pending list for the tasklet to batch into bulk URBs, and throttle the
 * net queue when the pending backlog grows.  Always returns NETDEV_TX_OK
 * (failed preps are counted as drops, never requeued).
 */
netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
	struct lan78xx_net *dev = netdev_priv(net);
	struct sk_buff *skb2 = NULL;

	skb_tx_timestamp(skb);
	skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);

		skb_queue_tail(&dev->txq_pend, skb2);

		/* backpressure: stop the stack once the backlog is deep */
		if (skb_queue_len(&dev->txq_pend) > 10)
			netif_stop_queue(net);
		netif_dbg(dev, tx_err, dev->net,
			  "lan78xx_tx_prep return NULL\n");
		dev->net->stats.tx_errors++;
		dev->net->stats.tx_dropped++;

	tasklet_schedule(&dev->bh);

	return NETDEV_TX_OK;
/* Scan the interface's altsettings for one bulk-IN, one bulk-OUT and an
 * interrupt-IN (status) endpoint, then build the bulk pipes and record
 * the status endpoint for link-change notifications.
 */
int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only IN interrupt endpoints are useful */
				if (!usb_endpoint_dir_in(&e->desc))
			case USB_ENDPOINT_XFER_BULK:
				if (usb_endpoint_dir_in(&e->desc)) {
				else if (intr && !status)

	if (!alt || !in || !out)

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;
/* Driver bind: discover endpoints, allocate the per-device private data
 * (stashed as an unsigned long in dev->data[0]), initialise its locks
 * and deferred-work items, choose default offload features, and run the
 * full hardware init.
 */
static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
	struct lan78xx_priv *pdata = NULL;

	ret = lan78xx_get_endpoints(dev, intf);

	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);

	pdata = (struct lan78xx_priv *)(dev->data[0]);
		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");

	spin_lock_init(&pdata->rfe_ctl_lock);
	mutex_init(&pdata->dataport_mutex);

	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);

	/* start with an empty VLAN filter table */
	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
		pdata->vlan_table[i] = 0;

	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);

	dev->net->features = 0;

	if (DEFAULT_TX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_HW_CSUM;

	if (DEFAULT_RX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_RXCSUM;

	if (DEFAULT_TSO_CSUM_ENABLE)
		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;

	/* everything enabled by default is also user-toggleable */
	dev->net->hw_features = dev->net->features;

	/* Init all registers */
	ret = lan78xx_reset(dev);

	dev->net->flags |= IFF_MULTICAST;

	pdata->wol = WAKE_MAGIC;
/* Driver unbind: release the private data allocated in lan78xx_bind(). */
static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	netif_dbg(dev, ifdown, dev->net, "free pdata");
/* Apply the hardware-computed RX checksum to @skb, or mark it
 * CHECKSUM_NONE when RX checksum offload is disabled or the hardware
 * flagged the frame (RX_CMD_A_ICSM_) as unsuitable for offload.
 */
static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
	if (!(dev->net->features & NETIF_F_RXCSUM) ||
	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
		skb->ip_summed = CHECKSUM_NONE;
		/* checksum is in the upper 16 bits of rx_cmd_b */
		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
		skb->ip_summed = CHECKSUM_COMPLETE;
/* Hand a completed RX frame to the network stack.  While RX is paused
 * (EVENT_RX_PAUSED) the frame is parked on rxq_pause instead.
 */
void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);

	skb->protocol = eth_type_trans(skb, dev->net);
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* cb held skb_data state; clear before the stack reuses it */
	memset(skb->cb, 0, sizeof(struct skb_data));

	if (skb_defer_rx_timestamp(skb))

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
/* Parse a bulk-in buffer that may hold several frames.  Each frame is
 * preceded by three little-endian command words (a: flags+length,
 * b: checksum, c: 16-bit), and frames are padded to 4-byte alignment.
 * Single-frame buffers are delivered in place; otherwise each frame is
 * cloned out of the batch.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
	if (skb->len < dev->net->hard_header_len)

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* peel off the three command words */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

			/* mid-batch frame: clone and carve out the payload */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");

			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
			skb_pull(skb, align_count);

	/* NOTE(review): in mainline skb->len is unsigned, making this
	 * < 0 comparison always false — confirm intended check.
	 */
	if (unlikely(skb->len < 0)) {
		netdev_warn(dev->net, "invalid rx length<0 %d", skb->len);
/* Deliver a completed RX buffer: parse it via lan78xx_rx(); on parse
 * failure count an error and requeue the skb on dev->done for cleanup.
 */
static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
	if (!lan78xx_rx(dev, skb)) {
		dev->net->stats.rx_errors++;

	/* single-frame case: hand the original skb up the stack */
	lan78xx_skb_return(dev, skb);

	netif_dbg(dev, rx_err, dev->net, "drop\n");
	dev->net->stats.rx_errors++;

	skb_queue_tail(&dev->done, skb);
2551 static void rx_complete(struct urb *urb);
/* Allocate an rx_urb_size skb, attach it to @urb and submit the bulk-in
 * transfer.  Submission is skipped (and appropriately handled) when the
 * device is gone, not running, halted or asleep; the rxq lock is held
 * across the submit so the queue state stays consistent.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;

	skb = netdev_alloc_skb_ip_align(dev->net, size);

	entry = (struct skb_data *)skb->cb;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
			/* queued: track it on rxq */
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			/* pipe stalled: clear from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			tasklet_schedule(&dev->bh);
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");

	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);

		dev_kfree_skb_any(skb);
/* URB completion handler for bulk-in transfers: classify the status
 * (success, stall, unlink/shutdown, overrun, other errors), defer skb
 * processing to the tasklet, and resubmit the URB when still running.
 * Runs in interrupt context.
 */
static void rx_complete(struct urb *urb)
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);

	switch (urb_status) {
		/* runt buffer: too short to even hold an Ethernet header */
		if (skb->len < dev->net->hard_header_len) {
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		usb_mark_last_busy(dev->udev);
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		dev->net->stats.rx_errors++;
		/* data overrun ... flush fifo? */
		dev->net->stats.rx_over_errors++;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);

	state = defer_bh(dev, skb, &dev->rxq, state);

	/* reuse this URB for the next receive unless it was unlinked */
	if (netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    state != unlink_start) {
		rx_submit(dev, urb, GFP_ATOMIC);

	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
/* TX bottom half: coalesce pending frames (each already carrying its TX
 * command words) into one bulk-out buffer, padded to 4-byte boundaries,
 * then build and submit the URB.  GSO frames are sent on their own; a
 * device asleep in runtime PM gets the URB anchored for resume instead.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int skb_totallen, pkt_cnt;

	/* pass 1: decide how many pending frames fit in one buffer */
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			/* handle previous packets first */
			skb2 = skb_dequeue(tqp);
		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);

	skb_put(skb, skb_totallen);

	/* pass 2: pack the counted frames, 4-byte aligned, freeing each */
	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);

	length = skb_totallen;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
		netif_dbg(dev, tx_err, dev->net, "no urb\n");

	entry = (struct skb_data *)skb->cb;
	entry->length = length;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
		spin_unlock_irqrestore(&dev->txq.lock, flags);

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;

	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");

	ret = usb_submit_urb(urb, GFP_ATOMIC);
		dev->net->trans_start = jiffies;
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		/* throttle when the in-flight queue is full */
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
	dev->net->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	netif_dbg(dev, tx_queued, dev->net,
		  "> tx, len %d, type 0x%x\n", length, skb->protocol);
/* RX bottom half: top up the in-flight RX URB pool (up to 10 submits per
 * pass), rescheduling itself if the pool is still short, and wake the TX
 * queue when there is room for more transmits.
 */
static void lan78xx_rx_bh(struct lan78xx_net *dev)
	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
		for (i = 0; i < 10; i++) {
			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
			urb = usb_alloc_urb(0, GFP_ATOMIC);
				/* -ENOLINK: device detached, stop trying */
				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)

		/* still short of URBs: run again */
		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
			tasklet_schedule(&dev->bh);
	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
		netif_wake_queue(dev->net);
/* Main tasklet: drain the done queue (process received buffers, free
 * completed/cleaned-up URBs), then kick TX batching and RX replenish
 * while the device is present and running.
 */
static void lan78xx_bh(unsigned long param)
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			usb_free_urb(entry->urb);
			usb_free_urb(entry->urb);
			/* unexpected state: log and continue */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		if (!skb_queue_empty(&dev->txq_pend))

		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
/*
 * lan78xx_delayedwork - workqueue handler for deferred error recovery.
 * Services the events queued via lan78xx_defer_kevent(): clears halted
 * TX/RX bulk pipes and performs link resets, taking an autopm reference
 * around each USB access.  Error-path braces and some status checks are
 * elided from this excerpt.
 */
2866 static void lan78xx_delayedwork(struct work_struct *work)
2869 struct lan78xx_net *dev;
2871 dev = container_of(work, struct lan78xx_net, wq.work);
/* --- TX pipe halted: unlink outstanding URBs, clear the halt --- */
2873 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
2874 unlink_urbs(dev, &dev->txq);
/* wake the device before touching it */
2875 status = usb_autopm_get_interface(dev->intf);
2878 status = usb_clear_halt(dev->udev, dev->pipe_out);
2879 usb_autopm_put_interface(dev->intf);
/* -ESHUTDOWN just means the device went away; don't log it */
2882 status != -ESHUTDOWN) {
2883 if (netif_msg_tx_err(dev))
2885 netdev_err(dev->net,
2886 "can't clear tx halt, status %d\n",
2889 clear_bit(EVENT_TX_HALT, &dev->flags);
2890 if (status != -ESHUTDOWN)
2891 netif_wake_queue(dev->net);
/* --- RX pipe halted: same recovery for the IN pipe --- */
2894 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
2895 unlink_urbs(dev, &dev->rxq);
2896 status = usb_autopm_get_interface(dev->intf);
2899 status = usb_clear_halt(dev->udev, dev->pipe_in);
2900 usb_autopm_put_interface(dev->intf);
2903 status != -ESHUTDOWN) {
2904 if (netif_msg_rx_err(dev))
2906 netdev_err(dev->net,
2907 "can't clear rx halt, status %d\n",
2910 clear_bit(EVENT_RX_HALT, &dev->flags);
/* reschedule the bh so RX URBs get resubmitted */
2911 tasklet_schedule(&dev->bh);
/* --- link change: re-read PHY state and reprogram the MAC --- */
2915 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
2918 clear_bit(EVENT_LINK_RESET, &dev->flags);
2919 status = usb_autopm_get_interface(dev->intf);
2922 if (lan78xx_link_reset(dev) < 0) {
2923 usb_autopm_put_interface(dev->intf);
2925 netdev_info(dev->net, "link reset failed (%d)\n",
2928 usb_autopm_put_interface(dev->intf);
/*
 * intr_complete - completion handler for the interrupt-endpoint URB.
 * Processes device status on success, swallows shutdown codes quietly,
 * and resubmits the URB while the interface is running.  The success
 * case label and the "break"/"return" lines are elided from this
 * excerpt.
 */
2933 static void intr_complete(struct urb *urb)
2935 struct lan78xx_net *dev = urb->context;
2936 int status = urb->status;
/* success path: decode the status words the device sent */
2941 lan78xx_status(dev, urb);
2944 /* software-driven interface shutdown */
2945 case -ENOENT: /* urb killed */
2946 case -ESHUTDOWN: /* hardware gone */
2947 netif_dbg(dev, ifdown, dev->net,
2948 "intr shutdown, code %d\n", status);
2951 /* NOTE: not throttling like RX/TX, since this endpoint
2952 * already polls infrequently
2955 netdev_dbg(dev->net, "intr status %d\n", status);
/* don't resubmit once the interface has been brought down */
2959 if (!netif_running(dev->net))
/* clear the buffer so stale status isn't re-read on resubmit */
2962 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
2963 status = usb_submit_urb(urb, GFP_ATOMIC);
2965 netif_err(dev, timer, dev->net,
2966 "intr resubmit --> %d\n", status);
/*
 * lan78xx_disconnect - USB disconnect callback.
 * Tears down in the reverse order of probe: detach intfdata, unregister
 * the netdev, cancel deferred work, drop anchored URBs, unbind, and
 * kill/free the interrupt URB.  The NULL-dev early return and the final
 * usb_put_dev/free_netdev lines appear elided from this excerpt.
 */
2969 static void lan78xx_disconnect(struct usb_interface *intf)
2971 struct lan78xx_net *dev;
2972 struct usb_device *udev;
2973 struct net_device *net;
2975 dev = usb_get_intfdata(intf);
/* prevent any further callbacks from finding the private data */
2976 usb_set_intfdata(intf, NULL);
2980 udev = interface_to_usbdev(intf);
2983 unregister_netdev(net);
/* make sure the delayed-work handler is not still running */
2985 cancel_delayed_work_sync(&dev->wq);
/* free any transmits parked on the deferred anchor */
2987 usb_scuttle_anchored_urbs(&dev->deferred);
2989 lan78xx_unbind(dev, intf);
2991 usb_kill_urb(dev->urb_intr);
2992 usb_free_urb(dev->urb_intr);
/*
 * lan78xx_tx_timeout - ndo_tx_timeout hook.
 * Called by the networking core when the TX watchdog
 * (TX_TIMEOUT_JIFFIES) fires: unlink all in-flight TX URBs and
 * schedule the bh tasklet to recover.
 */
2998 void lan78xx_tx_timeout(struct net_device *net)
3000 struct lan78xx_net *dev = netdev_priv(net);
3002 unlink_urbs(dev, &dev->txq);
3003 tasklet_schedule(&dev->bh);
/* net_device_ops table wiring the stack's hooks to this driver's
 * open/stop/xmit/timeout/MTU/MAC/ioctl/rx-mode/features/VLAN handlers.
 */
3006 static const struct net_device_ops lan78xx_netdev_ops = {
3007 .ndo_open = lan78xx_open,
3008 .ndo_stop = lan78xx_stop,
3009 .ndo_start_xmit = lan78xx_start_xmit,
3010 .ndo_tx_timeout = lan78xx_tx_timeout,
3011 .ndo_change_mtu = lan78xx_change_mtu,
3012 .ndo_set_mac_address = lan78xx_set_mac_addr,
3013 .ndo_validate_addr = eth_validate_addr,
3014 .ndo_do_ioctl = lan78xx_ioctl,
3015 .ndo_set_rx_mode = lan78xx_set_multicast,
3016 .ndo_set_features = lan78xx_set_features,
3017 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3018 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
/*
 * lan78xx_probe - USB probe: allocate and register the network device.
 * Allocates the etherdev, initializes queues/locks/tasklet/work, binds
 * the hardware, resolves bulk/interrupt endpoints and pipes, sets up
 * the interrupt URB, registers the netdev and enables wakeup/autopm.
 * NOTE(review): error-check branches and goto labels are elided from
 * this excerpt (e.g. the kmalloc(buf) result is not visibly checked);
 * comments cover only the visible statements.
 */
3021 static int lan78xx_probe(struct usb_interface *intf,
3022 const struct usb_device_id *id)
3024 struct lan78xx_net *dev;
3025 struct net_device *netdev;
3026 struct usb_device *udev;
3032 udev = interface_to_usbdev(intf);
/* take a reference on the usb_device for the driver's lifetime */
3033 udev = usb_get_dev(udev);
3036 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3038 dev_err(&intf->dev, "Error: OOM\n");
3042 /* netdev_printk() needs this */
3043 SET_NETDEV_DEV(netdev, &intf->dev);
3045 dev = netdev_priv(netdev);
/* default message level: driver, probe and link events */
3049 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3050 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3052 skb_queue_head_init(&dev->rxq);
3053 skb_queue_head_init(&dev->txq);
3054 skb_queue_head_init(&dev->done);
3055 skb_queue_head_init(&dev->rxq_pause);
3056 skb_queue_head_init(&dev->txq_pend);
3057 mutex_init(&dev->phy_mutex);
3059 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3060 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3061 init_usb_anchor(&dev->deferred);
3063 netdev->netdev_ops = &lan78xx_netdev_ops;
3064 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3065 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3067 ret = lan78xx_bind(dev, intf);
3070 strcpy(netdev->name, "eth%d");
/* clamp MTU to what the hardware framing allows */
3072 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3073 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
/* endpoints assumed at fixed indices 0/1/2 of the current altsetting
 * -- TODO confirm this matches the device's descriptor layout
 */
3075 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3076 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3077 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3079 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3080 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3082 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3083 dev->ep_intr->desc.bEndpointAddress &
3084 USB_ENDPOINT_NUMBER_MASK);
3085 period = dev->ep_intr->desc.bInterval;
3087 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3088 buf = kmalloc(maxp, GFP_KERNEL);
3090 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3091 if (!dev->urb_intr) {
3095 usb_fill_int_urb(dev->urb_intr, dev->udev,
3096 dev->pipe_intr, buf, maxp,
3097 intr_complete, dev, period);
/* OUT-direction max packet, used for URB_ZERO_PACKET decisions */
3101 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3103 /* driver requires remote-wakeup capability during autosuspend. */
3104 intf->needs_remote_wakeup = 1;
3106 ret = register_netdev(netdev);
3108 netif_err(dev, probe, netdev, "couldn't register the device\n");
3112 usb_set_intfdata(intf, dev);
3114 ret = device_set_wakeup_enable(&udev->dev, true);
3116 /* Default delay of 2sec has more overhead than advantage.
3117 * Set to 10sec as default.
3119 pm_runtime_set_autosuspend_delay(&udev->dev,
3120 DEFAULT_AUTOSUSPEND_DELAY);
/* error-path unwind (labels elided in this excerpt) */
3125 lan78xx_unbind(dev, intf);
3127 free_netdev(netdev);
/*
 * lan78xx_wakeframe_crc16 - bitwise CRC-16 (polynomial 0x8005) over a
 * buffer, used to program wake-up-frame match registers (WUF_CFG).
 * NOTE(review): most of the inner bit loop (shift, msb extraction,
 * data advance, return) is elided from this excerpt -- only the loop
 * skeleton and the feedback step are visible.
 */
3134 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3136 const u16 crc16poly = 0x8005;
3142 for (i = 0; i < len; i++) {
3144 for (bit = 0; bit < 8; bit++) {
/* feedback: XOR of crc msb with the next input bit */
3148 if (msb ^ (u16)(data & 1)) {
3150 crc |= (u16)0x0001U;
/*
 * lan78xx_set_suspend - program wake-on-LAN sources before suspend.
 * Disables the MAC TX/RX paths, clears stale wake status, then builds
 * WUCSR and PMT_CTL values (and WUF_CFG/WUF_MASK wake-frame filters)
 * for each requested WAKE_* flag, finally re-enabling the receiver so
 * wake frames can be seen.  Intermediate lan78xx_read/write_reg return
 * codes are collected in `ret` but not visibly checked in this excerpt.
 */
3159 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
/* first octets of IPv4 (01:00:5E) and IPv6 (33:33) multicast MACs,
 * and the ARP ethertype (0x0806), used as wake-frame match patterns
 */
3167 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3168 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3169 const u8 arp_type[2] = { 0x08, 0x06 };
/* quiesce the MAC while reprogramming wake logic */
3171 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3172 buf &= ~MAC_TX_TXEN_;
3173 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3174 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3175 buf &= ~MAC_RX_RXEN_;
3176 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* clear previous wake config and latched wake sources */
3178 ret = lan78xx_write_reg(dev, WUCSR, 0);
3179 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3180 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3185 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3186 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3187 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
/* wipe all wake-up-frame filter slots before reprogramming */
3189 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3190 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3193 if (wol & WAKE_PHY) {
3194 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3196 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3197 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3198 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3200 if (wol & WAKE_MAGIC) {
3201 temp_wucsr |= WUCSR_MPEN_;
3203 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3204 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
/* magic packet uses the deepest suspend mode (SUS_MODE_3) */
3205 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3207 if (wol & WAKE_BCAST) {
3208 temp_wucsr |= WUCSR_BCST_EN_;
3210 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3211 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3212 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3214 if (wol & WAKE_MCAST) {
3215 temp_wucsr |= WUCSR_WAKE_EN_;
3217 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3218 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3219 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3221 WUF_CFGX_TYPE_MCAST_ |
3222 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3223 (crc & WUF_CFGX_CRC16_MASK_));
/* mask 0x7: match the first 3 bytes of the destination MAC */
3225 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3226 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3227 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3228 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3231 /* for IPv6 Multicast */
3232 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3233 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3235 WUF_CFGX_TYPE_MCAST_ |
3236 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3237 (crc & WUF_CFGX_CRC16_MASK_));
/* mask 0x3: match the first 2 bytes (33:33) */
3239 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3240 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3241 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3242 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3245 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3246 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3247 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3249 if (wol & WAKE_UCAST) {
/* perfect destination-address match wake */
3250 temp_wucsr |= WUCSR_PFDA_EN_;
3252 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3253 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3254 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3256 if (wol & WAKE_ARP) {
3257 temp_wucsr |= WUCSR_WAKE_EN_;
3259 /* set WUF_CFG & WUF_MASK
3260 * for packettype (offset 12,13) = ARP (0x0806)
3262 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3263 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3265 WUF_CFGX_TYPE_ALL_ |
3266 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3267 (crc & WUF_CFGX_CRC16_MASK_));
/* mask 0x3000: bytes 12-13 of the frame, i.e. the ethertype */
3269 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3270 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3271 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3272 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3275 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3276 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3277 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
/* commit accumulated wake-control bits */
3280 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3282 /* when multiple WOL bits are set */
3283 if (hweight_long((unsigned long)wol) > 1) {
3284 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3285 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3286 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3288 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
/* clear any wake-up status latched while configuring */
3291 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3292 buf |= PMT_CTL_WUPS_MASK_;
3293 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* re-enable the receiver so wake frames can be detected */
3295 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3296 buf |= MAC_RX_RXEN_;
3297 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/*
 * lan78xx_suspend - USB suspend callback (system sleep and autosuspend).
 * On the first suspend_count transition: refuses autosuspend while TX
 * work is queued, quiesces MAC TX/RX, detaches the netdev and kills all
 * URBs.  Then, if the device was up (EVENT_DEV_ASLEEP), programs either
 * good-frame/PHY wake (autosuspend) or the user's WoL settings (system
 * suspend).  Several returns and else-branches are elided here.
 */
3302 int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3304 struct lan78xx_net *dev = usb_get_intfdata(intf);
3305 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3311 event = message.event;
/* only the first nested suspend actually quiesces the device */
3313 if (!dev->suspend_count++) {
3314 spin_lock_irq(&dev->txq.lock);
3315 /* don't autosuspend while transmitting */
3316 if ((skb_queue_len(&dev->txq) ||
3317 skb_queue_len(&dev->txq_pend)) &&
3318 PMSG_IS_AUTO(message)) {
3319 spin_unlock_irq(&dev->txq.lock);
3323 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3324 spin_unlock_irq(&dev->txq.lock);
/* stop MAC transmitter and receiver */
3328 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3329 buf &= ~MAC_TX_TXEN_;
3330 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3331 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3332 buf &= ~MAC_RX_RXEN_;
3333 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3335 /* empty out the rx and queues */
3336 netif_device_detach(dev->net);
3337 lan78xx_terminate_urbs(dev);
3338 usb_kill_urb(dev->urb_intr);
3341 netif_device_attach(dev->net);
3344 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3345 if (PMSG_IS_AUTO(message)) {
3346 /* auto suspend (selective suspend) */
3347 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3348 buf &= ~MAC_TX_TXEN_;
3349 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3350 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3351 buf &= ~MAC_RX_RXEN_;
3352 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* reset wake config and clear latched wake sources */
3354 ret = lan78xx_write_reg(dev, WUCSR, 0);
3355 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3356 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3358 /* set goodframe wakeup */
3359 ret = lan78xx_read_reg(dev, WUCSR, &buf);
3361 buf |= WUCSR_RFE_WAKE_EN_;
3362 buf |= WUCSR_STORE_WAKE_;
3364 ret = lan78xx_write_reg(dev, WUCSR, buf);
3366 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3368 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3369 buf |= PMT_CTL_RES_CLR_WKP_STS_;
/* allow PHY (link) events to wake the device too */
3371 buf |= PMT_CTL_PHY_WAKE_EN_;
3372 buf |= PMT_CTL_WOL_EN_;
3373 buf &= ~PMT_CTL_SUS_MODE_MASK_;
3374 buf |= PMT_CTL_SUS_MODE_3_;
3376 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* clear wake-up status bits (write-1-to-clear) */
3378 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3380 buf |= PMT_CTL_WUPS_MASK_;
3382 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* receiver back on so wake frames are seen */
3384 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3385 buf |= MAC_RX_RXEN_;
3386 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* system suspend: apply user-configured wake-on-LAN sources */
3388 lan78xx_set_suspend(dev, pdata->wol);
/*
 * lan78xx_resume - USB resume callback.
 * On the last nested resume: restart the interrupt URB, flush transmits
 * that were anchored on dev->deferred during autosuspend, clear the
 * ASLEEP flag and restart the queue; then disable all wake sources,
 * acknowledge EEE/wake status, and re-enable the MAC transmitter.
 * Some failure branches and WUCSR bit lines are elided here.
 */
3396 int lan78xx_resume(struct usb_interface *intf)
3398 struct lan78xx_net *dev = usb_get_intfdata(intf);
3399 struct sk_buff *skb;
3404 if (!--dev->suspend_count) {
3405 /* resume interrupt URBs */
3406 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3407 usb_submit_urb(dev->urb_intr, GFP_NOIO)<;
3409 spin_lock_irq(&dev->txq.lock);
/* resubmit transmits deferred while asleep */
3410 while ((res = usb_get_from_anchor(&dev->deferred))) {
3411 skb = (struct sk_buff *)res->context;
3412 ret = usb_submit_urb(res, GFP_ATOMIC);
/* submit failed: drop the skb and release the pm reference */
3414 dev_kfree_skb_any(skb);
3416 usb_autopm_put_interface_async(dev->intf);
3418 dev->net->trans_start = jiffies;
3419 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3423 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3424 spin_unlock_irq(&dev->txq.lock);
3426 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3427 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
3428 netif_start_queue(dev->net);
3429 tasklet_schedule(&dev->bh);
/* tear down wake configuration now that we are awake */
3433 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3434 ret = lan78xx_write_reg(dev, WUCSR, 0);
3435 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
/* write-1-to-clear the latched wake/neighbour/TCP-SYN events */
3437 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3439 WUCSR2_IPV6_TCPSYN_RCD_ |
3440 WUCSR2_IPV4_TCPSYN_RCD_);
3442 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3443 WUCSR_EEE_RX_WAKE_ |
3445 WUCSR_RFE_WAKE_FR_ |
/* re-enable the MAC transmitter */
3450 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3451 buf |= MAC_TX_TXEN_;
3452 ret = lan78xx_write_reg(dev, MAC_TX, buf);
/*
 * lan78xx_reset_resume - resume after a USB bus reset.
 * Delegates to the normal resume path; a re-initialization call
 * appears elided between these lines in this excerpt -- check the
 * full source before relying on this being a pure pass-through.
 */
3457 int lan78xx_reset_resume(struct usb_interface *intf)
3459 struct lan78xx_net *dev = usb_get_intfdata(intf);
3462 return lan78xx_resume(intf);
/* USB ID match table: LAN7800 and LAN7850 devices handled by this
 * driver; exported for module autoloading via MODULE_DEVICE_TABLE.
 */
3465 static const struct usb_device_id products[] = {
3467 /* LAN7800 USB Gigabit Ethernet Device */
3468 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
3471 /* LAN7850 USB Gigabit Ethernet Device */
3472 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
3476 MODULE_DEVICE_TABLE(usb, products);
/* usb_driver registration: wires probe/disconnect and the PM callbacks,
 * opts in to autosuspend and disables hub-initiated LPM (link power
 * management) for this device.
 */
3478 static struct usb_driver lan78xx_driver = {
3479 .name = DRIVER_NAME,
3480 .id_table = products,
3481 .probe = lan78xx_probe,
3482 .disconnect = lan78xx_disconnect,
3483 .suspend = lan78xx_suspend,
3484 .resume = lan78xx_resume,
3485 .reset_resume = lan78xx_reset_resume,
3486 .supports_autosuspend = 1,
3487 .disable_hub_initiated_lpm = 1,
3490 module_usb_driver(lan78xx_driver);
3492 MODULE_AUTHOR(DRIVER_AUTHOR);
3493 MODULE_DESCRIPTION(DRIVER_DESC);
3494 MODULE_LICENSE("GPL");