/*
 * PXA168 ethernet driver.
 * Most of the code is derived from mv643xx ethernet driver.
 *
 * Copyright (C) 2010 Marvell International Ltd.
 *		Sachin Sanap <ssanap@marvell.com>
 *		Zhangfei Gao <zgao6@marvell.com>
 *		Philip Rakity <prakity@marvell.com>
 *		Mark Brown <markb@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/clk.h>
#include <linux/phy.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <linux/pxa168_eth.h>
#define DRIVER_NAME	"pxa168-eth"
#define DRIVER_VERSION	"0.3"

/*
 * Registers
 */
#define PHY_ADDRESS		0x0000
#define SMI			0x0010
#define PORT_CONFIG		0x0400
#define PORT_CONFIG_EXT		0x0408
#define PORT_COMMAND		0x0410
#define PORT_STATUS		0x0418
#define HTPR			0x0428
#define SDMA_CONFIG		0x0440
#define SDMA_CMD		0x0448
#define INT_CAUSE		0x0450
#define INT_W_CLEAR		0x0454
#define INT_MASK		0x0458
#define ETH_F_RX_DESC_0		0x0480
#define ETH_C_RX_DESC_0		0x04A0
#define ETH_C_TX_DESC_1		0x04E4
/* SMI register */
#define SMI_BUSY		(1 << 28)	/* 0 - Write, 1 - Read */
#define SMI_R_VALID		(1 << 27)	/* 0 - Write, 1 - Read */
#define SMI_OP_W		(0 << 26)	/* Write operation */
#define SMI_OP_R		(1 << 26)	/* Read operation */

#define PHY_WAIT_ITERATIONS	10
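
/*
 * Layout of the SMI command word assembled in pxa168_smi_read() and
 * pxa168_smi_write() below: bits 25:21 hold the PHY register number,
 * bits 20:16 the PHY address and bits 15:0 the write data; SMI_BUSY and
 * SMI_R_VALID report completion of the access.
 */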

#define PXA168_ETH_PHY_ADDR_DEFAULT	0

/* RX & TX descriptor command */
#define BUF_OWNED_BY_DMA	(1 << 31)

/* RX descriptor status */
#define RX_EN_INT		(1 << 23)
#define RX_FIRST_DESC		(1 << 17)
#define RX_LAST_DESC		(1 << 16)
#define RX_ERROR		(1 << 15)

/* TX descriptor command */
#define TX_EN_INT		(1 << 23)
#define TX_GEN_CRC		(1 << 22)
#define TX_ZERO_PADDING		(1 << 18)
#define TX_FIRST_DESC		(1 << 17)
#define TX_LAST_DESC		(1 << 16)
#define TX_ERROR		(1 << 15)

/* SDMA command status fields macros */
#define SDMA_CMD_AT		(1 << 31)
#define SDMA_CMD_TXDL		(1 << 24)
#define SDMA_CMD_TXDH		(1 << 23)
#define SDMA_CMD_AR		(1 << 15)
#define SDMA_CMD_ERD		(1 << 7)

/* Bit definitions of the Port Config Reg */
#define PCR_HS			(1 << 12)
#define PCR_EN			(1 << 7)
#define PCR_PM			(1 << 0)

/* Bit definitions of the Port Config Extend Reg */
#define PCXR_2BSM		(1 << 28)
#define PCXR_DSCP_EN		(1 << 21)
#define PCXR_MFL_1518		(0 << 14)
#define PCXR_MFL_1536		(1 << 14)
#define PCXR_MFL_2048		(2 << 14)
#define PCXR_MFL_64K		(3 << 14)
#define PCXR_FLP		(1 << 11)
#define PCXR_PRIO_TX_OFF	3
#define PCXR_TX_HIGH_PRI	(7 << PCXR_PRIO_TX_OFF)

/* Bit definitions of the SDMA Config Reg */
#define SDCR_BSZ_OFF		12
#define SDCR_BSZ8		(3 << SDCR_BSZ_OFF)
#define SDCR_BSZ4		(2 << SDCR_BSZ_OFF)
#define SDCR_BSZ2		(1 << SDCR_BSZ_OFF)
#define SDCR_BSZ1		(0 << SDCR_BSZ_OFF)
#define SDCR_BLMR		(1 << 6)
#define SDCR_BLMT		(1 << 7)
#define SDCR_RIFB		(1 << 9)
#define SDCR_RC_OFF		2
#define SDCR_RC_MAX_RETRANS	(0xf << SDCR_RC_OFF)

/*
 * Bit definitions of the Interrupt Cause Reg
 * and Interrupt MASK Reg are the same
 */
#define ICR_RXBUF		(1 << 0)
#define ICR_TXBUF_H		(1 << 2)
#define ICR_TXBUF_L		(1 << 3)
#define ICR_TXEND_H		(1 << 6)
#define ICR_TXEND_L		(1 << 7)
#define ICR_RXERR		(1 << 8)
#define ICR_TXERR_H		(1 << 10)
#define ICR_TXERR_L		(1 << 11)
#define ICR_TX_UDR		(1 << 13)
#define ICR_MII_CH		(1 << 28)

#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\
		  ICR_TXERR_H | ICR_TXERR_L |\
		  ICR_TXEND_H | ICR_TXEND_L |\
		  ICR_RXBUF | ICR_RXERR | ICR_MII_CH)

#define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */

#define NUM_RX_DESCS		64
#define NUM_TX_DESCS		64

#define HASH_ADD		0
#define HASH_DELETE		1
#define HASH_ADDR_TABLE_SIZE	0x4000	/* 16K (1/2K address - PCR_HS == 1) */
#define HOP_NUMBER		12

/* Bit definitions for Port status */
#define PORT_SPEED_100		(1 << 0)
#define FULL_DUPLEX		(1 << 1)
#define FLOW_CONTROL_ENABLED	(1 << 2)
#define LINK_UP			(1 << 3)

/* Bit definitions for work to be done */
#define WORK_LINK		(1 << 0)
#define WORK_TX_DONE		(1 << 1)

/*
 * Misc definitions.
 */
#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
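
/*
 * SKB_DMA_REALIGN is the extra headroom reserved on each RX allocation so
 * that skb->data can be moved back onto a cache-line boundary when
 * NET_SKB_PAD is smaller than a cache line; see
 * pxa168_eth_recalc_skb_size() and rxq_refill().
 */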

struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status     */
	u16 byte_cnt;		/* Descriptor buffer byte count  */
	u16 buf_size;		/* Buffer size                   */
	u32 buf_ptr;		/* Descriptor buffer pointer     */
	u32 next_desc_ptr;	/* Next descriptor pointer       */
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field */
	u16 l4i_chk;		/* CPU-computed TCP/UDP checksum */
	u16 byte_cnt;		/* buffer byte count */
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
	u32 next_desc_ptr;	/* Pointer to next descriptor */
};

struct pxa168_eth_private {
	int port_num;		/* User Ethernet port number */

	int rx_resource_err;	/* Rx ring resource error flag */

	/* Next available and first returning Rx resource */
	int rx_curr_desc_q, rx_used_desc_q;

	/* Next available and first returning Tx resource */
	int tx_curr_desc_q, tx_used_desc_q;

	struct rx_desc *p_rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct tx_desc *p_tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;

	struct work_struct tx_timeout_task;

	struct net_device *dev;
	struct napi_struct napi;
	u8 work_todo;
	int skb_size;

	/* Size of Tx Ring per queue */
	int tx_ring_size;
	/* Number of tx descriptors in use */
	int tx_desc_count;
	/* Size of Rx Ring per queue */
	int rx_ring_size;
	/* Number of rx descriptors in use */
	int rx_desc_count;

	/*
	 * Used in case RX Ring is empty, which can occur when
	 * system does not have resources (skb's)
	 */
	struct timer_list timeout;
	struct mii_bus *smi_bus;
	struct phy_device *phy;

	/* clock */
	struct clk *clk;
	struct pxa168_eth_platform_data *pd;
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/* Pointer to the hardware address filter table */
	void *htpr;
	dma_addr_t htpr_dma;
};

struct addr_table_entry {
	__le32 lo;
	__le32 hi;
};

/* Bit fields of a Hash Table Entry */
enum hash_table_entry {
	HASH_ENTRY_VALID = 1,
	SKIP = 2,
	HASH_ENTRY_RECEIVE_DISCARD = 4,
	HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
};

static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static int pxa168_init_hw(struct pxa168_eth_private *pep);
static void eth_port_reset(struct net_device *dev);
static void eth_port_start(struct net_device *dev);
static int pxa168_eth_open(struct net_device *dev);
static int pxa168_eth_stop(struct net_device *dev);
static int ethernet_phy_setup(struct net_device *dev);

static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
{
	return readl(pep->base + offset);
}

static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
{
	writel(data, pep->base + offset);
}

static void abort_dma(struct pxa168_eth_private *pep)
{
	int delay;
	int max_retries = 40;

	do {
		wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
		udelay(100);

		delay = 10;
		while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
		       && delay-- > 0) {
			udelay(10);
		}
	} while (max_retries-- > 0 && delay <= 0);

	if (max_retries <= 0)
		printk(KERN_ERR "%s : DMA Stuck\n", __func__);
}

static int ethernet_phy_get(struct pxa168_eth_private *pep)
{
	unsigned int reg_data;

	reg_data = rdl(pep, PHY_ADDRESS);

	return (reg_data >> (5 * pep->port_num)) & 0x1f;
}

static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
{
	u32 reg_data;
	int addr_shift = 5 * pep->port_num;

	reg_data = rdl(pep, PHY_ADDRESS);
	reg_data &= ~(0x1f << addr_shift);
	reg_data |= (phy_addr & 0x1f) << addr_shift;
	wrl(pep, PHY_ADDRESS, reg_data);
}

static void ethernet_phy_reset(struct pxa168_eth_private *pep)
{
	int data;

	data = phy_read(pep->phy, MII_BMCR);
	if (data < 0)
		return;

	data |= BMCR_RESET;
	if (phy_write(pep->phy, MII_BMCR, data) < 0)
		return;

	do {
		data = phy_read(pep->phy, MII_BMCR);
	} while (data >= 0 && data & BMCR_RESET);
}

static void rxq_refill(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct sk_buff *skb;
	struct rx_desc *p_used_rx_desc;
	int used_rx_desc;

	while (pep->rx_desc_count < pep->rx_ring_size) {
		int size;

		skb = netdev_alloc_skb(dev, pep->skb_size);
		if (!skb)
			break;
		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);
		pep->rx_desc_count++;
		/* Get 'used' Rx descriptor */
		used_rx_desc = pep->rx_used_desc_q;
		p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
		size = skb->end - skb->data;
		p_used_rx_desc->buf_ptr = dma_map_single(pep->dev->dev.parent,
							 skb->data,
							 size,
							 DMA_FROM_DEVICE);
		p_used_rx_desc->buf_size = size;
		pep->rx_skb[used_rx_desc] = skb;

		/* Return the descriptor to DMA ownership */
		wmb();
		p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
		wmb();

		/* Move the used descriptor pointer to the next descriptor */
		pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;

		/* Any Rx return cancels the Rx resource error status */
		pep->rx_resource_err = 0;

		skb_reserve(skb, ETH_HW_IP_ALIGN);
	}

	/*
	 * If the RX ring is empty of SKBs, set a timer to try allocating
	 * again at a later time.
	 */
	if (pep->rx_desc_count == 0) {
		pep->timeout.expires = jiffies + (HZ / 10);
		add_timer(&pep->timeout);
	}
}
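
/*
 * Timer callback armed at the tail of rxq_refill() when the ring could not
 * be filled; scheduling NAPI retries the allocation from rxq_process().
 */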
static inline void rxq_refill_timer_wrapper(unsigned long data)
{
	struct pxa168_eth_private *pep = (void *)data;
	napi_schedule(&pep->napi);
}
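
/*
 * Reverse the bit order within each nibble of x: bit 0 swaps with bit 3
 * and bit 1 with bit 2, in both the low and the high nibble.
 */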
static inline u8 flip_8_bits(u8 x)
{
	return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
	    | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
	    | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
	    | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
}

static void nibble_swap_every_byte(unsigned char *mac_addr)
{
	int i;
	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
				((mac_addr[i] & 0xf0) >> 4);
	}
}

static void inverse_every_nibble(unsigned char *mac_addr)
{
	int i;
	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = flip_8_bits(mac_addr[i]);
}
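
/*
 * Note: applying nibble_swap_every_byte() and then inverse_every_nibble(),
 * as hash_function() does below, reverses the bit order of every byte of
 * the address, which appears to be the ordering the filter hardware expects.
 */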

/*
 * ----------------------------------------------------------------------------
 * This function will calculate the hash function of the address.
 * Inputs
 * mac_addr_orig    - MAC address.
 * Outputs
 * return the calculated entry.
 */
static u32 hash_function(unsigned char *mac_addr_orig)
{
	u32 hash_result;
	u32 addr0;
	u32 addr1;
	u32 addr2;
	u32 addr3;
	unsigned char mac_addr[ETH_ALEN];

	/* Make a copy of the MAC address since we are going to perform bit
	 * operations on it
	 */
	memcpy(mac_addr, mac_addr_orig, ETH_ALEN);

	/* Nibble swap */
	nibble_swap_every_byte(mac_addr);
	inverse_every_nibble(mac_addr);

	addr0 = (mac_addr[5] >> 2) & 0x3f;
	addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
	addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
	addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);

	hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
	hash_result = hash_result & 0x07ff;
	return hash_result;
}
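
/*
 * The result is masked to 11 bits, i.e. one of 2048 buckets; this matches
 * the 1/2K address table: 2048 entries of 8 bytes each give the 16K
 * HASH_ADDR_TABLE_SIZE allocated in init_hash_table().
 */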

/*
 * ----------------------------------------------------------------------------
 * This function will add/del an entry to the address table.
 * Inputs
 * pep - driver private data.
 * mac_addr - MAC address.
 * skip - if 1, skip this address. Used when deleting an entry which is
 *	  part of a chain in the hash table. We can't just delete the entry
 *	  since that would break the chain; the tables need to be
 *	  defragmented from time to time.
 * rd	- 0 Discard packet upon match.
 *	- 1 Receive packet upon match.
 * Outputs
 * address table entry is added/deleted.
 * 0 if success.
 * -ENOSPC if table full
 */
static int add_del_hash_entry(struct pxa168_eth_private *pep,
			      unsigned char *mac_addr,
			      u32 rd, u32 skip, int del)
{
	struct addr_table_entry *entry, *start;
	u32 new_high;
	u32 new_low;
	u32 i;

	new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
	    | (((mac_addr[1] >> 0) & 0xf) << 11)
	    | (((mac_addr[0] >> 4) & 0xf) << 7)
	    | (((mac_addr[0] >> 0) & 0xf) << 3)
	    | (((mac_addr[3] >> 4) & 0x1) << 31)
	    | (((mac_addr[3] >> 0) & 0xf) << 27)
	    | (((mac_addr[2] >> 4) & 0xf) << 23)
	    | (((mac_addr[2] >> 0) & 0xf) << 19)
	    | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
	    | HASH_ENTRY_VALID;

	new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
	    | (((mac_addr[5] >> 0) & 0xf) << 11)
	    | (((mac_addr[4] >> 4) & 0xf) << 7)
	    | (((mac_addr[4] >> 0) & 0xf) << 3)
	    | (((mac_addr[3] >> 5) & 0x7) << 0);

	/*
	 * Pick the appropriate table, start scanning for free/reusable
	 * entries at the index obtained by hashing the specified MAC address
	 */
	start = pep->htpr;
	entry = start + hash_function(mac_addr);
	for (i = 0; i < HOP_NUMBER; i++) {
		if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
			break;
		} else {
			/* if same address put in same position */
			if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
			     (new_low & 0xfffffff8)) &&
			    (le32_to_cpu(entry->hi) == new_high)) {
				break;
			}
		}
		if (entry == start + 0x7ff)
			entry = start;
		else
			entry++;
	}

	if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
	    (le32_to_cpu(entry->hi) != new_high) && del)
		return 0;

	if (i == HOP_NUMBER) {
		if (!del) {
			printk(KERN_INFO "%s: table section is full, need to "
					 "move to 16kB implementation?\n",
					 __FILE__);
			return -ENOSPC;
		} else
			return 0;
	}

	/*
	 * Update the selected entry
	 */
	if (del) {
		entry->hi = 0;
		entry->lo = 0;
	} else {
		entry->hi = cpu_to_le32(new_high);
		entry->lo = cpu_to_le32(new_low);
	}

	return 0;
}
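
/*
 * Collision handling above is a bounded linear probe: at most HOP_NUMBER
 * slots are examined, wrapping from the last entry (start + 0x7ff) back
 * to the start of the table, before an add fails with -ENOSPC.
 */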

/*
 * ----------------------------------------------------------------------------
 *  Create an addressTable entry from MAC address info
 *  found in the specified net_device struct
 *
 *  Input : pointer to ethernet interface network device structure
 *  Output : N/A
 */
static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
					  unsigned char *oaddr,
					  unsigned char *addr)
{
	/* Delete old entry */
	if (oaddr)
		add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
	/* Add new entry */
	add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
}

static int init_hash_table(struct pxa168_eth_private *pep)
{
	/*
	 * Hardware expects CPU to build a hash table based on a predefined
	 * hash function and populate it based on hardware address. The
	 * location of the hash table is identified by a 32-bit pointer
	 * stored in the HTPR internal register. Two possible sizes exist for
	 * the hash table: 8kB (256kB of DRAM required (4 x 64 kB banks)) and
	 * 1/2kB (16kB of DRAM required (4 x 4 kB banks)). We currently only
	 * support 1/2kB.
	 */
	/* TODO: Add support for the 8kB hash table and an alternative hash
	 * function. The driver can dynamically switch to them if the 1/2kB
	 * hash table is full.
	 */
	if (pep->htpr == NULL) {
		pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
					       HASH_ADDR_TABLE_SIZE,
					       &pep->htpr_dma,
					       GFP_KERNEL | __GFP_ZERO);
		if (pep->htpr == NULL)
			return -ENOMEM;
	} else {
		memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
	}
	wrl(pep, HTPR, pep->htpr_dma);
	return 0;
}

static void pxa168_eth_set_rx_mode(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u32 val;

	val = rdl(pep, PORT_CONFIG);
	if (dev->flags & IFF_PROMISC)
		val |= PCR_PM;
	else
		val &= ~PCR_PM;
	wrl(pep, PORT_CONFIG, val);

	/*
	 * Remove the old list of MAC addresses and add dev->dev_addr
	 * and the multicast addresses.
	 */
	memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
	update_hash_table_mac_address(pep, NULL, dev->dev_addr);

	netdev_for_each_mc_addr(ha, dev)
		update_hash_table_mac_address(pep, NULL, ha->addr);
}

static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;
	struct pxa168_eth_private *pep = netdev_priv(dev);
	unsigned char oldMac[ETH_ALEN];

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(oldMac, dev->dev_addr, ETH_ALEN);
	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
	netif_addr_lock_bh(dev);
	update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
	netif_addr_unlock_bh(dev);
	return 0;
}

static void eth_port_start(struct net_device *dev)
{
	unsigned int val = 0;
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int tx_curr_desc, rx_curr_desc;

	/* Perform PHY reset, if there is a PHY. */
	if (pep->phy != NULL) {
		struct ethtool_cmd cmd;

		pxa168_get_settings(pep->dev, &cmd);
		ethernet_phy_reset(pep);
		pxa168_set_settings(pep->dev, &cmd);
	}

	/* Assignment of Tx CTRP of given queue */
	tx_curr_desc = pep->tx_curr_desc_q;
	wrl(pep, ETH_C_TX_DESC_1,
	    (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));

	/* Assignment of Rx CRDP of given queue */
	rx_curr_desc = pep->rx_curr_desc_q;
	wrl(pep, ETH_C_RX_DESC_0,
	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));

	wrl(pep, ETH_F_RX_DESC_0,
	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));

	/* Clear all interrupts */
	wrl(pep, INT_CAUSE, 0);

	/* Enable all interrupts for receive, transmit and error. */
	wrl(pep, INT_MASK, ALL_INTS);

	/* Enable port */
	val = rdl(pep, PORT_CONFIG);
	val |= PCR_EN;
	wrl(pep, PORT_CONFIG, val);

	/* Start RX DMA engine */
	val = rdl(pep, SDMA_CMD);
	val |= SDMA_CMD_ERD;
	wrl(pep, SDMA_CMD, val);
}

static void eth_port_reset(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	unsigned int val = 0;

	/* Stop all interrupts for receive, transmit and error. */
	wrl(pep, INT_MASK, 0);

	/* Clear all interrupts */
	wrl(pep, INT_CAUSE, 0);

	/* Stop RX DMA */
	val = rdl(pep, SDMA_CMD);
	val &= ~SDMA_CMD_ERD;	/* abort dma command */
	wrl(pep, SDMA_CMD, val);

	/* Abort any transmit and receive operations and put DMA
	 * in idle state.
	 */
	abort_dma(pep);

	/* Disable port */
	val = rdl(pep, PORT_CONFIG);
	val &= ~PCR_EN;
	wrl(pep, PORT_CONFIG, val);
}

/*
 * txq_reclaim - Free the tx desc data for completed descriptors
 * If force is non-zero, frees uncompleted descriptors as well
 */
static int txq_reclaim(struct net_device *dev, int force)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct tx_desc *desc;
	u32 cmd_sts;
	struct sk_buff *skb;
	int tx_index;
	dma_addr_t addr;
	int count;
	int released = 0;

	netif_tx_lock(dev);

	pep->work_todo &= ~WORK_TX_DONE;
	while (pep->tx_desc_count > 0) {
		tx_index = pep->tx_used_desc_q;
		desc = &pep->p_tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;
		if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
			if (released > 0) {
				goto txq_reclaim_end;
			} else {
				released = -1;
				goto txq_reclaim_end;
			}
		}
		pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
		pep->tx_desc_count--;
		addr = desc->buf_ptr;
		count = desc->byte_cnt;
		skb = pep->tx_skb[tx_index];
		if (skb)
			pep->tx_skb[tx_index] = NULL;

		if (cmd_sts & TX_ERROR) {
			if (net_ratelimit())
				printk(KERN_ERR "%s: Error in TX\n", dev->name);
			dev->stats.tx_errors++;
		}
		dma_unmap_single(pep->dev->dev.parent, addr, count,
				 DMA_TO_DEVICE);
		if (skb)
			dev_kfree_skb_irq(skb);
		released++;
	}
txq_reclaim_end:
	netif_tx_unlock(dev);
	return released;
}

static void pxa168_eth_tx_timeout(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	printk(KERN_INFO "%s: TX timeout  desc_count %d\n",
	       dev->name, pep->tx_desc_count);

	schedule_work(&pep->tx_timeout_task);
}

static void pxa168_eth_tx_timeout_task(struct work_struct *work)
{
	struct pxa168_eth_private *pep = container_of(work,
						      struct pxa168_eth_private,
						      tx_timeout_task);
	struct net_device *dev = pep->dev;
	pxa168_eth_stop(dev);
	pxa168_eth_open(dev);
}

static int rxq_process(struct net_device *dev, int budget)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned int received_packets = 0;
	struct sk_buff *skb;

	while (budget-- > 0) {
		int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;

		/* Do not process Rx ring in case of Rx ring resource error */
		if (pep->rx_resource_err)
			break;
		rx_curr_desc = pep->rx_curr_desc_q;
		rx_used_desc = pep->rx_used_desc_q;
		rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & (BUF_OWNED_BY_DMA))
			break;
		skb = pep->rx_skb[rx_curr_desc];
		pep->rx_skb[rx_curr_desc] = NULL;

		rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
		pep->rx_curr_desc_q = rx_next_curr_desc;

		/* Rx descriptors exhausted. */
		/* Set the Rx ring resource error flag */
		if (rx_next_curr_desc == rx_used_desc)
			pep->rx_resource_err = 1;
		pep->rx_desc_count--;
		dma_unmap_single(pep->dev->dev.parent, rx_desc->buf_ptr,
				 rx_desc->buf_size,
				 DMA_FROM_DEVICE);
		received_packets++;
		/*
		 * Update statistics.
		 * Note byte count includes 4 byte CRC count
		 */
		stats->rx_packets++;
		stats->rx_bytes += rx_desc->byte_cnt;
		/*
		 * If the packet arrived without both first and last bits on,
		 * or with the error summary bit set, it must be dropped.
		 */
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
		     (RX_FIRST_DESC | RX_LAST_DESC))
		    || (cmd_sts & RX_ERROR)) {

			stats->rx_dropped++;
			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
			    (RX_FIRST_DESC | RX_LAST_DESC)) {
				if (net_ratelimit())
					printk(KERN_ERR
					       "%s: Rx pkt on multiple desc\n",
					       dev->name);
			}
			if (cmd_sts & RX_ERROR)
				stats->rx_errors++;
			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, rx_desc->byte_cnt - 4);
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
		}
	}
	/* Fill RX ring with skb's */
	rxq_refill(dev);
	return received_packets;
}

static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
				     struct net_device *dev)
{
	u32 icr;
	int ret = 0;

	icr = rdl(pep, INT_CAUSE);
	if (icr == 0)
		return IRQ_NONE;

	wrl(pep, INT_CAUSE, ~icr);
	if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
		pep->work_todo |= WORK_TX_DONE;
		ret = 1;
	}
	if (icr & ICR_RXBUF)
		ret = 1;
	if (icr & ICR_MII_CH) {
		pep->work_todo |= WORK_LINK;
		ret = 1;
	}
	return ret;
}

static void handle_link_event(struct pxa168_eth_private *pep)
{
	struct net_device *dev = pep->dev;
	u32 port_status;
	int speed;
	int duplex;
	int fc;

	port_status = rdl(pep, PORT_STATUS);
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {
			printk(KERN_INFO "%s: link down\n", dev->name);
			netif_carrier_off(dev);
			txq_reclaim(dev, 1);
		}
		return;
	}
	if (port_status & PORT_SPEED_100)
		speed = 100;
	else
		speed = 10;

	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
	printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
	       "flow control %sabled\n", dev->name,
	       speed, duplex ? "full" : "half", fc ? "en" : "dis");
	if (!netif_carrier_ok(dev))
		netif_carrier_on(dev);
}

static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (unlikely(!pxa168_eth_collect_events(pep, dev)))
		return IRQ_NONE;
	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	napi_schedule(&pep->napi);
	return IRQ_HANDLED;
}

static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
{
	int skb_size;

	/*
	 * Reserve 2+14 bytes for an ethernet header (the hardware
	 * automatically prepends 2 bytes of dummy data to each
	 * received packet), 16 bytes for up to four VLAN tags, and
	 * 4 bytes for the trailing FCS -- 36 bytes total.
	 */
	skb_size = pep->dev->mtu + 36;

	/*
	 * Make sure that the skb size is a multiple of 8 bytes, as
	 * the lower three bits of the receive descriptor's buffer
	 * size field are ignored by the hardware.
	 */
	pep->skb_size = (skb_size + 7) & ~7;

	/*
	 * If NET_SKB_PAD is smaller than a cache line,
	 * netdev_alloc_skb() will cause skb->data to be misaligned
	 * to a cache line boundary.  If this is the case, include
	 * some extra space to allow re-aligning the data area.
	 */
	pep->skb_size += SKB_DMA_REALIGN;
}
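
/*
 * Worked example: with the default 1500-byte MTU, skb_size becomes
 * 1500 + 36 = 1536, which is already a multiple of 8, so each receive
 * buffer is 1536 bytes plus the SKB_DMA_REALIGN slack.
 */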
static int set_port_config_ext(struct pxa168_eth_private *pep)
{
	int skb_size;

	pxa168_eth_recalc_skb_size(pep);
	if (pep->skb_size <= 1518)
		skb_size = PCXR_MFL_1518;
	else if (pep->skb_size <= 1536)
		skb_size = PCXR_MFL_1536;
	else if (pep->skb_size <= 2048)
		skb_size = PCXR_MFL_2048;
	else
		skb_size = PCXR_MFL_64K;

	/* Extended Port Configuration */
	wrl(pep,
	    PORT_CONFIG_EXT, PCXR_2BSM |	/* Two byte prefix aligns IP hdr */
	    PCXR_DSCP_EN |		/* Enable DSCP in IP */
	    skb_size | PCXR_FLP |	/* do not force link pass */
	    PCXR_TX_HIGH_PRI);		/* Transmit - high priority queue */

	return 0;
}

static int pxa168_init_hw(struct pxa168_eth_private *pep)
{
	int err = 0;

	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	wrl(pep, INT_CAUSE, 0);
	/* Write to ICR to clear interrupts. */
	wrl(pep, INT_W_CLEAR, 0);
	/* Abort any transmit and receive operations and put DMA
	 * in idle state.
	 */
	abort_dma(pep);
	/* Initialize address hash table */
	err = init_hash_table(pep);
	if (err)
		return err;
	/* SDMA configuration */
	wrl(pep, SDMA_CONFIG, SDCR_BSZ8 |	/* Burst size = 32 bytes */
	    SDCR_RIFB |				/* Rx interrupt on frame */
	    SDCR_BLMT |				/* Little endian transmit */
	    SDCR_BLMR |				/* Little endian receive */
	    SDCR_RC_MAX_RETRANS);		/* Max retransmit count */
	/* Port Configuration */
	wrl(pep, PORT_CONFIG, PCR_HS);		/* Hash size is 1/2K */
	set_port_config_ext(pep);

	return err;
}

static int rxq_init(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct rx_desc *p_rx_desc;
	int size = 0, i = 0;
	int rx_desc_num = pep->rx_ring_size;

	/* Allocate RX skb rings */
	pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
			      GFP_KERNEL);
	if (!pep->rx_skb)
		return -ENOMEM;

	/* Allocate RX ring */
	pep->rx_desc_count = 0;
	size = pep->rx_ring_size * sizeof(struct rx_desc);
	pep->rx_desc_area_size = size;
	pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
						 &pep->rx_desc_dma,
						 GFP_KERNEL | __GFP_ZERO);
	if (!pep->p_rx_desc_area)
		goto out;

	/* initialize the next_desc_ptr links in the Rx descriptors ring */
	p_rx_desc = pep->p_rx_desc_area;
	for (i = 0; i < rx_desc_num; i++) {
		p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
		    ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
	}
	/* Save Rx desc pointer to driver struct. */
	pep->rx_curr_desc_q = 0;
	pep->rx_used_desc_q = 0;
	pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
	return 0;
out:
	kfree(pep->rx_skb);
	return -ENOMEM;
}

static void rxq_deinit(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int curr;

	/* Free preallocated skb's on RX rings */
	for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
		if (pep->rx_skb[curr]) {
			dev_kfree_skb(pep->rx_skb[curr]);
			pep->rx_desc_count--;
		}
	}
	if (pep->rx_desc_count)
		printk(KERN_ERR
		       "Error in freeing Rx Ring. %d skb's still in use\n",
		       pep->rx_desc_count);
	/* Free RX ring */
	if (pep->p_rx_desc_area)
		dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
				  pep->p_rx_desc_area, pep->rx_desc_dma);
	kfree(pep->rx_skb);
}

static int txq_init(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct tx_desc *p_tx_desc;
	int size = 0, i = 0;
	int tx_desc_num = pep->tx_ring_size;

	/* Allocate TX skb rings */
	pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
			      GFP_KERNEL);
	if (!pep->tx_skb)
		return -ENOMEM;

	/* Allocate TX ring */
	pep->tx_desc_count = 0;
	size = pep->tx_ring_size * sizeof(struct tx_desc);
	pep->tx_desc_area_size = size;
	pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
						 &pep->tx_desc_dma,
						 GFP_KERNEL | __GFP_ZERO);
	if (!pep->p_tx_desc_area)
		goto out;
	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
	p_tx_desc = pep->p_tx_desc_area;
	for (i = 0; i < tx_desc_num; i++) {
		p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
		    ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
	}
	pep->tx_curr_desc_q = 0;
	pep->tx_used_desc_q = 0;
	pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
	return 0;
out:
	kfree(pep->tx_skb);
	return -ENOMEM;
}

static void txq_deinit(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	/* Free outstanding skb's on TX ring */
	txq_reclaim(dev, 1);
	BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
	/* Free TX ring */
	if (pep->p_tx_desc_area)
		dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
				  pep->p_tx_desc_area, pep->tx_desc_dma);
	kfree(pep->tx_skb);
}

static int pxa168_eth_open(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int err;

	err = request_irq(dev->irq, pxa168_eth_int_handler,
			  IRQF_DISABLED, dev->name, dev);
	if (err) {
		dev_err(&dev->dev, "can't assign irq\n");
		return -EAGAIN;
	}
	pep->rx_resource_err = 0;
	err = rxq_init(dev);
	if (err != 0)
		goto out_free_irq;
	err = txq_init(dev);
	if (err != 0)
		goto out_free_rx_skb;
	pep->rx_used_desc_q = 0;
	pep->rx_curr_desc_q = 0;

	/* Fill RX ring with skb's */
	rxq_refill(dev);
	pep->rx_used_desc_q = 0;
	pep->rx_curr_desc_q = 0;
	netif_carrier_off(dev);
	eth_port_start(dev);
	napi_enable(&pep->napi);
	return 0;
out_free_rx_skb:
	rxq_deinit(dev);
out_free_irq:
	free_irq(dev->irq, dev);
	return err;
}

static int pxa168_eth_stop(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	eth_port_reset(dev);

	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	wrl(pep, INT_CAUSE, 0);
	/* Write to ICR to clear interrupts. */
	wrl(pep, INT_W_CLEAR, 0);
	napi_disable(&pep->napi);
	del_timer_sync(&pep->timeout);
	netif_carrier_off(dev);
	free_irq(dev->irq, dev);
	rxq_deinit(dev);
	txq_deinit(dev);

	return 0;
}

static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
{
	int retval;
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if ((mtu > 9500) || (mtu < 68))
		return -EINVAL;

	dev->mtu = mtu;
	retval = set_port_config_ext(pep);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface. This will allocate RX
	 * skbs of the new MTU.
	 * There is a possible danger that the open will not succeed,
	 * due to memory being full.
	 */
	pxa168_eth_stop(dev);
	if (pxa168_eth_open(dev)) {
		dev_err(&dev->dev,
			"fatal error on re-opening device after MTU change\n");
	}

	return 0;
}

static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
{
	int tx_desc_curr;

	tx_desc_curr = pep->tx_curr_desc_q;
	pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
	BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
	pep->tx_desc_count++;

	return tx_desc_curr;
}

static int pxa168_rx_poll(struct napi_struct *napi, int budget)
{
	struct pxa168_eth_private *pep =
	    container_of(napi, struct pxa168_eth_private, napi);
	struct net_device *dev = pep->dev;
	int work_done = 0;

	if (unlikely(pep->work_todo & WORK_LINK)) {
		pep->work_todo &= ~(WORK_LINK);
		handle_link_event(pep);
	}
	/*
	 * We call txq_reclaim on every poll, since while in NAPI interrupts
	 * are disabled and we would otherwise miss the TX_DONE interrupt,
	 * which is not latched in the interrupt status register.
	 */
	txq_reclaim(dev, 0);
	if (netif_queue_stopped(dev)
	    && pep->tx_ring_size - pep->tx_desc_count > 1) {
		netif_wake_queue(dev);
	}
	work_done = rxq_process(dev, budget);
	if (work_done < budget) {
		napi_complete(napi);
		wrl(pep, INT_MASK, ALL_INTS);
	}

	return work_done;
}

static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct tx_desc *desc;
	int tx_index;
	int length;

	tx_index = eth_alloc_tx_desc_index(pep);
	desc = &pep->p_tx_desc_area[tx_index];
	length = skb->len;
	pep->tx_skb[tx_index] = skb;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(pep->dev->dev.parent, skb->data,
				       length, DMA_TO_DEVICE);

	skb_tx_timestamp(skb);

	wmb();
	desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
			TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
	wmb();
	wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);

	stats->tx_bytes += length;
	stats->tx_packets++;
	dev->trans_start = jiffies;
	if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
		/* We handled the current skb, but now we are out of space. */
		netif_stop_queue(dev);
	}

	return NETDEV_TX_OK;
}
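
/*
 * Every packet occupies exactly one descriptor (TX_FIRST_DESC and
 * TX_LAST_DESC are both set above), so the queue is stopped as soon as
 * fewer than two free descriptors remain.
 */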
static int smi_wait_ready(struct pxa168_eth_private *pep)
{
	int i = 0;

	/* wait for the SMI register to become available */
	for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
		if (i == PHY_WAIT_ITERATIONS)
			return -ETIMEDOUT;
		msleep(10);
	}

	return 0;
}

static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct pxa168_eth_private *pep = bus->priv;
	int i = 0;
	int val;

	if (smi_wait_ready(pep)) {
		printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}
	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
	/* now wait for the data to be valid */
	for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			printk(KERN_WARNING
			       "pxa168_eth: SMI bus read not valid\n");
			return -ENODEV;
		}
		msleep(10);
	}

	return val & 0xffff;
}

static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
			    u16 value)
{
	struct pxa168_eth_private *pep = bus->priv;

	if (smi_wait_ready(pep)) {
		printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
	    SMI_OP_W | (value & 0xffff));

	if (smi_wait_ready(pep)) {
		printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
			       int cmd)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	if (pep->phy != NULL)
		return phy_mii_ioctl(pep->phy, ifr, cmd);

	return -EOPNOTSUPP;
}

static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
{
	struct mii_bus *bus = pep->smi_bus;
	struct phy_device *phydev;
	int start;
	int num;
	int i;

	if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
		/* Scan entire range */
		start = ethernet_phy_get(pep);
		num = 32;
	} else {
		/* Use phy addr specific to platform */
		start = phy_addr & 0x1f;
		num = 1;
	}
	phydev = NULL;
	for (i = 0; i < num; i++) {
		int addr = (start + i) & 0x1f;
		if (bus->phy_map[addr] == NULL)
			mdiobus_scan(bus, addr);

		if (phydev == NULL) {
			phydev = bus->phy_map[addr];
			if (phydev != NULL)
				ethernet_phy_set_addr(pep, addr);
		}
	}

	return phydev;
}

static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
{
	struct phy_device *phy = pep->phy;
	ethernet_phy_reset(pep);

	phy_attach(pep->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII);

	if (speed == 0) {
		phy->autoneg = AUTONEG_ENABLE;
		phy->speed = 0;
		phy->duplex = 0;
		phy->supported &= PHY_BASIC_FEATURES;
		phy->advertising = phy->supported | ADVERTISED_Autoneg;
	} else {
		phy->autoneg = AUTONEG_DISABLE;
		phy->advertising = 0;
		phy->speed = speed;
		phy->duplex = duplex;
	}
	phy_start_aneg(phy);
}

static int ethernet_phy_setup(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (pep->pd->init)
		pep->pd->init();
	pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
	if (pep->phy != NULL)
		phy_init(pep, pep->pd->speed, pep->pd->duplex);
	update_hash_table_mac_address(pep, NULL, dev->dev_addr);

	return 0;
}

static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int err;

	err = phy_read_status(pep->phy);
	if (err == 0)
		err = phy_ethtool_gset(pep->phy, cmd);

	return err;
}

static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	return phy_ethtool_sset(pep->phy, cmd);
}

static void pxa168_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}

static const struct ethtool_ops pxa168_ethtool_ops = {
	.get_settings	= pxa168_get_settings,
	.set_settings	= pxa168_set_settings,
	.get_drvinfo	= pxa168_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
};

static const struct net_device_ops pxa168_eth_netdev_ops = {
	.ndo_open		= pxa168_eth_open,
	.ndo_stop		= pxa168_eth_stop,
	.ndo_start_xmit		= pxa168_eth_start_xmit,
	.ndo_set_rx_mode	= pxa168_eth_set_rx_mode,
	.ndo_set_mac_address	= pxa168_eth_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= pxa168_eth_do_ioctl,
	.ndo_change_mtu		= pxa168_eth_change_mtu,
	.ndo_tx_timeout		= pxa168_eth_tx_timeout,
};

static int pxa168_eth_probe(struct platform_device *pdev)
{
	struct pxa168_eth_private *pep = NULL;
	struct net_device *dev = NULL;
	struct resource *res;
	struct clk *clk;
	int err;

	printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");

	clk = clk_get(&pdev->dev, "MFUCLK");
	if (IS_ERR(clk)) {
		printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n",
		       DRIVER_NAME);
		return -ENODEV;
	}
	clk_enable(clk);

	dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_clk;
	}

	platform_set_drvdata(pdev, dev);
	pep = netdev_priv(dev);
	pep->dev = dev;
	pep->clk = clk;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		err = -ENODEV;
		goto err_netdev;
	}
	pep->base = ioremap(res->start, resource_size(res));
	if (pep->base == NULL) {
		err = -ENOMEM;
		goto err_netdev;
	}
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;
	dev->netdev_ops = &pxa168_eth_netdev_ops;
	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;
	SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);

	INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);

	printk(KERN_INFO "%s: Using random MAC address\n", DRIVER_NAME);
	eth_hw_addr_random(dev);

	pep->pd = pdev->dev.platform_data;
	pep->rx_ring_size = NUM_RX_DESCS;
	if (pep->pd->rx_queue_size)
		pep->rx_ring_size = pep->pd->rx_queue_size;

	pep->tx_ring_size = NUM_TX_DESCS;
	if (pep->pd->tx_queue_size)
		pep->tx_ring_size = pep->pd->tx_queue_size;

	pep->port_num = pep->pd->port_number;
	/* Hardware supports only 3 ports */
	BUG_ON(pep->port_num > 2);
	netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);

	memset(&pep->timeout, 0, sizeof(struct timer_list));
	init_timer(&pep->timeout);
	pep->timeout.function = rxq_refill_timer_wrapper;
	pep->timeout.data = (unsigned long)pep;

	pep->smi_bus = mdiobus_alloc();
	if (pep->smi_bus == NULL) {
		err = -ENOMEM;
		goto err_base;
	}
	pep->smi_bus->priv = pep;
	pep->smi_bus->name = "pxa168_eth smi";
	pep->smi_bus->read = pxa168_smi_read;
	pep->smi_bus->write = pxa168_smi_write;
	snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
		 pdev->name, pdev->id);
	pep->smi_bus->parent = &pdev->dev;
	pep->smi_bus->phy_mask = 0xffffffff;
	err = mdiobus_register(pep->smi_bus);
	if (err)
		goto err_free_mdio;

	pxa168_init_hw(pep);
	err = ethernet_phy_setup(dev);
	if (err)
		goto err_mdiobus;
	SET_NETDEV_DEV(dev, &pdev->dev);
	err = register_netdev(dev);
	if (err)
		goto err_mdiobus;
	return 0;

err_mdiobus:
	mdiobus_unregister(pep->smi_bus);
err_free_mdio:
	mdiobus_free(pep->smi_bus);
err_base:
	iounmap(pep->base);
err_netdev:
	free_netdev(dev);
err_clk:
	clk_disable(clk);
	clk_put(clk);
	return err;
}

static int pxa168_eth_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (pep->htpr) {
		dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
				  pep->htpr, pep->htpr_dma);
		pep->htpr = NULL;
	}
	if (pep->clk) {
		clk_disable(pep->clk);
		clk_put(pep->clk);
		pep->clk = NULL;
	}
	if (pep->phy != NULL)
		phy_detach(pep->phy);

	iounmap(pep->base);
	pep->base = NULL;
	mdiobus_unregister(pep->smi_bus);
	mdiobus_free(pep->smi_bus);
	unregister_netdev(dev);
	cancel_work_sync(&pep->tx_timeout_task);
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

static void pxa168_eth_shutdown(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	eth_port_reset(dev);
}

#ifdef CONFIG_PM
static int pxa168_eth_resume(struct platform_device *pdev)
{
	return -ENOSYS;
}

static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
{
	return -ENOSYS;
}

#else
#define pxa168_eth_resume NULL
#define pxa168_eth_suspend NULL
#endif

static struct platform_driver pxa168_eth_driver = {
	.probe = pxa168_eth_probe,
	.remove = pxa168_eth_remove,
	.shutdown = pxa168_eth_shutdown,
	.resume = pxa168_eth_resume,
	.suspend = pxa168_eth_suspend,
	.driver = {
		.name = DRIVER_NAME,
	},
};

module_platform_driver(pxa168_eth_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
MODULE_ALIAS("platform:pxa168_eth");