/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
 *
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 *
 * This driver uses the sungem driver (c) David Miller
 * (davem@redhat.com) as its basis.
 *
 * The cassini chip has a number of features that distinguish it from
 * the GEM chip:
 *  4 transmit descriptor rings that are used for either QoS (VLAN) or
 *      load balancing (non-VLAN mode)
 *  batching of multiple packets
 *  multiple CPU dispatching
 *  page-based RX descriptor engine with separate completion rings
 *  Gigabit support (GMII and PCS interface)
 *  MIF link up/down detection works
 *
 * RX is handled by page-sized buffers that are attached as fragments to
 * the skb. here's what's done:
 *  -- driver allocates pages at a time and keeps reference counts
 *     on them.
 *  -- the upper protocol layers assume that the header is in the skb
 *     itself. as a result, cassini will copy a small amount (64 bytes)
 *     to make them happy.
 *  -- driver appends the rest of the data pages as frags to skbuffs
 *     and increments the reference count
 *  -- on page reclamation, the driver swaps the page with a spare page.
 *     if that page is still in use, it frees its reference to that page,
 *     and allocates a new page for use. otherwise, it just recycles the
 *     page.
 *
 * NOTE: cassini can parse the header. however, it's not worth it
 *       as long as the network stack requires a header copy.
 *
 * TX has 4 queues. currently these queues are used in a round-robin
 * fashion for load balancing. They can also be used for QoS. for that
 * to work, however, QoS information needs to be exposed down to the driver
 * level so that subqueues get targeted to particular transmit rings.
 * alternatively, the queues can be configured via use of the all-purpose
 * ioctl.
 *
 * RX DATA: the rx completion ring has all the info, but the rx desc
 * ring has all of the data. RX can conceivably come in under multiple
 * interrupts, but the INT# assignment needs to be set up properly by
 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
 * that. also, the two descriptor rings are designed to distinguish between
 * encrypted and non-encrypted packets, but we use them for buffering
 * instead.
 *
 * by default, the selective clear mask is set up to process rx packets.
 */
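
/* A condensed sketch of the RX scheme described above (illustrative
 * only; the real logic lives in cas_rx_process_pkt() below, with the
 * reference counts managed by the cas_buffer_*() helpers):
 *
 *      memcpy(p, addr + off, RX_COPY_MIN);     // 64-byte header copy
 *      cas_buffer_inc(page);                   // skb takes a page reference
 *      frag->page = page->buffer;              // rest of the data as a frag
 *      frag->page_offset = off + RX_COPY_MIN;
 *
 * on reclamation, cas_page_swap() slides a spare page into the
 * descriptor slot whenever the old page is still referenced.
 */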
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mutex.h>

#include <net/checksum.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#define cas_page_map(x)      kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define cas_page_unmap(x)    kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define CAS_NCPUS            num_online_cpus()

#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL)
#define USE_NAPI
#define cas_skb_release(x)  netif_receive_skb(x)
#else
#define cas_skb_release(x)  netif_rx(x)
#endif

/* select which firmware to use */
#define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
#define CAS_HP_ALT_FIRMWARE   cas_prog_null /* alternate firmware */

#include "cassini.h"
#define USE_TX_COMPWB      /* use completion writeback registers */
#define USE_CSMA_CD_PROTO  /* standard CSMA/CD */
#define USE_RX_BLANK       /* hw interrupt mitigation */
#undef USE_ENTROPY_DEV     /* don't test for entropy device */

/* NOTE: these aren't usable unless PCI interrupts can be assigned.
 * also, we need to make cp->lock finer-grained.
 */
#undef  USE_PCI_INTB
#undef  USE_PCI_INTC
#undef  USE_PCI_INTD
#undef  USE_QOS

#undef  USE_VPD_DEBUG       /* debug vpd information if defined */

/* rx processing options */
#define USE_PAGE_ORDER      /* specify to allocate large rx pages */
#define RX_DONT_BATCH  0    /* if 1, don't batch flows */
#define RX_COPY_ALWAYS 0    /* if 0, use frags */
#define RX_COPY_MIN    64   /* copy a little to make upper layers happy */
#undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */
#define DRV_MODULE_NAME     "cassini"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION  "1.4"
#define DRV_MODULE_RELDATE  "1 July 2004"

#define CAS_DEF_MSG_ENABLE    \
        (NETIF_MSG_DRV        | \
         NETIF_MSG_PROBE      | \
         NETIF_MSG_LINK       | \
         NETIF_MSG_TIMER      | \
         NETIF_MSG_IFDOWN     | \
         NETIF_MSG_IFUP       | \
         NETIF_MSG_RX_ERR     | \
         NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define CAS_TX_TIMEOUT            (HZ)
#define CAS_LINK_TIMEOUT          (22*HZ/10)
#define CAS_LINK_FAST_TIMEOUT     (1)

/* timeout values for state changing. these specify the number
 * of 10us delays to be used before giving up.
 */
#define STOP_TRIES_PHY 1000
#define STOP_TRIES     5000

/* specify a minimum frame size to deal with some fifo issues
 * max mtu == 2 * page size - ethernet header - 64 - swivel =
 *            2 * page_size - 0x50
 */
#define CAS_MIN_FRAME             97
#define CAS_1000MB_MIN_FRAME      255
#define CAS_MIN_MTU               60
#define CAS_MAX_MTU               min(((cp->page_size << 1) - 0x50), 9000)
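
/* a worked example of the bound above (sketch): the 0x50 is
 * 14 (ethernet header) + 64 (header copy) + 2 (swivel). with
 * cp->page_size = 0x2000, (0x2000 << 1) - 0x50 = 16304, so CAS_MAX_MTU
 * clamps to 9000; with 0x800 pages it evaluates to 0x1000 - 0x50 = 4016.
 */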
/*
 * Eliminate these and use separate atomic counters for each, to
 * avoid a race condition.
 */
#define CAS_RESET_MTU             1
#define CAS_RESET_ALL             2
#define CAS_RESET_SPARE           3
static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

static int cassini_debug = -1;  /* -1 == use CAS_DEF_MSG_ENABLE as value */
static int link_mode;

MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
module_param(cassini_debug, int, 0);
MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
module_param(link_mode, int, 0);
MODULE_PARM_DESC(link_mode, "default link mode");

/*
 * Work around for a PCS bug in which the link goes down due to the chip
 * being confused and never showing a link status of "up."
 */
#define DEFAULT_LINKDOWN_TIMEOUT 5

/*
 * Value in seconds, for user input.
 */
static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
module_param(linkdown_timeout, int, 0);
MODULE_PARM_DESC(linkdown_timeout,
"min reset interval in sec. for PCS linkdown issue; disabled if not positive");

/*
 * value in 'ticks' (units used by jiffies). Set when we init the
 * module because 'HZ' is actually a function call on some flavors of
 * Linux.  This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
 */
static int link_transition_timeout;
static u16 link_modes[] __devinitdata = {
        BMCR_ANENABLE,                   /* 0 : autoneg */
        0,                               /* 1 : 10bt half duplex */
        BMCR_SPEED100,                   /* 2 : 100bt half duplex */
        BMCR_FULLDPLX,                   /* 3 : 10bt full duplex */
        BMCR_SPEED100|BMCR_FULLDPLX,     /* 4 : 100bt full duplex */
        CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};
static struct pci_device_id cas_pci_tbl[] __devinitdata = {
        { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
static void cas_set_link_modes(struct cas *cp);

static inline void cas_lock_tx(struct cas *cp)
{
        int i;

        for (i = 0; i < N_TX_RINGS; i++)
                spin_lock(&cp->tx_lock[i]);
}

static inline void cas_lock_all(struct cas *cp)
{
        spin_lock_irq(&cp->lock);
        cas_lock_tx(cp);
}
/* WTZ: QA was finding deadlock problems with the previous
 * versions after long test runs with multiple cards per machine.
 * See if replacing cas_lock_all with safer versions helps. The
 * symptoms QA is reporting match those we'd expect if interrupts
 * aren't being properly restored, and we fixed a previous deadlock
 * with similar symptoms by using save/restore versions in other
 * places.
 */
#define cas_lock_all_save(cp, flags) \
do { \
        struct cas *xxxcp = (cp); \
        spin_lock_irqsave(&xxxcp->lock, flags); \
        cas_lock_tx(xxxcp); \
} while (0)
static inline void cas_unlock_tx(struct cas *cp)
{
        int i;

        for (i = N_TX_RINGS; i > 0; i--)
                spin_unlock(&cp->tx_lock[i - 1]);
}

static inline void cas_unlock_all(struct cas *cp)
{
        cas_unlock_tx(cp);
        spin_unlock_irq(&cp->lock);
}

#define cas_unlock_all_restore(cp, flags) \
do { \
        struct cas *xxxcp = (cp); \
        cas_unlock_tx(xxxcp); \
        spin_unlock_irqrestore(&xxxcp->lock, flags); \
} while (0)
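
/* usage sketch for the save/restore pair above (illustrative):
 *
 *      unsigned long flags;
 *
 *      cas_lock_all_save(cp, flags);
 *      ... modify state guarded by cp->lock and the tx locks ...
 *      cas_unlock_all_restore(cp, flags);
 *
 * unlike cas_lock_all()/cas_unlock_all(), this pair is safe in callers
 * that may already have interrupts disabled, per the QA note above.
 */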
static void cas_disable_irq(struct cas *cp, const int ring)
{
        /* Make sure we won't get any more interrupts */
        if (ring == 0) {
                writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
                return;
        }

        /* disable completion interrupts and selectively mask */
        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
                case 1:
#endif
#ifdef USE_PCI_INTC
                case 2:
#endif
#ifdef USE_PCI_INTD
                case 3:
#endif
                        writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
                               cp->regs + REG_PLUS_INTRN_MASK(ring));
                        break;
#endif
                default:
                        writel(INTRN_MASK_CLEAR_ALL, cp->regs +
                               REG_PLUS_INTRN_MASK(ring));
                        break;
                }
        }
}

static inline void cas_mask_intr(struct cas *cp)
{
        int i;

        for (i = 0; i < N_RX_COMP_RINGS; i++)
                cas_disable_irq(cp, i);
}
static inline void cas_buffer_init(cas_page_t *cp)
{
        struct page *page = cp->buffer;
        atomic_set((atomic_t *)&page->lru.next, 1);
}

static inline int cas_buffer_count(cas_page_t *cp)
{
        struct page *page = cp->buffer;
        return atomic_read((atomic_t *)&page->lru.next);
}

static inline void cas_buffer_inc(cas_page_t *cp)
{
        struct page *page = cp->buffer;
        atomic_inc((atomic_t *)&page->lru.next);
}

static inline void cas_buffer_dec(cas_page_t *cp)
{
        struct page *page = cp->buffer;
        atomic_dec((atomic_t *)&page->lru.next);
}
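
/* the four helpers above stash a reference count in the otherwise
 * unused page->lru.next field. a sketch of the lifecycle:
 *
 *      cas_buffer_init(p);     // count = 1: page owned by the driver
 *      cas_buffer_inc(p);      // count = 2: page handed to an skb frag
 *      cas_buffer_dec(p);      // count = 1: one side dropped its reference
 *
 * so cas_buffer_count(p) > 1 means "still in use by an upper layer".
 */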
static void cas_enable_irq(struct cas *cp, const int ring)
{
        if (ring == 0) { /* all but TX_DONE */
                writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
                return;
        }

        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
                case 1:
#endif
#ifdef USE_PCI_INTC
                case 2:
#endif
#ifdef USE_PCI_INTD
                case 3:
#endif
                        writel(INTRN_MASK_RX_EN, cp->regs +
                               REG_PLUS_INTRN_MASK(ring));
                        break;
#endif
                default:
                        break;
                }
        }
}

static inline void cas_unmask_intr(struct cas *cp)
{
        int i;

        for (i = 0; i < N_RX_COMP_RINGS; i++)
                cas_enable_irq(cp, i);
}
static inline void cas_entropy_gather(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
        if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
                return;

        batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
                            readl(cp->regs + REG_ENTROPY_IV),
                            sizeof(uint64_t)*8);
#endif
}

static inline void cas_entropy_reset(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
        if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
                return;

        writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
               cp->regs + REG_BIM_LOCAL_DEV_EN);
        writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
        writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);

        /* if we read back 0x0, we don't have an entropy device */
        if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
                cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
#endif
}
/* access to the phy. the following assumes that we've initialized the MIF to
 * be in frame rather than bit-bang mode
 */
static u16 cas_phy_read(struct cas *cp, int reg)
{
        u32 cmd;
        int limit = STOP_TRIES_PHY;

        cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
        cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
        cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
        cmd |= MIF_FRAME_TURN_AROUND_MSB;
        writel(cmd, cp->regs + REG_MIF_FRAME);

        /* poll for completion */
        while (limit-- > 0) {
                udelay(10);
                cmd = readl(cp->regs + REG_MIF_FRAME);
                if (cmd & MIF_FRAME_TURN_AROUND_LSB)
                        return (cmd & MIF_FRAME_DATA_MASK);
        }
        return 0xFFFF; /* -1 */
}
static int cas_phy_write(struct cas *cp, int reg, u16 val)
{
        int limit = STOP_TRIES_PHY;
        u32 cmd;

        cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
        cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
        cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
        cmd |= MIF_FRAME_TURN_AROUND_MSB;
        cmd |= val & MIF_FRAME_DATA_MASK;
        writel(cmd, cp->regs + REG_MIF_FRAME);

        /* poll for completion */
        while (limit-- > 0) {
                udelay(10);
                cmd = readl(cp->regs + REG_MIF_FRAME);
                if (cmd & MIF_FRAME_TURN_AROUND_LSB)
                        return 0;
        }
        return -1;
}
static void cas_phy_powerup(struct cas *cp)
{
        u16 ctl = cas_phy_read(cp, MII_BMCR);

        if ((ctl & BMCR_PDOWN) == 0)
                return;
        ctl &= ~BMCR_PDOWN;
        cas_phy_write(cp, MII_BMCR, ctl);
}

static void cas_phy_powerdown(struct cas *cp)
{
        u16 ctl = cas_phy_read(cp, MII_BMCR);

        if (ctl & BMCR_PDOWN)
                return;
        ctl |= BMCR_PDOWN;
        cas_phy_write(cp, MII_BMCR, ctl);
}
/* cp->lock held. note: the last put_page will free the buffer */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
        pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
                       PCI_DMA_FROMDEVICE);
        cas_buffer_dec(page);
        __free_pages(page->buffer, cp->page_order);
        kfree(page);
        return 0;
}
#ifdef RX_COUNT_BUFFERS
#define RX_USED_ADD(x, y)       ((x)->used += (y))
#define RX_USED_SET(x, y)       ((x)->used  = (y))
#else
#define RX_USED_ADD(x, y)
#define RX_USED_SET(x, y)
#endif
/* local page allocation routines for the receive buffers. jumbo pages
 * require at least 8K contiguous and 8K aligned buffers.
 */
static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
{
        cas_page_t *page;

        page = kmalloc(sizeof(cas_page_t), flags);
        if (!page)
                return NULL;

        INIT_LIST_HEAD(&page->list);
        RX_USED_SET(page, 0);
        page->buffer = alloc_pages(flags, cp->page_order);
        if (!page->buffer)
                goto page_err;
        cas_buffer_init(page);
        page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
                                      cp->page_size, PCI_DMA_FROMDEVICE);
        return page;

page_err:
        kfree(page);
        return NULL;
}
/* initialize spare pool of rx buffers, but allocate during the open */
static void cas_spare_init(struct cas *cp)
{
        spin_lock(&cp->rx_inuse_lock);
        INIT_LIST_HEAD(&cp->rx_inuse_list);
        spin_unlock(&cp->rx_inuse_lock);

        spin_lock(&cp->rx_spare_lock);
        INIT_LIST_HEAD(&cp->rx_spare_list);
        cp->rx_spares_needed = RX_SPARE_COUNT;
        spin_unlock(&cp->rx_spare_lock);
}
/* used on close. free all the spare buffers. */
static void cas_spare_free(struct cas *cp)
{
        struct list_head list, *elem, *tmp;

        /* free spare buffers */
        INIT_LIST_HEAD(&list);
        spin_lock(&cp->rx_spare_lock);
        list_splice(&cp->rx_spare_list, &list);
        INIT_LIST_HEAD(&cp->rx_spare_list);
        spin_unlock(&cp->rx_spare_lock);
        list_for_each_safe(elem, tmp, &list) {
                cas_page_free(cp, list_entry(elem, cas_page_t, list));
        }

        INIT_LIST_HEAD(&list);
#if 1
        /*
         * Looks like Adrian had protected this with a different
         * lock than used everywhere else to manipulate this list.
         */
        spin_lock(&cp->rx_inuse_lock);
        list_splice(&cp->rx_inuse_list, &list);
        INIT_LIST_HEAD(&cp->rx_inuse_list);
        spin_unlock(&cp->rx_inuse_lock);
#else
        spin_lock(&cp->rx_spare_lock);
        list_splice(&cp->rx_inuse_list, &list);
        INIT_LIST_HEAD(&cp->rx_inuse_list);
        spin_unlock(&cp->rx_spare_lock);
#endif
        list_for_each_safe(elem, tmp, &list) {
                cas_page_free(cp, list_entry(elem, cas_page_t, list));
        }
}
/* replenish spares if needed */
static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{
        struct list_head list, *elem, *tmp;
        int needed, i;

        /* check inuse list. if we don't need any more free buffers,
         * just free it
         */

        /* make a local copy of the list */
        INIT_LIST_HEAD(&list);
        spin_lock(&cp->rx_inuse_lock);
        list_splice(&cp->rx_inuse_list, &list);
        INIT_LIST_HEAD(&cp->rx_inuse_list);
        spin_unlock(&cp->rx_inuse_lock);

        list_for_each_safe(elem, tmp, &list) {
                cas_page_t *page = list_entry(elem, cas_page_t, list);

                if (cas_buffer_count(page) > 1)
                        continue;

                list_del(elem);
                spin_lock(&cp->rx_spare_lock);
                if (cp->rx_spares_needed > 0) {
                        list_add(elem, &cp->rx_spare_list);
                        cp->rx_spares_needed--;
                        spin_unlock(&cp->rx_spare_lock);
                } else {
                        spin_unlock(&cp->rx_spare_lock);
                        cas_page_free(cp, page);
                }
        }

        /* put any inuse buffers back on the list */
        if (!list_empty(&list)) {
                spin_lock(&cp->rx_inuse_lock);
                list_splice(&list, &cp->rx_inuse_list);
                spin_unlock(&cp->rx_inuse_lock);
        }

        spin_lock(&cp->rx_spare_lock);
        needed = cp->rx_spares_needed;
        spin_unlock(&cp->rx_spare_lock);
        if (!needed)
                return;

        /* we still need spares, so try to allocate some */
        INIT_LIST_HEAD(&list);
        i = 0;
        while (i < needed) {
                cas_page_t *spare = cas_page_alloc(cp, flags);
                if (!spare)
                        break;
                list_add(&spare->list, &list);
                i++;
        }

        spin_lock(&cp->rx_spare_lock);
        list_splice(&list, &cp->rx_spare_list);
        cp->rx_spares_needed -= i;
        spin_unlock(&cp->rx_spare_lock);
}
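
/* in short, the recovery above works as a cycle (sketch): pages whose
 * count has dropped back to 1 migrate from rx_inuse_list to
 * rx_spare_list until rx_spares_needed hits zero, surplus pages are
 * freed, and fresh pages are allocated only if the pool is still short.
 */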
/* pull a page from the list. */
static cas_page_t *cas_page_dequeue(struct cas *cp)
{
        struct list_head *entry;
        int recover;

        spin_lock(&cp->rx_spare_lock);
        if (list_empty(&cp->rx_spare_list)) {
                /* try to do a quick recovery */
                spin_unlock(&cp->rx_spare_lock);
                cas_spare_recover(cp, GFP_ATOMIC);
                spin_lock(&cp->rx_spare_lock);
                if (list_empty(&cp->rx_spare_list)) {
                        if (netif_msg_rx_err(cp))
                                printk(KERN_ERR "%s: no spare buffers "
                                       "available.\n", cp->dev->name);
                        spin_unlock(&cp->rx_spare_lock);
                        return NULL;
                }
        }

        entry = cp->rx_spare_list.next;
        list_del(entry);
        recover = ++cp->rx_spares_needed;
        spin_unlock(&cp->rx_spare_lock);

        /* trigger the timer to do the recovery */
        if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
#if 1
                atomic_inc(&cp->reset_task_pending);
                atomic_inc(&cp->reset_task_pending_spare);
                schedule_work(&cp->reset_task);
#else
                atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
                schedule_work(&cp->reset_task);
#endif
        }
        return list_entry(entry, cas_page_t, list);
}
static void cas_mif_poll(struct cas *cp, const int enable)
{
        u32 cfg;

        cfg  = readl(cp->regs + REG_MIF_CFG);
        cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);

        if (cp->phy_type & CAS_PHY_MII_MDIO1)
                cfg |= MIF_CFG_PHY_SELECT;

        /* poll and interrupt on link status change. */
        if (enable) {
                cfg |= MIF_CFG_POLL_EN;
                cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
                cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
        }
        writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
               cp->regs + REG_MIF_MASK);
        writel(cfg, cp->regs + REG_MIF_CFG);
}
/* Must be invoked under cp->lock */
static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
{
        u16 ctl;
#if 1
        int lcntl;
        int changed = 0;
        int oldstate = cp->lstate;
        int link_was_not_down = !(oldstate == link_down);
#endif
        /* Setup link parameters */
        if (!ep)
                goto start_aneg;
        lcntl = cp->link_cntl;
        if (ep->autoneg == AUTONEG_ENABLE)
                cp->link_cntl = BMCR_ANENABLE;
        else {
                cp->link_cntl = 0;
                if (ep->speed == SPEED_100)
                        cp->link_cntl |= BMCR_SPEED100;
                else if (ep->speed == SPEED_1000)
                        cp->link_cntl |= CAS_BMCR_SPEED1000;
                if (ep->duplex == DUPLEX_FULL)
                        cp->link_cntl |= BMCR_FULLDPLX;
        }
#if 1
        changed = (lcntl != cp->link_cntl);
#endif
start_aneg:
        if (cp->lstate == link_up) {
                printk(KERN_INFO "%s: PCS link down.\n",
                       cp->dev->name);
        } else {
                if (changed) {
                        printk(KERN_INFO "%s: link configuration changed\n",
                               cp->dev->name);
                }
        }
        cp->lstate = link_down;
        cp->link_transition = LINK_TRANSITION_LINK_DOWN;
        if (!cp->hw_running)
                return;
#if 1
        /*
         * WTZ: If the old state was link_up, we turn off the carrier
         * to replicate everything we do elsewhere on a link-down
         * event when we were already in a link-up state..
         */
        if (oldstate == link_up)
                netif_carrier_off(cp->dev);
        if (changed && link_was_not_down) {
                /*
                 * WTZ: This branch will simply schedule a full reset after
                 * we explicitly changed link modes in an ioctl. See if this
                 * fixes the link-problems we were having for forced mode.
                 */
                atomic_inc(&cp->reset_task_pending);
                atomic_inc(&cp->reset_task_pending_all);
                schedule_work(&cp->reset_task);
                cp->timer_ticks = 0;
                mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
                return;
        }
#endif
        if (cp->phy_type & CAS_PHY_SERDES) {
                u32 val = readl(cp->regs + REG_PCS_MII_CTRL);

                if (cp->link_cntl & BMCR_ANENABLE) {
                        val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
                        cp->lstate = link_aneg;
                } else {
                        if (cp->link_cntl & BMCR_FULLDPLX)
                                val |= PCS_MII_CTRL_DUPLEX;
                        val &= ~PCS_MII_AUTONEG_EN;
                        cp->lstate = link_force_ok;
                }
                cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
                writel(val, cp->regs + REG_PCS_MII_CTRL);

        } else {
                cas_mif_poll(cp, 0);
                ctl = cas_phy_read(cp, MII_BMCR);
                ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
                         CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
                ctl |= cp->link_cntl;
                if (ctl & BMCR_ANENABLE) {
                        ctl |= BMCR_ANRESTART;
                        cp->lstate = link_aneg;
                } else {
                        cp->lstate = link_force_ok;
                }
                cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
                cas_phy_write(cp, MII_BMCR, ctl);
                cas_mif_poll(cp, 1);
        }

        cp->timer_ticks = 0;
        mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}
/* Must be invoked under cp->lock. */
static int cas_reset_mii_phy(struct cas *cp)
{
        int limit = STOP_TRIES_PHY;
        u16 val;

        cas_phy_write(cp, MII_BMCR, BMCR_RESET);
        udelay(100);
        while (limit--) {
                val = cas_phy_read(cp, MII_BMCR);
                if ((val & BMCR_RESET) == 0)
                        break;
                udelay(10);
        }
        return (limit <= 0);
}
static void cas_saturn_firmware_load(struct cas *cp)
{
        cas_saturn_patch_t *patch = cas_saturn_patch;

        cas_phy_powerdown(cp);

        /* expanded memory access mode */
        cas_phy_write(cp, DP83065_MII_MEM, 0x0);

        /* pointer configuration for new firmware */
        cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
        cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
        cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
        cas_phy_write(cp, DP83065_MII_REGD, 0x82);
        cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
        cas_phy_write(cp, DP83065_MII_REGD, 0x0);
        cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
        cas_phy_write(cp, DP83065_MII_REGD, 0x39);

        /* download new firmware */
        cas_phy_write(cp, DP83065_MII_MEM, 0x1);
        cas_phy_write(cp, DP83065_MII_REGE, patch->addr);
        while (patch->addr) {
                cas_phy_write(cp, DP83065_MII_REGD, patch->val);
                patch++;
        }

        /* enable firmware */
        cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
        cas_phy_write(cp, DP83065_MII_REGD, 0x1);
}
/* phy initialization */
static void cas_phy_init(struct cas *cp)
{
        u16 val;

        /* if we're in MII/GMII mode, set up phy */
        if (CAS_PHY_MII(cp->phy_type)) {
                writel(PCS_DATAPATH_MODE_MII,
                       cp->regs + REG_PCS_DATAPATH_MODE);

                cas_mif_poll(cp, 0);
                cas_reset_mii_phy(cp); /* take out of isolate mode */

                if (PHY_LUCENT_B0 == cp->phy_id) {
                        /* workaround link up/down issue with lucent */
                        cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
                        cas_phy_write(cp, MII_BMCR, 0x00f1);
                        cas_phy_write(cp, LUCENT_MII_REG, 0x0);

                } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
                        /* workarounds for broadcom phy */
                        cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
                        cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
                        cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
                        cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
                        cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
                        cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
                        cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
                        cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
                        cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
                        cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
                        cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);

                } else if (PHY_BROADCOM_5411 == cp->phy_id) {
                        val = cas_phy_read(cp, BROADCOM_MII_REG4);
                        val = cas_phy_read(cp, BROADCOM_MII_REG4);
                        if (val & 0x0080) {
                                /* link workaround */
                                cas_phy_write(cp, BROADCOM_MII_REG4,
                                              val & ~0x0080);
                        }

                } else if (cp->cas_flags & CAS_FLAG_SATURN) {
                        writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
                               SATURN_PCFG_FSI : 0x0,
                               cp->regs + REG_SATURN_PCFG);

                        /* load firmware to address 10Mbps auto-negotiation
                         * issue. NOTE: this will need to be changed if the
                         * default firmware gets fixed.
                         */
                        if (PHY_NS_DP83065 == cp->phy_id) {
                                cas_saturn_firmware_load(cp);
                        }
                        cas_phy_powerup(cp);
                }

                /* advertise capabilities */
                val = cas_phy_read(cp, MII_BMCR);
                val &= ~BMCR_ANENABLE;
                cas_phy_write(cp, MII_BMCR, val);
                udelay(10);

                cas_phy_write(cp, MII_ADVERTISE,
                              cas_phy_read(cp, MII_ADVERTISE) |
                              (ADVERTISE_10HALF | ADVERTISE_10FULL |
                               ADVERTISE_100HALF | ADVERTISE_100FULL |
                               CAS_ADVERTISE_PAUSE |
                               CAS_ADVERTISE_ASYM_PAUSE));

                if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
                        /* make sure that we don't advertise half
                         * duplex to avoid a chip issue
                         */
                        val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
                        val &= ~CAS_ADVERTISE_1000HALF;
                        val |= CAS_ADVERTISE_1000FULL;
                        cas_phy_write(cp, CAS_MII_1000_CTRL, val);
                }

        } else {
                /* reset pcs for serdes */
                u32 val;
                int limit;

                writel(PCS_DATAPATH_MODE_SERDES,
                       cp->regs + REG_PCS_DATAPATH_MODE);

                /* enable serdes pins on saturn */
                if (cp->cas_flags & CAS_FLAG_SATURN)
                        writel(0, cp->regs + REG_SATURN_PCFG);

                /* Reset PCS unit. */
                val = readl(cp->regs + REG_PCS_MII_CTRL);
                val |= PCS_MII_RESET;
                writel(val, cp->regs + REG_PCS_MII_CTRL);

                limit = STOP_TRIES;
                while (limit-- > 0) {
                        udelay(10);
                        if ((readl(cp->regs + REG_PCS_MII_CTRL) &
                             PCS_MII_RESET) == 0)
                                break;
                }
                if (limit <= 0)
                        printk(KERN_WARNING "%s: PCS reset bit would not "
                               "clear [%08x].\n", cp->dev->name,
                               readl(cp->regs + REG_PCS_STATE_MACHINE));

                /* Make sure PCS is disabled while changing advertisement
                 * configuration.
                 */
                writel(0x0, cp->regs + REG_PCS_CFG);

                /* Advertise all capabilities except half-duplex. */
                val  = readl(cp->regs + REG_PCS_MII_ADVERT);
                val &= ~PCS_MII_ADVERT_HD;
                val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
                        PCS_MII_ADVERT_ASYM_PAUSE);
                writel(val, cp->regs + REG_PCS_MII_ADVERT);

                /* enable PCS */
                writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);

                /* pcs workaround: enable sync detect */
                writel(PCS_SERDES_CTRL_SYNCD_EN,
                       cp->regs + REG_PCS_SERDES_CTRL);
        }
}
static int cas_pcs_link_check(struct cas *cp)
{
        u32 stat, state_machine;
        int retval = 0;

        /* The link status bit latches on zero, so you must
         * read it twice in such a case to see a transition
         * to the link being up.
         */
        stat = readl(cp->regs + REG_PCS_MII_STATUS);
        if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
                stat = readl(cp->regs + REG_PCS_MII_STATUS);

        /* The remote-fault indication is only valid
         * when autoneg has completed.
         */
        if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
                     PCS_MII_STATUS_REMOTE_FAULT)) ==
            (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
                if (netif_msg_link(cp))
                        printk(KERN_INFO "%s: PCS RemoteFault\n",
                               cp->dev->name);
        }

        /* work around link detection issue by querying the PCS state
         * machine directly.
         */
        state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
        if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
                stat &= ~PCS_MII_STATUS_LINK_STATUS;
        } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
                stat |= PCS_MII_STATUS_LINK_STATUS;
        }

        if (stat & PCS_MII_STATUS_LINK_STATUS) {
                if (cp->lstate != link_up) {
                        if (cp->opened) {
                                cp->lstate = link_up;
                                cp->link_transition = LINK_TRANSITION_LINK_UP;

                                cas_set_link_modes(cp);
                                netif_carrier_on(cp->dev);
                        }
                }
        } else if (cp->lstate == link_up) {
                cp->lstate = link_down;
                if (link_transition_timeout != 0 &&
                    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
                    !cp->link_transition_jiffies_valid) {
                        /*
                         * force a reset, as a workaround for the
                         * link-failure problem. May want to move this to a
                         * point a bit earlier in the sequence. If we had
                         * generated a reset a short time ago, we'll wait for
                         * the link timer to check the status until a
                         * timer expires (link_transition_jiffies_valid is
                         * true when the timer is running.)  Instead of using
                         * a system timer, we just do a check whenever the
                         * link timer is running - this clears the flag after
                         * a suitable delay.
                         */
                        retval = 1;
                        cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
                        cp->link_transition_jiffies = jiffies;
                        cp->link_transition_jiffies_valid = 1;
                } else {
                        cp->link_transition = LINK_TRANSITION_ON_FAILURE;
                }
                netif_carrier_off(cp->dev);
                if (cp->opened && netif_msg_link(cp)) {
                        printk(KERN_INFO "%s: PCS link down.\n",
                               cp->dev->name);
                }

                /* Cassini only: if you force a mode, there can be
                 * sync problems on link down. to fix that, the following
                 * things need to be checked:
                 * 1) read serialink state register
                 * 2) read pcs status register to verify link down.
                 * 3) if link down and serial link == 0x03, then you need
                 *    to global reset the chip.
                 */
                if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
                        /* should check to see if we're in a forced mode */
                        stat = readl(cp->regs + REG_PCS_SERDES_STATE);
                        if (stat == 0x03)
                                return 1;
                }
        } else if (cp->lstate == link_down) {
                if (link_transition_timeout != 0 &&
                    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
                    !cp->link_transition_jiffies_valid) {
                        /* force a reset, as a workaround for the
                         * link-failure problem. May want to move
                         * this to a point a bit earlier in the
                         * sequence.
                         */
                        retval = 1;
                        cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
                        cp->link_transition_jiffies = jiffies;
                        cp->link_transition_jiffies_valid = 1;
                } else {
                        cp->link_transition = LINK_TRANSITION_STILL_FAILED;
                }
        }

        return retval;
}
static int cas_pcs_interrupt(struct net_device *dev,
                             struct cas *cp, u32 status)
{
        u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);

        if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
                return 0;
        return cas_pcs_link_check(cp);
}
static int cas_txmac_interrupt(struct net_device *dev,
                               struct cas *cp, u32 status)
{
        u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);

        if (!txmac_stat)
                return 0;

        if (netif_msg_intr(cp))
                printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
                        cp->dev->name, txmac_stat);

        /* Defer timer expiration is quite normal,
         * don't even log the event.
         */
        if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
            !(txmac_stat & ~MAC_TX_DEFER_TIMER))
                return 0;

        spin_lock(&cp->stat_lock[0]);
        if (txmac_stat & MAC_TX_UNDERRUN) {
                printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
                       dev->name);
                cp->net_stats[0].tx_fifo_errors++;
        }

        if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
                printk(KERN_ERR "%s: TX MAC max packet size error.\n",
                       dev->name);
                cp->net_stats[0].tx_errors++;
        }

        /* The rest are all cases of one of the 16-bit TX
         * counters expiring.
         */
        if (txmac_stat & MAC_TX_COLL_NORMAL)
                cp->net_stats[0].collisions += 0x10000;

        if (txmac_stat & MAC_TX_COLL_EXCESS) {
                cp->net_stats[0].tx_aborted_errors += 0x10000;
                cp->net_stats[0].collisions += 0x10000;
        }

        if (txmac_stat & MAC_TX_COLL_LATE) {
                cp->net_stats[0].tx_aborted_errors += 0x10000;
                cp->net_stats[0].collisions += 0x10000;
        }
        spin_unlock(&cp->stat_lock[0]);

        /* We do not keep track of MAC_TX_COLL_FIRST and
         * MAC_TX_PEAK_ATTEMPTS events.
         */
        return 0;
}
static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
        cas_hp_inst_t *inst;
        u32 val;
        int i;

        i = 0;
        while ((inst = firmware) && inst->note) {
                writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);

                val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
                val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
                writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);

                val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
                val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
                val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
                val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
                val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
                val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
                val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
                writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);

                val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
                val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
                val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
                val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
                writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);

                ++firmware;
                ++i;
        }
}
static void cas_init_rx_dma(struct cas *cp)
{
        u64 desc_dma = cp->block_dvma;
        u32 val;
        int i, size;

        /* rx free descriptors */
        val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
        val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
        val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
        if ((N_RX_DESC_RINGS > 1) &&
            (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
                val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
        writel(val, cp->regs + REG_RX_CFG);

        val = (unsigned long) cp->init_rxds[0] -
                (unsigned long) cp->init_block;
        writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
        writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
        writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);

        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                /* rx desc 2 is for IPSEC packets. however,
                 * we don't use it for that purpose.
                 */
                val = (unsigned long) cp->init_rxds[1] -
                        (unsigned long) cp->init_block;
                writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
                writel((desc_dma + val) & 0xffffffff, cp->regs +
                       REG_PLUS_RX_DB1_LOW);
                writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
                       REG_PLUS_RX_KICK1);
        }

        /* rx completion registers */
        val = (unsigned long) cp->init_rxcs[0] -
                (unsigned long) cp->init_block;
        writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
        writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);

        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                /* rx comp 2-4 */
                for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
                        val = (unsigned long) cp->init_rxcs[i] -
                                (unsigned long) cp->init_block;
                        writel((desc_dma + val) >> 32, cp->regs +
                               REG_PLUS_RX_CBN_HI(i));
                        writel((desc_dma + val) & 0xffffffff, cp->regs +
                               REG_PLUS_RX_CBN_LOW(i));
                }
        }

        /* read selective clear regs to prevent spurious interrupts
         * on reset because complete == kick.
         * selective clear set up to prevent interrupts on resets
         */
        readl(cp->regs + REG_INTR_STATUS_ALIAS);
        writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                for (i = 1; i < N_RX_COMP_RINGS; i++)
                        readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));

                /* 2 is different from 3 and 4 */
                if (N_RX_COMP_RINGS > 1)
                        writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
                               cp->regs + REG_PLUS_ALIASN_CLEAR(1));

                for (i = 2; i < N_RX_COMP_RINGS; i++)
                        writel(INTR_RX_DONE_ALT,
                               cp->regs + REG_PLUS_ALIASN_CLEAR(i));
        }

        /* set up pause thresholds */
        val  = CAS_BASE(RX_PAUSE_THRESH_OFF,
                        cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
        val |= CAS_BASE(RX_PAUSE_THRESH_ON,
                        cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
        writel(val, cp->regs + REG_RX_PAUSE_THRESH);

        /* zero out dma reassembly buffers */
        for (i = 0; i < 64; i++) {
                writel(i, cp->regs + REG_RX_TABLE_ADDR);
                writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
                writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
                writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
        }

        /* make sure address register is 0 for normal operation */
        writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
        writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);

        /* interrupt mitigation */
#ifdef USE_RX_BLANK
        val  = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
        val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
        writel(val, cp->regs + REG_RX_BLANK);
#else
        writel(0x0, cp->regs + REG_RX_BLANK);
#endif

        /* interrupt generation as a function of low water marks for
         * free desc and completion entries. these are used to trigger
         * housekeeping for rx descs. we don't use the free interrupt
         * as it's not very useful
         */
        /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
        val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
        writel(val, cp->regs + REG_RX_AE_THRESH);
        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
                writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
        }

        /* Random early detect registers. useful for congestion avoidance.
         * this should be tunable.
         */
        writel(0x0, cp->regs + REG_RX_RED);

        /* receive page sizes. default == 2K (0x800) */
        val = 0;
        if (cp->page_size == 0x1000)
                val = 0x1;
        else if (cp->page_size == 0x2000)
                val = 0x2;
        else if (cp->page_size == 0x4000)
                val = 0x3;

        /* round mtu + offset. constrain to page size. */
        size = cp->dev->mtu + 64;
        if (size > cp->page_size)
                size = cp->page_size;

        if (size <= 0x400)
                i = 0x0;
        else if (size <= 0x800)
                i = 0x1;
        else if (size <= 0x1000)
                i = 0x2;
        else
                i = 0x3;

        cp->mtu_stride = 1 << (i + 10);
        val  = CAS_BASE(RX_PAGE_SIZE, val);
        val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
        val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
        val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
        writel(val, cp->regs + REG_RX_PAGE_SIZE);
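
        /* worked example of the sizing above (sketch): dev->mtu = 1500
         * and page_size = 0x2000 give size = 1564, hence i = 1, a 2K
         * mtu_stride, and MTU_COUNT = 0x2000 >> 11 = 4 buffers per page.
         */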

        /* enable the header parser if desired */
        if (CAS_HP_FIRMWARE == cas_prog_null)
                return;

        val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
        val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
        val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
        writel(val, cp->regs + REG_HP_CFG);
}
static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{
        memset(rxc, 0, sizeof(*rxc));
        rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
}
/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
 * flipping is protected by the fact that the chip will not
 * hand back the same page index while it's being processed.
 */
static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
{
        cas_page_t *page = cp->rx_pages[1][index];
        cas_page_t *new;

        if (cas_buffer_count(page) == 1)
                return page;

        new = cas_page_dequeue(cp);
        if (new) {
                spin_lock(&cp->rx_inuse_lock);
                list_add(&page->list, &cp->rx_inuse_list);
                spin_unlock(&cp->rx_inuse_lock);
        }
        return new;
}
/* this needs to be changed if we actually use the ENC RX DESC ring */
static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
                                 const int index)
{
        cas_page_t **page0 = cp->rx_pages[0];
        cas_page_t **page1 = cp->rx_pages[1];

        /* swap if buffer is in use */
        if (cas_buffer_count(page0[index]) > 1) {
                cas_page_t *new = cas_page_spare(cp, index);
                if (new) {
                        page1[index] = page0[index];
                        page0[index] = new;
                }
        }
        RX_USED_SET(page0[index], 0);
        return page0[index];
}
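
/* a sketch of the swap above: when page0[index] is still referenced by
 * the stack, the spare from cas_page_spare() becomes the new
 * page0[index] and the busy page parks in page1[index] until its
 * reference count drops back to 1.
 */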
static void cas_clean_rxds(struct cas *cp)
{
        /* only clean ring 0 as ring 1 is used for spare buffers */
        struct cas_rx_desc *rxd = cp->init_rxds[0];
        int i, size;

        /* release all rx flows */
        for (i = 0; i < N_RX_FLOWS; i++) {
                struct sk_buff *skb;
                while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
                        cas_skb_release(skb);
                }
        }

        /* initialize descriptors */
        size = RX_DESC_RINGN_SIZE(0);
        for (i = 0; i < size; i++) {
                cas_page_t *page = cas_page_swap(cp, 0, i);
                rxd[i].buffer = cpu_to_le64(page->dma_addr);
                rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
                                            CAS_BASE(RX_INDEX_RING, 0));
        }

        cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4;
        cp->rx_last[0] = 0;
        cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
}
static void cas_clean_rxcs(struct cas *cp)
{
        int i, j;

        /* take ownership of rx comp descriptors */
        memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
        memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
        for (i = 0; i < N_RX_COMP_RINGS; i++) {
                struct cas_rx_comp *rxc = cp->init_rxcs[i];
                for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
                        cas_rxc_init(rxc + j);
                }
        }
}
/* When we get a RX fifo overflow, the RX unit is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int cas_rxmac_reset(struct cas *cp)
{
        struct net_device *dev = cp->dev;
        int limit;
        u32 val;

        /* First, reset MAC RX. */
        writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
        for (limit = 0; limit < STOP_TRIES; limit++) {
                if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
                        break;
                udelay(10);
        }
        if (limit == STOP_TRIES) {
                printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
                       "chip.\n", dev->name);
                return 1;
        }

        /* Second, disable RX DMA. */
        writel(0, cp->regs + REG_RX_CFG);
        for (limit = 0; limit < STOP_TRIES; limit++) {
                if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
                        break;
                udelay(10);
        }
        if (limit == STOP_TRIES) {
                printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
                       "chip.\n", dev->name);
                return 1;
        }

        mdelay(5);

        /* Execute RX reset command. */
        writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
        for (limit = 0; limit < STOP_TRIES; limit++) {
                if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
                        break;
                udelay(10);
        }
        if (limit == STOP_TRIES) {
                printk(KERN_ERR "%s: RX reset command will not execute, "
                       "resetting whole chip.\n", dev->name);
                return 1;
        }

        /* reset driver rx state */
        cas_clean_rxds(cp);
        cas_clean_rxcs(cp);

        /* Now, reprogram the rest of RX unit. */
        cas_init_rx_dma(cp);

        /* re-enable */
        val = readl(cp->regs + REG_RX_CFG);
        writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
        writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
        val = readl(cp->regs + REG_MAC_RX_CFG);
        writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
        return 0;
}
static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
                               u32 status)
{
        u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);

        if (!stat)
                return 0;

        if (netif_msg_intr(cp))
                printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n",
                        cp->dev->name, stat);

        /* these are all rollovers */
        spin_lock(&cp->stat_lock[0]);
        if (stat & MAC_RX_ALIGN_ERR)
                cp->net_stats[0].rx_frame_errors += 0x10000;

        if (stat & MAC_RX_CRC_ERR)
                cp->net_stats[0].rx_crc_errors += 0x10000;

        if (stat & MAC_RX_LEN_ERR)
                cp->net_stats[0].rx_length_errors += 0x10000;

        if (stat & MAC_RX_OVERFLOW) {
                cp->net_stats[0].rx_over_errors++;
                cp->net_stats[0].rx_fifo_errors++;
        }

        /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
         * events.
         */
        spin_unlock(&cp->stat_lock[0]);
        return 0;
}
static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
                             u32 status)
{
        u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);

        if (!stat)
                return 0;

        if (netif_msg_intr(cp))
                printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n",
                        cp->dev->name, stat);

        /* This interrupt is just for pause frame and pause
         * tracking.  It is useful for diagnostics and debug
         * but probably by default we will mask these events.
         */
        if (stat & MAC_CTRL_PAUSE_STATE)
                cp->pause_entered++;

        if (stat & MAC_CTRL_PAUSE_RECEIVED)
                cp->pause_last_time_recvd = (stat >> 16);

        return 0;
}
/* Must be invoked under cp->lock. */
static inline int cas_mdio_link_not_up(struct cas *cp)
{
        u16 val;

        switch (cp->lstate) {
        case link_force_ret:
                if (netif_msg_link(cp))
                        printk(KERN_INFO "%s: Autoneg failed again, keeping"
                                " forced mode\n", cp->dev->name);
                cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
                cp->timer_ticks = 5;
                cp->lstate = link_force_ok;
                cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
                break;

        case link_aneg:
                val = cas_phy_read(cp, MII_BMCR);

                /* Try forced modes. we try things in the following order:
                 * 1000 full -> 100 full/half -> 10 half
                 */
                val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
                val |= BMCR_FULLDPLX;
                val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
                        CAS_BMCR_SPEED1000 : BMCR_SPEED100;
                cas_phy_write(cp, MII_BMCR, val);
                cp->timer_ticks = 5;
                cp->lstate = link_force_try;
                cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
                break;

        case link_force_try:
                /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
                val = cas_phy_read(cp, MII_BMCR);
                cp->timer_ticks = 5;
                if (val & CAS_BMCR_SPEED1000) { /* gigabit */
                        val &= ~CAS_BMCR_SPEED1000;
                        val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
                        cas_phy_write(cp, MII_BMCR, val);
                        break;
                }

                if (val & BMCR_SPEED100) {
                        if (val & BMCR_FULLDPLX) /* fd failed */
                                val &= ~BMCR_FULLDPLX;
                        else { /* 100Mbps failed */
                                val &= ~BMCR_SPEED100;
                        }
                        cas_phy_write(cp, MII_BMCR, val);
                        break;
                }
        default:
                break;
        }
        return 0;
}
/* must be invoked with cp->lock held */
static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
{
        int restart;

        if (bmsr & BMSR_LSTATUS) {
                /* Ok, here we got a link. If we had it due to a forced
                 * fallback, and we were configured for autoneg, we
                 * retry a short autoneg pass. If you know your hub is
                 * broken, use ethtool ;)
                 */
                if ((cp->lstate == link_force_try) &&
                    (cp->link_cntl & BMCR_ANENABLE)) {
                        cp->lstate = link_force_ret;
                        cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
                        cas_mif_poll(cp, 0);
                        cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
                        cp->timer_ticks = 5;
                        if (cp->opened && netif_msg_link(cp))
                                printk(KERN_INFO "%s: Got link after fallback, retrying"
                                       " autoneg once...\n", cp->dev->name);
                        cas_phy_write(cp, MII_BMCR,
                                      cp->link_fcntl | BMCR_ANENABLE |
                                      BMCR_ANRESTART);
                        cas_mif_poll(cp, 1);

                } else if (cp->lstate != link_up) {
                        cp->lstate = link_up;
                        cp->link_transition = LINK_TRANSITION_LINK_UP;

                        if (cp->opened) {
                                cas_set_link_modes(cp);
                                netif_carrier_on(cp->dev);
                        }
                }
                return 0;
        }

        /* link not up. if the link was previously up, we restart the
         * link again.
         */
        restart = 0;
        if (cp->lstate == link_up) {
                cp->lstate = link_down;
                cp->link_transition = LINK_TRANSITION_LINK_DOWN;

                netif_carrier_off(cp->dev);
                if (cp->opened && netif_msg_link(cp))
                        printk(KERN_INFO "%s: Link down\n",
                               cp->dev->name);
                restart = 1;

        } else if (++cp->timer_ticks > 10)
                cas_mdio_link_not_up(cp);

        return restart;
}
static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
                             u32 status)
{
        u32 stat = readl(cp->regs + REG_MIF_STATUS);
        u16 bmsr;

        /* check for a link change */
        if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
                return 0;

        bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
        return cas_mii_link_check(cp, bmsr);
}
static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
                             u32 status)
{
        u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);

        if (!stat)
                return 0;

        printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat,
               readl(cp->regs + REG_BIM_DIAG));

        /* cassini+ has this reserved */
        if ((stat & PCI_ERR_BADACK) &&
            ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
                printk("<No ACK64# during ABS64 cycle> ");

        if (stat & PCI_ERR_DTRTO)
                printk("<Delayed transaction timeout> ");
        if (stat & PCI_ERR_OTHER)
                printk("<other> ");
        if (stat & PCI_ERR_BIM_DMA_WRITE)
                printk("<BIM DMA 0 write req> ");
        if (stat & PCI_ERR_BIM_DMA_READ)
                printk("<BIM DMA 0 read req> ");
        printk("\n");

        if (stat & PCI_ERR_OTHER) {
                u16 cfg;

                /* Interrogate PCI config space for the
                 * true cause.
                 */
                pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
                printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
                       dev->name, cfg);
                if (cfg & PCI_STATUS_PARITY)
                        printk(KERN_ERR "%s: PCI parity error detected.\n",
                               dev->name);
                if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
                        printk(KERN_ERR "%s: PCI target abort.\n",
                               dev->name);
                if (cfg & PCI_STATUS_REC_TARGET_ABORT)
                        printk(KERN_ERR "%s: PCI master acks target abort.\n",
                               dev->name);
                if (cfg & PCI_STATUS_REC_MASTER_ABORT)
                        printk(KERN_ERR "%s: PCI master abort.\n", dev->name);
                if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
                        printk(KERN_ERR "%s: PCI system error SERR#.\n",
                               dev->name);
                if (cfg & PCI_STATUS_DETECTED_PARITY)
                        printk(KERN_ERR "%s: PCI parity error.\n",
                               dev->name);

                /* Write the error bits back to clear them. */
                cfg &= (PCI_STATUS_PARITY |
                        PCI_STATUS_SIG_TARGET_ABORT |
                        PCI_STATUS_REC_TARGET_ABORT |
                        PCI_STATUS_REC_MASTER_ABORT |
                        PCI_STATUS_SIG_SYSTEM_ERROR |
                        PCI_STATUS_DETECTED_PARITY);
                pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
        }

        /* For all PCI errors, we should reset the chip. */
        return 1;
}
/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
                            u32 status)
{
        if (status & INTR_RX_TAG_ERROR) {
                /* corrupt RX tag framing */
                if (netif_msg_rx_err(cp))
                        printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
                                cp->dev->name);
                spin_lock(&cp->stat_lock[0]);
                cp->net_stats[0].rx_errors++;
                spin_unlock(&cp->stat_lock[0]);
                goto do_reset;
        }

        if (status & INTR_RX_LEN_MISMATCH) {
                /* length mismatch. */
                if (netif_msg_rx_err(cp))
                        printk(KERN_DEBUG "%s: length mismatch for rx frame\n",
                                cp->dev->name);
                spin_lock(&cp->stat_lock[0]);
                cp->net_stats[0].rx_errors++;
                spin_unlock(&cp->stat_lock[0]);
                goto do_reset;
        }

        if (status & INTR_PCS_STATUS) {
                if (cas_pcs_interrupt(dev, cp, status))
                        goto do_reset;
        }

        if (status & INTR_TX_MAC_STATUS) {
                if (cas_txmac_interrupt(dev, cp, status))
                        goto do_reset;
        }

        if (status & INTR_RX_MAC_STATUS) {
                if (cas_rxmac_interrupt(dev, cp, status))
                        goto do_reset;
        }

        if (status & INTR_MAC_CTRL_STATUS) {
                if (cas_mac_interrupt(dev, cp, status))
                        goto do_reset;
        }

        if (status & INTR_MIF_STATUS) {
                if (cas_mif_interrupt(dev, cp, status))
                        goto do_reset;
        }

        if (status & INTR_PCI_ERROR_STATUS) {
                if (cas_pci_interrupt(dev, cp, status))
                        goto do_reset;
        }
        return 0;

do_reset:
#if 1
        atomic_inc(&cp->reset_task_pending);
        atomic_inc(&cp->reset_task_pending_all);
        printk(KERN_ERR "%s:reset called in cas_abnormal_irq [0x%x]\n",
               dev->name, status);
        schedule_work(&cp->reset_task);
#else
        atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
        printk(KERN_ERR "reset called in cas_abnormal_irq\n");
        schedule_work(&cp->reset_task);
#endif
        return 1;
}
/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
 * determining whether to do a netif_stop/wakeup
 */
#define CAS_TABORT(x)      (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
#define CAS_ROUND_PAGE(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
                                  const int len)
{
        unsigned long off = addr + len;

        if (CAS_TABORT(cp) == 1)
                return 0;
        if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
                return 0;
        return TX_TARGET_ABORT_LEN;
}
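
/* illustrative reading of the helper above (sketch): let
 * h = CAS_ROUND_PAGE(off) - off be the distance from the end of the
 * buffer to the next page boundary. on chips with CAS_FLAG_TARGET_ABORT
 * set, a buffer whose tail ends within TX_TARGET_ABORT_LEN bytes of
 * that boundary reports a TX_TARGET_ABORT_LEN-byte tail, which the TX
 * path can then place in a tiny buffer (see tx_tiny_use below) so a
 * DMA burst never straddles the boundary.
 */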
static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
{
        struct cas_tx_desc *txds;
        struct sk_buff **skbs;
        struct net_device *dev = cp->dev;
        int entry, count;

        spin_lock(&cp->tx_lock[ring]);
        txds = cp->init_txds[ring];
        skbs = cp->tx_skbs[ring];
        entry = cp->tx_old[ring];

        count = TX_BUFF_COUNT(ring, entry, limit);
        while (entry != limit) {
                struct sk_buff *skb = skbs[entry];
                dma_addr_t daddr;
                u32 dlen;
                int frag;

                if (!skb) {
                        /* this should never occur */
                        entry = TX_DESC_NEXT(ring, entry);
                        continue;
                }

                /* however, we might get only a partial skb release. */
                count -= skb_shinfo(skb)->nr_frags +
                        + cp->tx_tiny_use[ring][entry].nbufs + 1;
                if (count < 0)
                        break;

                if (netif_msg_tx_done(cp))
                        printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n",
                               cp->dev->name, ring, entry);

                skbs[entry] = NULL;
                cp->tx_tiny_use[ring][entry].nbufs = 0;

                for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
                        struct cas_tx_desc *txd = txds + entry;

                        daddr = le64_to_cpu(txd->buffer);
                        dlen = CAS_VAL(TX_DESC_BUFLEN,
                                       le64_to_cpu(txd->control));
                        pci_unmap_page(cp->pdev, daddr, dlen,
                                       PCI_DMA_TODEVICE);
                        entry = TX_DESC_NEXT(ring, entry);

                        /* tiny buffer may follow */
                        if (cp->tx_tiny_use[ring][entry].used) {
                                cp->tx_tiny_use[ring][entry].used = 0;
                                entry = TX_DESC_NEXT(ring, entry);
                        }
                }

                spin_lock(&cp->stat_lock[ring]);
                cp->net_stats[ring].tx_packets++;
                cp->net_stats[ring].tx_bytes += skb->len;
                spin_unlock(&cp->stat_lock[ring]);
                dev_kfree_skb_irq(skb);
        }
        cp->tx_old[ring] = entry;

        /* this is wrong for multiple tx rings. the net device needs
         * multiple queues for this to do the right thing.  we wait
         * for 2*packets to be available when using tiny buffers
         */
        if (netif_queue_stopped(dev) &&
            (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
                netif_wake_queue(dev);
        spin_unlock(&cp->tx_lock[ring]);
}
static void cas_tx(struct net_device *dev, struct cas *cp,
                   u32 status)
{
        int limit, ring;
#ifdef USE_TX_COMPWB
        u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
#endif
        if (netif_msg_intr(cp))
                printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n",
                        cp->dev->name, status, (unsigned long long)compwb);
        /* process all the rings */
        for (ring = 0; ring < N_TX_RINGS; ring++) {
#ifdef USE_TX_COMPWB
                /* use the completion writeback registers */
                limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
                        CAS_VAL(TX_COMPWB_LSB, compwb);
                compwb = TX_COMPWB_NEXT(compwb);
#else
                limit = readl(cp->regs + REG_TX_COMPN(ring));
#endif
                if (cp->tx_old[ring] != limit)
                        cas_tx_ringN(cp, ring, limit);
        }
}
1973 static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
1974 int entry, const u64 *words,
1975 struct sk_buff **skbref)
1977 int dlen, hlen, len, i, alloclen;
1978 int off, swivel = RX_SWIVEL_OFF_VAL;
1979 struct cas_page *page;
1980 struct sk_buff *skb;
1981 void *addr, *crcaddr;
1985 hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
1986 dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
1989 if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
1992 alloclen = max(hlen, RX_COPY_MIN);
1994 skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
1999 skb_reserve(skb, swivel);
2002 addr = crcaddr = NULL;
2003 if (hlen) { /* always copy header pages */
2004 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2005 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2006 off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
2010 if (!dlen) /* attach FCS */
2012 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2013 PCI_DMA_FROMDEVICE);
2014 addr = cas_page_map(page->buffer);
2015 memcpy(p, addr + off, i);
2016 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2017 PCI_DMA_FROMDEVICE);
2018 cas_page_unmap(addr);
2019 RX_USED_ADD(page, 0x100);
2025 if (alloclen < (hlen + dlen)) {
2026 skb_frag_t *frag = skb_shinfo(skb)->frags;
2028 /* normal or jumbo packets. we use frags */
2029 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2030 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2031 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2033 hlen = min(cp->page_size - off, dlen);
2035 if (netif_msg_rx_err(cp)) {
2036 printk(KERN_DEBUG "%s: rx page overflow: "
2037 "%d\n", cp->dev->name, hlen);
2039 dev_kfree_skb_irq(skb);
2043 if (i == dlen) /* attach FCS */
2045 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2046 PCI_DMA_FROMDEVICE);
2048 /* make sure we always copy a header */
2050 if (p == (char *) skb->data) { /* not split */
2051 addr = cas_page_map(page->buffer);
2052 memcpy(p, addr + off, RX_COPY_MIN);
2053 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2054 PCI_DMA_FROMDEVICE);
2055 cas_page_unmap(addr);
2057 swivel = RX_COPY_MIN;
2058 RX_USED_ADD(page, cp->mtu_stride);
2060 RX_USED_ADD(page, hlen);
2062 skb_put(skb, alloclen);
2064 skb_shinfo(skb)->nr_frags++;
2065 skb->data_len += hlen - swivel;
2066 skb->len += hlen - swivel;
2068 get_page(page->buffer);
2069 cas_buffer_inc(page);
2070 frag->page = page->buffer;
2071 frag->page_offset = off;
2072 frag->size = hlen - swivel;
2074 /* any more data? */
2075 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2079 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2080 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2081 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2082 hlen + cp->crc_size,
2083 PCI_DMA_FROMDEVICE);
2084 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2085 hlen + cp->crc_size,
2086 PCI_DMA_FROMDEVICE);
2088 skb_shinfo(skb)->nr_frags++;
2089 skb->data_len += hlen;
2093 get_page(page->buffer);
2094 cas_buffer_inc(page);
2095 frag->page = page->buffer;
2096 frag->page_offset = 0;
2098 RX_USED_ADD(page, hlen + cp->crc_size);
2102 addr = cas_page_map(page->buffer);
2103 crcaddr = addr + off + hlen;
2107 /* copying packet */
2111 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2112 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2113 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2114 hlen = min(cp->page_size - off, dlen);
2116 if (netif_msg_rx_err(cp)) {
2117 printk(KERN_DEBUG "%s: rx page overflow: "
2118 "%d\n", cp->dev->name, hlen);
2120 dev_kfree_skb_irq(skb);
2124 if (i == dlen) /* attach FCS */
2126 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2127 PCI_DMA_FROMDEVICE);
2128 addr = cas_page_map(page->buffer);
2129 memcpy(p, addr + off, i);
2130 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2131 PCI_DMA_FROMDEVICE);
2132 cas_page_unmap(addr);
2133 if (p == (char *) skb->data) /* not split */
2134 RX_USED_ADD(page, cp->mtu_stride);
2136 RX_USED_ADD(page, i);
2138 /* any more data? */
2139 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2141 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2142 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2143 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2144 dlen + cp->crc_size,
2145 PCI_DMA_FROMDEVICE);
2146 addr = cas_page_map(page->buffer);
2147 memcpy(p, addr, dlen + cp->crc_size);
2148 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2149 dlen + cp->crc_size,
2150 PCI_DMA_FROMDEVICE);
2151 cas_page_unmap(addr);
2152 RX_USED_ADD(page, dlen + cp->crc_size);
2157 crcaddr = skb->data + alloclen;
2159 skb_put(skb, alloclen);
2162 csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
2164 /* checksum includes FCS. strip it out. */
2165 csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
2166 csum_unfold(csum)));
2168 cas_page_unmap(addr);
2170 skb->csum = csum_unfold(~csum);
2171 skb->ip_summed = CHECKSUM_COMPLETE;
2172 skb->protocol = eth_type_trans(skb, cp->dev);
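/* For reference, csum_fold() above reduces the 32-bit one's-complement
 * accumulator to its folded 16-bit form (the real helper also
 * complements the result). A standalone sketch of the folding step,
 * illustration only, not driver code:
 */
#if 0
static unsigned short fold16(unsigned int sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the upper half in */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb any final carry */
	return (unsigned short)sum;
}
#endif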
2177 /* we can handle up to 64 rx flows at a time. we do the same thing
2178 * as nonreassm except that we batch up the buffers.
2179 * NOTE: we currently just treat each flow as a bunch of packets that
2180 * we pass up. a better way would be to coalesce the packets
2181 * into a jumbo packet. to do that, we need to do the following:
* 1) the first packet will have a clean split between header and
*    data.
2184 * 2) each time the next flow packet comes in, extend the
2185 * data length and merge the checksums.
2186 * 3) on flow release, fix up the header.
2187 * 4) make sure the higher layer doesn't care.
* because packets get coalesced, we shouldn't run into fragment count
* issues.
2191 static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2192 struct sk_buff *skb)
2194 int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
2195 struct sk_buff_head *flow = &cp->rx_flows[flowid];
/* this is protected at a higher layer, so no need to
 * do any additional locking here. stick the buffer
 * on the end.
 */
2201 __skb_insert(skb, flow->prev, (struct sk_buff *) flow, flow);
2202 if (words[0] & RX_COMP1_RELEASE_FLOW) {
2203 while ((skb = __skb_dequeue(flow))) {
2204 cas_skb_release(skb);
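/* A minimal standalone sketch of the batch-and-release pattern above
 * (illustration only; flow_q, flow_pkt and deliver are hypothetical
 * names, and the queue is assumed initialized with
 * skb_queue_head_init()): buffers accumulate per flow and are handed
 * upstream in arrival order once the hardware flags the flow released.
 */
#if 0
struct flow_q {
	struct sk_buff_head q;
};

static void flow_pkt(struct flow_q *f, struct sk_buff *skb, int release,
		     void (*deliver)(struct sk_buff *))
{
	__skb_queue_tail(&f->q, skb);		/* batch in arrival order */
	if (release)				/* hw marked end of flow */
		while ((skb = __skb_dequeue(&f->q)) != NULL)
			deliver(skb);		/* pass the batch up */
}
#endif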
2209 /* put rx descriptor back on ring. if a buffer is in use by a higher
2210 * layer, this will need to put in a replacement.
2212 static void cas_post_page(struct cas *cp, const int ring, const int index)
2217 entry = cp->rx_old[ring];
2219 new = cas_page_swap(cp, ring, index);
2220 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2221 cp->init_rxds[ring][entry].index =
2222 cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
2223 CAS_BASE(RX_INDEX_RING, ring));
2225 entry = RX_DESC_ENTRY(ring, entry + 1);
2226 cp->rx_old[ring] = entry;
2232 writel(entry, cp->regs + REG_RX_KICK);
2233 else if ((N_RX_DESC_RINGS > 1) &&
2234 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2235 writel(entry, cp->regs + REG_PLUS_RX_KICK1);
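/* The CAS_BASE()/CAS_VAL() helpers used above pack and extract bit
 * fields described by a shift and a mask. A self-contained sketch of
 * the idiom (EX_SHIFT/EX_MASK are made-up example values):
 */
#if 0
#define EX_SHIFT	8
#define EX_MASK		0x0000ff00
#define EX_BASE(v)	(((v) << EX_SHIFT) & EX_MASK)	/* pack field */
#define EX_VAL(w)	(((w) & EX_MASK) >> EX_SHIFT)	/* extract field */
/* round trip: EX_VAL(EX_BASE(0x12)) == 0x12 */
#endif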
2239 /* only when things are bad */
2240 static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2242 unsigned int entry, last, count, released;
2244 cas_page_t **page = cp->rx_pages[ring];
2246 entry = cp->rx_old[ring];
2248 if (netif_msg_intr(cp))
2249 printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n",
2250 cp->dev->name, ring, entry);
2253 count = entry & 0x3;
2254 last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
2256 while (entry != last) {
2257 /* make a new buffer if it's still in use */
2258 if (cas_buffer_count(page[entry]) > 1) {
2259 cas_page_t *new = cas_page_dequeue(cp);
/* let the timer know that we need to
 * do this again
 */
2264 cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2265 if (!timer_pending(&cp->link_timer))
2266 mod_timer(&cp->link_timer, jiffies +
2267 CAS_LINK_FAST_TIMEOUT);
2268 cp->rx_old[ring] = entry;
2269 cp->rx_last[ring] = num ? num - released : 0;
2272 spin_lock(&cp->rx_inuse_lock);
2273 list_add(&page[entry]->list, &cp->rx_inuse_list);
2274 spin_unlock(&cp->rx_inuse_lock);
2275 cp->init_rxds[ring][entry].buffer =
2276 cpu_to_le64(new->dma_addr);
2286 entry = RX_DESC_ENTRY(ring, entry + 1);
2288 cp->rx_old[ring] = entry;
2294 writel(cluster, cp->regs + REG_RX_KICK);
2295 else if ((N_RX_DESC_RINGS > 1) &&
2296 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2297 writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
2302 /* process a completion ring. packets are set up in three basic ways:
2303 * small packets: should be copied header + data in single buffer.
2304 * large packets: header and data in a single buffer.
2305 * split packets: header in a separate buffer from data.
2306 * data may be in multiple pages. data may be > 256
2307 * bytes but in a single page.
2309 * NOTE: RX page posting is done in this routine as well. while there's
2310 * the capability of using multiple RX completion rings, it isn't
2311 * really worthwhile due to the fact that the page posting will
2312 * force serialization on the single descriptor ring.
2314 static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2316 struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2320 if (netif_msg_intr(cp))
2321 printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n",
2322 cp->dev->name, ring,
2323 readl(cp->regs + REG_RX_COMP_HEAD),
2326 entry = cp->rx_new[ring];
2329 struct cas_rx_comp *rxc = rxcs + entry;
2330 struct sk_buff *skb;
2335 words[0] = le64_to_cpu(rxc->word1);
2336 words[1] = le64_to_cpu(rxc->word2);
2337 words[2] = le64_to_cpu(rxc->word3);
2338 words[3] = le64_to_cpu(rxc->word4);
2340 /* don't touch if still owned by hw */
2341 type = CAS_VAL(RX_COMP1_TYPE, words[0]);
2345 /* hw hasn't cleared the zero bit yet */
2346 if (words[3] & RX_COMP4_ZERO) {
2350 /* get info on the packet */
2351 if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
2352 spin_lock(&cp->stat_lock[ring]);
2353 cp->net_stats[ring].rx_errors++;
2354 if (words[3] & RX_COMP4_LEN_MISMATCH)
2355 cp->net_stats[ring].rx_length_errors++;
2356 if (words[3] & RX_COMP4_BAD)
2357 cp->net_stats[ring].rx_crc_errors++;
2358 spin_unlock(&cp->stat_lock[ring]);
2360 /* We'll just return it to Cassini. */
2362 spin_lock(&cp->stat_lock[ring]);
2363 ++cp->net_stats[ring].rx_dropped;
2364 spin_unlock(&cp->stat_lock[ring]);
2368 len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
2374 /* see if it's a flow re-assembly or not. the driver
2375 * itself handles release back up.
2377 if (RX_DONT_BATCH || (type == 0x2)) {
2378 /* non-reassm: these always get released */
2379 cas_skb_release(skb);
2381 cas_rx_flow_pkt(cp, words, skb);
2384 spin_lock(&cp->stat_lock[ring]);
2385 cp->net_stats[ring].rx_packets++;
2386 cp->net_stats[ring].rx_bytes += len;
2387 spin_unlock(&cp->stat_lock[ring]);
2388 cp->dev->last_rx = jiffies;
2393 /* should it be released? */
2394 if (words[0] & RX_COMP1_RELEASE_HDR) {
2395 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2396 dring = CAS_VAL(RX_INDEX_RING, i);
2397 i = CAS_VAL(RX_INDEX_NUM, i);
2398 cas_post_page(cp, dring, i);
2401 if (words[0] & RX_COMP1_RELEASE_DATA) {
2402 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2403 dring = CAS_VAL(RX_INDEX_RING, i);
2404 i = CAS_VAL(RX_INDEX_NUM, i);
2405 cas_post_page(cp, dring, i);
2408 if (words[0] & RX_COMP1_RELEASE_NEXT) {
2409 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2410 dring = CAS_VAL(RX_INDEX_RING, i);
2411 i = CAS_VAL(RX_INDEX_NUM, i);
2412 cas_post_page(cp, dring, i);
2415 /* skip to the next entry */
2416 entry = RX_COMP_ENTRY(ring, entry + 1 +
2417 CAS_VAL(RX_COMP1_SKIP, words[0]));
2419 if (budget && (npackets >= budget))
2423 cp->rx_new[ring] = entry;
printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
cp->dev->name);
2432 /* put completion entries back on the ring */
2433 static void cas_post_rxcs_ringN(struct net_device *dev,
2434 struct cas *cp, int ring)
2436 struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2439 last = cp->rx_cur[ring];
2440 entry = cp->rx_new[ring];
2441 if (netif_msg_intr(cp))
2442 printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n",
2443 dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD),
2446 /* zero and re-mark descriptors */
2447 while (last != entry) {
2448 cas_rxc_init(rxc + last);
2449 last = RX_COMP_ENTRY(ring, last + 1);
2451 cp->rx_cur[ring] = last;
2454 writel(last, cp->regs + REG_RX_COMP_TAIL);
2455 else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2456 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2461 /* cassini can use all four PCI interrupts for the completion ring.
2462 * rings 3 and 4 are identical
2464 #if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
2465 static inline void cas_handle_irqN(struct net_device *dev,
2466 struct cas *cp, const u32 status,
2469 if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2470 cas_post_rxcs_ringN(dev, cp, ring);
2473 static irqreturn_t cas_interruptN(int irq, void *dev_id)
2475 struct net_device *dev = dev_id;
2476 struct cas *cp = netdev_priv(dev);
2477 unsigned long flags;
2479 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2481 /* check for shared irq */
2485 ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2486 spin_lock_irqsave(&cp->lock, flags);
2487 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2490 netif_rx_schedule(dev, &cp->napi);
2492 cas_rx_ringN(cp, ring, 0);
2494 status &= ~INTR_RX_DONE_ALT;
2498 cas_handle_irqN(dev, cp, status, ring);
2499 spin_unlock_irqrestore(&cp->lock, flags);
2505 /* everything but rx packets */
2506 static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2508 if (status & INTR_RX_BUF_UNAVAIL_1) {
2509 /* Frame arrived, no free RX buffers available.
2510 * NOTE: we can get this on a link transition. */
2511 cas_post_rxds_ringN(cp, 1, 0);
2512 spin_lock(&cp->stat_lock[1]);
2513 cp->net_stats[1].rx_dropped++;
2514 spin_unlock(&cp->stat_lock[1]);
2517 if (status & INTR_RX_BUF_AE_1)
2518 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2519 RX_AE_FREEN_VAL(1));
2521 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2522 cas_post_rxcs_ringN(cp, 1);
2525 /* ring 2 handles a few more events than 3 and 4 */
2526 static irqreturn_t cas_interrupt1(int irq, void *dev_id)
2528 struct net_device *dev = dev_id;
2529 struct cas *cp = netdev_priv(dev);
2530 unsigned long flags;
2531 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2533 /* check for shared interrupt */
2537 spin_lock_irqsave(&cp->lock, flags);
2538 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2541 netif_rx_schedule(dev, &cp->napi);
2543 cas_rx_ringN(cp, 1, 0);
2545 status &= ~INTR_RX_DONE_ALT;
2548 cas_handle_irq1(cp, status);
2549 spin_unlock_irqrestore(&cp->lock, flags);
2554 static inline void cas_handle_irq(struct net_device *dev,
2555 struct cas *cp, const u32 status)
2557 /* housekeeping interrupts */
2558 if (status & INTR_ERROR_MASK)
2559 cas_abnormal_irq(dev, cp, status);
2561 if (status & INTR_RX_BUF_UNAVAIL) {
2562 /* Frame arrived, no free RX buffers available.
2563 * NOTE: we can get this on a link transition.
2565 cas_post_rxds_ringN(cp, 0, 0);
2566 spin_lock(&cp->stat_lock[0]);
2567 cp->net_stats[0].rx_dropped++;
2568 spin_unlock(&cp->stat_lock[0]);
2569 } else if (status & INTR_RX_BUF_AE) {
2570 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2571 RX_AE_FREEN_VAL(0));
2574 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2575 cas_post_rxcs_ringN(dev, cp, 0);
2578 static irqreturn_t cas_interrupt(int irq, void *dev_id)
2580 struct net_device *dev = dev_id;
2581 struct cas *cp = netdev_priv(dev);
2582 unsigned long flags;
2583 u32 status = readl(cp->regs + REG_INTR_STATUS);
2588 spin_lock_irqsave(&cp->lock, flags);
2589 if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
2590 cas_tx(dev, cp, status);
2591 status &= ~(INTR_TX_ALL | INTR_TX_INTME);
2594 if (status & INTR_RX_DONE) {
2597 netif_rx_schedule(dev, &cp->napi);
2599 cas_rx_ringN(cp, 0, 0);
2601 status &= ~INTR_RX_DONE;
2605 cas_handle_irq(dev, cp, status);
2606 spin_unlock_irqrestore(&cp->lock, flags);
2612 static int cas_poll(struct napi_struct *napi, int budget)
2614 struct cas *cp = container_of(napi, struct cas, napi);
2615 struct net_device *dev = cp->dev;
2616 int i, enable_intr, todo, credits;
2617 u32 status = readl(cp->regs + REG_INTR_STATUS);
2618 unsigned long flags;
2620 spin_lock_irqsave(&cp->lock, flags);
2621 cas_tx(dev, cp, status);
2622 spin_unlock_irqrestore(&cp->lock, flags);
/* NAPI rx packets. we spread the credits across all of the
 * rxc rings.
 *
 * to make sure we're fair with the work, we loop through each
 * ring N_RX_COMP_RINGS times with a request of
 * budget / N_RX_COMP_RINGS.
 */
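/* Sketch of that fair-split loop (illustration only; fair_poll and
 * poll_ring are hypothetical stand-ins for the loop below and
 * cas_rx_ringN()): each pass offers every ring an equal slice of the
 * budget until the budget is consumed.
 */
#if 0
static int fair_poll(int nrings, int budget,
		     int (*poll_ring)(int ring, int quota))
{
	int i, j, credits = 0;

	for (i = 0; i < nrings; i++)
		for (j = 0; j < nrings; j++) {
			credits += poll_ring(j, budget / nrings);
			if (credits >= budget)
				return credits;
		}
	return credits;
}
#endif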
2633 for (i = 0; i < N_RX_COMP_RINGS; i++) {
2635 for (j = 0; j < N_RX_COMP_RINGS; j++) {
2636 credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
2637 if (credits >= budget) {
2645 /* final rx completion */
2646 spin_lock_irqsave(&cp->lock, flags);
2648 cas_handle_irq(dev, cp, status);
2651 if (N_RX_COMP_RINGS > 1) {
2652 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
cas_handle_irq1(cp, status);
2659 if (N_RX_COMP_RINGS > 2) {
2660 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
2662 cas_handle_irqN(dev, cp, status, 2);
2667 if (N_RX_COMP_RINGS > 3) {
2668 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
2670 cas_handle_irqN(dev, cp, status, 3);
2673 spin_unlock_irqrestore(&cp->lock, flags);
2675 netif_rx_complete(dev, napi);
2676 cas_unmask_intr(cp);
2682 #ifdef CONFIG_NET_POLL_CONTROLLER
2683 static void cas_netpoll(struct net_device *dev)
2685 struct cas *cp = netdev_priv(dev);
2687 cas_disable_irq(cp, 0);
2688 cas_interrupt(cp->pdev->irq, dev);
2689 cas_enable_irq(cp, 0);
2692 if (N_RX_COMP_RINGS > 1) {
2693 /* cas_interrupt1(); */
2697 if (N_RX_COMP_RINGS > 2) {
2698 /* cas_interruptN(); */
2702 if (N_RX_COMP_RINGS > 3) {
2703 /* cas_interruptN(); */
2709 static void cas_tx_timeout(struct net_device *dev)
2711 struct cas *cp = netdev_priv(dev);
2713 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
2714 if (!cp->hw_running) {
2715 printk("%s: hrm.. hw not running!\n", dev->name);
2719 printk(KERN_ERR "%s: MIF_STATE[%08x]\n",
2720 dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE));
2722 printk(KERN_ERR "%s: MAC_STATE[%08x]\n",
2723 dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE));
2725 printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] "
2726 "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
2728 readl(cp->regs + REG_TX_CFG),
2729 readl(cp->regs + REG_MAC_TX_STATUS),
2730 readl(cp->regs + REG_MAC_TX_CFG),
2731 readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2732 readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2733 readl(cp->regs + REG_TX_FIFO_READ_PTR),
2734 readl(cp->regs + REG_TX_SM_1),
2735 readl(cp->regs + REG_TX_SM_2));
2737 printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
2739 readl(cp->regs + REG_RX_CFG),
2740 readl(cp->regs + REG_MAC_RX_STATUS),
2741 readl(cp->regs + REG_MAC_RX_CFG));
2743 printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n",
2745 readl(cp->regs + REG_HP_STATE_MACHINE),
2746 readl(cp->regs + REG_HP_STATUS0),
2747 readl(cp->regs + REG_HP_STATUS1),
2748 readl(cp->regs + REG_HP_STATUS2));
2751 atomic_inc(&cp->reset_task_pending);
2752 atomic_inc(&cp->reset_task_pending_all);
2753 schedule_work(&cp->reset_task);
2755 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
2756 schedule_work(&cp->reset_task);
2760 static inline int cas_intme(int ring, int entry)
2762 /* Algorithm: IRQ every 1/2 of descriptors. */
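/* Worked example: for a 128-entry ring, TX_DESC_RINGN_SIZE(ring) >> 1
 * is 64, the mask below is 63, and the test succeeds only for entries
 * 0 and 64 -- i.e. an interrupt is requested twice per trip around
 * the ring.
 */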
2763 if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
2769 static void cas_write_txd(struct cas *cp, int ring, int entry,
2770 dma_addr_t mapping, int len, u64 ctrl, int last)
2772 struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2774 ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2775 if (cas_intme(ring, entry))
2776 ctrl |= TX_DESC_INTME;
2778 ctrl |= TX_DESC_EOF;
2779 txd->control = cpu_to_le64(ctrl);
2780 txd->buffer = cpu_to_le64(mapping);
2783 static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2786 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2789 static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2790 const int entry, const int tentry)
2792 cp->tx_tiny_use[ring][tentry].nbufs++;
2793 cp->tx_tiny_use[ring][entry].used = 1;
2794 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2797 static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2798 struct sk_buff *skb)
2800 struct net_device *dev = cp->dev;
2801 int entry, nr_frags, frag, tabort, tentry;
2803 unsigned long flags;
2807 spin_lock_irqsave(&cp->tx_lock[ring], flags);
2809 /* This is a hard error, log it. */
2810 if (TX_BUFFS_AVAIL(cp, ring) <=
2811 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2812 netif_stop_queue(dev);
2813 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2814 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
2815 "queue awake!\n", dev->name);
2820 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2821 const u64 csum_start_off = skb_transport_offset(skb);
2822 const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
2824 ctrl = TX_DESC_CSUM_EN |
2825 CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
2826 CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
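/* Worked example of the two offsets above for a plain IPv4/TCP frame
 * (14-byte Ethernet header, 20-byte IP header): csum_start_off is 34
 * and csum_stuff_off is 34 + 16 = 50, so the chip begins summing at
 * the TCP header and stuffs the result into the TCP checksum field.
 */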
2829 entry = cp->tx_new[ring];
2830 cp->tx_skbs[ring][entry] = skb;
2832 nr_frags = skb_shinfo(skb)->nr_frags;
2833 len = skb_headlen(skb);
2834 mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
2835 offset_in_page(skb->data), len,
2839 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
2840 if (unlikely(tabort)) {
2841 /* NOTE: len is always > tabort */
2842 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2843 ctrl | TX_DESC_SOF, 0);
2844 entry = TX_DESC_NEXT(ring, entry);
2846 skb_copy_from_linear_data_offset(skb, len - tabort,
2847 tx_tiny_buf(cp, ring, entry), tabort);
2848 mapping = tx_tiny_map(cp, ring, entry, tentry);
2849 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2852 cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2853 TX_DESC_SOF, (nr_frags == 0));
2855 entry = TX_DESC_NEXT(ring, entry);
2857 for (frag = 0; frag < nr_frags; frag++) {
2858 skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
2861 mapping = pci_map_page(cp->pdev, fragp->page,
2862 fragp->page_offset, len,
2865 tabort = cas_calc_tabort(cp, fragp->page_offset, len);
2866 if (unlikely(tabort)) {
2869 /* NOTE: len is always > tabort */
2870 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2872 entry = TX_DESC_NEXT(ring, entry);
2874 addr = cas_page_map(fragp->page);
2875 memcpy(tx_tiny_buf(cp, ring, entry),
2876 addr + fragp->page_offset + len - tabort,
2878 cas_page_unmap(addr);
2879 mapping = tx_tiny_map(cp, ring, entry, tentry);
2883 cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2884 (frag + 1 == nr_frags));
2885 entry = TX_DESC_NEXT(ring, entry);
2888 cp->tx_new[ring] = entry;
2889 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2890 netif_stop_queue(dev);
2892 if (netif_msg_tx_queued(cp))
2893 printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, "
2895 dev->name, ring, entry, skb->len,
2896 TX_BUFFS_AVAIL(cp, ring));
2897 writel(entry, cp->regs + REG_TX_KICKN(ring));
2898 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2902 static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2904 struct cas *cp = netdev_priv(dev);
2906 /* this is only used as a load-balancing hint, so it doesn't
2907 * need to be SMP safe
2911 if (skb_padto(skb, cp->min_frame_size))
2914 /* XXX: we need some higher-level QoS hooks to steer packets to
2915 * individual queues.
2917 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2919 dev->trans_start = jiffies;
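/* Sketch of the round-robin ring selection above (illustration only;
 * pick_tx_ring and nrings are hypothetical names): a static counter
 * masked by a power-of-two ring count spreads successive packets
 * across the TX rings.
 */
#if 0
static unsigned int pick_tx_ring(unsigned int nrings) /* power of 2 */
{
	static unsigned int ring;	/* racy on SMP, but only a hint */
	return ring++ & (nrings - 1);
}
#endif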
2923 static void cas_init_tx_dma(struct cas *cp)
2925 u64 desc_dma = cp->block_dvma;
2930 /* set up tx completion writeback registers. must be 8-byte aligned */
2931 #ifdef USE_TX_COMPWB
2932 off = offsetof(struct cas_init_block, tx_compwb);
2933 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
2934 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
2937 /* enable completion writebacks, enable paced mode,
2938 * disable read pipe, and disable pre-interrupt compwbs
2940 val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
2941 TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
2942 TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
2943 TX_CFG_INTR_COMPWB_DIS;
2945 /* write out tx ring info and tx desc bases */
2946 for (i = 0; i < MAX_TX_RINGS; i++) {
2947 off = (unsigned long) cp->init_txds[i] -
2948 (unsigned long) cp->init_block;
2950 val |= CAS_TX_RINGN_BASE(i);
2951 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
2952 writel((desc_dma + off) & 0xffffffff, cp->regs +
2954 /* don't zero out the kick register here as the system
2958 writel(val, cp->regs + REG_TX_CFG);
/* program max burst sizes. these numbers should be different
 * if doing QoS.
 */
2964 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2965 writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
2966 writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
2967 writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
2969 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2970 writel(0x800, cp->regs + REG_TX_MAXBURST_1);
2971 writel(0x800, cp->regs + REG_TX_MAXBURST_2);
2972 writel(0x800, cp->regs + REG_TX_MAXBURST_3);
2976 /* Must be invoked under cp->lock. */
2977 static inline void cas_init_dma(struct cas *cp)
2979 cas_init_tx_dma(cp);
2980 cas_init_rx_dma(cp);
2983 /* Must be invoked under cp->lock. */
2984 static u32 cas_setup_multicast(struct cas *cp)
2989 if (cp->dev->flags & IFF_PROMISC) {
2990 rxcfg |= MAC_RX_CFG_PROMISC_EN;
2992 } else if (cp->dev->flags & IFF_ALLMULTI) {
2993 for (i=0; i < 16; i++)
2994 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2995 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
3000 struct dev_mc_list *dmi = cp->dev->mc_list;
3003 /* use the alternate mac address registers for the
3004 * first 15 multicast addresses
3006 for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) {
3008 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0));
3009 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1));
3010 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2));
3013 writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
3014 cp->regs + REG_MAC_ADDRN(i*3 + 0));
3015 writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
3016 cp->regs + REG_MAC_ADDRN(i*3 + 1));
3017 writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
3018 cp->regs + REG_MAC_ADDRN(i*3 + 2));
3022 /* use hw hash table for the next series of
3023 * multicast addresses
3025 memset(hash_table, 0, sizeof(hash_table));
crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
crc >>= 24;
hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
3032 for (i=0; i < 16; i++)
3033 writel(hash_table[i], cp->regs +
3034 REG_MAC_HASH_TABLEN(i));
3035 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
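/* Standalone sketch of the hash computation above (hash_set is a
 * hypothetical name): the top 8 bits of the little-endian CRC of the
 * address select one bit out of the 256-bit table held in sixteen
 * 16-bit registers.
 */
#if 0
static void hash_set(u16 hash_table[16], const u8 *addr)
{
	u32 crc = ether_crc_le(ETH_ALEN, addr) >> 24;

	hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
}
#endif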
3041 /* must be invoked under cp->stat_lock[N_TX_RINGS] */
3042 static void cas_clear_mac_err(struct cas *cp)
3044 writel(0, cp->regs + REG_MAC_COLL_NORMAL);
3045 writel(0, cp->regs + REG_MAC_COLL_FIRST);
3046 writel(0, cp->regs + REG_MAC_COLL_EXCESS);
3047 writel(0, cp->regs + REG_MAC_COLL_LATE);
3048 writel(0, cp->regs + REG_MAC_TIMER_DEFER);
3049 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
3050 writel(0, cp->regs + REG_MAC_RECV_FRAME);
3051 writel(0, cp->regs + REG_MAC_LEN_ERR);
3052 writel(0, cp->regs + REG_MAC_ALIGN_ERR);
3053 writel(0, cp->regs + REG_MAC_FCS_ERR);
3054 writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
3058 static void cas_mac_reset(struct cas *cp)
3062 /* do both TX and RX reset */
3063 writel(0x1, cp->regs + REG_MAC_TX_RESET);
3064 writel(0x1, cp->regs + REG_MAC_RX_RESET);
3069 if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
3077 if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
3082 if (readl(cp->regs + REG_MAC_TX_RESET) |
3083 readl(cp->regs + REG_MAC_RX_RESET))
3084 printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n",
3085 cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET),
3086 readl(cp->regs + REG_MAC_RX_RESET),
3087 readl(cp->regs + REG_MAC_STATE_MACHINE));
3091 /* Must be invoked under cp->lock. */
3092 static void cas_init_mac(struct cas *cp)
3094 unsigned char *e = &cp->dev->dev_addr[0];
3096 #ifdef CONFIG_CASSINI_MULTICAST_REG_WRITE
3101 /* setup core arbitration weight register */
3102 writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3104 /* XXX Use pci_dma_burst_advice() */
3105 #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
3106 /* set the infinite burst register for chips that don't have
3109 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3110 writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3113 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3115 writel(0x00, cp->regs + REG_MAC_IPG0);
3116 writel(0x08, cp->regs + REG_MAC_IPG1);
3117 writel(0x04, cp->regs + REG_MAC_IPG2);
3119 /* change later for 802.3z */
3120 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3122 /* min frame + FCS */
3123 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3125 /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
* specify the maximum frame size to prevent RX tag errors on
* oversized frames.
3129 writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3130 CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3131 (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3132 cp->regs + REG_MAC_FRAMESIZE_MAX);
3134 /* NOTE: crc_size is used as a surrogate for half-duplex.
* workaround saturn half-duplex issue by increasing preamble
* size to 65 bytes.
3138 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3139 writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3141 writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3142 writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3143 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3144 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3146 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3148 writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3149 writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3150 writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3151 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3152 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3154 /* setup mac address in perfect filter array */
3155 for (i = 0; i < 45; i++)
3156 writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3158 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3159 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3160 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3162 writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3163 writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3164 writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
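/* Note on the packing above: each MAC address occupies three 16-bit
 * registers, low-order bytes first. The values written to registers
 * 42-44 decode, under the same scheme, to 01:80:c2:00:00:01 -- the
 * IEEE 802.3x pause (flow control) multicast address.
 */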
3166 #ifndef CONFIG_CASSINI_MULTICAST_REG_WRITE
3167 cp->mac_rx_cfg = cas_setup_multicast(cp);
3169 /* WTZ: Do what Adrian did in cas_set_multicast. Doing
3170 * a writel does not seem to be necessary because Cassini
3171 * seems to preserve the configuration when we do the reset.
3172 * If the chip is in trouble, though, it is not clear if we
3173 * can really count on this behavior. cas_set_multicast uses
3174 * spin_lock_irqsave, but we are called only in cas_init_hw and
3175 * cas_init_hw is protected by cas_lock_all, which calls
3176 * spin_lock_irq (so it doesn't need to save the flags, and
3177 * we should be OK for the writel, as that is the only
3180 cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp);
3181 writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
3183 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3184 cas_clear_mac_err(cp);
3185 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3187 /* Setup MAC interrupts. We want to get all of the interesting
3188 * counter expiration events, but we do not want to hear about
3189 * normal rx/tx as the DMA engine tells us that.
3191 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3192 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3194 /* Don't enable even the PAUSE interrupts for now, we
3195 * make no use of those events other than to record them.
3197 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3200 /* Must be invoked under cp->lock. */
3201 static void cas_init_pause_thresholds(struct cas *cp)
3203 /* Calculate pause thresholds. Setting the OFF threshold to the
3204 * full RX fifo size effectively disables PAUSE generation
3206 if (cp->rx_fifo_size <= (2 * 1024)) {
3207 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3209 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3210 if (max_frame * 3 > cp->rx_fifo_size) {
3211 cp->rx_pause_off = 7104;
3212 cp->rx_pause_on = 960;
3214 int off = (cp->rx_fifo_size - (max_frame * 2));
3215 int on = off - max_frame;
3216 cp->rx_pause_off = off;
3217 cp->rx_pause_on = on;
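/* Worked example, assuming a 16 KB RX FIFO and a 1500-byte MTU:
 * max_frame = (1500 + 14 + 4 + 4 + 64) & ~63 = 1536, three frames fit
 * comfortably, so the OFF threshold is 16384 - 2 * 1536 = 13312 bytes
 * and the ON threshold is 13312 - 1536 = 11776 bytes.
 */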
3222 static int cas_vpd_match(const void __iomem *p, const char *str)
3224 int len = strlen(str) + 1;
3227 for (i = 0; i < len; i++) {
3228 if (readb(p + i) != str[i])
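/* Usage: the VPD scan below calls, e.g.,
 * cas_vpd_match(p + 5, "local-mac-address") to compare the property
 * string in the ROM byte-by-byte via readb(), matching only if every
 * byte, including the terminating NUL, agrees.
 */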
3235 /* get the mac address by reading the vpd information in the rom.
3236 * also get the phy type and determine if there's an entropy generator.
3237 * NOTE: this is a bit convoluted for the following reasons:
3238 * 1) vpd info has order-dependent mac addresses for multinic cards
* 2) the only way to determine the nic order is to use the slot
*    number.
* 3) fiber cards don't have bridges, so their slot numbers don't
*    mean anything.
3243 * 4) we don't actually know we have a fiber card until after
3244 * the mac addresses are parsed.
3246 static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3249 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3250 void __iomem *base, *kstart;
3253 #define VPD_FOUND_MAC 0x01
3254 #define VPD_FOUND_PHY 0x02
3256 int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
3259 /* give us access to the PROM */
3260 writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3261 cp->regs + REG_BIM_LOCAL_DEV_EN);
3263 /* check for an expansion rom */
3264 if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
3265 goto use_random_mac_addr;
3267 /* search for beginning of vpd */
3269 for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
3270 /* check for PCIR */
3271 if ((readb(p + i + 0) == 0x50) &&
3272 (readb(p + i + 1) == 0x43) &&
3273 (readb(p + i + 2) == 0x49) &&
3274 (readb(p + i + 3) == 0x52)) {
3275 base = p + (readb(p + i + 8) |
3276 (readb(p + i + 9) << 8));
3281 if (!base || (readb(base) != 0x82))
3282 goto use_random_mac_addr;
3284 i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
3285 while (i < EXPANSION_ROM_SIZE) {
3286 if (readb(base + i) != 0x90) /* no vpd found */
3287 goto use_random_mac_addr;
3289 /* found a vpd field */
3290 len = readb(base + i + 1) | (readb(base + i + 2) << 8);
3292 /* extract keywords */
3293 kstart = base + i + 3;
3295 while ((p - kstart) < len) {
3296 int klen = readb(p + 2);
3302 /* look for the following things:
3303 * -- correct length == 29
3304 * 3 (type) + 2 (size) +
3305 * 18 (strlen("local-mac-address") + 1) +
3307 * -- VPD Instance 'I'
3308 * -- VPD Type Bytes 'B'
3309 * -- VPD data length == 6
3310 * -- property string == local-mac-address
3312 * -- correct length == 24
3313 * 3 (type) + 2 (size) +
3314 * 12 (strlen("entropy-dev") + 1) +
3315 * 7 (strlen("vms110") + 1)
3316 * -- VPD Instance 'I'
3317 * -- VPD Type String 'B'
3318 * -- VPD data length == 7
3319 * -- property string == entropy-dev
3321 * -- correct length == 18
3322 * 3 (type) + 2 (size) +
3323 * 9 (strlen("phy-type") + 1) +
3324 * 4 (strlen("pcs") + 1)
3325 * -- VPD Instance 'I'
3326 * -- VPD Type String 'S'
3327 * -- VPD data length == 4
3328 * -- property string == phy-type
3330 * -- correct length == 23
3331 * 3 (type) + 2 (size) +
3332 * 14 (strlen("phy-interface") + 1) +
3333 * 4 (strlen("pcs") + 1)
3334 * -- VPD Instance 'I'
3335 * -- VPD Type String 'S'
3336 * -- VPD data length == 4
3337 * -- property string == phy-interface
3339 if (readb(p) != 'I')
3342 /* finally, check string and length */
3343 type = readb(p + 3);
3345 if ((klen == 29) && readb(p + 4) == 6 &&
3346 cas_vpd_match(p + 5,
3347 "local-mac-address")) {
3348 if (mac_off++ > offset)
3351 /* set mac address */
3352 for (j = 0; j < 6; j++)
3362 #ifdef USE_ENTROPY_DEV
3364 cas_vpd_match(p + 5, "entropy-dev") &&
3365 cas_vpd_match(p + 17, "vms110")) {
3366 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3371 if (found & VPD_FOUND_PHY)
3374 if ((klen == 18) && readb(p + 4) == 4 &&
3375 cas_vpd_match(p + 5, "phy-type")) {
3376 if (cas_vpd_match(p + 14, "pcs")) {
3377 phy_type = CAS_PHY_SERDES;
3382 if ((klen == 23) && readb(p + 4) == 4 &&
3383 cas_vpd_match(p + 5, "phy-interface")) {
3384 if (cas_vpd_match(p + 19, "pcs")) {
3385 phy_type = CAS_PHY_SERDES;
3390 found |= VPD_FOUND_MAC;
3394 found |= VPD_FOUND_PHY;
3402 use_random_mac_addr:
3403 if (found & VPD_FOUND_MAC)
3406 /* Sun MAC prefix then 3 random bytes. */
3407 printk(PFX "MAC address not found in ROM VPD\n");
3411 get_random_bytes(dev_addr + 3, 3);
3414 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3418 /* check pci invariants */
3419 static void cas_check_pci_invariants(struct cas *cp)
3421 struct pci_dev *pdev = cp->pdev;
3424 if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3425 (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3426 if (pdev->revision >= CAS_ID_REVPLUS)
3427 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3428 if (pdev->revision < CAS_ID_REVPLUS02u)
3429 cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3431 /* Original Cassini supports HW CSUM, but it's not
3432 * enabled by default as it can trigger TX hangs.
3434 if (pdev->revision < CAS_ID_REV2)
3435 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3437 /* Only sun has original cassini chips. */
3438 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3440 /* We use a flag because the same phy might be externally
3443 if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3444 (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3445 cp->cas_flags |= CAS_FLAG_SATURN;
3450 static int cas_check_invariants(struct cas *cp)
3452 struct pci_dev *pdev = cp->pdev;
3456 /* get page size for rx buffers. */
3458 #ifdef USE_PAGE_ORDER
3459 if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3460 /* see if we can allocate larger pages */
3461 struct page *page = alloc_pages(GFP_ATOMIC,
3462 CAS_JUMBO_PAGE_SHIFT -
3465 __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
3466 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3468 printk(PFX "MTU limited to %d bytes\n", CAS_MAX_MTU);
3472 cp->page_size = (PAGE_SIZE << cp->page_order);
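/* Sketch of the probe above (probe_page_order is a hypothetical
 * name): optimistically ask for a compound page for jumbo RX buffers
 * and fall back to order 0 if the allocator cannot satisfy it.
 */
#if 0
static int probe_page_order(int order)
{
	struct page *page = alloc_pages(GFP_ATOMIC, order);

	if (!page)
		return 0;		/* fall back to single pages */
	__free_pages(page, order);	/* just probing; free it again */
	return order;
}
#endif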
3474 /* Fetch the FIFO configurations. */
3475 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3476 cp->rx_fifo_size = RX_FIFO_SIZE;
3478 /* finish phy determination. MDIO1 takes precedence over MDIO0 if
3479 * they're both connected.
3481 cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
3482 PCI_SLOT(pdev->devfn));
3483 if (cp->phy_type & CAS_PHY_SERDES) {
3484 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3485 return 0; /* no more checking needed */
3489 cfg = readl(cp->regs + REG_MIF_CFG);
3490 if (cfg & MIF_CFG_MDIO_1) {
3491 cp->phy_type = CAS_PHY_MII_MDIO1;
3492 } else if (cfg & MIF_CFG_MDIO_0) {
3493 cp->phy_type = CAS_PHY_MII_MDIO0;
3496 cas_mif_poll(cp, 0);
3497 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3499 for (i = 0; i < 32; i++) {
3503 for (j = 0; j < 3; j++) {
3505 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3506 phy_id |= cas_phy_read(cp, MII_PHYSID2);
3507 if (phy_id && (phy_id != 0xFFFFFFFF)) {
3508 cp->phy_id = phy_id;
3513 printk(KERN_ERR PFX "MII phy did not respond [%08x]\n",
3514 readl(cp->regs + REG_MIF_STATE_MACHINE));
3518 /* see if we can do gigabit */
3519 cfg = cas_phy_read(cp, MII_BMSR);
3520 if ((cfg & CAS_BMSR_1000_EXTEND) &&
3521 cas_phy_read(cp, CAS_MII_1000_EXTEND))
3522 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3526 /* Must be invoked under cp->lock. */
3527 static inline void cas_start_dma(struct cas *cp)
3534 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3535 writel(val, cp->regs + REG_TX_CFG);
3536 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
3537 writel(val, cp->regs + REG_RX_CFG);
3539 /* enable the mac */
3540 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
3541 writel(val, cp->regs + REG_MAC_TX_CFG);
3542 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
3543 writel(val, cp->regs + REG_MAC_RX_CFG);
3547 val = readl(cp->regs + REG_MAC_TX_CFG);
3548 if ((val & MAC_TX_CFG_EN))
3552 if (i < 0) txfailed = 1;
3555 val = readl(cp->regs + REG_MAC_RX_CFG);
3556 if ((val & MAC_RX_CFG_EN)) {
3559 "%s: enabling mac failed [tx:%08x:%08x].\n",
3561 readl(cp->regs + REG_MIF_STATE_MACHINE),
3562 readl(cp->regs + REG_MAC_STATE_MACHINE));
3564 goto enable_rx_done;
3568 printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n",
3570 (txfailed? "tx,rx":"rx"),
3571 readl(cp->regs + REG_MIF_STATE_MACHINE),
3572 readl(cp->regs + REG_MAC_STATE_MACHINE));
3575 cas_unmask_intr(cp); /* enable interrupts */
3576 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
3577 writel(0, cp->regs + REG_RX_COMP_TAIL);
3579 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3580 if (N_RX_DESC_RINGS > 1)
3581 writel(RX_DESC_RINGN_SIZE(1) - 4,
3582 cp->regs + REG_PLUS_RX_KICK1);
3584 for (i = 1; i < N_RX_COMP_RINGS; i++)
3585 writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
3589 /* Must be invoked under cp->lock. */
3590 static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3593 u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3594 *fd = (val & PCS_MII_LPA_FD) ? 1 : 0;
3595 *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3596 if (val & PCS_MII_LPA_ASYM_PAUSE)
3601 /* Must be invoked under cp->lock. */
3602 static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3611 /* use GMII registers */
3612 val = cas_phy_read(cp, MII_LPA);
3613 if (val & CAS_LPA_PAUSE)
3616 if (val & CAS_LPA_ASYM_PAUSE)
3619 if (val & LPA_DUPLEX)
3624 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3625 val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3626 if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3628 if (val & CAS_LPA_1000FULL)
3633 /* A link-up condition has occurred, initialize and enable the
3636 * Must be invoked under cp->lock.
3638 static void cas_set_link_modes(struct cas *cp)
3641 int full_duplex, speed, pause;
3647 if (CAS_PHY_MII(cp->phy_type)) {
3648 cas_mif_poll(cp, 0);
3649 val = cas_phy_read(cp, MII_BMCR);
3650 if (val & BMCR_ANENABLE) {
3651 cas_read_mii_link_mode(cp, &full_duplex, &speed,
3654 if (val & BMCR_FULLDPLX)
3657 if (val & BMCR_SPEED100)
3659 else if (val & CAS_BMCR_SPEED1000)
3660 speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
3663 cas_mif_poll(cp, 1);
3666 val = readl(cp->regs + REG_PCS_MII_CTRL);
3667 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
3668 if ((val & PCS_MII_AUTONEG_EN) == 0) {
3669 if (val & PCS_MII_CTRL_DUPLEX)
3674 if (netif_msg_link(cp))
3675 printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n",
3676 cp->dev->name, speed, (full_duplex ? "full" : "half"));
3678 val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
3679 if (CAS_PHY_MII(cp->phy_type)) {
3680 val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
3682 val |= MAC_XIF_DISABLE_ECHO;
3685 val |= MAC_XIF_FDPLX_LED;
3687 val |= MAC_XIF_GMII_MODE;
3688 writel(val, cp->regs + REG_MAC_XIF_CFG);
3690 /* deal with carrier and collision detect. */
3691 val = MAC_TX_CFG_IPG_EN;
3693 val |= MAC_TX_CFG_IGNORE_CARRIER;
3694 val |= MAC_TX_CFG_IGNORE_COLL;
3696 #ifndef USE_CSMA_CD_PROTO
3697 val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
3698 val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
3701 /* val now set up for REG_MAC_TX_CFG */
3703 /* If gigabit and half-duplex, enable carrier extension
3704 * mode. increase slot time to 512 bytes as well.
3705 * else, disable it and make sure slot time is 64 bytes.
3706 * also activate checksum bug workaround
3708 if ((speed == 1000) && !full_duplex) {
3709 writel(val | MAC_TX_CFG_CARRIER_EXTEND,
3710 cp->regs + REG_MAC_TX_CFG);
3712 val = readl(cp->regs + REG_MAC_RX_CFG);
3713 val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
3714 writel(val | MAC_RX_CFG_CARRIER_EXTEND,
3715 cp->regs + REG_MAC_RX_CFG);
3717 writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
3720 /* minimum size gigabit frame at half duplex */
3721 cp->min_frame_size = CAS_1000MB_MIN_FRAME;
3724 writel(val, cp->regs + REG_MAC_TX_CFG);
3726 /* checksum bug workaround. don't strip FCS when in
3729 val = readl(cp->regs + REG_MAC_RX_CFG);
3731 val |= MAC_RX_CFG_STRIP_FCS;
3733 cp->min_frame_size = CAS_MIN_MTU;
3735 val &= ~MAC_RX_CFG_STRIP_FCS;
3737 cp->min_frame_size = CAS_MIN_FRAME;
3739 writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
3740 cp->regs + REG_MAC_RX_CFG);
3741 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
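/* Rationale: at 1000 Mbps half-duplex, 802.3 carrier extension pads
 * frames so collisions remain detectable, which requires the 512-byte
 * (0x200) slot time programmed above; all other modes use the classic
 * 64-byte (0x40) slot.
 */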
3744 if (netif_msg_link(cp)) {
3746 printk(KERN_INFO "%s: Pause is enabled "
3747 "(rxfifo: %d off: %d on: %d)\n",
3752 } else if (pause & 0x10) {
3753 printk(KERN_INFO "%s: TX pause enabled\n",
3756 printk(KERN_INFO "%s: Pause is disabled\n",
3761 val = readl(cp->regs + REG_MAC_CTRL_CFG);
3762 val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
3763 if (pause) { /* symmetric or asymmetric pause */
3764 val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
3765 if (pause & 0x01) { /* symmetric pause */
3766 val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
3769 writel(val, cp->regs + REG_MAC_CTRL_CFG);
3773 /* Must be invoked under cp->lock. */
3774 static void cas_init_hw(struct cas *cp, int restart_link)
3779 cas_init_pause_thresholds(cp);
3784 /* Default aneg parameters */
3785 cp->timer_ticks = 0;
3786 cas_begin_auto_negotiation(cp, NULL);
3787 } else if (cp->lstate == link_up) {
3788 cas_set_link_modes(cp);
3789 netif_carrier_on(cp->dev);
3793 /* Must be invoked under cp->lock. on earlier cassini boards,
3794 * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
3795 * let it settle out, and then restore pci state.
3797 static void cas_hard_reset(struct cas *cp)
3799 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3801 pci_restore_state(cp->pdev);
3805 static void cas_global_reset(struct cas *cp, int blkflag)
3809 /* issue a global reset. don't use RSTOUT. */
3810 if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
3811 /* For PCS, when the blkflag is set, we should set the
3812 * SW_REST_BLOCK_PCS_SLINK bit to prevent the results of
3813 * the last autonegotiation from being cleared. We'll
3814 * need some special handling if the chip is set into a
3817 writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
3818 cp->regs + REG_SW_RESET);
3820 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
3823 /* need to wait at least 3ms before polling register */
3827 while (limit-- > 0) {
3828 u32 val = readl(cp->regs + REG_SW_RESET);
3829 if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
3833 printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name);
3836 /* enable various BIM interrupts */
3837 writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
3838 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3840 /* clear out pci error status mask for handled errors.
* we don't deal with DMA counter overflows as they happen
* so frequently.
3844 writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
3845 PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
3846 PCI_ERR_BIM_DMA_READ), cp->regs +
3847 REG_PCI_ERR_STATUS_MASK);
3849 /* set up for MII by default to address mac rx reset timeout
3852 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3855 static void cas_reset(struct cas *cp, int blkflag)
3860 cas_global_reset(cp, blkflag);
3862 cas_entropy_reset(cp);
3864 /* disable dma engines. */
3865 val = readl(cp->regs + REG_TX_CFG);
3866 val &= ~TX_CFG_DMA_EN;
3867 writel(val, cp->regs + REG_TX_CFG);
3869 val = readl(cp->regs + REG_RX_CFG);
3870 val &= ~RX_CFG_DMA_EN;
3871 writel(val, cp->regs + REG_RX_CFG);
3873 /* program header parser */
3874 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3875 (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
3876 cas_load_firmware(cp, CAS_HP_FIRMWARE);
3878 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3881 /* clear out error registers */
3882 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3883 cas_clear_mac_err(cp);
3884 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3887 /* Shut down the chip, must be called with pm_mutex held. */
3888 static void cas_shutdown(struct cas *cp)
3890 unsigned long flags;
3892 /* Make us not-running to avoid timers respawning */
3895 del_timer_sync(&cp->link_timer);
3897 /* Stop the reset task */
3899 while (atomic_read(&cp->reset_task_pending_mtu) ||
3900 atomic_read(&cp->reset_task_pending_spare) ||
3901 atomic_read(&cp->reset_task_pending_all))
3905 while (atomic_read(&cp->reset_task_pending))
3908 /* Actually stop the chip */
3909 cas_lock_all_save(cp, flags);
3911 if (cp->cas_flags & CAS_FLAG_SATURN)
3912 cas_phy_powerdown(cp);
3913 cas_unlock_all_restore(cp, flags);
3916 static int cas_change_mtu(struct net_device *dev, int new_mtu)
3918 struct cas *cp = netdev_priv(dev);
3920 if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
3924 if (!netif_running(dev) || !netif_device_present(dev))
3927 /* let the reset task handle it */
3929 atomic_inc(&cp->reset_task_pending);
3930 if ((cp->phy_type & CAS_PHY_SERDES)) {
3931 atomic_inc(&cp->reset_task_pending_all);
3933 atomic_inc(&cp->reset_task_pending_mtu);
3935 schedule_work(&cp->reset_task);
3937 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3938 CAS_RESET_ALL : CAS_RESET_MTU);
3939 printk(KERN_ERR "reset called in cas_change_mtu\n");
3940 schedule_work(&cp->reset_task);
3943 flush_scheduled_work();
3947 static void cas_clean_txd(struct cas *cp, int ring)
3949 struct cas_tx_desc *txd = cp->init_txds[ring];
3950 struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3954 size = TX_DESC_RINGN_SIZE(ring);
3955 for (i = 0; i < size; i++) {
3958 if (skbs[i] == NULL)
3964 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
3965 int ent = i & (size - 1);
3967 /* first buffer is never a tiny buffer and so
3968 * needs to be unmapped.
3970 daddr = le64_to_cpu(txd[ent].buffer);
3971 dlen = CAS_VAL(TX_DESC_BUFLEN,
3972 le64_to_cpu(txd[ent].control));
3973 pci_unmap_page(cp->pdev, daddr, dlen,
3976 if (frag != skb_shinfo(skb)->nr_frags) {
/* next buffer might be a tiny buffer.
 * skip past it.
 */
3982 ent = i & (size - 1);
3983 if (cp->tx_tiny_use[ring][ent].used)
3987 dev_kfree_skb_any(skb);
3990 /* zero out tiny buf usage */
3991 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
3994 /* freed on close */
3995 static inline void cas_free_rx_desc(struct cas *cp, int ring)
3997 cas_page_t **page = cp->rx_pages[ring];
4000 size = RX_DESC_RINGN_SIZE(ring);
4001 for (i = 0; i < size; i++) {
4003 cas_page_free(cp, page[i]);
4009 static void cas_free_rxds(struct cas *cp)
4013 for (i = 0; i < N_RX_DESC_RINGS; i++)
4014 cas_free_rx_desc(cp, i);
4017 /* Must be invoked under cp->lock. */
4018 static void cas_clean_rings(struct cas *cp)
4022 /* need to clean all tx rings */
4023 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
4024 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
4025 for (i = 0; i < N_TX_RINGS; i++)
4026 cas_clean_txd(cp, i);
4028 /* zero out init block */
4029 memset(cp->init_block, 0, sizeof(struct cas_init_block));
4034 /* allocated on open */
4035 static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
4037 cas_page_t **page = cp->rx_pages[ring];
4040 size = RX_DESC_RINGN_SIZE(ring);
4041 for (i = 0; i < size; i++) {
4042 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
4048 static int cas_alloc_rxds(struct cas *cp)
4052 for (i = 0; i < N_RX_DESC_RINGS; i++) {
4053 if (cas_alloc_rx_desc(cp, i) < 0) {
4061 static void cas_reset_task(struct work_struct *work)
4063 struct cas *cp = container_of(work, struct cas, reset_task);
4065 int pending = atomic_read(&cp->reset_task_pending);
4067 int pending_all = atomic_read(&cp->reset_task_pending_all);
4068 int pending_spare = atomic_read(&cp->reset_task_pending_spare);
4069 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
4071 if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
4072 /* We can have more tasks scheduled than actually
4075 atomic_dec(&cp->reset_task_pending);
4079 /* The link went down, we reset the ring, but keep
4080 * DMA stopped. Use this function for reset
4083 if (cp->hw_running) {
4084 unsigned long flags;
4086 /* Make sure we don't get interrupts or tx packets */
4087 netif_device_detach(cp->dev);
4088 cas_lock_all_save(cp, flags);
4091 /* We call cas_spare_recover when we call cas_open.
4092 * but we do not initialize the lists cas_spare_recover
4093 * uses until cas_open is called.
4095 cas_spare_recover(cp, GFP_ATOMIC);
4098 /* test => only pending_spare set */
4099 if (!pending_all && !pending_mtu)
4102 if (pending == CAS_RESET_SPARE)
4105 /* when pending == CAS_RESET_ALL, the following
4106 * call to cas_init_hw will restart auto negotiation.
4107 * Setting the second argument of cas_reset to
4108 * !(pending == CAS_RESET_ALL) will set this argument
4109 * to 1 (avoiding reinitializing the PHY for the normal
4110 * PCS case) when auto negotiation is not restarted.
4113 cas_reset(cp, !(pending_all > 0));
4115 cas_clean_rings(cp);
4116 cas_init_hw(cp, (pending_all > 0));
4118 cas_reset(cp, !(pending == CAS_RESET_ALL));
4120 cas_clean_rings(cp);
4121 cas_init_hw(cp, pending == CAS_RESET_ALL);
4125 cas_unlock_all_restore(cp, flags);
4126 netif_device_attach(cp->dev);
4129 atomic_sub(pending_all, &cp->reset_task_pending_all);
4130 atomic_sub(pending_spare, &cp->reset_task_pending_spare);
4131 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
4132 atomic_dec(&cp->reset_task_pending);
4134 atomic_set(&cp->reset_task_pending, 0);
4138 static void cas_link_timer(unsigned long data)
4140 struct cas *cp = (struct cas *) data;
4141 int mask, pending = 0, reset = 0;
4142 unsigned long flags;
4144 if (link_transition_timeout != 0 &&
4145 cp->link_transition_jiffies_valid &&
4146 ((jiffies - cp->link_transition_jiffies) >
4147 (link_transition_timeout))) {
4148 /* One-second counter so link-down workaround doesn't
4149 * cause resets to occur so fast as to fool the switch
4150 * into thinking the link is down.
4152 cp->link_transition_jiffies_valid = 0;
4155 if (!cp->hw_running)
4158 spin_lock_irqsave(&cp->lock, flags);
4160 cas_entropy_gather(cp);
4162 /* If the link task is still pending, we just
4163 * reschedule the link timer
4166 if (atomic_read(&cp->reset_task_pending_all) ||
4167 atomic_read(&cp->reset_task_pending_spare) ||
4168 atomic_read(&cp->reset_task_pending_mtu))
4171 if (atomic_read(&cp->reset_task_pending))
4175 /* check for rx cleaning */
4176 if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
4179 for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
4180 rmask = CAS_FLAG_RXD_POST(i);
4181 if ((mask & rmask) == 0)
4184 /* post_rxds will do a mod_timer */
4185 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
4189 cp->cas_flags &= ~rmask;
4193 if (CAS_PHY_MII(cp->phy_type)) {
4195 cas_mif_poll(cp, 0);
4196 bmsr = cas_phy_read(cp, MII_BMSR);
4197 /* WTZ: Solaris driver reads this twice, but that
4198 * may be due to the PCS case and the use of a
* common implementation. Read it twice here to be
* safe.
4202 bmsr = cas_phy_read(cp, MII_BMSR);
4203 cas_mif_poll(cp, 1);
4204 readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
4205 reset = cas_mii_link_check(cp, bmsr);
4207 reset = cas_pcs_link_check(cp);
4213 /* check for tx state machine confusion */
4214 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
4215 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
4217 int tlm = CAS_VAL(MAC_SM_TLM, val);
4219 if (((tlm == 0x5) || (tlm == 0x3)) &&
4220 (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
4221 if (netif_msg_tx_err(cp))
4222 printk(KERN_DEBUG "%s: tx err: "
4223 "MAC_STATE[%08x]\n",
4224 cp->dev->name, val);
4229 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
4230 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4231 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4232 if ((val == 0) && (wptr != rptr)) {
4233 if (netif_msg_tx_err(cp))
4234 printk(KERN_DEBUG "%s: tx err: "
4235 "TX_FIFO[%08x:%08x:%08x]\n",
4236 cp->dev->name, val, wptr, rptr);
4247 atomic_inc(&cp->reset_task_pending);
4248 atomic_inc(&cp->reset_task_pending_all);
4249 schedule_work(&cp->reset_task);
4251 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
4252 printk(KERN_ERR "reset called in cas_link_timer\n");
4253 schedule_work(&cp->reset_task);
4258 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
4260 spin_unlock_irqrestore(&cp->lock, flags);
/* tiny buffers are used to avoid target abort issues with
 * older cassini chips.
 */
4266 static void cas_tx_tiny_free(struct cas *cp)
4268 struct pci_dev *pdev = cp->pdev;
4271 for (i = 0; i < N_TX_RINGS; i++) {
4272 if (!cp->tx_tiny_bufs[i])
4275 pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
4276 cp->tx_tiny_bufs[i],
4277 cp->tx_tiny_dvma[i]);
4278 cp->tx_tiny_bufs[i] = NULL;
4282 static int cas_tx_tiny_alloc(struct cas *cp)
4284 struct pci_dev *pdev = cp->pdev;
4287 for (i = 0; i < N_TX_RINGS; i++) {
4288 cp->tx_tiny_bufs[i] =
4289 pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
4290 &cp->tx_tiny_dvma[i]);
4291 if (!cp->tx_tiny_bufs[i]) {
4292 cas_tx_tiny_free(cp);
4300 static int cas_open(struct net_device *dev)
4302 struct cas *cp = netdev_priv(dev);
4304 unsigned long flags;
4306 mutex_lock(&cp->pm_mutex);
4308 hw_was_up = cp->hw_running;
4310 /* The power-management mutex protects the hw_running
4311 * etc. state so it is safe to do this bit without cp->lock
4313 if (!cp->hw_running) {
4314 /* Reset the chip */
4315 cas_lock_all_save(cp, flags);
4316 /* We set the second arg to cas_reset to zero
4317 * because cas_init_hw below will have its second
4318 * argument set to non-zero, which will force
4319 * autonegotiation to start.
4323 cas_unlock_all_restore(cp, flags);
4326 if (cas_tx_tiny_alloc(cp) < 0)
4329 /* alloc rx descriptors */
4331 if (cas_alloc_rxds(cp) < 0)
4334 /* allocate spares */
4336 cas_spare_recover(cp, GFP_KERNEL);
4338 /* We can now request the interrupt as we know it's masked
4339 * on the controller. cassini+ has up to 4 interrupts
4340 * that can be used, but you need to do explicit pci interrupt
4341 * mapping to expose them
4343 if (request_irq(cp->pdev->irq, cas_interrupt,
4344 IRQF_SHARED, dev->name, (void *) dev)) {
4345 printk(KERN_ERR "%s: failed to request irq !\n",
4352 napi_enable(&cp->napi);
4355 cas_lock_all_save(cp, flags);
4356 cas_clean_rings(cp);
4357 cas_init_hw(cp, !hw_was_up);
4359 cas_unlock_all_restore(cp, flags);
4361 netif_start_queue(dev);
4362 mutex_unlock(&cp->pm_mutex);
4369 cas_tx_tiny_free(cp);
4370 mutex_unlock(&cp->pm_mutex);
4374 static int cas_close(struct net_device *dev)
4376 unsigned long flags;
4377 struct cas *cp = netdev_priv(dev);
napi_disable(&cp->napi);
4382 /* Make sure we don't get distracted by suspend/resume */
4383 mutex_lock(&cp->pm_mutex);
4385 netif_stop_queue(dev);
4387 /* Stop traffic, mark us closed */
4388 cas_lock_all_save(cp, flags);
4392 cas_begin_auto_negotiation(cp, NULL);
4393 cas_clean_rings(cp);
4394 cas_unlock_all_restore(cp, flags);
4396 free_irq(cp->pdev->irq, (void *) dev);
4399 cas_tx_tiny_free(cp);
4400 mutex_unlock(&cp->pm_mutex);
static struct {
const char name[ETH_GSTRING_LEN];
4406 } ethtool_cassini_statnames[] = {
4413 {"rx_frame_errors"},
4414 {"rx_length_errors"},
4417 {"tx_aborted_errors"},
4424 #define CAS_NUM_STAT_KEYS (sizeof(ethtool_cassini_statnames)/ETH_GSTRING_LEN)
static struct {
const int offsets; /* neg. values for 2nd arg to cas_read_phy */
4428 } ethtool_register_table[] = {
4443 {REG_PCS_MII_STATUS},
4444 {REG_PCS_STATE_MACHINE},
4445 {REG_MAC_COLL_EXCESS},
4448 #define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table)
4449 #define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
4451 static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4455 unsigned long flags;
4457 spin_lock_irqsave(&cp->lock, flags);
4458 for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) {
4461 if (ethtool_register_table[i].offsets < 0) {
4462 hval = cas_phy_read(cp,
4463 -ethtool_register_table[i].offsets);
val = readl(cp->regs + ethtool_register_table[i].offsets);
4468 memcpy(p, (u8 *)&val, sizeof(u32));
4470 spin_unlock_irqrestore(&cp->lock, flags);
4473 static struct net_device_stats *cas_get_stats(struct net_device *dev)
4475 struct cas *cp = netdev_priv(dev);
4476 struct net_device_stats *stats = cp->net_stats;
4477 unsigned long flags;
/* we collate all of the stats into net_stats[N_TX_RINGS] */
4482 if (!cp->hw_running)
4483 return stats + N_TX_RINGS;
4485 /* collect outstanding stats */
4486 /* WTZ: the Cassini spec gives these as 16 bit counters but
4487 * stored in 32-bit words. Added a mask of 0xffff to be safe,
4488 * in case the chip somehow puts any garbage in the other bits.
* Also, counter usage didn't seem to match what Adrian did
4490 * in the parts of the code that set these quantities. Made
4493 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
4494 stats[N_TX_RINGS].rx_crc_errors +=
4495 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
4496 stats[N_TX_RINGS].rx_frame_errors +=
4497 readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
4498 stats[N_TX_RINGS].rx_length_errors +=
4499 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
4501 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
4502 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
4503 stats[N_TX_RINGS].tx_aborted_errors += tmp;
4504 stats[N_TX_RINGS].collisions +=
4505 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4507 stats[N_TX_RINGS].tx_aborted_errors +=
4508 readl(cp->regs + REG_MAC_COLL_EXCESS);
4509 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
4510 readl(cp->regs + REG_MAC_COLL_LATE);
4512 cas_clear_mac_err(cp);
4514 /* saved bits that are unique to ring 0 */
4515 spin_lock(&cp->stat_lock[0]);
4516 stats[N_TX_RINGS].collisions += stats[0].collisions;
4517 stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors;
4518 stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors;
4519 stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors;
4520 stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
4521 stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors;
4522 spin_unlock(&cp->stat_lock[0]);
4524 for (i = 0; i < N_TX_RINGS; i++) {
4525 spin_lock(&cp->stat_lock[i]);
4526 stats[N_TX_RINGS].rx_length_errors +=
4527 stats[i].rx_length_errors;
4528 stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
4529 stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
4530 stats[N_TX_RINGS].tx_packets += stats[i].tx_packets;
4531 stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes;
4532 stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes;
4533 stats[N_TX_RINGS].rx_errors += stats[i].rx_errors;
4534 stats[N_TX_RINGS].tx_errors += stats[i].tx_errors;
4535 stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped;
4536 stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped;
4537 memset(stats + i, 0, sizeof(struct net_device_stats));
4538 spin_unlock(&cp->stat_lock[i]);
4540 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
4541 return stats + N_TX_RINGS;
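/* A minimal helper sketch (an addition for illustration, not code from the
 * original driver): the masking idiom used in cas_get_stats() above, factored
 * out. The hardware exposes 16-bit event counters inside 32-bit registers, so
 * the upper half is discarded defensively. */
static inline u32 cas_read_counter16(struct cas *cp, unsigned long reg)
{
	return readl(cp->regs + reg) & 0xffff;
}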
4545 static void cas_set_multicast(struct net_device *dev)
4547 struct cas *cp = netdev_priv(dev);
4548 u32 rxcfg, rxcfg_new;
4549 unsigned long flags;
4550 int limit = STOP_TRIES;
	if (!cp->hw_running)
		return;
4555 spin_lock_irqsave(&cp->lock, flags);
4556 rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4558 /* disable RX MAC and wait for completion */
4559 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
		if (!limit--)
			break;
		udelay(10);
	}
	/* disable hash filter and wait for completion */
	limit = STOP_TRIES;
4568 rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
4569 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
		if (!limit--)
			break;
		udelay(10);
	}
4576 /* program hash filters */
	cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
	rxcfg |= rxcfg_new;
	writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
4580 spin_unlock_irqrestore(&cp->lock, flags);
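/* A hedged sketch (assumed helper, not present in the driver): the
 * disable-and-wait pattern used by cas_set_multicast() above, expressed as a
 * bounded poll that reports a timeout instead of silently giving up. */
static int cas_wait_rx_cfg_clear(struct cas *cp, u32 bit, int limit)
{
	while (readl(cp->regs + REG_MAC_RX_CFG) & bit) {
		if (!limit--)
			return -ETIMEDOUT;	/* hardware never cleared the bit */
		udelay(10);
	}
	return 0;
}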
4583 static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4585 struct cas *cp = netdev_priv(dev);
4586 strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
4587 strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
4588 info->fw_version[0] = '\0';
4589 strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
4590 info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
4591 cp->casreg_len : CAS_MAX_REGS;
4592 info->n_stats = CAS_NUM_STAT_KEYS;
4595 static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4597 struct cas *cp = netdev_priv(dev);
	u16 bmcr;
	int full_duplex, speed, pause;
4600 unsigned long flags;
4601 enum link_state linkstate = link_up;
4603 cmd->advertising = 0;
4604 cmd->supported = SUPPORTED_Autoneg;
4605 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4606 cmd->supported |= SUPPORTED_1000baseT_Full;
4607 cmd->advertising |= ADVERTISED_1000baseT_Full;
4610 /* Record PHY settings if HW is on. */
4611 spin_lock_irqsave(&cp->lock, flags);
4613 linkstate = cp->lstate;
4614 if (CAS_PHY_MII(cp->phy_type)) {
4615 cmd->port = PORT_MII;
4616 cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
4617 XCVR_INTERNAL : XCVR_EXTERNAL;
4618 cmd->phy_address = cp->phy_addr;
4619 cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
4620 ADVERTISED_10baseT_Half |
4621 ADVERTISED_10baseT_Full |
4622 ADVERTISED_100baseT_Half |
4623 ADVERTISED_100baseT_Full;
		cmd->supported |=
			(SUPPORTED_10baseT_Half |
4627 SUPPORTED_10baseT_Full |
4628 SUPPORTED_100baseT_Half |
4629 SUPPORTED_100baseT_Full |
4630 SUPPORTED_TP | SUPPORTED_MII);
4632 if (cp->hw_running) {
4633 cas_mif_poll(cp, 0);
4634 bmcr = cas_phy_read(cp, MII_BMCR);
			cas_read_mii_link_mode(cp, &full_duplex,
					       &speed, &pause);
4637 cas_mif_poll(cp, 1);
4641 cmd->port = PORT_FIBRE;
4642 cmd->transceiver = XCVR_INTERNAL;
4643 cmd->phy_address = 0;
4644 cmd->supported |= SUPPORTED_FIBRE;
4645 cmd->advertising |= ADVERTISED_FIBRE;
4647 if (cp->hw_running) {
4648 /* pcs uses the same bits as mii */
4649 bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
			cas_read_pcs_link_mode(cp, &full_duplex,
					       &speed, &pause);
4654 spin_unlock_irqrestore(&cp->lock, flags);
4656 if (bmcr & BMCR_ANENABLE) {
4657 cmd->advertising |= ADVERTISED_Autoneg;
4658 cmd->autoneg = AUTONEG_ENABLE;
		cmd->speed = ((speed == 10) ?
			      SPEED_10 :
			      ((speed == 1000) ?
			       SPEED_1000 : SPEED_100));
4663 cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
		cmd->speed =
			(bmcr & CAS_BMCR_SPEED1000) ?
			SPEED_1000 :
			((bmcr & BMCR_SPEED100) ? SPEED_100 :
			 SPEED_10);
		cmd->duplex =
			(bmcr & BMCR_FULLDPLX) ?
			DUPLEX_FULL : DUPLEX_HALF;
	}
4675 if (linkstate != link_up) {
4676 /* Force these to "unknown" if the link is not up and
		 * autonegotiation is enabled. We can set the link
4678 * speed to 0, but not cmd->duplex,
4679 * because its legal values are 0 and 1. Ethtool will
4680 * print the value reported in parentheses after the
4681 * word "Unknown" for unrecognized values.
4683 * If in forced mode, we report the speed and duplex
4684 * settings that we configured.
		if (cp->link_cntl & BMCR_ANENABLE) {
			cmd->speed = 0;
			cmd->duplex = 0xff;
		} else {
			cmd->speed = SPEED_10;
4691 if (cp->link_cntl & BMCR_SPEED100) {
4692 cmd->speed = SPEED_100;
4693 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4694 cmd->speed = SPEED_1000;
			cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
				DUPLEX_FULL : DUPLEX_HALF;
		}
4703 static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4705 struct cas *cp = netdev_priv(dev);
4706 unsigned long flags;
4708 /* Verify the settings we care about. */
4709 if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
4713 if (cmd->autoneg == AUTONEG_DISABLE &&
4714 ((cmd->speed != SPEED_1000 &&
4715 cmd->speed != SPEED_100 &&
4716 cmd->speed != SPEED_10) ||
4717 (cmd->duplex != DUPLEX_HALF &&
	     cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;
4721 /* Apply settings and restart link process. */
4722 spin_lock_irqsave(&cp->lock, flags);
4723 cas_begin_auto_negotiation(cp, cmd);
	spin_unlock_irqrestore(&cp->lock, flags);
	return 0;
4728 static int cas_nway_reset(struct net_device *dev)
4730 struct cas *cp = netdev_priv(dev);
4731 unsigned long flags;
	if ((cp->link_cntl & BMCR_ANENABLE) == 0)
		return -EINVAL;
4736 /* Restart link process. */
4737 spin_lock_irqsave(&cp->lock, flags);
4738 cas_begin_auto_negotiation(cp, NULL);
	spin_unlock_irqrestore(&cp->lock, flags);
	return 0;
4744 static u32 cas_get_link(struct net_device *dev)
4746 struct cas *cp = netdev_priv(dev);
4747 return cp->lstate == link_up;
4750 static u32 cas_get_msglevel(struct net_device *dev)
4752 struct cas *cp = netdev_priv(dev);
4753 return cp->msg_enable;
4756 static void cas_set_msglevel(struct net_device *dev, u32 value)
4758 struct cas *cp = netdev_priv(dev);
4759 cp->msg_enable = value;
4762 static int cas_get_regs_len(struct net_device *dev)
4764 struct cas *cp = netdev_priv(dev);
	return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len : CAS_MAX_REGS;
static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			 void *p)
{
	struct cas *cp = netdev_priv(dev);
4773 /* cas_read_regs handles locks (cp->lock). */
4774 cas_read_regs(cp, p, regs->len / sizeof(u32));
4777 static int cas_get_sset_count(struct net_device *dev, int sset)
	switch (sset) {
	case ETH_SS_STATS:
		return CAS_NUM_STAT_KEYS;
	default:
		return -EOPNOTSUPP;
	}
4787 static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
	memcpy(data, &ethtool_cassini_statnames,
4790 CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4793 static void cas_get_ethtool_stats(struct net_device *dev,
4794 struct ethtool_stats *estats, u64 *data)
4796 struct cas *cp = netdev_priv(dev);
	struct net_device_stats *stats = cas_get_stats(cp->dev);
	int i = 0;
4799 data[i++] = stats->collisions;
4800 data[i++] = stats->rx_bytes;
4801 data[i++] = stats->rx_crc_errors;
4802 data[i++] = stats->rx_dropped;
4803 data[i++] = stats->rx_errors;
4804 data[i++] = stats->rx_fifo_errors;
4805 data[i++] = stats->rx_frame_errors;
4806 data[i++] = stats->rx_length_errors;
4807 data[i++] = stats->rx_over_errors;
4808 data[i++] = stats->rx_packets;
4809 data[i++] = stats->tx_aborted_errors;
4810 data[i++] = stats->tx_bytes;
4811 data[i++] = stats->tx_dropped;
4812 data[i++] = stats->tx_errors;
4813 data[i++] = stats->tx_fifo_errors;
4814 data[i++] = stats->tx_packets;
4815 BUG_ON(i != CAS_NUM_STAT_KEYS);
4818 static const struct ethtool_ops cas_ethtool_ops = {
4819 .get_drvinfo = cas_get_drvinfo,
4820 .get_settings = cas_get_settings,
4821 .set_settings = cas_set_settings,
4822 .nway_reset = cas_nway_reset,
4823 .get_link = cas_get_link,
4824 .get_msglevel = cas_get_msglevel,
4825 .set_msglevel = cas_set_msglevel,
4826 .get_regs_len = cas_get_regs_len,
4827 .get_regs = cas_get_regs,
4828 .get_sset_count = cas_get_sset_count,
4829 .get_strings = cas_get_strings,
	.get_ethtool_stats = cas_get_ethtool_stats,
};
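/* Usage note (illustrative): `ethtool -S ethX` exercises cas_get_sset_count(),
 * cas_get_strings() and cas_get_ethtool_stats() above, while `ethtool -d ethX`
 * sizes its buffer with cas_get_regs_len() and fills it via cas_get_regs(). */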
4833 static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4835 struct cas *cp = netdev_priv(dev);
4836 struct mii_ioctl_data *data = if_mii(ifr);
4837 unsigned long flags;
4838 int rc = -EOPNOTSUPP;
4840 /* Hold the PM mutex while doing ioctl's or we may collide
4841 * with open/close and power management and oops.
4843 mutex_lock(&cp->pm_mutex);
	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
4846 data->phy_id = cp->phy_addr;
4847 /* Fallthrough... */
4849 case SIOCGMIIREG: /* Read MII PHY register. */
4850 spin_lock_irqsave(&cp->lock, flags);
4851 cas_mif_poll(cp, 0);
4852 data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
4853 cas_mif_poll(cp, 1);
		spin_unlock_irqrestore(&cp->lock, flags);
		rc = 0;
		break;
4858 case SIOCSMIIREG: /* Write MII PHY register. */
		if (!capable(CAP_NET_ADMIN)) {
			rc = -EPERM;
			break;
		}
4863 spin_lock_irqsave(&cp->lock, flags);
4864 cas_mif_poll(cp, 0);
4865 rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
4866 cas_mif_poll(cp, 1);
		spin_unlock_irqrestore(&cp->lock, flags);
		break;
	default:
		break;
	}

	mutex_unlock(&cp->pm_mutex);
	return rc;
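/* Userspace usage sketch (illustrative; the standard MII ioctl flow, not code
 * from this driver). The cast mirrors what the kernel's if_mii() helper does
 * on its side of the boundary:
 *
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(sock, SIOCGMIIPHY, &ifr);		// learn the PHY address
 *	mii->reg_num = MII_BMSR;
 *	ioctl(sock, SIOCGMIIREG, &ifr);		// status lands in mii->val_out
 */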
4877 static int __devinit cas_init_one(struct pci_dev *pdev,
4878 const struct pci_device_id *ent)
4880 static int cas_version_printed = 0;
4881 unsigned long casreg_len;
	struct net_device *dev;
	struct cas *cp;
	int i, err, pci_using_dac;
	u16 pci_cmd;
	u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
4887 DECLARE_MAC_BUF(mac);
4889 if (cas_version_printed++ == 0)
4890 printk(KERN_INFO "%s", version);
4892 err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		return err;
	}
4898 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4899 dev_err(&pdev->dev, "Cannot find proper PCI device "
4900 "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}
4905 dev = alloc_etherdev(sizeof(*cp));
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_disable_pdev;
	}
4911 SET_NETDEV_DEV(dev, &pdev->dev);
4913 err = pci_request_regions(pdev, dev->name);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_free_netdev;
	}
4918 pci_set_master(pdev);
4920 /* we must always turn on parity response or else parity
4921 * doesn't get generated properly. disable SERR/PERR as well.
4922 * in addition, we want to turn MWI on.
4924 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4925 pci_cmd &= ~PCI_COMMAND_SERR;
4926 pci_cmd |= PCI_COMMAND_PARITY;
4927 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
4928 if (pci_try_set_mwi(pdev))
		printk(KERN_WARNING PFX "Could not enable MWI for %s\n",
		       pci_name(pdev));
	/*
	 * On some architectures, the default cache line size set
	 * by pci_try_set_mwi reduces performance. We have to increase
	 * it for this case. To start, we'll print some configuration
	 * data.
	 */
4939 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
4940 &orig_cacheline_size);
4941 if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
4942 cas_cacheline_size =
4943 (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
4944 CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
4945 if (pci_write_config_byte(pdev,
4946 PCI_CACHE_LINE_SIZE,
4947 cas_cacheline_size)) {
			dev_err(&pdev->dev, "Could not set PCI cache "
				"line size\n");
4950 goto err_write_cacheline;
4956 /* Configure DMA attributes. */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev,
						  DMA_64BIT_MASK);
		if (err < 0) {
			dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
				"for consistent allocations\n");
			goto err_out_free_res;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, "
				"aborting.\n");
			goto err_out_free_res;
		}
		pci_using_dac = 0;
	}
4977 casreg_len = pci_resource_len(pdev, 0);
	cp = netdev_priv(dev);
	cp->pdev = pdev;
4982 /* A value of 0 indicates we never explicitly set it */
	cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0;
	cp->dev = dev;
	cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
		cassini_debug;
4989 cp->link_transition = LINK_TRANSITION_UNKNOWN;
4990 cp->link_transition_jiffies_valid = 0;
4992 spin_lock_init(&cp->lock);
4993 spin_lock_init(&cp->rx_inuse_lock);
4994 spin_lock_init(&cp->rx_spare_lock);
4995 for (i = 0; i < N_TX_RINGS; i++) {
4996 spin_lock_init(&cp->stat_lock[i]);
4997 spin_lock_init(&cp->tx_lock[i]);
4999 spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
5000 mutex_init(&cp->pm_mutex);
5002 init_timer(&cp->link_timer);
5003 cp->link_timer.function = cas_link_timer;
5004 cp->link_timer.data = (unsigned long) cp;
	/* Just in case the implementation of atomic operations
	 * changes so that an explicit initialization is necessary.
	 */
5010 atomic_set(&cp->reset_task_pending, 0);
5011 atomic_set(&cp->reset_task_pending_all, 0);
5012 atomic_set(&cp->reset_task_pending_spare, 0);
5013 atomic_set(&cp->reset_task_pending_mtu, 0);
5015 INIT_WORK(&cp->reset_task, cas_reset_task);
5017 /* Default link parameters */
5018 if (link_mode >= 0 && link_mode <= 6)
5019 cp->link_cntl = link_modes[link_mode];
	else
		cp->link_cntl = BMCR_ANENABLE;
5022 cp->lstate = link_down;
5023 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
5024 netif_carrier_off(cp->dev);
5025 cp->timer_ticks = 0;
5027 /* give us access to cassini registers */
5028 cp->regs = pci_iomap(pdev, 0, casreg_len);
	if (!cp->regs) {
5030 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
5031 goto err_out_free_res;
5033 cp->casreg_len = casreg_len;
5035 pci_save_state(pdev);
5036 cas_check_pci_invariants(cp);
5039 if (cas_check_invariants(cp))
5040 goto err_out_iounmap;
5042 cp->init_block = (struct cas_init_block *)
		pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
				     &cp->block_dvma);
5045 if (!cp->init_block) {
5046 dev_err(&pdev->dev, "Cannot allocate init block, aborting.\n");
5047 goto err_out_iounmap;
5050 for (i = 0; i < N_TX_RINGS; i++)
5051 cp->init_txds[i] = cp->init_block->txds[i];
5053 for (i = 0; i < N_RX_DESC_RINGS; i++)
5054 cp->init_rxds[i] = cp->init_block->rxds[i];
5056 for (i = 0; i < N_RX_COMP_RINGS; i++)
5057 cp->init_rxcs[i] = cp->init_block->rxcs[i];
5059 for (i = 0; i < N_RX_FLOWS; i++)
5060 skb_queue_head_init(&cp->rx_flows[i]);
5062 dev->open = cas_open;
5063 dev->stop = cas_close;
5064 dev->hard_start_xmit = cas_start_xmit;
5065 dev->get_stats = cas_get_stats;
5066 dev->set_multicast_list = cas_set_multicast;
5067 dev->do_ioctl = cas_ioctl;
5068 dev->ethtool_ops = &cas_ethtool_ops;
5069 dev->tx_timeout = cas_tx_timeout;
5070 dev->watchdog_timeo = CAS_TX_TIMEOUT;
5071 dev->change_mtu = cas_change_mtu;
5073 netif_napi_add(dev, &cp->napi, cas_poll, 64);
5075 #ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = cas_netpoll;
#endif
5078 dev->irq = pdev->irq;
5081 /* Cassini features. */
5082 if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
5083 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;
5088 if (register_netdev(dev)) {
5089 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
5090 goto err_out_free_consistent;
5093 i = readl(cp->regs + REG_BIM_CFG);
5094 printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) "
5095 "Ethernet[%d] %s\n", dev->name,
5096 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5097 (i & BIM_CFG_32BIT) ? "32" : "64",
5098 (i & BIM_CFG_66MHZ) ? "66" : "33",
5099 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
5100 print_mac(mac, dev->dev_addr));
5102 pci_set_drvdata(pdev, dev);
5104 cas_entropy_reset(cp);
	cas_begin_auto_negotiation(cp, NULL);
	return 0;
5109 err_out_free_consistent:
5110 pci_free_consistent(pdev, sizeof(struct cas_init_block),
5111 cp->init_block, cp->block_dvma);
err_out_iounmap:
	mutex_lock(&cp->pm_mutex);
	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);
5119 pci_iounmap(pdev, cp->regs);
err_out_free_res:
	pci_release_regions(pdev);
5125 err_write_cacheline:
	/* Try to restore it in case the error occurred after we
	 * set it.
	 */
5129 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
err_out_free_netdev:
	free_netdev(dev);
5134 err_out_disable_pdev:
5135 pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return -ENODEV;
5140 static void __devexit cas_remove_one(struct pci_dev *pdev)
5142 struct net_device *dev = pci_get_drvdata(pdev);
	struct cas *cp;

	if (!dev)
		return;

	cp = netdev_priv(dev);
5148 unregister_netdev(dev);
5150 mutex_lock(&cp->pm_mutex);
	flush_scheduled_work();
	if (cp->hw_running)
		cas_shutdown(cp);
5154 mutex_unlock(&cp->pm_mutex);
5157 if (cp->orig_cacheline_size) {
		/* Restore the cache line size if we had modified
		 * it.
		 */
5161 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5162 cp->orig_cacheline_size);
5165 pci_free_consistent(pdev, sizeof(struct cas_init_block),
5166 cp->init_block, cp->block_dvma);
5167 pci_iounmap(pdev, cp->regs);
5169 pci_release_regions(pdev);
5170 pci_disable_device(pdev);
5171 pci_set_drvdata(pdev, NULL);
5175 static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
5177 struct net_device *dev = pci_get_drvdata(pdev);
5178 struct cas *cp = netdev_priv(dev);
5179 unsigned long flags;
5181 mutex_lock(&cp->pm_mutex);
	/* If the interface is open, we stop the DMA */
	if (cp->opened) {
		netif_device_detach(dev);
5187 cas_lock_all_save(cp, flags);
5189 /* We can set the second arg of cas_reset to 0
5190 * because on resume, we'll call cas_init_hw with
		 * its second arg set so that autonegotiation is
		 * restarted.
		 */
		cas_reset(cp, 0);
5195 cas_clean_rings(cp);
		cas_unlock_all_restore(cp, flags);
	}

	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);
	return 0;
5206 static int cas_resume(struct pci_dev *pdev)
5208 struct net_device *dev = pci_get_drvdata(pdev);
5209 struct cas *cp = netdev_priv(dev);
5211 printk(KERN_INFO "%s: resuming\n", dev->name);
5213 mutex_lock(&cp->pm_mutex);
	if (cp->opened) {
		unsigned long flags;
5217 cas_lock_all_save(cp, flags);
		cas_reset(cp, 0);
		cp->hw_running = 1;
		cas_clean_rings(cp);
		cas_init_hw(cp, 1);
5222 cas_unlock_all_restore(cp, flags);
		netif_device_attach(dev);
	}
	mutex_unlock(&cp->pm_mutex);
	return 0;
5229 #endif /* CONFIG_PM */
5231 static struct pci_driver cas_driver = {
5232 .name = DRV_MODULE_NAME,
5233 .id_table = cas_pci_tbl,
5234 .probe = cas_init_one,
5235 .remove = __devexit_p(cas_remove_one),
#ifdef CONFIG_PM
	.suspend	= cas_suspend,
	.resume		= cas_resume
#endif
};
5242 static int __init cas_init(void)
5244 if (linkdown_timeout > 0)
5245 link_transition_timeout = linkdown_timeout * HZ;
	else
		link_transition_timeout = 0;
5249 return pci_register_driver(&cas_driver);
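/* Usage note (assuming linkdown_timeout is the module parameter declared
 * earlier in this file, in whole seconds):
 *
 *	modprobe cassini linkdown_timeout=5
 *
 * The value is converted to jiffies via HZ above; a non-positive value leaves
 * the link-transition timeout disabled. */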
5252 static void __exit cas_cleanup(void)
5254 pci_unregister_driver(&cas_driver);
5257 module_init(cas_init);
5258 module_exit(cas_cleanup);