1 /* b44.c: Broadcom 4400 device driver.
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5 * Copyright (C) 2006 Broadcom Corporation.
7 * Distribute under GPL.
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/types.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/if_vlan.h>
19 #include <linux/etherdevice.h>
20 #include <linux/pci.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/dma-mapping.h>
25 #include <asm/uaccess.h>
31 #define DRV_MODULE_NAME "b44"
32 #define PFX DRV_MODULE_NAME ": "
33 #define DRV_MODULE_VERSION "1.01"
34 #define DRV_MODULE_RELDATE "Jun 16, 2006"
36 #define B44_DEF_MSG_ENABLE \
46 /* length of time before we decide the hardware is borked,
47 * and dev->tx_timeout() should be called to fix the problem
49 #define B44_TX_TIMEOUT (5 * HZ)
51 /* hardware minimum and maximum for a single frame's data payload */
52 #define B44_MIN_MTU 60
53 #define B44_MAX_MTU 1500
55 #define B44_RX_RING_SIZE 512
56 #define B44_DEF_RX_RING_PENDING 200
57 #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
59 #define B44_TX_RING_SIZE 512
60 #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
61 #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
64 #define TX_RING_GAP(BP) \
65 (B44_TX_RING_SIZE - (BP)->tx_pending)
66 #define TX_BUFFS_AVAIL(BP) \
67 (((BP)->tx_cons <= (BP)->tx_prod) ? \
68 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
69 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
70 #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
72 #define RX_PKT_OFFSET 30
73 #define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET + 64)
75 /* minimum number of free TX descriptors required to wake up TX process */
76 #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
78 /* b44 internal pattern match filter info */
79 #define B44_PATTERN_BASE 0x400
80 #define B44_PATTERN_SIZE 0x80
81 #define B44_PMASK_BASE 0x600
82 #define B44_PMASK_SIZE 0x10
83 #define B44_MAX_PATTERNS 16
84 #define B44_ETHIPV6UDP_HLEN 62
85 #define B44_ETHIPV4UDP_HLEN 42
87 static char version[] __devinitdata =
88 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
90 MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
91 MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
92 MODULE_LICENSE("GPL");
93 MODULE_VERSION(DRV_MODULE_VERSION);
95 static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
96 module_param(b44_debug, int, 0);
97 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
/* PCI IDs of all supported parts: the original BCM4401 plus the B0 and B1
 * revisions.  Any vendor/subsystem combination is accepted (PCI_ANY_ID). */
99 static struct pci_device_id b44_pci_tbl[] = {
100 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
101 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
102 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
103 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
104 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
105 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
106 { } /* terminate list with empty entry */
/* Export the table so module loading tools can auto-load on device match. */
109 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
111 static void b44_halt(struct b44 *);
112 static void b44_init_rings(struct b44 *);
114 #define B44_FULL_RESET 1
115 #define B44_FULL_RESET_SKIP_PHY 2
116 #define B44_PARTIAL_RESET 3
118 static void b44_init_hw(struct b44 *, int);
120 static int dma_desc_align_mask;
121 static int dma_desc_sync_size;
/* ethtool statistics name table; the _B44() macro stringifies each stat
 * field name.  NOTE(review): the table entries and closing brace are in
 * lines elided from this sample. */
123 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
124 #define _B44(x...) # x,
/* Flush one ring descriptor to the device for rings that were streaming-
 * mapped with dma_map_single (the *_RING_HACK fallback).  The offset is
 * rounded down to a descriptor-aligned boundary via dma_desc_align_mask and
 * dma_desc_sync_size bytes are synced.  NOTE(review): the dma_base parameter
 * line is elided from this sample. */
129 static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
131 unsigned long offset,
132 enum dma_data_direction dir)
134 dma_sync_single_range_for_device(&pdev->dev, dma_base,
135 offset & dma_desc_align_mask,
136 dma_desc_sync_size, dir);
/* Counterpart of b44_sync_dma_desc_for_device: make a device-written ring
 * descriptor visible to the CPU before reading it.  Same aligned-offset /
 * fixed-size scheme.  NOTE(review): dma_base parameter line elided here. */
139 static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
141 unsigned long offset,
142 enum dma_data_direction dir)
144 dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
145 offset & dma_desc_align_mask,
146 dma_desc_sync_size, dir);
/* Read a 32-bit device register at byte offset 'reg' from the mapped BAR. */
149 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
151 return readl(bp->regs + reg);
/* Write 'val' to the 32-bit device register at byte offset 'reg'. */
154 static inline void bw32(const struct b44 *bp,
155 unsigned long reg, unsigned long val)
157 writel(val, bp->regs + reg);
/* Poll register 'reg' for up to 'timeout' iterations, waiting for 'bit'
 * to become clear (clear != 0) or set (clear == 0).  Logs a KERN_ERR on
 * timeout.  NOTE(review): the per-iteration delay, loop break, and return
 * statements are in lines elided from this sample. */
160 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
161 u32 bit, unsigned long timeout, const int clear)
165 for (i = 0; i < timeout; i++) {
166 u32 val = br32(bp, reg);
168 if (clear && !(val & bit))
170 if (!clear && (val & bit))
175 printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
179 (clear ? "clear" : "set"));
185 /* Sonics SiliconBackplane support routines. ROFL, you should see all the
186 * buzz words used on this company's website :-)
188 * All of these routines must be invoked with bp->lock held and
189 * interrupts disabled.
192 #define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
193 #define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
/* Return the Sonics backplane core revision from the SBIDHIGH register. */
195 static u32 ssb_get_core_rev(struct b44 *bp)
197 return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
/* Route interrupts from the given backplane core(s) to PCI and enable
 * prefetch/burst on the PCI translation.  Temporarily retargets BAR0 at the
 * PCI core registers and restores it before returning.  Caller must hold
 * bp->lock with interrupts disabled.  NOTE(review): the line OR-ing 'cores'
 * into SBINTVEC and the final return are elided from this sample. */
200 static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
202 u32 bar_orig, pci_rev, val;
204 pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
205 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
206 pci_rev = ssb_get_core_rev(bp);
208 val = br32(bp, B44_SBINTVEC);
210 bw32(bp, B44_SBINTVEC, val)&#59;
212 val = br32(bp, SSB_PCI_TRANS_2);
213 val |= SSB_PCI_PREF | SSB_PCI_BURST;
214 bw32(bp, SSB_PCI_TRANS_2, val);
216 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
/* Put the ethernet core into reset: assert REJECT, wait for it to latch and
 * for the core to go idle, then assert RESET (first with forced gated clock,
 * then with the clock off).  The br32() read-backs flush posted writes.
 * Early-exit if the core is already held in reset. */
221 static void ssb_core_disable(struct b44 *bp)
223 if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
226 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
227 b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
228 b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
229 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
230 SBTMSLOW_REJECT | SBTMSLOW_RESET));
231 br32(bp, B44_SBTMSLOW);
233 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
234 br32(bp, B44_SBTMSLOW);
/* Full reset sequence for the ethernet core: disable it, re-enable the
 * clock while still in reset, clear latched SERR/in-band error state, then
 * release reset and finally drop the forced-gated-clock bit.  Read-backs
 * after each write flush the posted MMIO.  NOTE(review): the udelay()s
 * between stages are in lines elided from this sample. */
238 static void ssb_core_reset(struct b44 *bp)
242 ssb_core_disable(bp);
243 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
244 br32(bp, B44_SBTMSLOW);
247 /* Clear SERR if set, this is a hw bug workaround. */
248 if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
249 bw32(bp, B44_SBTMSHIGH, 0);
251 val = br32(bp, B44_SBIMSTATE);
252 if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
253 bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
255 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
256 br32(bp, B44_SBTMSLOW);
259 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
260 br32(bp, B44_SBTMSLOW);
/* Derive the core unit number from the SBADMATCH0 address-match register by
 * decoding its type field and base-address mask.  NOTE(review): the switch
 * arms and the final return are in lines elided from this sample. */
264 static int ssb_core_unit(struct b44 *bp)
267 u32 val = br32(bp, B44_SBADMATCH0);
270 type = val & SBADMATCH0_TYPE_MASK;
273 base = val & SBADMATCH0_BS0_MASK;
277 base = val & SBADMATCH0_BS1_MASK;
282 base = val & SBADMATCH0_BS2_MASK;
/* True when the core is fully up: clock running with neither RESET nor
 * REJECT asserted.  NOTE(review): the comparison against SBTMSLOW_CLOCK
 * completing the expression is in a line elided from this sample. */
289 static int ssb_is_core_up(struct b44 *bp)
291 return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
/* Write one 6-byte MAC address into the chip's CAM at 'index': low 4 bytes
 * into CAM_DATA_LO, top 2 (plus the VALID flag) into CAM_DATA_HI, then kick
 * a CAM write and wait for BUSY to clear.  Caller holds bp->lock. */
295 static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
299 val = ((u32) data[2]) << 24;
300 val |= ((u32) data[3]) << 16;
301 val |= ((u32) data[4]) << 8;
302 val |= ((u32) data[5]) << 0;
303 bw32(bp, B44_CAM_DATA_LO, val);
304 val = (CAM_DATA_HI_VALID |
305 (((u32) data[0]) << 8) |
306 (((u32) data[1]) << 0));
307 bw32(bp, B44_CAM_DATA_HI, val);
308 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
309 (index << CAM_CTRL_INDEX_SHIFT)));
310 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
/* Mask all chip interrupt sources (lock held; no posted-write flush here). */
313 static inline void __b44_disable_ints(struct b44 *bp)
315 bw32(bp, B44_IMASK, 0);
/* Disable interrupts and flush the posted IMASK write.  NOTE(review): the
 * flushing register read is in a line elided from this sample. */
318 static void b44_disable_ints(struct b44 *bp)
320 __b44_disable_ints(bp);
322 /* Flush posted writes. */
/* Re-enable the interrupt sources recorded in bp->imask. */
326 static void b44_enable_ints(struct b44 *bp)
328 bw32(bp, B44_IMASK, bp->imask);
/* Read PHY register 'reg' over MDIO into *val.  Clears the MII-done
 * interrupt status, issues the read frame, waits (up to 100 polls) for
 * completion, then latches the data field.  Returns the wait status.
 * NOTE(review): the final 'return err;' line is elided from this sample. */
331 static int b44_readphy(struct b44 *bp, int reg, u32 *val)
335 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
336 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
337 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
338 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
339 (reg << MDIO_DATA_RA_SHIFT) |
340 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
341 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
342 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
/* Write 'val' to PHY register 'reg' over MDIO: clear the MII-done status,
 * issue the write frame, and wait (up to 100 polls) for completion.
 * Returns 0 on success, non-zero on timeout (from b44_wait_bit). */
347 static int b44_writephy(struct b44 *bp, int reg, u32 val)
349 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
350 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
351 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
352 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
353 (reg << MDIO_DATA_RA_SHIFT) |
354 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
355 (val & MDIO_DATA_DATA)));
356 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
359 /* miilib interface */
360 /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
361 * due to code existing before miilib use was added to this driver.
362 * Someone should remove this artificial driver limitation in
363 * b44_{read,write}phy. bp->phy_addr itself is fine (and needed).
/* miilib read hook.  phy_id is ignored (see FIXME above); always reads via
 * bp->phy_addr.  NOTE(review): the value declaration and the error/return
 * handling lines are elided from this sample. */
365 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
368 struct b44 *bp = netdev_priv(dev);
369 int rc = b44_readphy(bp, location, &val);
/* miilib write hook.  phy_id is ignored (see FIXME above); always writes
 * via bp->phy_addr.  NOTE(review): the trailing 'int val)' part of the
 * signature is in a line elided from this sample. */
375 static void b44_mii_write(struct net_device *dev, int phy_id, int location,
378 struct b44 *bp = netdev_priv(dev);
379 b44_writephy(bp, location, val);
/* Reset the PHY via BMCR and verify the self-clearing RESET bit dropped.
 * Logs an error if the reset did not complete.  NOTE(review): the delay
 * between write and read-back, and the return paths, are in elided lines. */
382 static int b44_phy_reset(struct b44 *bp)
387 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
391 err = b44_readphy(bp, MII_BMCR, &val);
393 if (val & BMCR_RESET) {
394 printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
/* Apply the resolved pause configuration: record it in bp->flags, toggle
 * RX flow control in RXCONFIG, and toggle TX pause (with a high-water mark
 * of 0xc0) in MAC_FLOW.  Caller holds bp->lock. */
403 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
407 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
408 bp->flags |= pause_flags;
410 val = br32(bp, B44_RXCONFIG);
411 if (pause_flags & B44_FLAG_RX_PAUSE)
412 val |= RXCONFIG_FLOW;
414 val &= ~RXCONFIG_FLOW;
415 bw32(bp, B44_RXCONFIG, val);
417 val = br32(bp, B44_MAC_FLOW);
418 if (pause_flags & B44_FLAG_TX_PAUSE)
419 val |= (MAC_FLOW_PAUSE_ENAB |
420 (0xc0 & MAC_FLOW_RX_HI_WATER));
422 val &= ~MAC_FLOW_PAUSE_ENAB;
423 bw32(bp, B44_MAC_FLOW, val);
/* Resolve autoneg pause: enable RX pause only when we advertise both
 * PAUSE_CAP and PAUSE_ASYM and the link partner advertises asymmetric
 * pause without symmetric pause.  TX pause stays off by default (see
 * comment below); use ethtool to enable it. */
426 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
430 /* The driver supports only rx pause by default because
431 the b44 mac tx pause mechanism generates excessive
433 Use ethtool to turn on b44 tx pause if necessary.
435 if ((local & ADVERTISE_PAUSE_CAP) &&
436 (local & ADVERTISE_PAUSE_ASYM)){
437 if ((remote & LPA_PAUSE_ASYM) &&
438 !(remote & LPA_PAUSE_CAP))
439 pause_enab |= B44_FLAG_RX_PAUSE;
442 __b44_set_flow_ctrl(bp, pause_enab);
/* Configure the PHY for the current bp->flags: set LED modes, then either
 * start autonegotiation with the advertised modes (default) or force
 * speed/duplex via BMCR when B44_FLAG_FORCE_LINK is set.  In the forced
 * case flow control is disabled entirely (no negotiation to learn it).
 * Returns 0 on success or the first failing PHY access's error code. */
445 static int b44_setup_phy(struct b44 *bp)
450 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
452 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
453 val & MII_ALEDCTRL_ALLMSK)) != 0)
455 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
457 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
458 val | MII_TLEDCTRL_ENABLE)) != 0)
461 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
462 u32 adv = ADVERTISE_CSMA;
464 if (bp->flags & B44_FLAG_ADV_10HALF)
465 adv |= ADVERTISE_10HALF;
466 if (bp->flags & B44_FLAG_ADV_10FULL)
467 adv |= ADVERTISE_10FULL;
468 if (bp->flags & B44_FLAG_ADV_100HALF)
469 adv |= ADVERTISE_100HALF;
470 if (bp->flags & B44_FLAG_ADV_100FULL)
471 adv |= ADVERTISE_100FULL;
473 if (bp->flags & B44_FLAG_PAUSE_AUTO)
474 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
476 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
478 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
479 BMCR_ANRESTART))) != 0)
484 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
486 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
487 if (bp->flags & B44_FLAG_100_BASE_T)
488 bmcr |= BMCR_SPEED100;
489 if (bp->flags & B44_FLAG_FULL_DUPLEX)
490 bmcr |= BMCR_FULLDPLX;
491 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
494 /* Since we will not be negotiating there is no safe way
495 * to determine if the link partner supports flow control
496 * or not. So just disable it completely in this case.
498 b44_set_flow_ctrl(bp, 0, 0);
/* Accumulate the hardware MIB counters into bp->hw_stats by walking the
 * TX (TX_GOOD_O..TX_PAUSE) and RX (RX_GOOD_O..RX_NPAUSE) register ranges
 * in 4-byte steps.  Relies on the hw_stats fields being laid out in the
 * same order as the registers.  Caller holds bp->lock. */
505 static void b44_stats_update(struct b44 *bp)
510 val = &bp->hw_stats.tx_good_octets;
511 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
512 *val++ += br32(bp, reg);
518 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
519 *val++ += br32(bp, reg);
/* Log the current link state: down, or up with negotiated speed/duplex
 * and the active TX/RX flow-control settings from bp->flags. */
523 static void b44_link_report(struct b44 *bp)
525 if (!netif_carrier_ok(bp->dev)) {
526 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
528 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
530 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
531 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
533 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
536 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
537 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
/* Periodic PHY watchdog (called from b44_timer with bp->lock held): read
 * BMSR/AUXCTRL, track speed and duplex in bp->flags, and handle carrier
 * transitions - on link-up program MAC duplex and (if autonegotiated)
 * resolve flow control from local/partner advertisements; on link-down
 * drop carrier.  Also warns on remote-fault and jabber conditions. */
541 static void b44_check_phy(struct b44 *bp)
545 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
546 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
548 if (aux & MII_AUXCTRL_SPEED)
549 bp->flags |= B44_FLAG_100_BASE_T;
551 bp->flags &= ~B44_FLAG_100_BASE_T;
552 if (aux & MII_AUXCTRL_DUPLEX)
553 bp->flags |= B44_FLAG_FULL_DUPLEX;
555 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
557 if (!netif_carrier_ok(bp->dev) &&
558 (bmsr & BMSR_LSTATUS)) {
559 u32 val = br32(bp, B44_TX_CTRL);
560 u32 local_adv, remote_adv;
562 if (bp->flags & B44_FLAG_FULL_DUPLEX)
563 val |= TX_CTRL_DUPLEX;
565 val &= ~TX_CTRL_DUPLEX;
566 bw32(bp, B44_TX_CTRL, val);
568 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
569 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
570 !b44_readphy(bp, MII_LPA, &remote_adv))
571 b44_set_flow_ctrl(bp, local_adv, remote_adv);
574 netif_carrier_on(bp->dev);
576 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
578 netif_carrier_off(bp->dev);
582 if (bmsr & BMSR_RFAULT)
583 printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
586 printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
/* 1 Hz housekeeping timer: under bp->lock, poll the PHY and refresh the
 * MIB stats, then re-arm for roughly one second (rounded to spread timer
 * wakeups).  NOTE(review): the b44_check_phy() call is in a line elided
 * from this sample. */
591 static void b44_timer(unsigned long __opaque)
593 struct b44 *bp = (struct b44 *) __opaque;
595 spin_lock_irq(&bp->lock);
599 b44_stats_update(bp);
601 spin_unlock_irq(&bp->lock);
603 mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
/* Reclaim completed TX descriptors: read the hardware's current descriptor
 * index from DMATX_STAT, unmap and free every skb up to it, then wake the
 * queue if enough slots freed up.  Finally clear the GP timer used for TX
 * timeout detection.  Runs from the poll/interrupt path under bp->lock. */
606 static void b44_tx(struct b44 *bp)
610 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
611 cur /= sizeof(struct dma_desc);
613 /* XXX needs updating when NETIF_F_SG is supported */
614 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
615 struct ring_info *rp = &bp->tx_buffers[cons];
616 struct sk_buff *skb = rp->skb;
620 pci_unmap_single(bp->pdev,
621 pci_unmap_addr(rp, mapping),
625 dev_kfree_skb_irq(skb);
629 if (netif_queue_stopped(bp->dev) &&
630 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
631 netif_wake_queue(bp->dev);
633 bw32(bp, B44_GPTIMER, 0);
636 /* Works like this. This chip writes a 'struct rx_header" 30 bytes
637 * before the DMA address you give it. So we allocate 30 more bytes
638 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
639 * point the chip at 30 bytes past where the rx_header will go.
/* Allocate and DMA-map a fresh RX skb for ring slot 'dest_idx_unmasked'
 * (masked to the ring size).  The chip writes a struct rx_header 30 bytes
 * (RX_PKT_OFFSET) before the packet, so the buffer is over-allocated and
 * the descriptor address points past the header.  Because the chip cannot
 * DMA above 1GB, a mapping landing above DMA_30BIT_MASK is retried once
 * from GFP_DMA memory.  Returns the buffer size on success.
 * FIX(review): the RING_HACK descriptor sync below used sizeof(dp) - the
 * size of a *pointer* (4 on 32-bit) - as the per-descriptor stride instead
 * of sizeof(*dp) == sizeof(struct dma_desc) (8), so the wrong descriptor
 * was synced on 32-bit hosts.  Changed to sizeof(*dp). */
641 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
644 struct ring_info *src_map, *map;
645 struct rx_header *rh;
653 src_map = &bp->rx_buffers[src_idx];
654 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
655 map = &bp->rx_buffers[dest_idx];
656 skb = dev_alloc_skb(RX_PKT_BUF_SZ);
660 mapping = pci_map_single(bp->pdev, skb->data,
664 /* Hardware bug work-around, the chip is unable to do PCI DMA
665 to/from anything above 1GB :-( */
666 if (dma_mapping_error(mapping) ||
667 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
669 if (!dma_mapping_error(mapping))
670 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
671 dev_kfree_skb_any(skb);
672 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
675 mapping = pci_map_single(bp->pdev, skb->data,
678 if (dma_mapping_error(mapping) ||
679 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
680 if (!dma_mapping_error(mapping))
681 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
682 dev_kfree_skb_any(skb);
688 rh = (struct rx_header *) skb->data;
689 skb_reserve(skb, RX_PKT_OFFSET);
695 pci_unmap_addr_set(map, mapping, mapping);
700 ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET));
701 if (dest_idx == (B44_RX_RING_SIZE - 1))
702 ctrl |= DESC_CTRL_EOT;
704 dp = &bp->rx_ring[dest_idx];
705 dp->ctrl = cpu_to_le32(ctrl);
706 dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);
708 if (bp->flags & B44_FLAG_RX_RING_HACK)
709 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
710 dest_idx * sizeof(*dp),
713 return RX_PKT_BUF_SZ;
/* Recycle an RX buffer whose packet was dropped or copied out: move the
 * skb, its DMA mapping and descriptor address from ring slot src_idx to
 * dest_idx (masked), fixing up the EOT bit for the last ring entry, then
 * hand the buffer back to the device.  Caller holds bp->lock.
 * FIX(review): the RING_HACK descriptor syncs used sizeof(src_desc) /
 * sizeof(dest_desc) - the size of a *pointer* (4 on 32-bit) - as the
 * per-descriptor stride instead of sizeof(struct dma_desc) (8), syncing
 * the wrong descriptor on 32-bit hosts.  Changed to sizeof(*src_desc) and
 * sizeof(*dest_desc). */
716 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
718 struct dma_desc *src_desc, *dest_desc;
719 struct ring_info *src_map, *dest_map;
720 struct rx_header *rh;
724 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
725 dest_desc = &bp->rx_ring[dest_idx];
726 dest_map = &bp->rx_buffers[dest_idx];
727 src_desc = &bp->rx_ring[src_idx];
728 src_map = &bp->rx_buffers[src_idx];
730 dest_map->skb = src_map->skb;
731 rh = (struct rx_header *) src_map->skb->data;
734 pci_unmap_addr_set(dest_map, mapping,
735 pci_unmap_addr(src_map, mapping));
737 if (bp->flags & B44_FLAG_RX_RING_HACK)
738 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
739 src_idx * sizeof(*src_desc),
742 ctrl = src_desc->ctrl;
743 if (dest_idx == (B44_RX_RING_SIZE - 1))
744 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
746 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
748 dest_desc->ctrl = ctrl;
749 dest_desc->addr = src_desc->addr;
753 if (bp->flags & B44_FLAG_RX_RING_HACK)
754 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
755 dest_idx * sizeof(*dest_desc),
758 pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
/* NAPI RX path: consume up to 'budget' received packets.  For each, sync
 * the buffer, validate the chip-written rx_header (length/error flags;
 * dropped frames get their buffer recycled).  Large packets are handed up
 * directly and a fresh buffer allocated; small ones (<= RX_COPY_THRESHOLD)
 * are copied into a new skb and the original buffer recycled.  Returns the
 * number of packets processed and advances the hardware RX pointer. */
763 static int b44_rx(struct b44 *bp, int budget)
769 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
770 prod /= sizeof(struct dma_desc);
773 while (cons != prod && budget > 0) {
774 struct ring_info *rp = &bp->rx_buffers[cons];
775 struct sk_buff *skb = rp->skb;
776 dma_addr_t map = pci_unmap_addr(rp, mapping);
777 struct rx_header *rh;
780 pci_dma_sync_single_for_cpu(bp->pdev, map,
783 rh = (struct rx_header *) skb->data;
784 len = le16_to_cpu(rh->len);
785 if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
786 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
788 b44_recycle_rx(bp, cons, bp->rx_prod);
790 bp->stats.rx_dropped++;
/* The chip may not have finished writing the length yet: re-read a few
 * times before giving up (loop head is in an elided line). */
800 len = le16_to_cpu(rh->len);
801 } while (len == 0 && i++ < 5);
809 if (len > RX_COPY_THRESHOLD) {
811 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
814 pci_unmap_single(bp->pdev, map,
815 skb_size, PCI_DMA_FROMDEVICE);
816 /* Leave out rx_header */
817 skb_put(skb, len + RX_PKT_OFFSET);
818 skb_pull(skb, RX_PKT_OFFSET);
820 struct sk_buff *copy_skb;
822 b44_recycle_rx(bp, cons, bp->rx_prod);
823 copy_skb = dev_alloc_skb(len + 2);
824 if (copy_skb == NULL)
825 goto drop_it_no_recycle;
827 skb_reserve(copy_skb, 2);
828 skb_put(copy_skb, len);
829 /* DMA sync done above, copy just the actual packet */
830 skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
831 copy_skb->data, len);
834 skb->ip_summed = CHECKSUM_NONE;
835 skb->protocol = eth_type_trans(skb, bp->dev);
836 netif_receive_skb(skb);
837 bp->dev->last_rx = jiffies;
841 bp->rx_prod = (bp->rx_prod + 1) &
842 (B44_RX_RING_SIZE - 1);
843 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
847 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
/* Old-style NAPI poll: reclaim TX completions (under the lock), process RX
 * within the device quota, and on error interrupts halt/re-init the chip.
 * Returns 0 when all work is done (poll completed and removed from the
 * NAPI list), 1 to be polled again. */
852 static int b44_poll(struct net_device *netdev, int *budget)
854 struct b44 *bp = netdev_priv(netdev);
857 spin_lock_irq(&bp->lock);
859 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
860 /* spin_lock(&bp->tx_lock); */
862 /* spin_unlock(&bp->tx_lock); */
864 spin_unlock_irq(&bp->lock);
867 if (bp->istat & ISTAT_RX) {
868 int orig_budget = *budget;
871 if (orig_budget > netdev->quota)
872 orig_budget = netdev->quota;
874 work_done = b44_rx(bp, orig_budget);
876 *budget -= work_done;
877 netdev->quota -= work_done;
879 if (work_done >= orig_budget)
883 if (bp->istat & ISTAT_ERRORS) {
886 spin_lock_irqsave(&bp->lock, flags);
889 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
890 netif_wake_queue(bp->dev);
891 spin_unlock_irqrestore(&bp->lock, flags);
896 netif_rx_complete(netdev);
900 return (done ? 0 : 1);
/* Shared IRQ handler: latch ISTAT & IMASK, ignore late interrupts on a
 * stopped device, otherwise mask chip interrupts and schedule NAPI.  The
 * ISTAT write-back acks the interrupt and flushes the posted IMASK write.
 * Returns IRQ_HANDLED only when this device raised the interrupt. */
903 static irqreturn_t b44_interrupt(int irq, void *dev_id)
905 struct net_device *dev = dev_id;
906 struct b44 *bp = netdev_priv(dev);
910 spin_lock(&bp->lock);
912 istat = br32(bp, B44_ISTAT);
913 imask = br32(bp, B44_IMASK);
915 /* The interrupt mask register controls which interrupt bits
916 * will actually raise an interrupt to the CPU when set by hw/firmware,
917 * but doesn't mask off the bits.
923 if (unlikely(!netif_running(dev))) {
924 printk(KERN_INFO "%s: late interrupt.\n", dev->name);
928 if (netif_rx_schedule_prep(dev)) {
929 /* NOTE: These writes are posted by the readback of
930 * the ISTAT register below.
933 __b44_disable_ints(bp);
934 __netif_rx_schedule(dev);
936 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
941 bw32(bp, B44_ISTAT, istat);
944 spin_unlock(&bp->lock);
945 return IRQ_RETVAL(handled);
/* netdev tx_timeout hook: the hardware stalled, so halt it, re-initialize
 * rings and hardware under bp->lock, then wake the queue.  NOTE(review):
 * the halt/init_rings calls and interrupt re-enable are in elided lines. */
948 static void b44_tx_timeout(struct net_device *dev)
950 struct b44 *bp = netdev_priv(dev);
952 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
955 spin_lock_irq(&bp->lock);
959 b44_init_hw(bp, B44_FULL_RESET);
961 spin_unlock_irq(&bp->lock);
965 netif_wake_queue(dev);
/* netdev hard_start_xmit: map the skb for DMA (bouncing through a GFP_DMA
 * copy when the mapping lands above the chip's 1GB / DMA_30BIT_MASK limit),
 * fill the next TX descriptor (IOC|SOF|EOF, EOT on the last ring slot),
 * advance tx_prod and kick the DMATX pointer - twice for BUGGY_TXPTR parts,
 * with a flushing read for REORDER_BUG parts.  Stops the queue when the
 * ring fills.  Returns NETDEV_TX_OK, or NETDEV_TX_BUSY on the error path. */
968 static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
970 struct b44 *bp = netdev_priv(dev);
971 int rc = NETDEV_TX_OK;
973 u32 len, entry, ctrl;
976 spin_lock_irq(&bp->lock);
978 /* This is a hard error, log it. */
979 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
980 netif_stop_queue(dev);
981 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
986 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
987 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
988 struct sk_buff *bounce_skb;
990 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
991 if (!dma_mapping_error(mapping))
992 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
994 bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
998 mapping = pci_map_single(bp->pdev, bounce_skb->data,
999 len, PCI_DMA_TODEVICE);
1000 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
1001 if (!dma_mapping_error(mapping))
1002 pci_unmap_single(bp->pdev, mapping,
1003 len, PCI_DMA_TODEVICE);
1004 dev_kfree_skb_any(bounce_skb);
1008 skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
1009 dev_kfree_skb_any(skb);
1013 entry = bp->tx_prod;
1014 bp->tx_buffers[entry].skb = skb;
1015 pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
1017 ctrl = (len & DESC_CTRL_LEN);
1018 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
1019 if (entry == (B44_TX_RING_SIZE - 1))
1020 ctrl |= DESC_CTRL_EOT;
1022 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1023 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1025 if (bp->flags & B44_FLAG_TX_RING_HACK)
1026 b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
1027 entry * sizeof(bp->tx_ring[0]),
1030 entry = NEXT_TX(entry);
1032 bp->tx_prod = entry;
1036 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1037 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1038 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1039 if (bp->flags & B44_FLAG_REORDER_BUG)
1040 br32(bp, B44_DMATX_PTR);
1042 if (TX_BUFFS_AVAIL(bp) < 1)
1043 netif_stop_queue(dev);
1045 dev->trans_start = jiffies;
1048 spin_unlock_irq(&bp->lock);
1053 rc = NETDEV_TX_BUSY;
/* netdev change_mtu hook: validate against B44_MIN_MTU/B44_MAX_MTU; if the
 * interface is up, re-initialize the hardware under bp->lock so the new
 * RX/TX max-length registers take effect, then re-enable interrupts.
 * NOTE(review): the mtu assignment and return statements are in lines
 * elided from this sample. */
1057 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1059 struct b44 *bp = netdev_priv(dev);
1061 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1064 if (!netif_running(dev)) {
1065 /* We'll just catch it later when the
1072 spin_lock_irq(&bp->lock);
1076 b44_init_hw(bp, B44_FULL_RESET);
1077 spin_unlock_irq(&bp->lock);
1079 b44_enable_ints(bp);
1084 /* Free up pending packets in all rx/tx rings.
1086 * The chip has been shut down and the driver detached from
1087 * the networking, so no interrupts or new tx packets will
1088 * end up in the driver. bp->lock is not held and we are not
1089 * in an interrupt context and thus may sleep.
/* Release every pending RX and TX buffer: unmap its DMA mapping and free
 * the skb, skipping empty slots.  Per the comment above, called only after
 * the chip is halted and detached, so sleeping frees are safe. */
1091 static void b44_free_rings(struct b44 *bp)
1093 struct ring_info *rp;
1096 for (i = 0; i < B44_RX_RING_SIZE; i++) {
1097 rp = &bp->rx_buffers[i];
1099 if (rp->skb == NULL)
1101 pci_unmap_single(bp->pdev,
1102 pci_unmap_addr(rp, mapping),
1104 PCI_DMA_FROMDEVICE);
1105 dev_kfree_skb_any(rp->skb);
1109 /* XXX needs changes once NETIF_F_SG is set... */
1110 for (i = 0; i < B44_TX_RING_SIZE; i++) {
1111 rp = &bp->tx_buffers[i];
1113 if (rp->skb == NULL)
1115 pci_unmap_single(bp->pdev,
1116 pci_unmap_addr(rp, mapping),
1119 dev_kfree_skb_any(rp->skb);
1124 /* Initialize tx/rx rings for packet processing.
1126 * The chip has been shut down and the driver detached from
1127 * the networking, so no interrupts or new tx packets will
1128 * end up in the driver.
/* Reset both descriptor rings to a clean state: zero them, push the zeroed
 * contents to the device for streaming-mapped (*_RING_HACK) rings, then
 * pre-fill the RX ring with bp->rx_pending fresh buffers.  NOTE(review):
 * the b44_free_rings() call that precedes the memsets is in an elided line. */
1130 static void b44_init_rings(struct b44 *bp)
1136 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1137 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1139 if (bp->flags & B44_FLAG_RX_RING_HACK)
1140 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
1142 PCI_DMA_BIDIRECTIONAL);
1144 if (bp->flags & B44_FLAG_TX_RING_HACK)
1145 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
1149 for (i = 0; i < bp->rx_pending; i++) {
1150 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1156 * Must not be invoked with interrupt sources disabled and
1157 * the hardware shutdown down.
/* Tear down ring memory: free the buffer bookkeeping arrays, then release
 * each descriptor ring - dma_unmap_single + kfree for streaming-mapped
 * (*_RING_HACK) rings, pci_free_consistent otherwise.  NOTE(review): the
 * kfree() calls for the HACK case are in lines elided from this sample. */
1159 static void b44_free_consistent(struct b44 *bp)
1161 kfree(bp->rx_buffers);
1162 bp->rx_buffers = NULL;
1163 kfree(bp->tx_buffers);
1164 bp->tx_buffers = NULL;
1166 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1167 dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
1172 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1173 bp->rx_ring, bp->rx_ring_dma);
1175 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1178 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1179 dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
1184 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1185 bp->tx_ring, bp->tx_ring_dma);
1187 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1192 * Must not be invoked with interrupt sources disabled and
1193 * the hardware shutdown down. Can sleep.
/* Allocate ring memory: kzalloc'd bookkeeping arrays plus one coherent
 * DMA block per ring.  If pci_alloc_consistent fails (it may insist on
 * GFP_DMA), fall back to kzalloc + dma_map_single and mark the ring with
 * the *_RING_HACK flag so later code syncs it manually; the fallback is
 * rejected if the mapping lands above the chip's DMA_30BIT_MASK limit.
 * Returns 0 on success, -ENOMEM after b44_free_consistent cleanup. */
1195 static int b44_alloc_consistent(struct b44 *bp)
1199 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1200 bp->rx_buffers = kzalloc(size, GFP_KERNEL);
1201 if (!bp->rx_buffers)
1204 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1205 bp->tx_buffers = kzalloc(size, GFP_KERNEL);
1206 if (!bp->tx_buffers)
1209 size = DMA_TABLE_BYTES;
1210 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1212 /* Allocation may have failed due to pci_alloc_consistent
1213 insisting on use of GFP_DMA, which is more restrictive
1214 than necessary... */
1215 struct dma_desc *rx_ring;
1216 dma_addr_t rx_ring_dma;
1218 rx_ring = kzalloc(size, GFP_KERNEL);
1222 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1226 if (dma_mapping_error(rx_ring_dma) ||
1227 rx_ring_dma + size > DMA_30BIT_MASK) {
1232 bp->rx_ring = rx_ring;
1233 bp->rx_ring_dma = rx_ring_dma;
1234 bp->flags |= B44_FLAG_RX_RING_HACK;
1237 bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1239 /* Allocation may have failed due to pci_alloc_consistent
1240 insisting on use of GFP_DMA, which is more restrictive
1241 than necessary... */
1242 struct dma_desc *tx_ring;
1243 dma_addr_t tx_ring_dma;
1245 tx_ring = kzalloc(size, GFP_KERNEL);
1249 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1253 if (dma_mapping_error(tx_ring_dma) ||
1254 tx_ring_dma + size > DMA_30BIT_MASK) {
1259 bp->tx_ring = tx_ring;
1260 bp->tx_ring_dma = tx_ring_dma;
1261 bp->flags |= B44_FLAG_TX_RING_HACK;
1267 b44_free_consistent(bp);
1271 /* bp->lock is held. */
/* Zero the hardware MIB counters: enable clear-on-read mode, then read
 * through both counter ranges (the discarding br32() reads are in elided
 * lines).  Caller holds bp->lock. */
1272 static void b44_clear_stats(struct b44 *bp)
1276 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1277 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1279 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1283 /* bp->lock is held. */
/* Full chip reset (bp->lock held): if the core is up, quiesce the MAC and
 * both DMA engines and reset ring indices; set up the SSB PCI core, reset
 * the ethernet core, clear the MIB stats, make the MDIO interface usable,
 * and select the external or internal PHY based on DEVCTRL_IPP (powering
 * up the internal PHY by clearing DEVCTRL_EPR when needed). */
1284 static void b44_chip_reset(struct b44 *bp)
1286 if (ssb_is_core_up(bp)) {
1287 bw32(bp, B44_RCV_LAZY, 0);
1288 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1289 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1290 bw32(bp, B44_DMATX_CTRL, 0);
1291 bp->tx_prod = bp->tx_cons = 0;
1292 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1293 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1296 bw32(bp, B44_DMARX_CTRL, 0);
1297 bp->rx_prod = bp->rx_cons = 0;
1299 ssb_pci_setup(bp, (bp->core_unit == 0 ?
1306 b44_clear_stats(bp);
1308 /* Make PHY accessible. */
1309 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1310 (0x0d & MDIO_CTRL_MAXF_MASK)));
1311 br32(bp, B44_MDIO_CTRL);
1313 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1314 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1315 br32(bp, B44_ENET_CTRL);
1316 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1318 u32 val = br32(bp, B44_DEVCTRL);
1320 if (val & DEVCTRL_EPR) {
1321 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1322 br32(bp, B44_DEVCTRL);
1325 bp->flags |= B44_FLAG_INTERNAL_PHY;
1329 /* bp->lock is held. */
/* Stop the device (bp->lock held): mask interrupts first; the remaining
 * shutdown steps (link-down report and chip reset) are in elided lines. */
1330 static void b44_halt(struct b44 *bp)
1332 b44_disable_ints(bp);
1336 /* bp->lock is held. */
/* Program the device's unicast filter (bp->lock held): disable the CAM,
 * and unless the interface is promiscuous, write dev_addr into CAM slot 0
 * and re-enable CAM filtering. */
1337 static void __b44_set_mac_addr(struct b44 *bp)
1339 bw32(bp, B44_CAM_CTRL, 0);
1340 if (!(bp->dev->flags & IFF_PROMISC)) {
1343 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1344 val = br32(bp, B44_CAM_CTRL);
1345 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
/* netdev set_mac_address hook: reject while the interface is running and
 * reject invalid ethernet addresses (the error returns are in elided
 * lines), then store the address and program it into the CAM under lock. */
1349 static int b44_set_mac_addr(struct net_device *dev, void *p)
1351 struct b44 *bp = netdev_priv(dev);
1352 struct sockaddr *addr = p;
1354 if (netif_running(dev))
1357 if (!is_valid_ether_addr(addr->sa_data))
1360 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1362 spin_lock_irq(&bp->lock);
1363 __b44_set_mac_addr(bp);
1364 spin_unlock_irq(&bp->lock);
1369 /* Called at device open time to get the chip ready for
1370 * packet processing. Invoked with bp->lock held.
1372 static void __b44_set_rx_mode(struct net_device *);
/* Bring the chip to an operational state (bp->lock held).  reset_kind
 * selects how much to redo: B44_FULL_RESET also sets up the PHY,
 * B44_PARTIAL_RESET only restarts the RX DMA engine (rings preserved,
 * e.g. resuming from WOL).  Programs MAC control, lazy-RX, the RX mode /
 * MAC address, max frame lengths, TX watermark, DMA engines and ring
 * addresses, then enables the MAC. */
1373 static void b44_init_hw(struct b44 *bp, int reset_kind)
1378 if (reset_kind == B44_FULL_RESET) {
1383 /* Enable CRC32, set proper LED modes and power on PHY */
1384 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1385 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1387 /* This sets the MAC address too. */
1388 __b44_set_rx_mode(bp->dev);
1390 /* MTU + eth header + possible VLAN tag + struct rx_header */
1391 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1392 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1394 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1395 if (reset_kind == B44_PARTIAL_RESET) {
1396 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1397 (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1399 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1400 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1401 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1402 (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1403 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1405 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1406 bp->rx_prod = bp->rx_pending;
1408 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1411 val = br32(bp, B44_ENET_CTRL);
1412 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
/* netdev open hook: allocate ring memory, init rings and hardware, request
 * the (shared) IRQ - freeing everything again on failure - then start the
 * 1 Hz PHY/stats timer, enable interrupts and start the TX queue. */
1415 static int b44_open(struct net_device *dev)
1417 struct b44 *bp = netdev_priv(dev);
1420 err = b44_alloc_consistent(bp);
1425 b44_init_hw(bp, B44_FULL_RESET);
1429 err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1430 if (unlikely(err < 0)) {
1433 b44_free_consistent(bp);
1437 init_timer(&bp->timer);
1438 bp->timer.expires = jiffies + HZ;
1439 bp->timer.data = (unsigned long) bp;
1440 bp->timer.function = b44_timer;
1441 add_timer(&bp->timer);
1443 b44_enable_ints(bp);
1444 netif_start_queue(dev);
/* Debug helper: dump PCI status and assorted chip registers to the log.
 * Deliberately non-static (see commented qualifier) so it can be called
 * from a debugger.  NOTE(review): most register dumps are in elided lines. */
1450 /*static*/ void b44_dump_state(struct b44 *bp)
1452 u32 val32, val32_2, val32_3, val32_4, val32_5;
1455 pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
1456 printk("DEBUG: PCI status [%04x] \n", val16);
1461 #ifdef CONFIG_NET_POLL_CONTROLLER
1463 * Polling receive - used by netconsole and other diagnostic tools
1464 * to allow network i/o with interrupts disabled.
1466 static void b44_poll_controller(struct net_device *dev)
1468 disable_irq(dev->irq);
1469 b44_interrupt(dev->irq, dev);
1470 enable_irq(dev->irq);
/*
 * bwfilter_table - copy a pattern/mask buffer into the chip's pattern-match
 * filter RAM, one 32-bit word at a time via the FILT_ADDR/FILT_DATA pair.
 * NOTE(review): 'pp' is accessed through a u32*, so callers must supply a
 * suitably aligned buffer (the kmalloc'd patterns qualify).
 */
1474 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1477 u32 *pattern = (u32 *) pp;
1479 for (i = 0; i < bytes; i += sizeof(u32)) {
1480 bw32(bp, B44_FILT_ADDR, table_offset + i);
1481 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
/*
 * b44_magic_pattern - build a Wake-on-LAN "magic packet" match pattern at
 * 'offset' inside ppattern: the sync bytes (0xff, 'magicsync' of them —
 * declaration elided here) followed by repetitions of the MAC address,
 * setting the corresponding bits in pmask.  Returns the pattern length
 * (return statement elided from this listing).
 */
1485 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1488 int k, j, len = offset;
1489 int ethaddr_bytes = ETH_ALEN;
1491 memset(ppattern + offset, 0xff, magicsync);
1492 for (j = 0; j < magicsync; j++)
1493 set_bit(len++, (unsigned long *) pmask);
1495 for (j = 0; j < B44_MAX_PATTERNS; j++) {
/* Clamp the last repetition if it would run past the pattern buffer. */
1496 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1497 ethaddr_bytes = ETH_ALEN;
1499 ethaddr_bytes = B44_PATTERN_SIZE - len;
1500 if (ethaddr_bytes <=0)
1502 for (k = 0; k< ethaddr_bytes; k++) {
1503 ppattern[offset + magicsync +
1504 (j * ETH_ALEN) + k] = macaddr[k];
1506 set_bit(len, (unsigned long *) pmask);
/*
 * b44_setup_pseudo_magicp - install three magic-packet patterns in the WOL
 * filter: pattern 0 matches a magic packet inside IPv4/UDP, pattern 1 a raw
 * Ethernet II magic packet, pattern 2 IPv6/UDP.  Then programs the pattern
 * lengths and enables pattern-match wakeup via DEVCTRL_PFE.
 */
1512 /* Setup magic packet patterns in the b44 WOL
1513 * pattern matching filter.
1515 static void b44_setup_pseudo_magicp(struct b44 *bp)
1519 int plen0, plen1, plen2;
1521 u8 pwol_mask[B44_PMASK_SIZE];
1523 pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1524 if (!pwol_pattern) {
1525 printk(KERN_ERR PFX "Memory not available for WOL\n");
1529 /* Ipv4 magic packet pattern - pattern 0.*/
1530 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1531 memset(pwol_mask, 0, B44_PMASK_SIZE);
1532 plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1533 B44_ETHIPV4UDP_HLEN);
1535 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1536 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1538 /* Raw ethernet II magic packet pattern - pattern 1 */
1539 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1540 memset(pwol_mask, 0, B44_PMASK_SIZE);
/* Offset argument for the raw-ethernet pattern is on an elided line. */
1541 plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1544 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1545 B44_PATTERN_BASE + B44_PATTERN_SIZE);
1546 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1547 B44_PMASK_BASE + B44_PMASK_SIZE);
1549 /* Ipv6 magic packet pattern - pattern 2 */
1550 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1551 memset(pwol_mask, 0, B44_PMASK_SIZE);
1552 plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1553 B44_ETHIPV6UDP_HLEN);
1555 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1556 B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1557 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1558 B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1560 kfree(pwol_pattern);
1562 /* set these pattern's lengths: one less than each real length */
/* Three 8-bit length fields packed into WKUP_LEN, plus the enable bits. */
1563 val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1564 bw32(bp, B44_WKUP_LEN, val);
1566 /* enable wakeup pattern matching */
1567 val = br32(bp, B44_DEVCTRL);
1568 bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
/*
 * b44_setup_wol - arm the chip for Wake-on-LAN before power-down.
 * B0-and-later silicon supports native magic-packet matching (MPM): the
 * station address is programmed into ADDR_LO/ADDR_HI and MPM+PFE enabled.
 * Older silicon falls back to the pseudo-magic pattern filter.  Finally the
 * SSB core is put in power-save and PME is enabled in PCI config space.
 */
1572 static void b44_setup_wol(struct b44 *bp)
1577 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1579 if (bp->flags & B44_FLAG_B0_ANDLATER) {
1581 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
/* MAC bytes 2..5 in ADDR_LO, bytes 0..1 in ADDR_HI (big-endian order). */
1583 val = bp->dev->dev_addr[2] << 24 |
1584 bp->dev->dev_addr[3] << 16 |
1585 bp->dev->dev_addr[4] << 8 |
1586 bp->dev->dev_addr[5];
1587 bw32(bp, B44_ADDR_LO, val);
1589 val = bp->dev->dev_addr[0] << 8 |
1590 bp->dev->dev_addr[1];
1591 bw32(bp, B44_ADDR_HI, val);
1593 val = br32(bp, B44_DEVCTRL);
1594 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
/* Pre-B0 parts: emulate magic-packet wake with the pattern filter. */
1597 b44_setup_pseudo_magicp(bp);
/* Put the SSB core into power-save ... */
1600 val = br32(bp, B44_SBTMSLOW);
1601 bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
/* ... and set PME-enable in the bridge's power-management register. */
1603 pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
1604 pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
/*
 * b44_close - net_device stop hook: quiesce TX/polling, stop the link
 * timer, halt the chip under bp->lock (halt statements elided), release
 * the IRQ and free the rings.  If WOL is enabled the hardware is instead
 * left partially initialised so it can still wake the system.
 */
1608 static int b44_close(struct net_device *dev)
1610 struct b44 *bp = netdev_priv(dev);
1612 netif_stop_queue(dev);
1614 netif_poll_disable(dev);
1616 del_timer_sync(&bp->timer);
1618 spin_lock_irq(&bp->lock);
1625 netif_carrier_off(dev);
1627 spin_unlock_irq(&bp->lock);
1629 free_irq(dev->irq, dev);
1631 netif_poll_enable(dev);
1633 if (bp->flags & B44_FLAG_WOL_ENABLE) {
/* Keep just enough of the chip alive for wake-on-LAN. */
1634 b44_init_hw(bp, B44_PARTIAL_RESET);
1638 b44_free_consistent(bp);
/*
 * b44_get_stats - translate the cached hardware MIB counters
 * (bp->hw_stats, refreshed elsewhere by b44_stats_update) into the
 * generic net_device_stats structure.
 */
1643 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1645 struct b44 *bp = netdev_priv(dev);
1646 struct net_device_stats *nstat = &bp->stats;
1647 struct b44_hw_stats *hwstat = &bp->hw_stats;
1649 /* Convert HW stats into netdevice stats. */
1650 nstat->rx_packets = hwstat->rx_pkts;
1651 nstat->tx_packets = hwstat->tx_pkts;
1652 nstat->rx_bytes = hwstat->rx_octets;
1653 nstat->tx_bytes = hwstat->tx_octets;
1654 nstat->tx_errors = (hwstat->tx_jabber_pkts +
1655 hwstat->tx_oversize_pkts +
1656 hwstat->tx_underruns +
1657 hwstat->tx_excessive_cols +
1658 hwstat->tx_late_cols);
/* NOTE(review): multicast is taken from the TX multicast counter here —
 * that is what the visible code does; confirm against chip docs before
 * changing. */
1659 nstat->multicast = hwstat->tx_multicast_pkts;
1660 nstat->collisions = hwstat->tx_total_cols;
1662 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1663 hwstat->rx_undersize);
1664 nstat->rx_over_errors = hwstat->rx_missed_pkts;
1665 nstat->rx_frame_errors = hwstat->rx_align_errs;
1666 nstat->rx_crc_errors = hwstat->rx_crc_errs;
1667 nstat->rx_errors = (hwstat->rx_jabber_pkts +
1668 hwstat->rx_oversize_pkts +
1669 hwstat->rx_missed_pkts +
1670 hwstat->rx_crc_align_errs +
1671 hwstat->rx_undersize +
1672 hwstat->rx_crc_errs +
1673 hwstat->rx_align_errs +
1674 hwstat->rx_symbol_errs);
1676 nstat->tx_aborted_errors = hwstat->tx_underruns;
1678 /* Carrier lost counter seems to be broken for some devices */
1679 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
/*
 * __b44_load_mcast - load up to B44_MCAST_TABLE_SIZE multicast addresses
 * into CAM slots starting at index 1 (slot 0 holds the station address).
 * Return value (number of entries written) is on an elided line.
 */
1685 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1687 struct dev_mc_list *mclist;
1690 num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1691 mclist = dev->mc_list;
1692 for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1693 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
/*
 * __b44_set_rx_mode - program RX filtering (promisc/allmulti/mcast CAM)
 * and the station MAC address.  Caller must hold bp->lock; the lockless
 * variant is wrapped by b44_set_rx_mode() below.
 */
1698 static void __b44_set_rx_mode(struct net_device *dev)
1700 struct b44 *bp = netdev_priv(dev);
1703 val = br32(bp, B44_RXCONFIG);
1704 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1705 if (dev->flags & IFF_PROMISC) {
1706 val |= RXCONFIG_PROMISC;
1707 bw32(bp, B44_RXCONFIG, val);
1709 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1712 __b44_set_mac_addr(bp);
1714 if ((dev->flags & IFF_ALLMULTI) ||
1715 (dev->mc_count > B44_MCAST_TABLE_SIZE))
1716 val |= RXCONFIG_ALLMULTI;
1718 i = __b44_load_mcast(bp, dev);
/* Zero-fill the remaining CAM slots (loop header elided). */
1721 __b44_cam_write(bp, zero, i);
1723 bw32(bp, B44_RXCONFIG, val);
1724 val = br32(bp, B44_CAM_CTRL);
1725 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
/* b44_set_rx_mode - locked wrapper used as dev->set_multicast_list. */
1729 static void b44_set_rx_mode(struct net_device *dev)
1731 struct b44 *bp = netdev_priv(dev);
1733 spin_lock_irq(&bp->lock);
1734 __b44_set_rx_mode(dev);
1735 spin_unlock_irq(&bp->lock);
/* ethtool msglevel accessors: get/set the netif_msg bitmask. */
1738 static u32 b44_get_msglevel(struct net_device *dev)
1740 struct b44 *bp = netdev_priv(dev);
1741 return bp->msg_enable;
1744 static void b44_set_msglevel(struct net_device *dev, u32 value)
1746 struct b44 *bp = netdev_priv(dev);
1747 bp->msg_enable = value;
/* b44_get_drvinfo - report driver name, version and PCI bus location. */
1750 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1752 struct b44 *bp = netdev_priv(dev);
1753 struct pci_dev *pci_dev = bp->pdev;
1755 strcpy (info->driver, DRV_MODULE_NAME);
1756 strcpy (info->version, DRV_MODULE_VERSION);
1757 strcpy (info->bus_info, pci_name(pci_dev));
/*
 * b44_nway_reset - ethtool nway_reset hook: restart autonegotiation if it
 * is enabled in BMCR.  NOTE(review): BMCR is read twice back-to-back in
 * the visible code — presumably to flush a latched value; confirm against
 * the PHY datasheet before "simplifying".
 */
1760 static int b44_nway_reset(struct net_device *dev)
1762 struct b44 *bp = netdev_priv(dev);
1766 spin_lock_irq(&bp->lock);
1767 b44_readphy(bp, MII_BMCR, &bmcr);
1768 b44_readphy(bp, MII_BMCR, &bmcr);
1770 if (bmcr & BMCR_ANENABLE) {
1771 b44_writephy(bp, MII_BMCR,
1772 bmcr | BMCR_ANRESTART);
1775 spin_unlock_irq(&bp->lock);
/*
 * b44_get_settings - ethtool get_settings hook: report supported and
 * advertised modes, current speed/duplex/autoneg, all derived from the
 * driver's bp->flags state.
 */
1780 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1782 struct b44 *bp = netdev_priv(dev);
1784 cmd->supported = (SUPPORTED_Autoneg);
1785 cmd->supported |= (SUPPORTED_100baseT_Half |
1786 SUPPORTED_100baseT_Full |
1787 SUPPORTED_10baseT_Half |
1788 SUPPORTED_10baseT_Full |
1791 cmd->advertising = 0;
1792 if (bp->flags & B44_FLAG_ADV_10HALF)
1793 cmd->advertising |= ADVERTISED_10baseT_Half;
1794 if (bp->flags & B44_FLAG_ADV_10FULL)
1795 cmd->advertising |= ADVERTISED_10baseT_Full;
1796 if (bp->flags & B44_FLAG_ADV_100HALF)
1797 cmd->advertising |= ADVERTISED_100baseT_Half;
1798 if (bp->flags & B44_FLAG_ADV_100FULL)
1799 cmd->advertising |= ADVERTISED_100baseT_Full;
1800 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1801 cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1802 SPEED_100 : SPEED_10;
1803 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1804 DUPLEX_FULL : DUPLEX_HALF;
1806 cmd->phy_address = bp->phy_addr;
1807 cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1808 XCVR_INTERNAL : XCVR_EXTERNAL;
1809 cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1810 AUTONEG_DISABLE : AUTONEG_ENABLE;
1811 if (cmd->autoneg == AUTONEG_ENABLE)
1812 cmd->advertising |= ADVERTISED_Autoneg;
/* If the interface is down, reported speed/duplex are meaningless
 * (handling on elided lines). */
1813 if (!netif_running(dev)){
/*
 * b44_set_settings - ethtool set_settings hook.  Validates the request
 * (no gigabit modes; forced mode must be 10/100 half/full), then under
 * bp->lock translates it into bp->flags: autoneg clears the force bits and
 * sets the advertised-mode flags (all of them if the caller advertised
 * nothing), while forced mode sets FORCE_LINK plus speed/duplex flags.
 */
1822 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1824 struct b44 *bp = netdev_priv(dev);
1826 /* We do not support gigabit. */
1827 if (cmd->autoneg == AUTONEG_ENABLE) {
1828 if (cmd->advertising &
1829 (ADVERTISED_1000baseT_Half |
1830 ADVERTISED_1000baseT_Full))
1832 } else if ((cmd->speed != SPEED_100 &&
1833 cmd->speed != SPEED_10) ||
1834 (cmd->duplex != DUPLEX_HALF &&
1835 cmd->duplex != DUPLEX_FULL)) {
1839 spin_lock_irq(&bp->lock);
1841 if (cmd->autoneg == AUTONEG_ENABLE) {
1842 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1843 B44_FLAG_100_BASE_T |
1844 B44_FLAG_FULL_DUPLEX |
1845 B44_FLAG_ADV_10HALF |
1846 B44_FLAG_ADV_10FULL |
1847 B44_FLAG_ADV_100HALF |
1848 B44_FLAG_ADV_100FULL);
1849 if (cmd->advertising == 0) {
/* Nothing advertised: advertise everything we support. */
1850 bp->flags |= (B44_FLAG_ADV_10HALF |
1851 B44_FLAG_ADV_10FULL |
1852 B44_FLAG_ADV_100HALF |
1853 B44_FLAG_ADV_100FULL);
1855 if (cmd->advertising & ADVERTISED_10baseT_Half)
1856 bp->flags |= B44_FLAG_ADV_10HALF;
1857 if (cmd->advertising & ADVERTISED_10baseT_Full)
1858 bp->flags |= B44_FLAG_ADV_10FULL;
1859 if (cmd->advertising & ADVERTISED_100baseT_Half)
1860 bp->flags |= B44_FLAG_ADV_100HALF;
1861 if (cmd->advertising & ADVERTISED_100baseT_Full)
1862 bp->flags |= B44_FLAG_ADV_100FULL;
/* Forced-link branch (else on an elided line). */
1865 bp->flags |= B44_FLAG_FORCE_LINK;
1866 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1867 if (cmd->speed == SPEED_100)
1868 bp->flags |= B44_FLAG_100_BASE_T;
1869 if (cmd->duplex == DUPLEX_FULL)
1870 bp->flags |= B44_FLAG_FULL_DUPLEX;
/* Re-setup the PHY if the interface is up (call elided). */
1873 if (netif_running(dev))
1876 spin_unlock_irq(&bp->lock);
/* b44_get_ringparam - report RX ring limits and current pending count. */
1881 static void b44_get_ringparam(struct net_device *dev,
1882 struct ethtool_ringparam *ering)
1884 struct b44 *bp = netdev_priv(dev);
1886 ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1887 ering->rx_pending = bp->rx_pending;
1889 /* XXX ethtool lacks a tx_max_pending, oops... */
/*
 * b44_set_ringparam - validate and apply new RX/TX pending counts, then
 * re-init the hardware under bp->lock (halt/free steps elided) and
 * re-enable interrupts once the lock is dropped.
 */
1892 static int b44_set_ringparam(struct net_device *dev,
1893 struct ethtool_ringparam *ering)
1895 struct b44 *bp = netdev_priv(dev);
1897 if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1898 (ering->rx_mini_pending != 0) ||
1899 (ering->rx_jumbo_pending != 0) ||
1900 (ering->tx_pending > B44_TX_RING_SIZE - 1))
1903 spin_lock_irq(&bp->lock);
1905 bp->rx_pending = ering->rx_pending;
1906 bp->tx_pending = ering->tx_pending;
1910 b44_init_hw(bp, B44_FULL_RESET);
1911 netif_wake_queue(bp->dev);
1912 spin_unlock_irq(&bp->lock);
1914 b44_enable_ints(bp);
/* b44_get_pauseparam - report pause autoneg and RX/TX pause flags. */
1919 static void b44_get_pauseparam(struct net_device *dev,
1920 struct ethtool_pauseparam *epause)
1922 struct b44 *bp = netdev_priv(dev);
1925 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1927 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1929 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
/*
 * b44_set_pauseparam - update pause flags under bp->lock.  With pause
 * autoneg enabled a full re-init is needed (restart path partly elided);
 * otherwise the flow-control registers are reprogrammed directly.
 */
1932 static int b44_set_pauseparam(struct net_device *dev,
1933 struct ethtool_pauseparam *epause)
1935 struct b44 *bp = netdev_priv(dev);
1937 spin_lock_irq(&bp->lock);
1938 if (epause->autoneg)
1939 bp->flags |= B44_FLAG_PAUSE_AUTO;
1941 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1942 if (epause->rx_pause)
1943 bp->flags |= B44_FLAG_RX_PAUSE;
1945 bp->flags &= ~B44_FLAG_RX_PAUSE;
1946 if (epause->tx_pause)
1947 bp->flags |= B44_FLAG_TX_PAUSE;
1949 bp->flags &= ~B44_FLAG_TX_PAUSE;
1950 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1953 b44_init_hw(bp, B44_FULL_RESET);
1955 __b44_set_flow_ctrl(bp, bp->flags);
1957 spin_unlock_irq(&bp->lock);
1959 b44_enable_ints(bp);
/* b44_get_strings - copy the statistics name table for ETH_SS_STATS. */
1964 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1968 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
/* b44_get_stats_count - number of entries in the ethtool stats array. */
1973 static int b44_get_stats_count(struct net_device *dev)
1975 return ARRAY_SIZE(b44_gstrings);
/*
 * b44_get_ethtool_stats - refresh the MIB counters under bp->lock and copy
 * them out.  NOTE(review): treats bp->hw_stats as a flat u32 array starting
 * at tx_good_octets — relies on the struct layout matching b44_gstrings
 * order (copy loop body elided).
 */
1978 static void b44_get_ethtool_stats(struct net_device *dev,
1979 struct ethtool_stats *stats, u64 *data)
1981 struct b44 *bp = netdev_priv(dev);
1982 u32 *val = &bp->hw_stats.tx_good_octets;
1985 spin_lock_irq(&bp->lock);
1987 b44_stats_update(bp);
1989 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1992 spin_unlock_irq(&bp->lock);
/* b44_get_wol - report magic-packet WOL support and current setting. */
1995 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1997 struct b44 *bp = netdev_priv(dev);
1999 wol->supported = WAKE_MAGIC;
2000 if (bp->flags & B44_FLAG_WOL_ENABLE)
2001 wol->wolopts = WAKE_MAGIC;
2004 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* b44_set_wol - record the WOL choice; hardware is armed at close/suspend. */
2007 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2009 struct b44 *bp = netdev_priv(dev);
2011 spin_lock_irq(&bp->lock);
2012 if (wol->wolopts & WAKE_MAGIC)
2013 bp->flags |= B44_FLAG_WOL_ENABLE;
2015 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2016 spin_unlock_irq(&bp->lock);
/* ethtool operations table wiring the handlers defined above. */
2021 static const struct ethtool_ops b44_ethtool_ops = {
2022 .get_drvinfo = b44_get_drvinfo,
2023 .get_settings = b44_get_settings,
2024 .set_settings = b44_set_settings,
2025 .nway_reset = b44_nway_reset,
2026 .get_link = ethtool_op_get_link,
2027 .get_wol = b44_get_wol,
2028 .set_wol = b44_set_wol,
2029 .get_ringparam = b44_get_ringparam,
2030 .set_ringparam = b44_set_ringparam,
2031 .get_pauseparam = b44_get_pauseparam,
2032 .set_pauseparam = b44_set_pauseparam,
2033 .get_msglevel = b44_get_msglevel,
2034 .set_msglevel = b44_set_msglevel,
2035 .get_strings = b44_get_strings,
2036 .get_stats_count = b44_get_stats_count,
2037 .get_ethtool_stats = b44_get_ethtool_stats,
2038 .get_perm_addr = ethtool_op_get_perm_addr,
/*
 * b44_ioctl - dev->do_ioctl hook: delegate MII ioctls to the generic MII
 * layer, serialised by bp->lock.  Rejected while the interface is down
 * (error return elided).
 */
2041 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2043 struct mii_ioctl_data *data = if_mii(ifr);
2044 struct b44 *bp = netdev_priv(dev);
2047 if (!netif_running(dev))
2050 spin_lock_irq(&bp->lock);
2051 err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2052 spin_unlock_irq(&bp->lock);
/*
 * b44_read_eeprom - copy 128 bytes of EEPROM (memory-mapped at register
 * offset 4096) into 'data' as little-endian 16-bit words.
 */
2057 /* Read 128-bytes of EEPROM. */
2058 static int b44_read_eeprom(struct b44 *bp, u8 *data)
2061 __le16 *ptr = (__le16 *) data;
2063 for (i = 0; i < 128; i += 2)
2064 ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
/*
 * b44_get_invariants - probe-time discovery: read the EEPROM, extract the
 * MAC address (note the byte-swapped pair ordering) and PHY address, and
 * derive per-chip flags such as B0-and-later.
 */
2069 static int __devinit b44_get_invariants(struct b44 *bp)
2074 err = b44_read_eeprom(bp, &eeprom[0]);
/* MAC address bytes are stored pair-swapped in the EEPROM image. */
2078 bp->dev->dev_addr[0] = eeprom[79];
2079 bp->dev->dev_addr[1] = eeprom[78];
2080 bp->dev->dev_addr[2] = eeprom[81];
2081 bp->dev->dev_addr[3] = eeprom[80];
2082 bp->dev->dev_addr[4] = eeprom[83];
2083 bp->dev->dev_addr[5] = eeprom[82];
2085 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
2086 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2090 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2092 bp->phy_addr = eeprom[90] & 0x1f;
2094 bp->imask = IMASK_DEF;
2096 bp->core_unit = ssb_core_unit(bp);
2097 bp->dma_offset = SB_PCI_DMA;
2099 /* XXX - really required?
2100 bp->flags |= B44_FLAG_BUGGY_TXPTR;
/* Core rev 7+ parts support native magic-packet WOL (see b44_setup_wol). */
2103 if (ssb_get_core_rev(bp) >= 7)
2104 bp->flags |= B44_FLAG_B0_ANDLATER;
/*
 * b44_init_one - PCI probe: enable the device, claim BAR0, configure the
 * 30-bit DMA masks this chip requires, allocate and wire up the
 * net_device, map registers, read invariants, and register the netdev.
 * Error unwinding labels (err_out_*) release resources in reverse order;
 * several labels and returns are on elided lines.
 */
2110 static int __devinit b44_init_one(struct pci_dev *pdev,
2111 const struct pci_device_id *ent)
2113 static int b44_version_printed = 0;
2114 unsigned long b44reg_base, b44reg_len;
2115 struct net_device *dev;
2119 if (b44_version_printed++ == 0)
2120 printk(KERN_INFO "%s", version);
2122 err = pci_enable_device(pdev);
2124 dev_err(&pdev->dev, "Cannot enable PCI device, "
2129 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2131 "Cannot find proper PCI device "
2132 "base address, aborting.\n");
2134 goto err_out_disable_pdev;
2137 err = pci_request_regions(pdev, DRV_MODULE_NAME);
2140 "Cannot obtain PCI resources, aborting.\n");
2141 goto err_out_disable_pdev;
2144 pci_set_master(pdev);
/* The 4400 can only DMA below 1GB, hence the 30-bit masks. */
2146 err = pci_set_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2148 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2149 goto err_out_free_res;
2152 err = pci_set_consistent_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2154 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2155 goto err_out_free_res;
2158 b44reg_base = pci_resource_start(pdev, 0);
2159 b44reg_len = pci_resource_len(pdev, 0);
2161 dev = alloc_etherdev(sizeof(*bp));
2163 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
2165 goto err_out_free_res;
2168 SET_MODULE_OWNER(dev);
2169 SET_NETDEV_DEV(dev,&pdev->dev);
2171 /* No interesting netdevice features in this card... */
2174 bp = netdev_priv(dev);
2178 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2180 spin_lock_init(&bp->lock);
2182 bp->regs = ioremap(b44reg_base, b44reg_len);
2183 if (bp->regs == 0UL) {
2184 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
2186 goto err_out_free_dev;
2189 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2190 bp->tx_pending = B44_DEF_TX_RING_PENDING;
/* Wire up the net_device operations. */
2192 dev->open = b44_open;
2193 dev->stop = b44_close;
2194 dev->hard_start_xmit = b44_start_xmit;
2195 dev->get_stats = b44_get_stats;
2196 dev->set_multicast_list = b44_set_rx_mode;
2197 dev->set_mac_address = b44_set_mac_addr;
2198 dev->do_ioctl = b44_ioctl;
2199 dev->tx_timeout = b44_tx_timeout;
2200 dev->poll = b44_poll;
2202 dev->watchdog_timeo = B44_TX_TIMEOUT;
2203 #ifdef CONFIG_NET_POLL_CONTROLLER
2204 dev->poll_controller = b44_poll_controller;
2206 dev->change_mtu = b44_change_mtu;
2207 dev->irq = pdev->irq;
2208 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2210 netif_carrier_off(dev);
2212 err = b44_get_invariants(bp);
2215 "Problem fetching invariants of chip, aborting.\n");
2216 goto err_out_iounmap;
2219 bp->mii_if.dev = dev;
2220 bp->mii_if.mdio_read = b44_mii_read;
2221 bp->mii_if.mdio_write = b44_mii_write;
2222 bp->mii_if.phy_id = bp->phy_addr;
2223 bp->mii_if.phy_id_mask = 0x1f;
2224 bp->mii_if.reg_num_mask = 0x1f;
2226 /* By default, advertise all speed/duplex settings. */
2227 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2228 B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2230 /* By default, auto-negotiate PAUSE. */
2231 bp->flags |= B44_FLAG_PAUSE_AUTO;
2233 err = register_netdev(dev);
2235 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2236 goto err_out_iounmap;
2239 pci_set_drvdata(pdev, dev);
2241 pci_save_state(bp->pdev);
2243 /* Chip reset provides power to the b44 MAC & PCI cores, which
2244 * is necessary for MAC register access.
2248 printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2249 for (i = 0; i < 6; i++)
2250 printk("%2.2x%c", dev->dev_addr[i],
2251 i == 5 ? '\n' : ':');
/* Error unwinding (some labels are on elided lines). */
2262 pci_release_regions(pdev);
2264 err_out_disable_pdev:
2265 pci_disable_device(pdev);
2266 pci_set_drvdata(pdev, NULL);
/*
 * b44_remove_one - PCI remove: unregister the netdev and release PCI
 * resources (iounmap/free_netdev calls are on elided lines).
 */
2270 static void __devexit b44_remove_one(struct pci_dev *pdev)
2272 struct net_device *dev = pci_get_drvdata(pdev);
2273 struct b44 *bp = netdev_priv(dev);
2275 unregister_netdev(dev);
2278 pci_release_regions(pdev);
2279 pci_disable_device(pdev);
2280 pci_set_drvdata(pdev, NULL);
/*
 * b44_suspend - PCI suspend: if the interface is up, stop the timer, halt
 * the chip under bp->lock (halt statements elided), detach the netdev and
 * free the IRQ.  With WOL enabled the chip is re-armed via a partial init
 * before the device is powered down.
 */
2283 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2285 struct net_device *dev = pci_get_drvdata(pdev);
2286 struct b44 *bp = netdev_priv(dev);
2288 if (!netif_running(dev))
2291 del_timer_sync(&bp->timer);
2293 spin_lock_irq(&bp->lock);
2296 netif_carrier_off(bp->dev);
2297 netif_device_detach(bp->dev);
2300 spin_unlock_irq(&bp->lock);
2302 free_irq(dev->irq, dev);
2303 if (bp->flags & B44_FLAG_WOL_ENABLE) {
2304 b44_init_hw(bp, B44_PARTIAL_RESET);
2307 pci_disable_device(pdev);
/*
 * b44_resume - PCI resume: restore config space, re-enable the device and
 * bus mastering, and — if the interface was up — reacquire the IRQ, fully
 * re-initialise the hardware under bp->lock, reattach the netdev and
 * restart the link timer.
 */
2311 static int b44_resume(struct pci_dev *pdev)
2313 struct net_device *dev = pci_get_drvdata(pdev);
2314 struct b44 *bp = netdev_priv(dev);
2317 pci_restore_state(pdev);
2318 rc = pci_enable_device(pdev);
2320 printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
2325 pci_set_master(pdev);
2327 if (!netif_running(dev))
2330 rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2332 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2333 pci_disable_device(pdev);
2337 spin_lock_irq(&bp->lock);
2340 b44_init_hw(bp, B44_FULL_RESET);
2341 netif_device_attach(bp->dev);
2342 spin_unlock_irq(&bp->lock);
2344 b44_enable_ints(bp);
2345 netif_wake_queue(dev);
/* Kick the link timer immediately so carrier state is re-evaluated. */
2347 mod_timer(&bp->timer, jiffies + 1);
/* PCI driver glue: probe/remove/suspend/resume entry points. */
2352 static struct pci_driver b44_driver = {
2353 .name = DRV_MODULE_NAME,
2354 .id_table = b44_pci_tbl,
2355 .probe = b44_init_one,
2356 .remove = __devexit_p(b44_remove_one),
2357 .suspend = b44_suspend,
2358 .resume = b44_resume,
/*
 * b44_init - module init: compute the cache-line-derived alignment mask
 * and sync size used when syncing DMA descriptors, then register with the
 * PCI core.
 */
2361 static int __init b44_init(void)
2363 unsigned int dma_desc_align_size = dma_get_cache_alignment();
2365 /* Setup paramaters for syncing RX/TX DMA descriptors */
2366 dma_desc_align_mask = ~(dma_desc_align_size - 1);
2367 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2369 return pci_register_driver(&b44_driver);
/* b44_cleanup - module exit: unregister the PCI driver. */
2372 static void __exit b44_cleanup(void)
2374 pci_unregister_driver(&b44_driver);
2377 module_init(b44_init);
2378 module_exit(b44_cleanup);