/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/dma-mapping.h>
#include <linux/skbuff.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/checksum.h>
#include <linux/inet_lro.h>

#include <asm/firmware.h>
#include <asm/pasemi_dma.h>

#include "pasemi_mac.h"

/* We have our own align, since ppc64 in general has it at 0 because
 * of design flaws in some of the server bridge chips. However, for
 * PWRficient doing the unaligned copies is more expensive than doing
 * unaligned DMA, so make sure the data is aligned instead.
 */
#define LOCAL_SKB_ALIGN	2

/* Must be a power of two */
#define RX_RING_SIZE 2048
#define TX_RING_SIZE 4096
#define CS_RING_SIZE (TX_RING_SIZE*2)

#define LRO_MAX_AGGR 64

#define PE_MAX_MTU	9000
#define PE_DEF_MTU	ETH_DATA_LEN

#define DEFAULT_MSG_ENABLE	  \
	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_TIMER | \
	 NETIF_MSG_IFDOWN | NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define TX_DESC(tx, num)	((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)])
#define TX_DESC_INFO(tx, num)	((tx)->ring_info[(num) & (TX_RING_SIZE-1)])
#define RX_DESC(rx, num)	((rx)->chan.ring_virt[(num) & (RX_RING_SIZE-1)])
#define RX_DESC_INFO(rx, num)	((rx)->ring_info[(num) & (RX_RING_SIZE-1)])
#define RX_BUFF(rx, num)	((rx)->buffers[(num) & (RX_RING_SIZE-1)])
#define CS_DESC(cs, num)	((cs)->chan.ring_virt[(num) & (CS_RING_SIZE-1)])

#define RING_USED(ring)		(((ring)->next_to_fill - (ring)->next_to_clean) \
				 & ((ring)->size - 1))
#define RING_AVAIL(ring)	((ring->size) - RING_USED(ring))
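
/* All ring sizes are powers of two, so the accessor macros above can mask
 * the index with (SIZE-1) instead of taking a modulo, and RING_USED() stays
 * correct across index wraparound thanks to unsigned arithmetic. For example,
 * with TX_RING_SIZE == 4096, TX_DESC(tx, 4098) refers to ring slot 2.
 */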

MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");

static int debug = -1;	/* -1 == use DEFAULT_MSG_ENABLE as value */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value");
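
/* Whether DMA descriptors should request IOMMU translation: always when the
 * config option forces it, otherwise only when firmware runs us in a
 * translated (LPAR) environment.
 */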
static int translation_enabled(void)
#if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
	return firmware_has_feature(FW_FEATURE_LPAR);

static void write_iob_reg(unsigned int reg, unsigned int val)
	pasemi_write_iob_reg(reg, val);

static unsigned int read_mac_reg(const struct pasemi_mac *mac, unsigned int reg)
	return pasemi_read_mac_reg(mac->dma_if, reg);

static void write_mac_reg(const struct pasemi_mac *mac, unsigned int reg,
	pasemi_write_mac_reg(mac->dma_if, reg, val);

static unsigned int read_dma_reg(unsigned int reg)
	return pasemi_read_dma_reg(reg);

static void write_dma_reg(unsigned int reg, unsigned int val)
	pasemi_write_dma_reg(reg, val);

static struct pasemi_mac_rxring *rx_ring(const struct pasemi_mac *mac)

static struct pasemi_mac_txring *tx_ring(const struct pasemi_mac *mac)

static inline void prefetch_skb(const struct sk_buff *skb)

static int mac_to_intf(struct pasemi_mac *mac)
	struct pci_dev *pdev = mac->pdev;
	int nintf, off, i, j;
	int devfn = pdev->devfn;

	tmp = read_dma_reg(PAS_DMA_CAP_IFI);
	nintf = (tmp & PAS_DMA_CAP_IFI_NIN_M) >> PAS_DMA_CAP_IFI_NIN_S;
	off = (tmp & PAS_DMA_CAP_IFI_IOFF_M) >> PAS_DMA_CAP_IFI_IOFF_S;

	/* IOFF contains the offset to the registers containing the
	 * DMA interface-to-MAC-pci-id mappings, and NIN contains number
	 * of total interfaces. Each register contains 4 devfns.
	 * Just do a linear search until we find the devfn of the MAC
	 * we're trying to look up.
	 */
	for (i = 0; i < (nintf+3)/4; i++) {
		tmp = read_dma_reg(off+4*i);
		for (j = 0; j < 4; j++) {
			if (((tmp >> (8*j)) & 0xff) == devfn)

static void pasemi_mac_intf_disable(struct pasemi_mac *mac)
	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
	flags &= ~PAS_MAC_CFG_PCFG_PE;
	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);

static void pasemi_mac_intf_enable(struct pasemi_mac *mac)
	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
	flags |= PAS_MAC_CFG_PCFG_PE;
	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
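
/* Look up the MAC address for this interface in the device tree: prefer the
 * byte-array "local-mac-address" property and fall back to parsing the
 * legacy string-form "mac-address" property written by old firmware.
 */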
static int pasemi_get_mac_addr(struct pasemi_mac *mac)
	struct pci_dev *pdev = mac->pdev;
	struct device_node *dn = pci_device_to_OF_node(pdev);

			"No device node for mac, not configuring\n");

	maddr = of_get_property(dn, "local-mac-address", &len);

	if (maddr && len == 6) {
		memcpy(mac->mac_addr, maddr, 6);

	/* Some old versions of firmware mistakenly use mac-address
	 * (and as a string) instead of a byte array in local-mac-address.
	 */
	maddr = of_get_property(dn, "mac-address", NULL);
			"no mac address in device tree, not configuring\n");

	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
		   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
			"can't parse mac address, not configuring\n");

	memcpy(mac->mac_addr, addr, 6);
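
/* .set_mac_address handler: pack bytes 2-5 of the new address into ADR0 and
 * bytes 0-1 into the low half of ADR1, disabling the interface around the
 * two register writes so the MAC never operates on a half-written address.
 */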
static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p)
	struct pasemi_mac *mac = netdev_priv(dev);
	struct sockaddr *addr = p;
	unsigned int adr0, adr1;

	if (!is_valid_ether_addr(addr->sa_data))

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	adr0 = dev->dev_addr[2] << 24 |
	       dev->dev_addr[3] << 16 |
	       dev->dev_addr[4] << 8 |
	adr1 = read_mac_reg(mac, PAS_MAC_CFG_ADR1);
	adr1 |= dev->dev_addr[0] << 8 | dev->dev_addr[1];

	pasemi_mac_intf_disable(mac);
	write_mac_reg(mac, PAS_MAC_CFG_ADR0, adr0);
	write_mac_reg(mac, PAS_MAC_CFG_ADR1, adr1);
	pasemi_mac_intf_enable(mac);
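
/* LRO callback: only allow aggregation when hardware already validated the
 * IPv4 header (HTY_IPV4_OK), the payload is TCP, and the full TCP header is
 * present; hands the IP/TCP header locations back to inet_lro.
 */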
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *data)
	u64 macrx = (u64) data;

	/* IPv4 header checksum failed */
	if ((macrx & XCT_MACRX_HTY_M) != XCT_MACRX_HTY_IPV4_OK)

	skb_reset_network_header(skb);
	if (iph->protocol != IPPROTO_TCP)

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))

	*hdr_flags = LRO_IPV4 | LRO_TCP;

static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
				   const dma_addr_t *dmas)
	struct pci_dev *pdev = mac->dma_pdev;

	pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE);

	for (f = 0; f < nfrags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		pci_unmap_page(pdev, dmas[f+1], frag->size, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(skb);

	/* Freed descriptor slot + main SKB ptr + nfrags additional ptrs,
	 * rounded up to an even number
	 */
	return (nfrags + 3) & ~1;
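
/* The value returned above is the number of ring slots reclaimed: e.g. a
 * 1-fragment skb occupies 1 mactx + 2 pointer descriptors = 3 slots, which
 * (nfrags + 3) & ~1 rounds up to the 4 slots actually consumed, since the
 * TX ring is always filled in even-sized chunks.
 */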

static struct pasemi_mac_csring *pasemi_mac_setup_csring(struct pasemi_mac *mac)
	struct pasemi_mac_csring *ring;

	ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_csring),
				     offsetof(struct pasemi_mac_csring, chan));
		dev_err(&mac->pdev->dev, "Can't allocate checksum channel\n");

	chno = ring->chan.chno;

	ring->size = CS_RING_SIZE;
	ring->next_to_fill = 0;

	/* Allocate descriptors */
	if (pasemi_dma_alloc_ring(&ring->chan, CS_RING_SIZE))

	write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno),
		      PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
	val |= PAS_DMA_TXCHAN_BASEU_SIZ(CS_RING_SIZE >> 3);

	write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val);

	ring->events[0] = pasemi_dma_alloc_flag();
	ring->events[1] = pasemi_dma_alloc_flag();
	if (ring->events[0] < 0 || ring->events[1] < 0)

	pasemi_dma_clear_flag(ring->events[0]);
	pasemi_dma_clear_flag(ring->events[1]);

	ring->fun = pasemi_dma_alloc_fun();

	cfg = PAS_DMA_TXCHAN_CFG_TY_FUNC | PAS_DMA_TXCHAN_CFG_UP |
	      PAS_DMA_TXCHAN_CFG_TATTR(ring->fun) |
	      PAS_DMA_TXCHAN_CFG_LPSQ | PAS_DMA_TXCHAN_CFG_LPDQ;

	if (translation_enabled())
		cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;

	write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg);

	pasemi_dma_start_chan(&ring->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
					   PAS_DMA_TXCHAN_TCMDSTA_DB |
					   PAS_DMA_TXCHAN_TCMDSTA_DE |
					   PAS_DMA_TXCHAN_TCMDSTA_DA);

	if (ring->events[0] >= 0)
		pasemi_dma_free_flag(ring->events[0]);
	if (ring->events[1] >= 0)
		pasemi_dma_free_flag(ring->events[1]);
	pasemi_dma_free_ring(&ring->chan);
	pasemi_dma_free_chan(&ring->chan);
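
/* Allocate the checksum function channels used to offload checksums for
 * frames too large for the MAC's inline TX checksum (see
 * pasemi_mac_queue_csdesc): one ring for a GMAC port, two for XAUI.
 */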
static void pasemi_mac_setup_csrings(struct pasemi_mac *mac)
	mac->cs[0] = pasemi_mac_setup_csring(mac);
	if (mac->type == MAC_TYPE_XAUI)
		mac->cs[1] = pasemi_mac_setup_csring(mac);

	for (i = 0; i < MAX_CS; i++)

static void pasemi_mac_free_csring(struct pasemi_mac_csring *csring)
	pasemi_dma_stop_chan(&csring->chan);
	pasemi_dma_free_flag(csring->events[0]);
	pasemi_dma_free_flag(csring->events[1]);
	pasemi_dma_free_ring(&csring->chan);
	pasemi_dma_free_chan(&csring->chan);
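
/* Set up the RX side: a DMA channel with its descriptor ring plus a separate
 * coherent "buffers" ring holding the buffer pointers handed to the RX
 * interface, and program both ring bases/sizes into the DMA engine.
 */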
static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
	struct pasemi_mac_rxring *ring;
	struct pasemi_mac *mac = netdev_priv(dev);

	ring = pasemi_dma_alloc_chan(RXCHAN, sizeof(struct pasemi_mac_rxring),
				     offsetof(struct pasemi_mac_rxring, chan));
		dev_err(&mac->pdev->dev, "Can't allocate RX channel\n");

	chno = ring->chan.chno;

	spin_lock_init(&ring->lock);

	ring->size = RX_RING_SIZE;
	ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
				  RX_RING_SIZE, GFP_KERNEL);

	if (!ring->ring_info)

	/* Allocate descriptors */
	if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))

	ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
					   RX_RING_SIZE * sizeof(u64),
					   &ring->buf_dma, GFP_KERNEL);

	memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));

	write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno),
		      PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma));

	write_dma_reg(PAS_DMA_RXCHAN_BASEU(chno),
		      PAS_DMA_RXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32) |
		      PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3));

	cfg = PAS_DMA_RXCHAN_CFG_HBU(2);

	if (translation_enabled())
		cfg |= PAS_DMA_RXCHAN_CFG_CTR;

	write_dma_reg(PAS_DMA_RXCHAN_CFG(chno), cfg);

	write_dma_reg(PAS_DMA_RXINT_BASEL(mac->dma_if),
		      PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma));

	write_dma_reg(PAS_DMA_RXINT_BASEU(mac->dma_if),
		      PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) |
		      PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));

	cfg = PAS_DMA_RXINT_CFG_DHL(2) | PAS_DMA_RXINT_CFG_L2 |
	      PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP |
	      PAS_DMA_RXINT_CFG_HEN;

	if (translation_enabled())
		cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR;

	write_dma_reg(PAS_DMA_RXINT_CFG(mac->dma_if), cfg);

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;

	kfree(ring->ring_info);
	pasemi_dma_free_chan(&ring->chan);
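
/* Set up a TX DMA channel bound to this MAC's interface, with its descriptor
 * ring and per-descriptor software state in ring_info.
 */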
static struct pasemi_mac_txring *
pasemi_mac_setup_tx_resources(const struct net_device *dev)
	struct pasemi_mac *mac = netdev_priv(dev);
	struct pasemi_mac_txring *ring;

	ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_txring),
				     offsetof(struct pasemi_mac_txring, chan));
		dev_err(&mac->pdev->dev, "Can't allocate TX channel\n");

	chno = ring->chan.chno;

	spin_lock_init(&ring->lock);

	ring->size = TX_RING_SIZE;
	ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
				  TX_RING_SIZE, GFP_KERNEL);
	if (!ring->ring_info)

	/* Allocate descriptors */
	if (pasemi_dma_alloc_ring(&ring->chan, TX_RING_SIZE))

	write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno),
		      PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
	val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3);

	write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val);

	cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE |
	      PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
	      PAS_DMA_TXCHAN_CFG_UP |
	      PAS_DMA_TXCHAN_CFG_WT(4);

	if (translation_enabled())
		cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;

	write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg);

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;

	kfree(ring->ring_info);
	pasemi_dma_free_chan(&ring->chan);

static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac)
	struct pasemi_mac_txring *txring = tx_ring(mac);
	struct pasemi_mac_buffer *info;
	dma_addr_t dmas[MAX_SKB_FRAGS+1];

	start = txring->next_to_clean;
	limit = txring->next_to_fill;

	/* Compensate for when fill has wrapped and clean has not */
		limit += TX_RING_SIZE;

	for (i = start; i < limit; i += freed) {
		info = &txring->ring_info[(i+1) & (TX_RING_SIZE-1)];
		if (info->dma && info->skb) {
			nfrags = skb_shinfo(info->skb)->nr_frags;
			for (j = 0; j <= nfrags; j++)
				dmas[j] = txring->ring_info[(i+1+j) &
					  (TX_RING_SIZE-1)].dma;
			freed = pasemi_mac_unmap_tx_skb(mac, nfrags,

	kfree(txring->ring_info);
	pasemi_dma_free_chan(&txring->chan);

static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac)
	struct pasemi_mac_rxring *rx = rx_ring(mac);
	struct pasemi_mac_buffer *info;

	for (i = 0; i < RX_RING_SIZE; i++) {
		info = &RX_DESC_INFO(rx, i);
		if (info->skb && info->dma) {
			pci_unmap_single(mac->dma_pdev,
			dev_kfree_skb_any(info->skb);

	for (i = 0; i < RX_RING_SIZE; i++)

static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
	pasemi_mac_free_rx_buffers(mac);

	dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
			  rx_ring(mac)->buffers, rx_ring(mac)->buf_dma);

	kfree(rx_ring(mac)->ring_info);
	pasemi_dma_free_chan(&rx_ring(mac)->chan);
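
/* Refill up to 'limit' RX buffer slots: allocate an skb (reserving
 * LOCAL_SKB_ALIGN bytes so the IP header behind the 14-byte Ethernet header
 * lands aligned), map it for DMA, write the buffer descriptor, and finally
 * tell the hardware how many new buffers are available via RXINT_INCR.
 */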
static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
	const struct pasemi_mac *mac = netdev_priv(dev);
	struct pasemi_mac_rxring *rx = rx_ring(mac);

	fill = rx_ring(mac)->next_to_fill;
	for (count = 0; count < limit; count++) {
		struct pasemi_mac_buffer *info = &RX_DESC_INFO(rx, fill);
		u64 *buff = &RX_BUFF(rx, fill);

		skb = dev_alloc_skb(mac->bufsz);
		skb_reserve(skb, LOCAL_SKB_ALIGN);

		dma = pci_map_single(mac->dma_pdev, skb->data,
				     mac->bufsz - LOCAL_SKB_ALIGN,

		if (unlikely(dma_mapping_error(dma))) {
			dev_kfree_skb_irq(info->skb);

		*buff = XCT_RXB_LEN(mac->bufsz) | XCT_RXB_ADDR(dma);

	write_dma_reg(PAS_DMA_RXINT_INCR(mac->dma_if), count);

	rx_ring(mac)->next_to_fill = (rx_ring(mac)->next_to_fill + count) &

static void pasemi_mac_restart_rx_intr(const struct pasemi_mac *mac)
	struct pasemi_mac_rxring *rx = rx_ring(mac);
	unsigned int reg, pcnt;

	/* Re-enable packet count interrupts: finally
	 * ack the packet count interrupt we got in rx_intr.
	 */
	pcnt = *rx->chan.status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC;

	if (*rx->chan.status & PAS_STATUS_TIMER)
		reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;

	write_iob_reg(PAS_IOB_DMA_RXCH_RESET(mac->rx->chan.chno), reg);

static void pasemi_mac_restart_tx_intr(const struct pasemi_mac *mac)
	unsigned int reg, pcnt;

	/* Re-enable packet count interrupts */
	pcnt = *tx_ring(mac)->chan.status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;

	write_iob_reg(PAS_IOB_DMA_TXCH_RESET(tx_ring(mac)->chan.chno), reg);

static inline void pasemi_mac_rx_error(const struct pasemi_mac *mac,
	unsigned int rcmdsta, ccmdsta;
	struct pasemi_dmachan *chan = &rx_ring(mac)->chan;

	if (!netif_msg_rx_err(mac))

	rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
	ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno));

	printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n",
	       macrx, *chan->status);

	printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n",

static inline void pasemi_mac_tx_error(const struct pasemi_mac *mac,
	struct pasemi_dmachan *chan = &tx_ring(mac)->chan;

	if (!netif_msg_tx_err(mac))

	cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno));

	printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "\
	       "tx status 0x%016lx\n", mactx, *chan->status);

	printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta);
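
/* NAPI receive processing: walk the RX descriptor ring under rx->lock,
 * decode the 8-byte result descriptors to find which buffer was used, unmap
 * it, feed good packets to LRO, and replenish the buffers we consumed.
 */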
static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx,
	const struct pasemi_dmachan *chan = &rx->chan;
	struct pasemi_mac *mac = rx->mac;
	struct pci_dev *pdev = mac->dma_pdev;
	int count, buf_index, tot_bytes, packets;
	struct pasemi_mac_buffer *info;

	spin_lock(&rx->lock);

	n = rx->next_to_clean;

	prefetch(&RX_DESC(rx, n));

	for (count = 0; count < limit; count++) {
		macrx = RX_DESC(rx, n);
		prefetch(&RX_DESC(rx, n+4));

		if ((macrx & XCT_MACRX_E) ||
		    (*chan->status & PAS_STATUS_ERROR))
			pasemi_mac_rx_error(mac, macrx);

		if (!(macrx & XCT_MACRX_O))

		BUG_ON(!(macrx & XCT_MACRX_RR_8BRES));

		eval = (RX_DESC(rx, n+1) & XCT_RXRES_8B_EVAL_M) >>
			XCT_RXRES_8B_EVAL_S;

		dma = (RX_DESC(rx, n+2) & XCT_PTR_ADDR_M);
		info = &RX_DESC_INFO(rx, buf_index);

		len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;

		pci_unmap_single(pdev, dma, mac->bufsz - LOCAL_SKB_ALIGN,

		if (macrx & XCT_MACRX_CRC) {
			/* CRC error flagged */
			mac->netdev->stats.rx_errors++;
			mac->netdev->stats.rx_crc_errors++;
			/* No need to free skb, it'll be reused */

		if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
				XCT_MACRX_CSUM_S;
			skb->ip_summed = CHECKSUM_NONE;

		/* Don't include CRC */

		skb->protocol = eth_type_trans(skb, mac->netdev);
		lro_receive_skb(&mac->lro_mgr, skb, (void *)macrx);

		RX_DESC(rx, n+1) = 0;

		/* Need to zero it out since hardware doesn't; the
		 * replenish loop uses it to tell when it's done.
		 */
		RX_BUFF(rx, buf_index) = 0;

	if (n > RX_RING_SIZE) {
		/* Errata 5971 workaround: L2 target of headers */
		write_iob_reg(PAS_IOB_COM_PKTHDRCNT, 0);
		n &= (RX_RING_SIZE-1);

	rx_ring(mac)->next_to_clean = n;

	lro_flush_all(&mac->lro_mgr);

	/* Increase is in number of 16-byte entries, and since each descriptor
	 * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with
	 * count*2.
	 */
	write_dma_reg(PAS_DMA_RXCHAN_INCR(mac->rx->chan.chno), count << 1);

	pasemi_mac_replenish_rx_ring(mac->netdev, count);

	mac->netdev->stats.rx_bytes += tot_bytes;
	mac->netdev->stats.rx_packets += packets;

	spin_unlock(&rx_ring(mac)->lock);

/* Can't make this too large or we blow the kernel stack limits */
#define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS)
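
/* Reclaim completed TX descriptors in batches: gather up to
 * TX_CLEAN_BATCHSIZE skbs and their DMA addresses while holding the ring
 * lock, then drop the lock before unmapping and freeing them.
 */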
static int pasemi_mac_clean_tx(struct pasemi_mac_txring *txring)
	struct pasemi_dmachan *chan = &txring->chan;
	struct pasemi_mac *mac = txring->mac;
	unsigned int start, descr_count, buf_count, batch_limit;
	unsigned int ring_limit;
	unsigned int total_count;
	struct sk_buff *skbs[TX_CLEAN_BATCHSIZE];
	dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1];
	int nf[TX_CLEAN_BATCHSIZE];

	batch_limit = TX_CLEAN_BATCHSIZE;

	spin_lock_irqsave(&txring->lock, flags);

	start = txring->next_to_clean;
	ring_limit = txring->next_to_fill;

	prefetch(&TX_DESC_INFO(txring, start+1).skb);

	/* Compensate for when fill has wrapped but clean has not */
	if (start > ring_limit)
		ring_limit += TX_RING_SIZE;

	     descr_count < batch_limit && i < ring_limit;
		u64 mactx = TX_DESC(txring, i);

		if ((mactx & XCT_MACTX_E) ||
		    (*chan->status & PAS_STATUS_ERROR))
			pasemi_mac_tx_error(mac, mactx);

		/* Skip over control descriptors */
		if (!(mactx & XCT_MACTX_LLEN_M)) {
			TX_DESC(txring, i) = 0;
			TX_DESC(txring, i+1) = 0;

		skb = TX_DESC_INFO(txring, i+1).skb;
		nr_frags = TX_DESC_INFO(txring, i).dma;

		if (unlikely(mactx & XCT_MACTX_O))
			/* Not yet transmitted */

		buf_count = 2 + nr_frags;
		/* Since we always fill with an even number of entries, make
		 * sure we skip any unused one at the end as well.
		 */

		for (j = 0; j <= nr_frags; j++)
			dmas[descr_count][j] = TX_DESC_INFO(txring, i+1+j).dma;

		skbs[descr_count] = skb;
		nf[descr_count] = nr_frags;

		TX_DESC(txring, i) = 0;
		TX_DESC(txring, i+1) = 0;

	txring->next_to_clean = i & (TX_RING_SIZE-1);

	spin_unlock_irqrestore(&txring->lock, flags);
	netif_wake_queue(mac->netdev);

	for (i = 0; i < descr_count; i++)
		pasemi_mac_unmap_tx_skb(mac, nf[i], skbs[i], dmas[i]);

	total_count += descr_count;

	/* If the batch was full, try to clean more */
	if (descr_count == batch_limit)

static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
	const struct pasemi_mac_rxring *rxring = data;
	struct pasemi_mac *mac = rxring->mac;
	struct net_device *dev = mac->netdev;
	const struct pasemi_dmachan *chan = &rxring->chan;

	if (!(*chan->status & PAS_STATUS_CAUSE_M))

	/* Don't reset packet count so it won't fire again but clear
	 * all interrupt sources.
	 */

	if (*chan->status & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
	if (*chan->status & PAS_STATUS_ERROR)
		reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;

	netif_rx_schedule(dev, &mac->napi);

	write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg);

#define TX_CLEAN_INTERVAL HZ
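
/* Periodic fallback TX cleanup: reclaim completed descriptors from a timer
 * roughly once per TX_CLEAN_INTERVAL in case no TX interrupt fires, then
 * re-arm the packet-count interrupt.
 */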
static void pasemi_mac_tx_timer(unsigned long data)
	struct pasemi_mac_txring *txring = (struct pasemi_mac_txring *)data;
	struct pasemi_mac *mac = txring->mac;

	pasemi_mac_clean_tx(txring);

	mod_timer(&txring->clean_timer, jiffies + TX_CLEAN_INTERVAL);

	pasemi_mac_restart_tx_intr(mac);

static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
	struct pasemi_mac_txring *txring = data;
	const struct pasemi_dmachan *chan = &txring->chan;
	struct pasemi_mac *mac = txring->mac;

	if (!(*chan->status & PAS_STATUS_CAUSE_M))

	if (*chan->status & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_TXCH_RESET_SINTC;
	if (*chan->status & PAS_STATUS_ERROR)
		reg |= PAS_IOB_DMA_TXCH_RESET_DINTC;

	mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2);

	netif_rx_schedule(mac->netdev, &mac->napi);

	write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg);
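
/* phylib link-change callback: mirror the PHY's link, speed and duplex state
 * into the MAC's PCFG register and log transitions.
 */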
static void pasemi_adjust_link(struct net_device *dev)
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int new_flags;

	if (!mac->phydev->link) {
		/* If no link, MAC speed settings don't matter. Just report
		 * link down and return.
		 */
		if (mac->link && netif_msg_link(mac))
			printk(KERN_INFO "%s: Link is down.\n", dev->name);

		netif_carrier_off(dev);
		pasemi_mac_intf_disable(mac);

		pasemi_mac_intf_enable(mac);
		netif_carrier_on(dev);

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
	new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M |
			      PAS_MAC_CFG_PCFG_TSR_M);

	if (!mac->phydev->duplex)
		new_flags |= PAS_MAC_CFG_PCFG_HD;

	switch (mac->phydev->speed) {
		new_flags |= PAS_MAC_CFG_PCFG_SPD_1G |
			     PAS_MAC_CFG_PCFG_TSR_1G;
		new_flags |= PAS_MAC_CFG_PCFG_SPD_100M |
			     PAS_MAC_CFG_PCFG_TSR_100M;
		new_flags |= PAS_MAC_CFG_PCFG_SPD_10M |
			     PAS_MAC_CFG_PCFG_TSR_10M;
		printk("Unsupported speed %d\n", mac->phydev->speed);

	/* Print on link or speed/duplex change */
	msg = mac->link != mac->phydev->link || flags != new_flags;

	mac->duplex = mac->phydev->duplex;
	mac->speed = mac->phydev->speed;
	mac->link = mac->phydev->link;

	if (new_flags != flags)
		write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags);

	if (msg && netif_msg_link(mac))
		printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n",
		       dev->name, mac->speed, mac->duplex ? "full" : "half");

static int pasemi_mac_phy_init(struct net_device *dev)
	struct pasemi_mac *mac = netdev_priv(dev);
	struct device_node *dn, *phy_dn;
	struct phy_device *phydev;
	unsigned int phy_id;
	const unsigned int *prop;

	dn = pci_device_to_OF_node(mac->pdev);
	ph = of_get_property(dn, "phy-handle", NULL);
	phy_dn = of_find_node_by_phandle(*ph);

	prop = of_get_property(phy_dn, "reg", NULL);
	ret = of_address_to_resource(phy_dn->parent, 0, &r);

	snprintf(mac->phy_id, BUS_ID_SIZE, PHY_ID_FMT, (int)r.start, phy_id);

	of_node_put(phy_dn);

	phydev = phy_connect(dev, mac->phy_id, &pasemi_adjust_link, 0,
			     PHY_INTERFACE_MODE_SGMII);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
		return PTR_ERR(phydev);

	mac->phydev = phydev;

	of_node_put(phy_dn);
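
/* Open the interface: allocate RX/TX (and, for jumbo MTU, checksum) rings,
 * program the DMA engine and IOB interrupt thresholds, start the channels,
 * replenish RX buffers, attach the PHY if present, and request the
 * per-channel IRQs.
 */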
static int pasemi_mac_open(struct net_device *dev)
	struct pasemi_mac *mac = netdev_priv(dev);

	flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
		PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
		PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);

	write_mac_reg(mac, PAS_MAC_CFG_TXP, flags);

	ret = pasemi_mac_setup_rx_resources(dev);
		goto out_rx_resources;

	mac->tx = pasemi_mac_setup_tx_resources(dev);

	if (dev->mtu > 1500) {
		pasemi_mac_setup_csrings(mac);

	/* 0x3ff with 33MHz clock is about 31us */
	write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG,
		      PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0x3ff));

	write_iob_reg(PAS_IOB_DMA_RXCH_CFG(mac->rx->chan.chno),
		      PAS_IOB_DMA_RXCH_CFG_CNTTH(256));

	write_iob_reg(PAS_IOB_DMA_TXCH_CFG(mac->tx->chan.chno),
		      PAS_IOB_DMA_TXCH_CFG_CNTTH(32));

	write_mac_reg(mac, PAS_MAC_IPC_CHNL,
		      PAS_MAC_IPC_CHNL_DCHNO(mac->rx->chan.chno) |
		      PAS_MAC_IPC_CHNL_BCH(mac->rx->chan.chno));

	write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
		      PAS_DMA_RXINT_RCMDSTA_EN |
		      PAS_DMA_RXINT_RCMDSTA_DROPS_M |
		      PAS_DMA_RXINT_RCMDSTA_BP |
		      PAS_DMA_RXINT_RCMDSTA_OO |
		      PAS_DMA_RXINT_RCMDSTA_BT);

	/* enable rx channel */
	pasemi_dma_start_chan(&rx_ring(mac)->chan, PAS_DMA_RXCHAN_CCMDSTA_DU |
						   PAS_DMA_RXCHAN_CCMDSTA_OD |
						   PAS_DMA_RXCHAN_CCMDSTA_FD |
						   PAS_DMA_RXCHAN_CCMDSTA_DT);

	/* enable tx channel */
	pasemi_dma_start_chan(&tx_ring(mac)->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
						   PAS_DMA_TXCHAN_TCMDSTA_DB |
						   PAS_DMA_TXCHAN_TCMDSTA_DE |
						   PAS_DMA_TXCHAN_TCMDSTA_DA);

	pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE);

	write_dma_reg(PAS_DMA_RXCHAN_INCR(rx_ring(mac)->chan.chno),

	/* Clear out any residual packet count state from firmware */
	pasemi_mac_restart_rx_intr(mac);
	pasemi_mac_restart_tx_intr(mac);

	flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;

	if (mac->type == MAC_TYPE_GMAC)
		flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
		flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G;

	/* Enable interface in MAC */
	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);

	ret = pasemi_mac_phy_init(dev);
		/* Since we won't get link notification, just enable RX */
		pasemi_mac_intf_enable(mac);
		if (mac->type == MAC_TYPE_GMAC) {
			/* Warn for missing PHY on SGMII (1Gig) ports */
			dev_warn(&mac->pdev->dev,
				 "PHY init failed: %d.\n", ret);
			dev_warn(&mac->pdev->dev,
				 "Defaulting to 1Gbit full duplex\n");

	netif_start_queue(dev);
	napi_enable(&mac->napi);

	snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",

	ret = request_irq(mac->tx->chan.irq, &pasemi_mac_tx_intr, IRQF_DISABLED,
			  mac->tx_irq_name, mac->tx);
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			mac->tx->chan.irq, ret);

	snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",

	ret = request_irq(mac->rx->chan.irq, &pasemi_mac_rx_intr, IRQF_DISABLED,
			  mac->rx_irq_name, mac->rx);
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			mac->rx->chan.irq, ret);

	phy_start(mac->phydev);

	init_timer(&mac->tx->clean_timer);
	mac->tx->clean_timer.function = pasemi_mac_tx_timer;
	mac->tx->clean_timer.data = (unsigned long)mac->tx;
	mac->tx->clean_timer.expires = jiffies+HZ;
	add_timer(&mac->tx->clean_timer);

	free_irq(mac->tx->chan.irq, mac->tx);

	napi_disable(&mac->napi);
	netif_stop_queue(dev);

	pasemi_mac_free_tx_resources(mac);
	pasemi_mac_free_rx_resources(mac);

#define MAX_RETRIES 5000
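
/* Channel teardown helpers: request a stop via the command/status register,
 * then poll up to MAX_RETRIES for the ACT bit to clear before giving up.
 */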
static void pasemi_mac_pause_txchan(struct pasemi_mac *mac)
	unsigned int sta, retries;
	int txch = tx_ring(mac)->chan.chno;

	write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch),
		      PAS_DMA_TXCHAN_TCMDSTA_ST);

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
		if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))

	if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev,
			"Failed to stop tx channel, tcmdsta %08x\n", sta);

	write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0);

static void pasemi_mac_pause_rxchan(struct pasemi_mac *mac)
	unsigned int sta, retries;
	int rxch = rx_ring(mac)->chan.chno;

	write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch),
		      PAS_DMA_RXCHAN_CCMDSTA_ST);
	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
		if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))

	if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev,
			"Failed to stop rx channel, ccmdsta %08x\n", sta);
	write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0);

static void pasemi_mac_pause_rxint(struct pasemi_mac *mac)
	unsigned int sta, retries;

	write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
		      PAS_DMA_RXINT_RCMDSTA_ST);
	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
		if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))

	if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev,
			"Failed to stop rx interface, rcmdsta %08x\n", sta);
	write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);

static int pasemi_mac_close(struct net_device *dev)
	struct pasemi_mac *mac = netdev_priv(dev);

	rxch = rx_ring(mac)->chan.chno;
	txch = tx_ring(mac)->chan.chno;

	phy_stop(mac->phydev);
	phy_disconnect(mac->phydev);

	del_timer_sync(&mac->tx->clean_timer);

	netif_stop_queue(dev);
	napi_disable(&mac->napi);

	sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
	if (sta & (PAS_DMA_RXINT_RCMDSTA_BP |
		   PAS_DMA_RXINT_RCMDSTA_OO |
		   PAS_DMA_RXINT_RCMDSTA_BT))
		printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta);

	sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
	if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU |
		   PAS_DMA_RXCHAN_CCMDSTA_OD |
		   PAS_DMA_RXCHAN_CCMDSTA_FD |
		   PAS_DMA_RXCHAN_CCMDSTA_DT))
		printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta);

	sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
	if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | PAS_DMA_TXCHAN_TCMDSTA_DB |
		   PAS_DMA_TXCHAN_TCMDSTA_DE | PAS_DMA_TXCHAN_TCMDSTA_DA))
		printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta);

	/* Clean out any pending buffers */
	pasemi_mac_clean_tx(tx_ring(mac));
	pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);

	pasemi_mac_pause_txchan(mac);
	pasemi_mac_pause_rxint(mac);
	pasemi_mac_pause_rxchan(mac);
	pasemi_mac_intf_disable(mac);

	free_irq(mac->tx->chan.irq, mac->tx);
	free_irq(mac->rx->chan.irq, mac->rx);

	for (i = 0; i < mac->num_cs; i++)
		pasemi_mac_free_csring(mac->cs[i]);

	/* Free resources */
	pasemi_mac_free_rx_resources(mac);
	pasemi_mac_free_tx_resources(mac);
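
/* Checksum offload for frames too large for the MAC's inline offload: queue
 * function descriptors on a separate checksum channel that sum the packet
 * and then copy the 2-byte result into the TCP/UDP header, using paired
 * SET/WCLR and WSET/CLR event descriptors so the MAC TX channel only sends
 * the frame after the checksum has been patched in.
 */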
static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
				    const dma_addr_t *map,
				    const unsigned int *map_size,
				    struct pasemi_mac_txring *txring,
				    struct pasemi_mac_csring *csring)
	const int nh_off = skb_network_offset(skb);
	const int nh_len = skb_network_header_len(skb);
	const int nfrags = skb_shinfo(skb)->nr_frags;
	int cs_size, i, fill, hdr, cpyhdr, evt;

	fund = XCT_FUN_ST | XCT_FUN_RR_8BRES |
	       XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
	       XCT_FUN_CRM_SIG | XCT_FUN_LLEN(skb->len - nh_off) |
	       XCT_FUN_SHL(nh_len >> 2) | XCT_FUN_SE;

	switch (ip_hdr(skb)->protocol) {
		fund |= XCT_FUN_SIG_TCP4;
		/* TCP checksum is 16 bytes into the header */
		cs_dest = map[0] + skb_transport_offset(skb) + 16;
		fund |= XCT_FUN_SIG_UDP4;
		/* UDP checksum is 6 bytes into the header */
		cs_dest = map[0] + skb_transport_offset(skb) + 6;

	/* Do the checksum offload */
	fill = csring->next_to_fill;

	CS_DESC(csring, fill++) = fund;
	/* Room for 8BRES. Checksum result is really 2 bytes into it */
	csdma = csring->chan.ring_dma + (fill & (CS_RING_SIZE-1)) * 8 + 2;
	CS_DESC(csring, fill++) = 0;

	CS_DESC(csring, fill) = XCT_PTR_LEN(map_size[0]-nh_off) | XCT_PTR_ADDR(map[0]+nh_off);
	for (i = 1; i <= nfrags; i++)
		CS_DESC(csring, fill+i) = XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);

	/* Copy the result into the TCP packet */
	CS_DESC(csring, fill++) = XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
				  XCT_FUN_LLEN(2) | XCT_FUN_SE;
	CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(cs_dest) | XCT_PTR_T;
	CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(csdma);

	evt = !csring->last_event;
	csring->last_event = evt;

	/* Event handshaking with MAC TX */
	CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
				  CTRL_CMD_ETYPE_SET | CTRL_CMD_REG(csring->events[evt]);
	CS_DESC(csring, fill++) = 0;
	CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
				  CTRL_CMD_ETYPE_WCLR | CTRL_CMD_REG(csring->events[!evt]);
	CS_DESC(csring, fill++) = 0;
	csring->next_to_fill = fill & (CS_RING_SIZE-1);

	cs_size = fill - hdr;
	write_dma_reg(PAS_DMA_TXCHAN_INCR(csring->chan.chno), (cs_size) >> 1);

	/* TX-side event handshaking */
	fill = txring->next_to_fill;
	TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
				  CTRL_CMD_ETYPE_WSET | CTRL_CMD_REG(csring->events[evt]);
	TX_DESC(txring, fill++) = 0;
	TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
				  CTRL_CMD_ETYPE_CLR | CTRL_CMD_REG(csring->events[!evt]);
	TX_DESC(txring, fill++) = 0;
	txring->next_to_fill = fill;

	write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);

static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
	struct pasemi_mac * const mac = netdev_priv(dev);
	struct pasemi_mac_txring * const txring = tx_ring(mac);
	struct pasemi_mac_csring *csring;
	dma_addr_t map[MAX_SKB_FRAGS+1];
	unsigned int map_size[MAX_SKB_FRAGS+1];
	unsigned long flags;
	const int nh_off = skb_network_offset(skb);
	const int nh_len = skb_network_header_len(skb);

	prefetch(&txring->ring_info);

	dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD;

	nfrags = skb_shinfo(skb)->nr_frags;

	map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
				PCI_DMA_TODEVICE);
	map_size[0] = skb_headlen(skb);
	if (dma_mapping_error(map[0]))
		goto out_err_nolock;

	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		map[i+1] = pci_map_page(mac->dma_pdev, frag->page,
					frag->page_offset, frag->size,
					PCI_DMA_TODEVICE);
		map_size[i+1] = frag->size;
		if (dma_mapping_error(map[i+1])) {
			goto out_err_nolock;

	if (skb->ip_summed == CHECKSUM_PARTIAL && skb->len <= 1540) {
		switch (ip_hdr(skb)->protocol) {
			dflags |= XCT_MACTX_CSUM_TCP;
			dflags |= XCT_MACTX_IPH(nh_len >> 2);
			dflags |= XCT_MACTX_IPO(nh_off);
			dflags |= XCT_MACTX_CSUM_UDP;
			dflags |= XCT_MACTX_IPH(nh_len >> 2);
			dflags |= XCT_MACTX_IPO(nh_off);

	mactx = dflags | XCT_MACTX_LLEN(skb->len);

	spin_lock_irqsave(&txring->lock, flags);

	/* Avoid stepping on the same cache line that the DMA controller
	 * is currently about to send, so leave at least 8 words available.
	 * Total free space needed is mactx + fragments + 8
	 */
	if (RING_AVAIL(txring) < nfrags + 14) {
		/* no room -- stop the queue and wait for tx intr */
		netif_stop_queue(dev);

	/* Queue up checksum + event descriptors, if needed */
	if (mac->num_cs && skb->ip_summed == CHECKSUM_PARTIAL && skb->len > 1540) {
		csring = mac->cs[mac->last_cs];
		mac->last_cs = (mac->last_cs + 1) % mac->num_cs;

		pasemi_mac_queue_csdesc(skb, map, map_size, txring, csring);

	fill = txring->next_to_fill;
	TX_DESC(txring, fill) = mactx;
	TX_DESC_INFO(txring, fill).dma = nfrags;
	TX_DESC_INFO(txring, fill).skb = skb;
	for (i = 0; i <= nfrags; i++) {
		TX_DESC(txring, fill+i) =
			XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);
		TX_DESC_INFO(txring, fill+i).dma = map[i];

	/* We have to add an even number of 8-byte entries to the ring
	 * even if the last one is unused. That means always an odd number
	 * of pointers + one mactx descriptor.
	 */
	txring->next_to_fill = (fill + nfrags + 1) & (TX_RING_SIZE-1);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	spin_unlock_irqrestore(&txring->lock, flags);

	write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), (nfrags+2) >> 1);

	return NETDEV_TX_OK;

	spin_unlock_irqrestore(&txring->lock, flags);
	pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags],
			 PCI_DMA_TODEVICE);

	return NETDEV_TX_BUSY;

static void pasemi_mac_set_rx_mode(struct net_device *dev)
	const struct pasemi_mac *mac = netdev_priv(dev);

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);

	/* Set promiscuous */
	if (dev->flags & IFF_PROMISC)
		flags |= PAS_MAC_CFG_PCFG_PR;
		flags &= ~PAS_MAC_CFG_PCFG_PR;

	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);

static int pasemi_mac_poll(struct napi_struct *napi, int budget)
	struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
	struct net_device *dev = mac->netdev;

	pasemi_mac_clean_tx(tx_ring(mac));
	pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
	if (pkts < budget) {
		/* all done, no more packets present */
		netif_rx_complete(dev, napi);

		pasemi_mac_restart_rx_intr(mac);
		pasemi_mac_restart_tx_intr(mac);

static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int rcmdsta = 0;

	if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU)

	running = netif_running(dev);

	/* Need to stop the interface, clean out all already
	 * received buffers, free all unused buffers on the RX
	 * interface ring, then finally re-fill the rx ring with
	 * the new-size buffers and restart.
	 */
	napi_disable(&mac->napi);
	netif_tx_disable(dev);
	pasemi_mac_intf_disable(mac);

	rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
	pasemi_mac_pause_rxint(mac);
	pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);
	pasemi_mac_free_rx_buffers(mac);

	/* Setup checksum channels if large MTU and none already allocated */
	if (new_mtu > 1500 && !mac->num_cs) {
		pasemi_mac_setup_csrings(mac);

	/* Change maxf, i.e. what size frames are accepted.
	 * Need room for ethernet header and CRC word
	 */
	reg = read_mac_reg(mac, PAS_MAC_CFG_MACCFG);
	reg &= ~PAS_MAC_CFG_MACCFG_MAXF_M;
	reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4);
	write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg);

	/* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
	mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;

	write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
		      rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN);

	rx_ring(mac)->next_to_fill = 0;
	pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE-1);

	napi_enable(&mac->napi);
	netif_start_queue(dev);
	pasemi_mac_intf_enable(mac);

static int __devinit
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	struct net_device *dev;
	struct pasemi_mac *mac;
	DECLARE_MAC_BUF(mac_buf);

	err = pci_enable_device(pdev);

	dev = alloc_etherdev(sizeof(struct pasemi_mac));
			"pasemi_mac: Could not allocate ethernet device.\n");
		goto out_disable_device;

	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	mac = netdev_priv(dev);

	netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);

	dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG |
			NETIF_F_HIGHDMA | NETIF_F_GSO;

	mac->lro_mgr.max_aggr = LRO_MAX_AGGR;
	mac->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
	mac->lro_mgr.lro_arr = mac->lro_desc;
	mac->lro_mgr.get_skb_header = get_skb_hdr;
	mac->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	mac->lro_mgr.dev = mac->netdev;
	mac->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	mac->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
	if (!mac->dma_pdev) {
		dev_err(&mac->pdev->dev, "Can't find DMA Controller\n");

	mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!mac->iob_pdev) {
		dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n");

	/* get mac addr from device tree */
	if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {

	memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));

	mac->dma_if = mac_to_intf(mac);
	if (mac->dma_if < 0) {
		dev_err(&mac->pdev->dev, "Can't map DMA interface\n");

	switch (pdev->device) {
		mac->type = MAC_TYPE_GMAC;
		mac->type = MAC_TYPE_XAUI;

	dev->open = pasemi_mac_open;
	dev->stop = pasemi_mac_close;
	dev->hard_start_xmit = pasemi_mac_start_tx;
	dev->set_multicast_list = pasemi_mac_set_rx_mode;
	dev->set_mac_address = pasemi_mac_set_mac_addr;
	dev->mtu = PE_DEF_MTU;
	/* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
	mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;

	dev->change_mtu = pasemi_mac_change_mtu;

	mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* Enable most messages by default */
	mac->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	err = register_netdev(dev);

		dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
	} else if (netif_msg_probe(mac))
		printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %s\n",
		       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
		       mac->dma_if, print_mac(mac_buf, dev->dev_addr));

	pci_dev_put(mac->iob_pdev);
	pci_dev_put(mac->dma_pdev);
	pci_disable_device(pdev);

static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pasemi_mac *mac;

	mac = netdev_priv(netdev);

	unregister_netdev(netdev);

	pci_disable_device(pdev);
	pci_dev_put(mac->dma_pdev);
	pci_dev_put(mac->iob_pdev);

	pasemi_dma_free_chan(&mac->tx->chan);
	pasemi_dma_free_chan(&mac->rx->chan);

	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

static struct pci_device_id pasemi_mac_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },

MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);

static struct pci_driver pasemi_mac_driver = {
	.name		= "pasemi_mac",
	.id_table	= pasemi_mac_pci_tbl,
	.probe		= pasemi_mac_probe,
	.remove		= __devexit_p(pasemi_mac_remove),

static void __exit pasemi_mac_cleanup_module(void)
	pci_unregister_driver(&pasemi_mac_driver);

int pasemi_mac_init_module(void)
	err = pasemi_dma_init();

	return pci_register_driver(&pasemi_mac_driver);

module_init(pasemi_mac_init_module);
module_exit(pasemi_mac_cleanup_module);