2 $Id: fore200e.c,v 1.5 2000/04/14 10:10:34 davem Exp $
4 A FORE Systems 200E-series driver for ATM on Linux.
5 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
7 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
9 This driver simultaneously supports PCA-200E and SBA-200E adapters
10 on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the Free Software
24 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 #include <linux/config.h>
29 #include <linux/kernel.h>
30 #include <linux/slab.h>
31 #include <linux/init.h>
32 #include <linux/capability.h>
33 #include <linux/sched.h>
34 #include <linux/interrupt.h>
35 #include <linux/bitops.h>
36 #include <linux/pci.h>
37 #include <linux/module.h>
38 #include <linux/atmdev.h>
39 #include <linux/sonet.h>
40 #include <linux/atm_suni.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/delay.h>
44 #include <asm/string.h>
48 #include <asm/byteorder.h>
49 #include <asm/uaccess.h>
50 #include <asm/atomic.h>
52 #ifdef CONFIG_ATM_FORE200E_SBA
53 #include <asm/idprom.h>
55 #include <asm/openprom.h>
56 #include <asm/oplib.h>
57 #include <asm/pgtable.h>
60 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
61 #define FORE200E_USE_TASKLET
64 #if 0 /* enable the debugging code of the buffer supply queues */
65 #define FORE200E_BSQ_DEBUG
68 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
69 #define FORE200E_52BYTE_AAL0_SDU
75 #define FORE200E_VERSION "0.3e"
77 #define FORE200E "fore200e: "
79 #if 0 /* override .config */
80 #define CONFIG_ATM_FORE200E_DEBUG 1
82 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
83 #define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
84 printk(FORE200E format, ##args); } while (0)
86 #define DPRINTK(level, format, args...) do {} while (0)
/* number of padding bytes needed to round 'addr' up to the next 'alignment'
   boundary; 'alignment' must be a power of two. Arguments are fully
   parenthesized (the original '~(alignment - 1)' broke for compound
   expressions such as 'a | b') and the mask is computed in unsigned long
   so it has the same width as the address being aligned. */
#define FORE200E_ALIGN(addr, alignment) \
        ((((unsigned long)(addr) + ((unsigned long)(alignment) - 1)) & ~((unsigned long)(alignment) - 1)) - (unsigned long)(addr))
/* bus (DMA) address of the index-th element of an array of 'type' whose first element lives at 'dma_addr' */
#define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type))
/* host (virtual) address of the index-th element of an array of 'type' whose first element lives at 'virt_addr' */
#define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
97 #define FORE200E_NEXT_ENTRY(index, modulo) (index = ++(index) % (modulo))
100 #define ASSERT(expr) if (!(expr)) { \
101 printk(FORE200E "assertion failed! %s[%d]: %s\n", \
102 __FUNCTION__, __LINE__, #expr); \
103 panic(FORE200E "%s", __FUNCTION__); \
106 #define ASSERT(expr) do {} while (0)
110 static const struct atmdev_ops fore200e_ops;
111 static const struct fore200e_bus fore200e_bus[];
113 static LIST_HEAD(fore200e_boards);
116 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
117 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
118 MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
121 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
122 { BUFFER_S1_NBR, BUFFER_L1_NBR },
123 { BUFFER_S2_NBR, BUFFER_L2_NBR }
126 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
127 { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
128 { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
132 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
133 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
137 #if 0 /* currently unused */
139 fore200e_fore2atm_aal(enum fore200e_aal aal)
142 case FORE200E_AAL0: return ATM_AAL0;
143 case FORE200E_AAL34: return ATM_AAL34;
144 case FORE200E_AAL5: return ATM_AAL5;
152 static enum fore200e_aal
153 fore200e_atm2fore_aal(int aal)
156 case ATM_AAL0: return FORE200E_AAL0;
157 case ATM_AAL34: return FORE200E_AAL34;
160 case ATM_AAL5: return FORE200E_AAL5;
168 fore200e_irq_itoa(int irq)
171 sprintf(str, "%d", irq);
177 fore200e_kmalloc(int size, gfp_t flags)
179 void *chunk = kzalloc(size, flags);
182 printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags);
189 fore200e_kfree(void* chunk)
195 /* allocate and align a chunk of memory intended to hold the data behing exchanged
196 between the driver and the adapter (using streaming DVMA) */
199 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
201 unsigned long offset = 0;
203 if (alignment <= sizeof(int))
206 chunk->alloc_size = size + alignment;
207 chunk->align_size = size;
208 chunk->direction = direction;
210 chunk->alloc_addr = fore200e_kmalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
211 if (chunk->alloc_addr == NULL)
215 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
217 chunk->align_addr = chunk->alloc_addr + offset;
219 chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
225 /* free a chunk of memory */
228 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
230 fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);
232 fore200e_kfree(chunk->alloc_addr);
/* busy-wait for 'msecs' milliseconds; deliberately burns CPU
   (used during board reset/bring-up, before interrupts are usable) */
fore200e_spin(int msecs)
unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
while (time_before(jiffies, timeout));
/* poll a status word in host memory until it equals 'val', an error bit
   (STATUS_ERROR) is raised, or 'msecs' milliseconds elapse; 'addr' is
   volatile so each iteration re-reads the DMA-updated value */
fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
/* stop as soon as the expected value shows up or the adapter reports an error */
if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
} while (time_before(jiffies, timeout));
printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
/* like fore200e_poll() but for a memory-mapped device register: reads go
   through the bus-specific accessor (handles endianness / iomem access) */
fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
if ((ok = (fore200e->bus->read(addr) == val)))
} while (time_before(jiffies, timeout));
printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
fore200e->bus->read(addr), val);
/* release the data chunks of every allocated rx buffer, walking all
   buffer-supply queues (scheme x magnitude); chunks never allocated
   (alloc_addr == NULL) are skipped */
fore200e_free_rx_buf(struct fore200e* fore200e)
int scheme, magn, nbr;
struct buffer* buffer;
for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
struct chunk* data = &buffer[ nbr ].data;
if (data->alloc_addr != NULL)
fore200e_chunk_free(fore200e, data);
/* free the DMA-consistent status and RBD-block chunks of every
   buffer-supply queue; a NULL alloc_addr marks a chunk that was
   never allocated (or already freed) */
fore200e_uninit_bs_queue(struct fore200e* fore200e)
for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
if (status->alloc_addr)
fore200e->bus->dma_chunk_free(fore200e, status);
if (rbd_block->alloc_addr)
fore200e->bus->dma_chunk_free(fore200e, rbd_block);
/* cold-start the adapter and wait up to 1 second for its self-test to
   complete; on success the device state machine is moved to
   FORE200E_STATE_RESET. 'diag' selects whether self-test diagnostics
   are reported (only used by the failure path visible below). */
fore200e_reset(struct fore200e* fore200e, int diag)
fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
/* request a cold start, then pulse the bus-specific reset line */
fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
fore200e->bus->reset(fore200e);
ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
printk(FORE200E "device %s self-test failed\n", fore200e->name);
printk(FORE200E "device %s self-test passed\n", fore200e->name);
fore200e->state = FORE200E_STATE_RESET;
/* tear down a device, releasing resources in strict reverse order of
   acquisition. The switch below intentionally relies on case fall-through:
   entering at the device's current state executes that state's cleanup
   and then every cleanup step for all earlier states. */
fore200e_shutdown(struct fore200e* fore200e)
printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
fore200e->name, fore200e->phys_base,
fore200e_irq_itoa(fore200e->irq));
if (fore200e->state > FORE200E_STATE_RESET) {
/* first, reset the board to prevent further interrupts or data transfers */
fore200e_reset(fore200e, 0);
/* then, release all allocated resources */
switch(fore200e->state) {
case FORE200E_STATE_COMPLETE:
kfree(fore200e->stats);
/* fall through */
case FORE200E_STATE_IRQ:
free_irq(fore200e->irq, fore200e->atm_dev);
/* fall through */
case FORE200E_STATE_ALLOC_BUF:
fore200e_free_rx_buf(fore200e);
/* fall through */
case FORE200E_STATE_INIT_BSQ:
fore200e_uninit_bs_queue(fore200e);
/* fall through */
case FORE200E_STATE_INIT_RXQ:
fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
/* fall through */
case FORE200E_STATE_INIT_TXQ:
fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
/* fall through */
case FORE200E_STATE_INIT_CMDQ:
fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
/* fall through */
case FORE200E_STATE_INITIALIZE:
/* nothing to do for that state */
case FORE200E_STATE_START_FW:
/* nothing to do for that state */
case FORE200E_STATE_LOAD_FW:
/* nothing to do for that state */
case FORE200E_STATE_RESET:
/* nothing to do for that state */
case FORE200E_STATE_MAP:
fore200e->bus->unmap(fore200e);
/* fall through */
case FORE200E_STATE_CONFIGURE:
/* nothing to do for that state */
case FORE200E_STATE_REGISTER:
/* XXX shouldn't we *start* by deregistering the device? */
atm_dev_deregister(fore200e->atm_dev);
/* fall through */
case FORE200E_STATE_BLANK:
/* nothing to do for that state */
431 #ifdef CONFIG_ATM_FORE200E_PCA
433 static u32 fore200e_pca_read(volatile u32 __iomem *addr)
435 /* on big-endian hosts, the board is configured to convert
436 the endianess of slave RAM accesses */
437 return le32_to_cpu(readl(addr));
441 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
443 /* on big-endian hosts, the board is configured to convert
444 the endianess of slave RAM accesses */
445 writel(cpu_to_le32(val), addr);
450 fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
452 u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);
454 DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n",
455 virt_addr, size, direction, dma_addr);
462 fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
464 DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
465 dma_addr, size, direction);
467 pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
472 fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
474 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
476 pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
480 fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
482 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
484 pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
488 /* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
489 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
492 fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
493 int size, int nbr, int alignment)
495 /* returned chunks are page-aligned */
496 chunk->alloc_size = size * nbr;
497 chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
501 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
504 chunk->align_addr = chunk->alloc_addr;
510 /* free a DMA consistent chunk of memory */
513 fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
515 pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
523 fore200e_pca_irq_check(struct fore200e* fore200e)
525 /* this is a 1 bit register */
526 int irq_posted = readl(fore200e->regs.pca.psr);
528 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
529 if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
530 DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
539 fore200e_pca_irq_ack(struct fore200e* fore200e)
541 writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
546 fore200e_pca_reset(struct fore200e* fore200e)
548 writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
550 writel(0, fore200e->regs.pca.hcr);
555 fore200e_pca_map(struct fore200e* fore200e)
557 DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
559 fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
561 if (fore200e->virt_base == NULL) {
562 printk(FORE200E "can't map device %s\n", fore200e->name);
566 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
568 /* gain access to the PCA specific registers */
569 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
570 fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
571 fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
573 fore200e->state = FORE200E_STATE_MAP;
579 fore200e_pca_unmap(struct fore200e* fore200e)
581 DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
583 if (fore200e->virt_base != NULL)
584 iounmap(fore200e->virt_base);
589 fore200e_pca_configure(struct fore200e* fore200e)
591 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
592 u8 master_ctrl, latency;
594 DPRINTK(2, "device %s being configured\n", fore200e->name);
596 if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
597 printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
601 pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
603 master_ctrl = master_ctrl
604 #if defined(__BIG_ENDIAN)
605 /* request the PCA board to convert the endianess of slave RAM accesses */
606 | PCA200E_CTRL_CONVERT_ENDIAN
609 | PCA200E_CTRL_DIS_CACHE_RD
610 | PCA200E_CTRL_DIS_WRT_INVAL
611 | PCA200E_CTRL_ENA_CONT_REQ_MODE
612 | PCA200E_CTRL_2_CACHE_WRT_INVAL
614 | PCA200E_CTRL_LARGE_PCI_BURSTS;
616 pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
618 /* raise latency from 32 (default) to 192, as this seems to prevent NIC
619 lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
620 this may impact the performances of other PCI devices on the same bus, though */
622 pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
624 fore200e->state = FORE200E_STATE_CONFIGURE;
630 fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
632 struct host_cmdq* cmdq = &fore200e->host_cmdq;
633 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
634 struct prom_opcode opcode;
638 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
640 opcode.opcode = OPCODE_GET_PROM;
643 prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);
645 fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
647 *entry->status = STATUS_PENDING;
649 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
651 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
653 *entry->status = STATUS_FREE;
655 fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
658 printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
662 #if defined(__BIG_ENDIAN)
664 #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
666 /* MAC address is stored as little-endian */
667 swap_here(&prom->mac_addr[0]);
668 swap_here(&prom->mac_addr[4]);
676 fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
678 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
680 return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
681 pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
684 #endif /* CONFIG_ATM_FORE200E_PCA */
687 #ifdef CONFIG_ATM_FORE200E_SBA
690 fore200e_sba_read(volatile u32 __iomem *addr)
692 return sbus_readl(addr);
697 fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
699 sbus_writel(val, addr);
704 fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
706 u32 dma_addr = sbus_map_single((struct sbus_dev*)fore200e->bus_dev, virt_addr, size, direction);
708 DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
709 virt_addr, size, direction, dma_addr);
716 fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
718 DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
719 dma_addr, size, direction);
721 sbus_unmap_single((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
726 fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
728 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
730 sbus_dma_sync_single_for_cpu((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
734 fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
736 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
738 sbus_dma_sync_single_for_device((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
742 /* allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
743 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
746 fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
747 int size, int nbr, int alignment)
749 chunk->alloc_size = chunk->align_size = size * nbr;
751 /* returned chunks are page-aligned */
752 chunk->alloc_addr = sbus_alloc_consistent((struct sbus_dev*)fore200e->bus_dev,
756 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
759 chunk->align_addr = chunk->alloc_addr;
765 /* free a DVMA consistent chunk of memory */
768 fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
770 sbus_free_consistent((struct sbus_dev*)fore200e->bus_dev,
778 fore200e_sba_irq_enable(struct fore200e* fore200e)
780 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
781 fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
786 fore200e_sba_irq_check(struct fore200e* fore200e)
788 return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
793 fore200e_sba_irq_ack(struct fore200e* fore200e)
795 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
796 fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
801 fore200e_sba_reset(struct fore200e* fore200e)
803 fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
805 fore200e->bus->write(0, fore200e->regs.sba.hcr);
810 fore200e_sba_map(struct fore200e* fore200e)
812 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
815 /* gain access to the SBA specific registers */
816 fore200e->regs.sba.hcr = sbus_ioremap(&sbus_dev->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
817 fore200e->regs.sba.bsr = sbus_ioremap(&sbus_dev->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
818 fore200e->regs.sba.isr = sbus_ioremap(&sbus_dev->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
819 fore200e->virt_base = sbus_ioremap(&sbus_dev->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
821 if (fore200e->virt_base == NULL) {
822 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
826 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
828 fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
830 /* get the supported DVMA burst sizes */
831 bursts = prom_getintdefault(sbus_dev->bus->prom_node, "burst-sizes", 0x00);
833 if (sbus_can_dma_64bit(sbus_dev))
834 sbus_set_sbus64(sbus_dev, bursts);
836 fore200e->state = FORE200E_STATE_MAP;
842 fore200e_sba_unmap(struct fore200e* fore200e)
844 sbus_iounmap(fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
845 sbus_iounmap(fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
846 sbus_iounmap(fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
847 sbus_iounmap(fore200e->virt_base, SBA200E_RAM_LENGTH);
852 fore200e_sba_configure(struct fore200e* fore200e)
854 fore200e->state = FORE200E_STATE_CONFIGURE;
859 static struct fore200e* __init
860 fore200e_sba_detect(const struct fore200e_bus* bus, int index)
862 struct fore200e* fore200e;
863 struct sbus_bus* sbus_bus;
864 struct sbus_dev* sbus_dev = NULL;
866 unsigned int count = 0;
868 for_each_sbus (sbus_bus) {
869 for_each_sbusdev (sbus_dev, sbus_bus) {
870 if (strcmp(sbus_dev->prom_name, SBA200E_PROM_NAME) == 0) {
880 if (sbus_dev->num_registers != 4) {
881 printk(FORE200E "this %s device has %d instead of 4 registers\n",
882 bus->model_name, sbus_dev->num_registers);
886 fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL);
887 if (fore200e == NULL)
891 fore200e->bus_dev = sbus_dev;
892 fore200e->irq = sbus_dev->irqs[ 0 ];
894 fore200e->phys_base = (unsigned long)sbus_dev;
896 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);
903 fore200e_sba_prom_read(struct fore200e* fore200e, struct prom_data* prom)
905 struct sbus_dev* sbus_dev = (struct sbus_dev*) fore200e->bus_dev;
908 len = prom_getproperty(sbus_dev->prom_node, "macaddrlo2", &prom->mac_addr[ 4 ], 4);
912 len = prom_getproperty(sbus_dev->prom_node, "macaddrhi4", &prom->mac_addr[ 2 ], 4);
916 prom_getproperty(sbus_dev->prom_node, "serialnumber",
917 (char*)&prom->serial_number, sizeof(prom->serial_number));
919 prom_getproperty(sbus_dev->prom_node, "promversion",
920 (char*)&prom->hw_revision, sizeof(prom->hw_revision));
927 fore200e_sba_proc_read(struct fore200e* fore200e, char *page)
929 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
931 return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n", sbus_dev->slot, sbus_dev->prom_name);
933 #endif /* CONFIG_ATM_FORE200E_SBA */
/* reap completed tx entries from the tail of the host tx queue:
   unmap the DMA buffer, hand the skb back to the owning vcc (via
   vcc->pop) or drop it if the vcc is gone or has been re-opened,
   update statistics, and recycle the queue entry.
   Caller holds fore200e->q_lock (see fore200e_irq / tx tasklet). */
fore200e_tx_irq(struct fore200e* fore200e)
struct host_txq* txq = &fore200e->host_txq;
struct host_txq_entry* entry;
struct fore200e_vc_map* vc_map;
/* nothing in flight: nothing to reap */
if (fore200e->host_txq.txing == 0)
entry = &txq->host_entry[ txq->tail ];
/* oldest in-flight entry not completed yet: stop scanning */
if ((*entry->status & STATUS_COMPLETE) == 0) {
DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
entry, txq->tail, entry->vc_map, entry->skb);
/* free copy of misaligned data */
/* remove DMA mapping */
fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
vc_map = entry->vc_map;
/* vcc closed since the time the entry was submitted for tx? */
if ((vc_map->vcc == NULL) ||
(test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
fore200e->atm_dev->number);
dev_kfree_skb_any(entry->skb);
/* vcc closed then immediately re-opened? */
if (vc_map->incarn != entry->incarn) {
/* when a vcc is closed, some PDUs may be still pending in the tx queue.
if the same vcc is immediately re-opened, those pending PDUs must
not be popped after the completion of their emission, as they refer
to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
would be decremented by the size of the (unrelated) skb, possibly
leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
we thus bind the tx entry to the current incarnation of the vcc
when the entry is submitted for tx. When the tx later completes,
if the incarnation number of the tx entry does not match the one
of the vcc, then this implies that the vcc has been closed then re-opened.
we thus just drop the skb here. */
DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
fore200e->atm_dev->number);
dev_kfree_skb_any(entry->skb);
/* notify tx completion */
/* NOTE(review): 'vcc' is used below but its assignment is not visible in
   this extract — presumably taken from vc_map->vcc; verify in full source */
vcc->pop(vcc, entry->skb);
dev_kfree_skb_any(entry->skb);
/* race fixed by the above incarnation mechanism, but... */
if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
/* check error condition */
if (*entry->status & STATUS_ERROR)
atomic_inc(&vcc->stats->tx_err);
atomic_inc(&vcc->stats->tx);
/* recycle the queue entry and advance the tail */
*entry->status = STATUS_FREE;
fore200e->host_txq.txing--;
FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
#ifdef FORE200E_BSQ_DEBUG
/* debug-only consistency check of a buffer-supply queue's free list:
   verifies that no free buffer is marked 'supplied', that each buffer's
   scheme/magn/index fields are sane, and that the free-list length
   matches freebuf_count. 'where' tags the call site in log output. */
int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
struct buffer* buffer;
buffer = bsq->freebuf;
if (buffer->supplied) {
printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
where, scheme, magn, buffer->index);
if (buffer->magn != magn) {
printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
where, scheme, magn, buffer->index, buffer->magn);
if (buffer->scheme != scheme) {
printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
where, scheme, magn, buffer->index, buffer->scheme);
if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
where, scheme, magn, buffer->index);
buffer = buffer->next;
if (count != bsq->freebuf_count) {
printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
where, scheme, magn, count, bsq->freebuf_count);
/* replenish the adapter's rx buffers: for every buffer-supply queue,
   while at least RBD_BLK_SIZE buffers sit on the free list, fill one
   rbd block with RBD_BLK_SIZE buffers popped from the free list and
   hand the block's DMA address to the adapter */
fore200e_supply(struct fore200e* fore200e)
int scheme, magn, i;
struct host_bsq* bsq;
struct host_bsq_entry* entry;
struct buffer* buffer;
for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
bsq = &fore200e->host_bsq[ scheme ][ magn ];
#ifdef FORE200E_BSQ_DEBUG
bsq_audit(1, bsq, scheme, magn);
while (bsq->freebuf_count >= RBD_BLK_SIZE) {
DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
entry = &bsq->host_entry[ bsq->head ];
for (i = 0; i < RBD_BLK_SIZE; i++) {
/* take the first buffer in the free buffer list */
buffer = bsq->freebuf;
printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
scheme, magn, bsq->freebuf_count);
bsq->freebuf = buffer->next;
#ifdef FORE200E_BSQ_DEBUG
if (buffer->supplied)
printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
scheme, magn, buffer->index);
buffer->supplied = 1;
/* record the buffer's DMA address and an opaque handle used to find it back on rx */
entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
entry->rbd_block->rbd[ i ].handle = FORE200E_BUF2HDL(buffer);
FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
/* decrease accordingly the number of free rx buffers */
bsq->freebuf_count -= RBD_BLK_SIZE;
/* mark the entry pending, then post the rbd block to the adapter */
*entry->status = STATUS_PENDING;
fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
/* deliver a received PDU (described by 'rpd') to its vcc: allocate an
   skb sized to the whole PDU, optionally prepend the 4-byte ATM cell
   header (52-byte AAL0 SDU mode), copy each rx segment into the skb
   with the proper DMA sync bracketing, account the memory via
   atm_charge(), and push the skb up the ATM stack */
fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
struct sk_buff* skb;
struct buffer* buffer;
struct fore200e_vcc* fore200e_vcc;
#ifdef FORE200E_52BYTE_AAL0_SDU
u32 cell_header = 0;
fore200e_vcc = FORE200E_VCC(vcc);
ASSERT(fore200e_vcc);
#ifdef FORE200E_52BYTE_AAL0_SDU
/* atmdump-like apps expect raw AAL0 cells including the 4-byte header;
   rebuild it from the fields the adapter reported */
if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
(rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
(rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
(rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
rpd->atm_header.clp;
/* compute total PDU length */
for (i = 0; i < rpd->nseg; i++)
pdu_len += rpd->rsd[ i ].length;
skb = alloc_skb(pdu_len, GFP_ATOMIC);
DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
atomic_inc(&vcc->stats->rx_drop);
__net_timestamp(skb);
#ifdef FORE200E_52BYTE_AAL0_SDU
*((u32*)skb_put(skb, 4)) = cell_header;
/* reassemble segments */
for (i = 0; i < rpd->nseg; i++) {
/* rebuild rx buffer address from rsd handle */
buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
/* Make device DMA transfer visible to CPU. */
fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);
/* Now let the device get at it again. */
fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
/* per-vcc rx statistics */
if (pdu_len < fore200e_vcc->rx_min_pdu)
fore200e_vcc->rx_min_pdu = pdu_len;
if (pdu_len > fore200e_vcc->rx_max_pdu)
fore200e_vcc->rx_max_pdu = pdu_len;
fore200e_vcc->rx_pdu++;
/* atm_charge() returning 0 means the socket's rx quota is exhausted */
if (atm_charge(vcc, skb->truesize) == 0) {
DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
vcc->itf, vcc->vpi, vcc->vci);
dev_kfree_skb_any(skb);
atomic_inc(&vcc->stats->rx_drop);
ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
vcc->push(vcc, skb);
atomic_inc(&vcc->stats->rx);
ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
/* return the rx buffers referenced by a consumed rpd to their
   buffer-supply queue free lists, so fore200e_supply() can hand
   them back to the adapter */
fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
struct host_bsq* bsq;
struct buffer* buffer;
for (i = 0; i < rpd->nseg; i++) {
/* rebuild rx buffer address from rsd handle */
buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
#ifdef FORE200E_BSQ_DEBUG
bsq_audit(2, bsq, buffer->scheme, buffer->magn);
if (buffer->supplied == 0)
printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
buffer->scheme, buffer->magn, buffer->index);
buffer->supplied = 0;
/* re-insert the buffer into the free buffer list */
buffer->next = bsq->freebuf;
bsq->freebuf = buffer;
/* then increment the number of free rx buffers */
bsq->freebuf_count++;
/* drain completed entries from the head of the host rx queue: look up
   the target vcc from the PDU's vpi/vci, push intact PDUs up the stack
   (or count errors), recycle the buffers, ack the rpd to the adapter,
   and finally resupply rx buffers.
   Caller holds fore200e->q_lock (see fore200e_irq / rx tasklet). */
fore200e_rx_irq(struct fore200e* fore200e)
struct host_rxq* rxq = &fore200e->host_rxq;
struct host_rxq_entry* entry;
struct atm_vcc* vcc;
struct fore200e_vc_map* vc_map;
entry = &rxq->host_entry[ rxq->head ];
/* no more received PDUs */
if ((*entry->status & STATUS_COMPLETE) == 0)
vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
/* drop PDUs whose vcc is gone or not ready */
if ((vc_map->vcc == NULL) ||
(test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
fore200e->atm_dev->number,
entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
/* NOTE(review): 'vcc' is used below but its assignment is not visible in
   this extract — presumably taken from vc_map->vcc; verify in full source */
if ((*entry->status & STATUS_ERROR) == 0) {
fore200e_push_rpd(fore200e, vcc, entry->rpd);
DPRINTK(2, "damaged PDU on %d.%d.%d\n",
fore200e->atm_dev->number,
entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
atomic_inc(&vcc->stats->rx_err);
FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
/* hand the rx buffers back to the free lists */
fore200e_collect_rpd(fore200e, entry->rpd);
/* rewrite the rpd address to ack the received PDU */
fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
*entry->status = STATUS_FREE;
fore200e_supply(fore200e);
/*
 * fore200e_irq: inline (non-tasklet) deferred interrupt work -- service
 * the rx queue then the tx queue, each under the queue spinlock.
 * Only compiled when FORE200E_USE_TASKLET is not defined.
 */
1314 #ifndef FORE200E_USE_TASKLET
1316 fore200e_irq(struct fore200e* fore200e)
1318 unsigned long flags;
1320 spin_lock_irqsave(&fore200e->q_lock, flags);
1321 fore200e_rx_irq(fore200e);
1322 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/* the lock is dropped and retaken between rx and tx servicing,
   presumably to bound irq-off latency -- TODO confirm intent */
1324 spin_lock_irqsave(&fore200e->q_lock, flags);
1325 fore200e_tx_irq(fore200e);
1326 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/*
 * fore200e_interrupt: top-half interrupt handler (shared IRQ line;
 * 'dev' is the struct atm_dev registered in fore200e_irq_request).
 * Verifies the board actually raised the interrupt, then either
 * schedules the tx/rx tasklets or does the work inline, and finally
 * acks the interrupt on the adapter.
 */
1332 fore200e_interrupt(int irq, void* dev, struct pt_regs* regs)
1334 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
/* shared IRQ: bail out early if our board did not trigger it */
1336 if (fore200e->bus->irq_check(fore200e) == 0) {
1338 DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1341 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1343 #ifdef FORE200E_USE_TASKLET
1344 tasklet_schedule(&fore200e->tx_tasklet);
1345 tasklet_schedule(&fore200e->rx_tasklet);
1347 fore200e_irq(fore200e);
/* ack last, after the work has been queued/performed */
1350 fore200e->bus->irq_ack(fore200e);
/*
 * fore200e_tx_tasklet: deferred tx-completion work (tasklet variant).
 * 'data' carries the struct fore200e pointer installed by tasklet_init.
 */
1355 #ifdef FORE200E_USE_TASKLET
1357 fore200e_tx_tasklet(unsigned long data)
1359 struct fore200e* fore200e = (struct fore200e*) data;
1360 unsigned long flags;
1362 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1364 spin_lock_irqsave(&fore200e->q_lock, flags);
1365 fore200e_tx_irq(fore200e);
1366 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/*
 * fore200e_rx_tasklet: deferred rx work (tasklet variant).
 * NOTE(review): the call below re-casts 'data' although the local
 * 'fore200e' already holds the same pointer -- harmless, but
 * inconsistent with fore200e_tx_tasklet above.
 */
1371 fore200e_rx_tasklet(unsigned long data)
1373 struct fore200e* fore200e = (struct fore200e*) data;
1374 unsigned long flags;
1376 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1378 spin_lock_irqsave(&fore200e->q_lock, flags);
1379 fore200e_rx_irq((struct fore200e*) data);
1380 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/*
 * fore200e_select_scheme: choose one of the two (identical) rx buffer
 * schemes for a new VC, alternating on VCI parity (odd VCIs get scheme
 * one, even VCIs scheme two) so VCs are spread across both pools.
 * Returns the chosen scheme (return statement not visible in extract).
 */
1386 fore200e_select_scheme(struct atm_vcc* vcc)
1388 /* fairly balance the VCs over (identical) buffer schemes */
1389 int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1391 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1392 vcc->itf, vcc->vpi, vcc->vci, scheme);
/*
 * fore200e_activate_vcin: issue an ACTIVATE_VCIN (activate != 0) or
 * DEACTIVATE_VCIN command to the cp for the given VC, then poll for
 * completion. Callers treat a negative return as failure (the return
 * statements themselves are not visible in this extract).
 */
1399 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1401 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1402 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1403 struct activate_opcode activ_opcode;
1404 struct deactivate_opcode deactiv_opcode;
1407 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal);
1409 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
/* on activation, bind the VC to a buffer scheme first */
1412 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1414 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1415 activ_opcode.aal = aal;
1416 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1417 activ_opcode.pad = 0;
1420 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1421 deactiv_opcode.pad = 0;
1424 vpvc.vci = vcc->vci;
1425 vpvc.vpi = vcc->vpi;
1427 *entry->status = STATUS_PENDING;
1431 #ifdef FORE200E_52BYTE_AAL0_SDU
1434 /* the MTU is not used by the cp, except in the case of AAL0 */
1435 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
/* the opcode is written last: writing it hands the command to the cp */
1436 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1437 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1440 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1441 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
/* poll (up to 400 iterations) for the cp to complete the command */
1444 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1446 *entry->status = STATUS_FREE;
1449 printk(FORE200E "unable to %s VC %d.%d.%d\n",
1450 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1454 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
1455 activate ? "open" : "clos");
/*
 * fore200e_rate_ctrl: derive the cp's data-cells/idle-cells ratio from
 * the requested tx peak cell rate. A PCR at or above OC-3 line rate
 * disables rate control (both counts set to 0).
 */
1461 #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */
1464 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1466 if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1468 /* compute the data cells to idle cells ratio from the tx PCR */
1469 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1470 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1473 /* disable rate control */
1474 rate->data_cells = rate->idle_cells = 0;
/*
 * fore200e_open: atmdev 'open' hook. Claims the (vpi,vci) slot in the
 * vc map, allocates the per-VC state, reserves pseudo-CBR bandwidth
 * when requested, activates the VC on the cp, computes rate-control
 * parameters and finally marks the VC ATM_VF_READY.
 * NOTE(review): sparse extract -- the vc_map 'in use' test, the 'vci'
 * local declaration and the error-path return statements are missing
 * from this view.
 */
1480 fore200e_open(struct atm_vcc *vcc)
1482 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1483 struct fore200e_vcc* fore200e_vcc;
1484 struct fore200e_vc_map* vc_map;
1485 unsigned long flags;
1487 short vpi = vcc->vpi;
1489 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1490 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
/* the vc map is shared with the irq path, hence the spinlock */
1492 spin_lock_irqsave(&fore200e->q_lock, flags);
1494 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1497 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1499 printk(FORE200E "VC %d.%d.%d already in use\n",
1500 fore200e->atm_dev->number, vpi, vci);
1507 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/* GFP_ATOMIC: may run with the queue lock recently held / atm core context */
1509 fore200e_vcc = fore200e_kmalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1510 if (fore200e_vcc == NULL) {
1515 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1516 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1517 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1518 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1519 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1520 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1521 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1523 /* pseudo-CBR bandwidth requested? */
1524 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
/* rate_sf semaphore serializes available_cell_rate accounting */
1526 down(&fore200e->rate_sf);
1527 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1528 up(&fore200e->rate_sf);
1530 fore200e_kfree(fore200e_vcc);
1535 /* reserve bandwidth */
1536 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1537 up(&fore200e->rate_sf);
1540 vcc->itf = vcc->dev->number;
1542 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1543 set_bit(ATM_VF_ADDR, &vcc->flags);
1545 vcc->dev_data = fore200e_vcc;
1547 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
/* activation failed: undo flags, state and bandwidth reservation.
   NOTE(review): the bandwidth give-back below is presumably guarded
   by a CBR check and rate_sf locking not visible in this extract. */
1551 clear_bit(ATM_VF_ADDR, &vcc->flags);
1552 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1554 vcc->dev_data = NULL;
1556 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1558 fore200e_kfree(fore200e_vcc);
1562 /* compute rate control parameters */
1563 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1565 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1566 set_bit(ATM_VF_HASQOS, &vcc->flags);
1568 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1569 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1570 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1571 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
/* initialize per-VC PDU statistics (min starts above any legal size) */
1574 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1575 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1576 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
1578 /* new incarnation of the vcc */
1579 vc_map->incarn = ++fore200e->incarn_count;
1581 /* VC unusable before this flag is set */
1582 set_bit(ATM_VF_READY, &vcc->flags);
/*
 * fore200e_close: atmdev 'close' hook. Clears ATM_VF_READY first so
 * the rx path stops delivering, deactivates the VC on the cp, releases
 * the vc map slot under the queue lock, returns any reserved pseudo-CBR
 * bandwidth, and frees the per-VC state.
 * NOTE(review): sparse extract -- the vc_map->vcc = NULL assignment and
 * some braces are not visible here.
 */
1589 fore200e_close(struct atm_vcc* vcc)
1591 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1592 struct fore200e_vcc* fore200e_vcc;
1593 struct fore200e_vc_map* vc_map;
1594 unsigned long flags;
1597 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1598 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1600 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
/* stop delivery before tearing the VC down on the cp */
1602 clear_bit(ATM_VF_READY, &vcc->flags);
1604 fore200e_activate_vcin(fore200e, 0, vcc, 0);
1606 spin_lock_irqsave(&fore200e->q_lock, flags);
1608 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1610 /* the vc is no longer considered as "in use" by fore200e_open() */
1613 vcc->itf = vcc->vci = vcc->vpi = 0;
1615 fore200e_vcc = FORE200E_VCC(vcc);
1616 vcc->dev_data = NULL;
1618 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1620 /* release reserved bandwidth, if any */
1621 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1623 down(&fore200e->rate_sf);
1624 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1625 up(&fore200e->rate_sf);
1627 clear_bit(ATM_VF_HASQOS, &vcc->flags);
1630 clear_bit(ATM_VF_ADDR, &vcc->flags);
1631 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1633 ASSERT(fore200e_vcc);
1634 fore200e_kfree(fore200e_vcc);
/*
 * fore200e_send: atmdev 'send' hook -- transmit one skb on a VC.
 * Handles the optional 52-byte AAL0 SDU format (user-supplied cell
 * header), re-copies misaligned or AAL0-padded payloads into a bounce
 * buffer, fills a transmit PDU descriptor (tpd) and hands it to the cp
 * by writing the tpd address into the cp-resident tx queue entry.
 * NOTE(review): sparse extract -- skb_len/tx_copy/tpd declarations,
 * the retry loop, several braces and return statements are missing
 * from this view.
 */
1639 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1641 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1642 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1643 struct fore200e_vc_map* vc_map;
1644 struct host_txq* txq = &fore200e->host_txq;
1645 struct host_txq_entry* entry;
1647 struct tpd_haddr tpd_haddr;
1648 int retry = CONFIG_ATM_FORE200E_TX_RETRY;
1650 int tx_len = skb->len;
1651 u32* cell_header = NULL;
1652 unsigned char* skb_data;
1654 unsigned char* data;
1655 unsigned long flags;
1658 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1660 ASSERT(fore200e_vcc);
1662 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
/* BUG(review): the last argument repeats vcc->vpi; it should almost
   certainly be vcc->vci to match the "%d.%d.%d" itf.vpi.vci format */
1663 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi);
1664 dev_kfree_skb_any(skb);
1668 #ifdef FORE200E_52BYTE_AAL0_SDU
/* 52-byte AAL0 SDUs carry a 4-byte cell header before the payload */
1669 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1670 cell_header = (u32*) skb->data;
1671 skb_data = skb->data + 4; /* skip 4-byte cell header */
1672 skb_len = tx_len = skb->len - 4;
1674 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1679 skb_data = skb->data;
/* the cp requires 32-bit aligned tx buffers: copy if misaligned */
1683 if (((unsigned long)skb_data) & 0x3) {
1685 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
/* AAL0 PDUs must be a whole number of cells: round up and zero-pad */
1690 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1692 /* this simply NUKES the PCA board */
1693 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1695 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
/* bounce buffer for the copy case; GFP_DMA for ISA-style DMA reach */
1699 data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
1705 dev_kfree_skb_any(skb);
1710 memcpy(data, skb_data, skb_len);
1711 if (skb_len < tx_len)
1712 memset(data + skb_len, 0x00, tx_len - skb_len);
1718 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1719 ASSERT(vc_map->vcc == vcc);
1723 spin_lock_irqsave(&fore200e->q_lock, flags);
1725 entry = &txq->host_entry[ txq->head ];
/* tx queue full (or nearly): reap completed entries, maybe retry */
1727 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1729 /* try to free completed tx queue entries */
1730 fore200e_tx_irq(fore200e);
1732 if (*entry->status != STATUS_FREE) {
1734 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1736 /* retry once again? */
1742 atomic_inc(&vcc->stats->tx_err);
1745 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1746 fore200e->name, fore200e->cp_queues->heartbeat);
1751 dev_kfree_skb_any(skb);
/* record the VC incarnation so completion can detect a reopened VC */
1761 entry->incarn = vc_map->incarn;
1762 entry->vc_map = vc_map;
1764 entry->data = tx_copy ? data : NULL;
1767 tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
1768 tpd->tsd[ 0 ].length = tx_len;
1770 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1773 /* The dma_map call above implies a dma_sync so the device can use it,
1774 * thus no explicit dma_sync call is necessary here.
1777 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1778 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1779 tpd->tsd[0].length, skb_len);
1781 if (skb_len < fore200e_vcc->tx_min_pdu)
1782 fore200e_vcc->tx_min_pdu = skb_len;
1783 if (skb_len > fore200e_vcc->tx_max_pdu)
1784 fore200e_vcc->tx_max_pdu = skb_len;
1785 fore200e_vcc->tx_pdu++;
1787 /* set tx rate control information */
1788 tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1789 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
/* unpack the user-supplied AAL0 cell header into the tpd fields */
1792 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1793 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1794 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1795 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1796 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1799 /* set the ATM header, common to all cells conveying the PDU */
1800 tpd->atm_header.clp = 0;
1801 tpd->atm_header.plt = 0;
1802 tpd->atm_header.vci = vcc->vci;
1803 tpd->atm_header.vpi = vcc->vpi;
1804 tpd->atm_header.gfc = 0;
1807 tpd->spec.length = tx_len;
1809 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal);
1812 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */
1814 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */
/* writing tpd_haddr into the cp entry is what triggers transmission */
1816 *entry->status = STATUS_PENDING;
1817 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1819 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/*
 * fore200e_getstats: fetch the on-board statistics block from the cp
 * into fore200e->stats (lazily allocated, DMA-capable) via a GET_STATS
 * command, polling for completion.
 * NOTE(review): sparse extract -- 'ok'/'stats_dma_addr' declarations
 * and return statements are not visible here.
 */
1826 fore200e_getstats(struct fore200e* fore200e)
1828 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1829 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1830 struct stats_opcode opcode;
/* lazily allocate the stats buffer on first use */
1834 if (fore200e->stats == NULL) {
1835 fore200e->stats = fore200e_kmalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
1836 if (fore200e->stats == NULL)
1840 stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
1841 sizeof(struct stats), DMA_FROM_DEVICE);
1843 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1845 opcode.opcode = OPCODE_GET_STATS;
1848 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
/* writing the opcode last hands the command to the cp */
1850 *entry->status = STATUS_PENDING;
1852 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1854 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1856 *entry->status = STATUS_FREE;
1858 fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1861 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
/*
 * fore200e_getsockopt: atmdev getsockopt hook -- no device-level
 * options are implemented; only logs the request (return value not
 * visible in this extract).
 */
1870 fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1872 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1874 DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1875 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
/*
 * fore200e_setsockopt: atmdev setsockopt hook -- no device-level
 * options are implemented; only logs the request (return value not
 * visible in this extract).
 */
1882 fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1884 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1886 DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1887 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
/*
 * fore200e_get_oc3: read the OC-3 (SUNI) registers from the cp via a
 * GET_OC3 command into a caller-supplied, DMA-mapped buffer.
 * Compiled out (#if 0) -- currently unused.
 * NOTE(review): the opcode write at 1916 casts to plain (u32*) while
 * the live fore200e_set_oc3 uses (u32 __iomem *) -- inconsistent, and
 * would warn under sparse if ever re-enabled.
 */
1893 #if 0 /* currently unused */
1895 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1897 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1898 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1899 struct oc3_opcode opcode;
1901 u32 oc3_regs_dma_addr;
1903 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1905 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1907 opcode.opcode = OPCODE_GET_OC3;
1912 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1914 *entry->status = STATUS_PENDING;
1916 fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);
1918 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1920 *entry->status = STATUS_FREE;
1922 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1925 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
/*
 * fore200e_set_oc3: write an OC-3 (SUNI) register on the cp via a
 * SET_OC3 command: 'value' is applied under 'mask' to register 'reg'.
 * NOTE(review): sparse extract -- opcode.reg/mask assignments, 'ok'
 * declaration and return statements are not visible here.
 */
1935 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1937 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1938 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1939 struct oc3_opcode opcode;
1942 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1944 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1946 opcode.opcode = OPCODE_SET_OC3;
1948 opcode.value = value;
/* no register readback needed for a set: regs_haddr is zeroed */
1951 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1953 *entry->status = STATUS_PENDING;
1955 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1957 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1959 *entry->status = STATUS_FREE;
1962 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
/*
 * fore200e_setloop: configure SUNI loopback mode (none / local PHY /
 * remote PHY) through the master control register. Requires
 * CAP_NET_ADMIN; on success records the mode in fore200e->loop_mode.
 * NOTE(review): sparse extract -- the ATM_LM_NONE case label, default
 * case and return statements are not visible here.
 */
1971 fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1973 u32 mct_value, mct_mask;
1976 if (!capable(CAP_NET_ADMIN))
1979 switch (loop_mode) {
1983 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
1986 case ATM_LM_LOC_PHY:
1987 mct_value = mct_mask = SUNI_MCT_DLE;
1990 case ATM_LM_RMT_PHY:
1991 mct_value = mct_mask = SUNI_MCT_LLE;
/* apply the chosen bits to the SUNI master control register */
1998 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
2000 fore200e->loop_mode = loop_mode;
/*
 * fore200e_swap: convert a 32-bit statistics counter from the cp's
 * byte order to host order; compiles to a byte swap only on
 * little-endian hosts. NOTE(review): the function body (swab/return
 * and the big-endian branch) is not visible in this extract.
 */
2006 static inline unsigned int
2007 fore200e_swap(unsigned int in)
2009 #if defined(__LITTLE_ENDIAN)
/*
 * fore200e_fetch_stats: service SONET_GETSTAT -- refresh the on-board
 * statistics via fore200e_getstats(), translate the relevant OC-3 and
 * per-AAL counters into a struct sonet_stats, and copy it to user
 * space. Returns -EFAULT on a failed copy_to_user.
 */
2018 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
2020 struct sonet_stats tmp;
2022 if (fore200e_getstats(fore200e) < 0)
/* counters arrive in cp byte order; fore200e_swap fixes them up */
2025 tmp.section_bip = fore200e_swap(fore200e->stats->oc3.section_bip8_errors);
2026 tmp.line_bip = fore200e_swap(fore200e->stats->oc3.line_bip24_errors);
2027 tmp.path_bip = fore200e_swap(fore200e->stats->oc3.path_bip8_errors);
2028 tmp.line_febe = fore200e_swap(fore200e->stats->oc3.line_febe_errors);
2029 tmp.path_febe = fore200e_swap(fore200e->stats->oc3.path_febe_errors);
2030 tmp.corr_hcs = fore200e_swap(fore200e->stats->oc3.corr_hcs_errors);
2031 tmp.uncorr_hcs = fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors);
/* cell totals are summed over all three AAL types */
2032 tmp.tx_cells = fore200e_swap(fore200e->stats->aal0.cells_transmitted) +
2033 fore200e_swap(fore200e->stats->aal34.cells_transmitted) +
2034 fore200e_swap(fore200e->stats->aal5.cells_transmitted);
2035 tmp.rx_cells = fore200e_swap(fore200e->stats->aal0.cells_received) +
2036 fore200e_swap(fore200e->stats->aal34.cells_received) +
2037 fore200e_swap(fore200e->stats->aal5.cells_received);
2040 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
/*
 * fore200e_ioctl: atmdev ioctl hook. Dispatches the SONET/loopback
 * ioctls (SONET_GETSTAT, loopback get/set, supported-modes query);
 * everything else returns -ENOSYS.
 * NOTE(review): sparse extract -- the switch statement and case labels
 * are not visible here, only the per-case bodies.
 */
2047 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
2049 struct fore200e* fore200e = FORE200E_DEV(dev);
2051 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
2056 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
/* zeroing stats is accepted but is a no-op here */
2059 return put_user(0, (int __user *)arg) ? -EFAULT : 0;
2062 return fore200e_setloop(fore200e, (int)(unsigned long)arg);
2065 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
2068 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
2071 return -ENOSYS; /* not implemented */
/*
 * fore200e_change_qos: atmdev change_qos hook. For a pseudo-CBR tx
 * class, atomically re-balances the reserved bandwidth (giving back the
 * old PCR and taking the new one under rate_sf), installs the new QoS
 * on the vcc and recomputes the rate-control parameters.
 * NOTE(review): sparse extract -- return statements and some braces
 * are not visible here.
 */
2076 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
2078 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
2079 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
2081 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
/* BUG(review): the last argument repeats vcc->vpi; it should almost
   certainly be vcc->vci to match the "%d.%d.%d" itf.vpi.vci format */
2082 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vpi);
2086 DPRINTK(2, "change_qos %d.%d.%d, "
2087 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
2088 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
2089 "available_cell_rate = %u",
2090 vcc->itf, vcc->vpi, vcc->vci,
2091 fore200e_traffic_class[ qos->txtp.traffic_class ],
2092 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
2093 fore200e_traffic_class[ qos->rxtp.traffic_class ],
2094 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
2095 flags, fore200e->available_cell_rate);
2097 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
2099 down(&fore200e->rate_sf);
/* reject if (available + currently held) cannot cover the new PCR */
2100 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
2101 up(&fore200e->rate_sf);
/* swap the reservation: release the old PCR, take the new one */
2105 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
2106 fore200e->available_cell_rate -= qos->txtp.max_pcr;
2108 up(&fore200e->rate_sf);
2110 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
2112 /* update rate control parameters */
2113 fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
2115 set_bit(ATM_VF_HASQOS, &vcc->flags);
/*
 * fore200e_irq_request: reserve the adapter's (shared) IRQ line with
 * fore200e_interrupt as handler and the atm_dev as cookie; when the
 * tasklet variant is compiled in, also initialize the tx/rx tasklets.
 * Advances the init state machine to FORE200E_STATE_IRQ.
 */
2124 static int __devinit
2125 fore200e_irq_request(struct fore200e* fore200e)
2127 if (request_irq(fore200e->irq, fore200e_interrupt, SA_SHIRQ, fore200e->name, fore200e->atm_dev) < 0) {
2129 printk(FORE200E "unable to reserve IRQ %s for device %s\n",
2130 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2134 printk(FORE200E "IRQ %s reserved for device %s\n",
2135 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2137 #ifdef FORE200E_USE_TASKLET
2138 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
2139 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
2142 fore200e->state = FORE200E_STATE_IRQ;
/*
 * fore200e_get_esi: read the adapter PROM (revision, serial number,
 * MAC address) and install the 6-byte ESI -- taken from mac_addr[2..7]
 * -- into both the driver and the atm_dev.
 * NOTE(review): sparse extract -- 'ok'/'i' declarations, the error
 * check on prom_read and return statements are not visible here.
 */
2147 static int __devinit
2148 fore200e_get_esi(struct fore200e* fore200e)
2150 struct prom_data* prom = fore200e_kmalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
2156 ok = fore200e->bus->prom_read(fore200e, prom);
2158 fore200e_kfree(prom);
2162 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n",
2164 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
2165 prom->serial_number & 0xFFFF,
2166 prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ],
2167 prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]);
/* the ESI is the last 6 bytes of the 8-byte PROM MAC field */
2169 for (i = 0; i < ESI_LEN; i++) {
2170 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
2173 fore200e_kfree(prom);
/*
 * fore200e_alloc_rx_buf: for every buffer scheme/magnitude pair,
 * allocate the array of buffer descriptors and each buffer body
 * (aligned, DMA_FROM_DEVICE), threading all buffers onto the per-queue
 * free list. On a body-allocation failure, already-allocated bodies of
 * that queue are unwound. Advances state to FORE200E_STATE_ALLOC_BUF.
 */
2179 static int __devinit
2180 fore200e_alloc_rx_buf(struct fore200e* fore200e)
2182 int scheme, magn, nbr, size, i;
2184 struct host_bsq* bsq;
2185 struct buffer* buffer;
2187 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2188 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2190 bsq = &fore200e->host_bsq[ scheme ][ magn ];
/* per-queue buffer count and size come from static config tables */
2192 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
2193 size = fore200e_rx_buf_size[ scheme ][ magn ];
2195 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
2197 /* allocate the array of receive buffers */
2198 buffer = bsq->buffer = fore200e_kmalloc(nbr * sizeof(struct buffer), GFP_KERNEL);
2203 bsq->freebuf = NULL;
2205 for (i = 0; i < nbr; i++) {
2207 buffer[ i ].scheme = scheme;
2208 buffer[ i ].magn = magn;
2209 #ifdef FORE200E_BSQ_DEBUG
2210 buffer[ i ].index = i;
2211 buffer[ i ].supplied = 0;
2214 /* allocate the receive buffer body */
2215 if (fore200e_chunk_alloc(fore200e,
2216 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2217 DMA_FROM_DEVICE) < 0) {
/* unwind: free the bodies allocated so far, then the array */
2220 fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2221 fore200e_kfree(buffer);
2226 /* insert the buffer into the free buffer list */
2227 buffer[ i ].next = bsq->freebuf;
2228 bsq->freebuf = &buffer[ i ];
2230 /* all the buffers are free, initially */
2231 bsq->freebuf_count = nbr;
2233 #ifdef FORE200E_BSQ_DEBUG
2234 bsq_audit(3, bsq, scheme, magn);
2239 fore200e->state = FORE200E_STATE_ALLOC_BUF;
/*
 * fore200e_init_bs_queue: set up every buffer supply queue -- allocate
 * aligned DMA chunks for the status words and rbd blocks, locate the
 * cp-resident queue entries via the cp_queues block, and cross-link
 * host and cp entries (writing each status word's DMA address into the
 * cp entry). Advances state to FORE200E_STATE_INIT_BSQ.
 */
2244 static int __devinit
2245 fore200e_init_bs_queue(struct fore200e* fore200e)
2247 int scheme, magn, i;
2249 struct host_bsq* bsq;
2250 struct cp_bsq_entry __iomem * cp_entry;
2252 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2253 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2255 DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2257 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2259 /* allocate and align the array of status words */
2260 if (fore200e->bus->dma_chunk_alloc(fore200e,
2262 sizeof(enum status),
2264 fore200e->bus->status_alignment) < 0) {
2268 /* allocate and align the array of receive buffer descriptors */
2269 if (fore200e->bus->dma_chunk_alloc(fore200e,
2271 sizeof(struct rbd_block),
2273 fore200e->bus->descr_alignment) < 0) {
/* unwind the status chunk on rbd allocation failure */
2275 fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
2279 /* get the base address of the cp resident buffer supply queue entries */
2280 cp_entry = fore200e->virt_base +
2281 fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2283 /* fill the host resident and cp resident buffer supply queue entries */
2284 for (i = 0; i < QUEUE_SIZE_BS; i++) {
2286 bsq->host_entry[ i ].status =
2287 FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2288 bsq->host_entry[ i ].rbd_block =
2289 FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2290 bsq->host_entry[ i ].rbd_block_dma =
2291 FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2292 bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2294 *bsq->host_entry[ i ].status = STATUS_FREE;
2296 fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
2297 &cp_entry[ i ].status_haddr);
2302 fore200e->state = FORE200E_STATE_INIT_BSQ;
/*
 * fore200e_init_rx_queue: set up the host receive queue -- aligned DMA
 * chunks for status words and rpd descriptors, cross-linked with the
 * cp-resident rx queue entries. Unlike the tx queue, each rpd's DMA
 * address IS written into its cp entry up front (that write arms the
 * entry). Advances state to FORE200E_STATE_INIT_RXQ.
 */
2307 static int __devinit
2308 fore200e_init_rx_queue(struct fore200e* fore200e)
2310 struct host_rxq* rxq = &fore200e->host_rxq;
2311 struct cp_rxq_entry __iomem * cp_entry;
2314 DPRINTK(2, "receive queue is being initialized\n");
2316 /* allocate and align the array of status words */
2317 if (fore200e->bus->dma_chunk_alloc(fore200e,
2319 sizeof(enum status),
2321 fore200e->bus->status_alignment) < 0) {
2325 /* allocate and align the array of receive PDU descriptors */
2326 if (fore200e->bus->dma_chunk_alloc(fore200e,
2330 fore200e->bus->descr_alignment) < 0) {
/* unwind the status chunk on rpd allocation failure */
2332 fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
2336 /* get the base address of the cp resident rx queue entries */
2337 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2339 /* fill the host resident and cp resident rx entries */
2340 for (i=0; i < QUEUE_SIZE_RX; i++) {
2342 rxq->host_entry[ i ].status =
2343 FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2344 rxq->host_entry[ i ].rpd =
2345 FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2346 rxq->host_entry[ i ].rpd_dma =
2347 FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2348 rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2350 *rxq->host_entry[ i ].status = STATUS_FREE;
2352 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
2353 &cp_entry[ i ].status_haddr);
2355 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2356 &cp_entry[ i ].rpd_haddr);
2359 /* set the head entry of the queue */
2362 fore200e->state = FORE200E_STATE_INIT_RXQ;
/*
 * fore200e_init_tx_queue: set up the host transmit queue -- aligned DMA
 * chunks for status words and tpd descriptors, cross-linked with the
 * cp-resident tx queue entries. The tpd DMA addresses are deliberately
 * NOT written here (see comment at 2415): writing one later is how the
 * cp detects a new PDU. Advances state to FORE200E_STATE_INIT_TXQ.
 */
2367 static int __devinit
2368 fore200e_init_tx_queue(struct fore200e* fore200e)
2370 struct host_txq* txq = &fore200e->host_txq;
2371 struct cp_txq_entry __iomem * cp_entry;
2374 DPRINTK(2, "transmit queue is being initialized\n");
2376 /* allocate and align the array of status words */
2377 if (fore200e->bus->dma_chunk_alloc(fore200e,
2379 sizeof(enum status),
2381 fore200e->bus->status_alignment) < 0) {
2385 /* allocate and align the array of transmit PDU descriptors */
2386 if (fore200e->bus->dma_chunk_alloc(fore200e,
2390 fore200e->bus->descr_alignment) < 0) {
/* unwind the status chunk on tpd allocation failure */
2392 fore200e->bus->dma_chunk_free(fore200e, &txq->status);
2396 /* get the base address of the cp resident tx queue entries */
2397 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2399 /* fill the host resident and cp resident tx entries */
2400 for (i=0; i < QUEUE_SIZE_TX; i++) {
2402 txq->host_entry[ i ].status =
2403 FORE200E_INDEX(txq->status.align_addr, enum status, i);
2404 txq->host_entry[ i ].tpd =
2405 FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2406 txq->host_entry[ i ].tpd_dma =
2407 FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2408 txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2410 *txq->host_entry[ i ].status = STATUS_FREE;
2412 fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2413 &cp_entry[ i ].status_haddr);
2415 /* although there is a one-to-one mapping of tx queue entries and tpds,
2416 we do not write here the DMA (physical) base address of each tpd into
2417 the related cp resident entry, because the cp relies on this write
2418 operation to detect that a new pdu has been submitted for tx */
2421 /* set the head and tail entries of the queue */
2425 fore200e->state = FORE200E_STATE_INIT_TXQ;
/*
 * fore200e_init_cmd_queue: set up the host command queue -- an aligned
 * DMA chunk of status words cross-linked with the cp-resident cmd
 * queue entries (commands themselves live only on the cp side).
 * Advances state to FORE200E_STATE_INIT_CMDQ.
 */
2430 static int __devinit
2431 fore200e_init_cmd_queue(struct fore200e* fore200e)
2433 struct host_cmdq* cmdq = &fore200e->host_cmdq;
2434 struct cp_cmdq_entry __iomem * cp_entry;
2437 DPRINTK(2, "command queue is being initialized\n");
2439 /* allocate and align the array of status words */
2440 if (fore200e->bus->dma_chunk_alloc(fore200e,
2442 sizeof(enum status),
2444 fore200e->bus->status_alignment) < 0) {
2448 /* get the base address of the cp resident cmd queue entries */
2449 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2451 /* fill the host resident and cp resident cmd entries */
2452 for (i=0; i < QUEUE_SIZE_CMD; i++) {
2454 cmdq->host_entry[ i ].status =
2455 FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2456 cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2458 *cmdq->host_entry[ i ].status = STATUS_FREE;
2460 fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2461 &cp_entry[ i ].status_haddr);
2464 /* set the head entry of the queue */
2467 fore200e->state = FORE200E_STATE_INIT_CMDQ;
/*
 * fore200e_param_bs_queue: write one buffer supply queue's parameters
 * (queue length, buffer size, pool size, supply block size) into the
 * cp's init block prior to issuing OPCODE_INITIALIZE.
 */
2473 fore200e_param_bs_queue(struct fore200e* fore200e,
2474 enum buffer_scheme scheme, enum buffer_magn magn,
2475 int queue_length, int pool_size, int supply_blksize)
2477 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2479 fore200e->bus->write(queue_length, &bs_spec->queue_length);
/* buffer size comes from the static per-scheme/magnitude table */
2480 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2481 fore200e->bus->write(pool_size, &bs_spec->pool_size);
2482 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
/*
 * fore200e_initialize: fill the cp's init block (queue lengths,
 * descriptor extensions, buffer supply parameters), enable cp-to-host
 * interrupts, issue OPCODE_INITIALIZE and poll (up to 3000 iterations)
 * for completion. Also initializes the host-side rate semaphore and
 * queue lock. Advances state to FORE200E_STATE_INITIALIZE.
 */
2486 static int __devinit
2487 fore200e_initialize(struct fore200e* fore200e)
2489 struct cp_queues __iomem * cpq;
2490 int ok, scheme, magn;
2492 DPRINTK(2, "device %s being initialized\n", fore200e->name);
2494 init_MUTEX(&fore200e->rate_sf);
2495 spin_lock_init(&fore200e->q_lock);
/* the cp_queues block lives at a fixed offset in board memory */
2497 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2499 /* enable cp to host interrupts */
2500 fore200e->bus->write(1, &cpq->imask);
2502 if (fore200e->bus->irq_enable)
2503 fore200e->bus->irq_enable(fore200e);
2505 fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2507 fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2508 fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len);
2509 fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len);
2511 fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension);
2512 fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension);
/* describe every buffer supply queue to the cp */
2514 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2515 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2516 fore200e_param_bs_queue(fore200e, scheme, magn,
2518 fore200e_rx_buf_nbr[ scheme ][ magn ],
2521 /* issue the initialize command */
2522 fore200e->bus->write(STATUS_PENDING, &cpq->init.status);
2523 fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2525 ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2527 printk(FORE200E "device %s initialization failed\n", fore200e->name);
2531 printk(FORE200E "device %s initialized\n", fore200e->name);
2533 fore200e->state = FORE200E_STATE_INITIALIZE;
/*
 * fore200e_monitor_putc: send one character to the i960 monitor's soft
 * UART; the AVAIL bit flags the byte as ready for the monitor to read.
 * NOTE(review): any wait-for-free loop before the write is not visible
 * in this extract.
 */
2538 static void __devinit
2539 fore200e_monitor_putc(struct fore200e* fore200e, char c)
2541 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2546 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
/*
 * fore200e_monitor_getc: poll the i960 monitor's soft UART for up to
 * ~50 ms; when a character is available, echo it to the console, mark
 * the slot free and return it (the negative timeout return is not
 * visible in this extract).
 */
2550 static int __devinit
2551 fore200e_monitor_getc(struct fore200e* fore200e)
2553 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2554 unsigned long timeout = jiffies + msecs_to_jiffies(50);
2557 while (time_before(jiffies, timeout)) {
2559 c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2561 if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
/* consume the character: mark the receive slot free for the monitor */
2563 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2565 printk("%c", c & 0xFF);
/* Send a NUL-terminated string to the i960 monitor, draining any pending
 * monitor output before each character and after the whole string. */
2575 static void __devinit
2576 fore200e_monitor_puts(struct fore200e* fore200e, char* str)
2580 /* the i960 monitor doesn't accept any new character if it has something to say */
2581 while (fore200e_monitor_getc(fore200e) >= 0);
2583 fore200e_monitor_putc(fore200e, *str++);
/* flush whatever the monitor printed in response */
2586 while (fore200e_monitor_getc(fore200e) >= 0);
/* Start the previously loaded firmware: send a "go <start_offset>" command
 * to the i960 boot monitor, then poll the monitor's boot status word until
 * it reports BSTAT_CP_RUNNING. Advances the device state to
 * FORE200E_STATE_START_FW on success.
 * NOTE(review): the declarations of 'ok'/'cmd' and the return statements
 * are not visible in this chunk. */
2590 static int __devinit
2591 fore200e_start_fw(struct fore200e* fore200e)
2595 struct fw_header* fw_header = (struct fw_header*) fore200e->bus->fw_data;
2597 DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2599 #if defined(__sparc_v9__)
2600 /* reported to be required by SBA cards on some sparc64 hosts */
/* firmware header fields are little-endian on the wire; convert for the host */
2604 sprintf(cmd, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2606 fore200e_monitor_puts(fore200e, cmd);
2608 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000);
2610 printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2614 printk(FORE200E "device %s firmware started\n", fore200e->name);
2616 fore200e->state = FORE200E_STATE_START_FW;
/* Copy the bus-specific firmware image, word by word, into board memory at
 * the load offset taken from the firmware header. The header magic is
 * validated first; a mismatch is reported as a corrupted image.
 * Advances the device state to FORE200E_STATE_LOAD_FW on success. */
2621 static int __devinit
2622 fore200e_load_fw(struct fore200e* fore200e)
2624 u32* fw_data = (u32*) fore200e->bus->fw_data;
/* image size is in bytes; convert to a 32-bit word count for the copy loop */
2625 u32 fw_size = (u32) *fore200e->bus->fw_size / sizeof(u32);
2627 struct fw_header* fw_header = (struct fw_header*) fw_data;
2629 u32 __iomem *load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2631 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2632 fore200e->name, load_addr, fw_size);
2634 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2635 printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
/* the copy must go through the bus write accessor, not a plain memcpy */
2639 for (; fw_size--; fw_data++, load_addr++)
2640 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2642 fore200e->state = FORE200E_STATE_LOAD_FW;
/* Register the board with the Linux ATM layer: allocate an atm_dev, link it
 * to this fore200e instance, set the VPI/VCI address space bounds and the
 * initially available OC-3 cell rate. Advances the device state to
 * FORE200E_STATE_REGISTER on success. */
2647 static int __devinit
2648 fore200e_register(struct fore200e* fore200e)
2650 struct atm_dev* atm_dev;
2652 DPRINTK(2, "device %s being registered\n", fore200e->name);
2654 atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1,
2656 if (atm_dev == NULL) {
2657 printk(FORE200E "unable to register device %s\n", fore200e->name);
/* cross-link the ATM device and the driver-private structure */
2661 atm_dev->dev_data = fore200e;
2662 fore200e->atm_dev = atm_dev;
2664 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2665 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2667 fore200e->available_cell_rate = ATM_OC3_PCR;
2669 fore200e->state = FORE200E_STATE_REGISTER;
/* Full board bring-up sequence. Each step must succeed before the next one
 * runs: ATM registration, bus configuration, memory mapping, board reset,
 * firmware load/start, CP initialization, host queue setup, receive buffer
 * allocation, ESI (MAC) retrieval and IRQ hookup, ending with an initial
 * buffer supply. Each helper advances fore200e->state so that
 * fore200e_shutdown can tear down exactly what was set up.
 * NOTE(review): the error returns between the steps are not visible in
 * this chunk; presumably each failed step returns a negative errno. */
2674 static int __devinit
2675 fore200e_init(struct fore200e* fore200e)
2677 if (fore200e_register(fore200e) < 0)
2680 if (fore200e->bus->configure(fore200e) < 0)
2683 if (fore200e->bus->map(fore200e) < 0)
2686 if (fore200e_reset(fore200e, 1) < 0)
2689 if (fore200e_load_fw(fore200e) < 0)
2692 if (fore200e_start_fw(fore200e) < 0)
2695 if (fore200e_initialize(fore200e) < 0)
2698 if (fore200e_init_cmd_queue(fore200e) < 0)
2701 if (fore200e_init_tx_queue(fore200e) < 0)
2704 if (fore200e_init_rx_queue(fore200e) < 0)
2707 if (fore200e_init_bs_queue(fore200e) < 0)
2710 if (fore200e_alloc_rx_buf(fore200e) < 0)
2713 if (fore200e_get_esi(fore200e) < 0)
2716 if (fore200e_irq_request(fore200e) < 0)
2719 fore200e_supply(fore200e);
2721 /* all done, board initialization is now complete */
2722 fore200e->state = FORE200E_STATE_COMPLETE;
/* PCI probe callback for PCA-200E boards: enable the device, allocate and
 * populate the driver-private structure, enable bus mastering, run the full
 * init sequence and stash the private pointer in the pci_dev.
 * The per-driver 'index' numbers boards in probe order.
 * NOTE(review): error-path cleanup (kfree, goto labels, index++) is not
 * visible in this chunk. */
2727 static int __devinit
2728 fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
2730 const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
2731 struct fore200e* fore200e;
/* probe-order board counter, shared across all PCA-200E probes */
2733 static int index = 0;
2735 if (pci_enable_device(pci_dev)) {
2740 fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL);
2741 if (fore200e == NULL) {
2746 fore200e->bus = bus;
2747 fore200e->bus_dev = pci_dev;
2748 fore200e->irq = pci_dev->irq;
2749 fore200e->phys_base = pci_resource_start(pci_dev, 0);
/* NOTE(review): this first sprintf with 'index - 1' looks redundant (and
 * yields "...--1" on the first board); the name is immediately rewritten
 * below with 'index'. Candidate for removal — confirm against callers. */
2751 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);
2753 pci_set_master(pci_dev);
2755 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2756 fore200e->bus->model_name,
2757 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2759 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2761 err = fore200e_init(fore200e);
/* fore200e_init failed part-way: tear down whatever state was reached */
2763 fore200e_shutdown(fore200e);
2768 pci_set_drvdata(pci_dev, fore200e);
2776 pci_disable_device(pci_dev);
/* PCI remove callback: tear down the board (reverse of fore200e_init,
 * driven by fore200e->state) and disable the PCI device. */
2781 static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev)
2783 struct fore200e *fore200e;
2785 fore200e = pci_get_drvdata(pci_dev);
2787 fore200e_shutdown(fore200e);
2789 pci_disable_device(pci_dev);
2793 #ifdef CONFIG_ATM_FORE200E_PCA
/* PCI match table: the driver_data slot carries the PCA-200E bus-ops entry
 * of fore200e_bus[], so the probe routine knows which accessors to use. */
2794 static struct pci_device_id fore200e_pca_tbl[] = {
2795 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
2796 0, 0, (unsigned long) &fore200e_bus[0] },
2800 MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
/* PCI driver glue for the PCA-200E variant */
2802 static struct pci_driver fore200e_pca_driver = {
2803 .name = "fore_200e",
2804 .probe = fore200e_pca_detect,
2805 .remove = __devexit_p(fore200e_pca_remove_one),
2806 .id_table = fore200e_pca_tbl,
/* Module entry point. Walks the fore200e_bus[] table: buses with a
 * self-detect hook (SBA-200E on sbus) are probed here directly and their
 * boards appended to fore200e_boards; the PCI (PCA-200E) variant is handed
 * to the PCI core via pci_register_driver instead.
 * NOTE(review): the declaration of 'index' and the final return values are
 * not visible in this chunk. */
2812 fore200e_module_init(void)
2814 const struct fore200e_bus* bus;
2815 struct fore200e* fore200e;
2818 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2820 /* for each configured bus interface */
2821 for (bus = fore200e_bus; bus->model_name; bus++) {
2823 /* detect all boards present on that bus */
2824 for (index = 0; bus->detect && (fore200e = bus->detect(bus, index)); index++) {
2826 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2827 fore200e->bus->model_name,
2828 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2830 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2832 if (fore200e_init(fore200e) < 0) {
/* partial init: unwind via the state machine */
2834 fore200e_shutdown(fore200e);
2838 list_add(&fore200e->entry, &fore200e_boards);
2842 #ifdef CONFIG_ATM_FORE200E_PCA
2843 if (!pci_register_driver(&fore200e_pca_driver))
/* succeed if at least one non-PCI board was brought up */
2847 if (!list_empty(&fore200e_boards))
/* Module exit point: unregister the PCI driver (which shuts down PCI
 * boards via the remove callback), then shut down every board that was
 * detected directly and linked onto fore200e_boards. */
2855 fore200e_module_cleanup(void)
2857 struct fore200e *fore200e, *next;
2859 #ifdef CONFIG_ATM_FORE200E_PCA
2860 pci_unregister_driver(&fore200e_pca_driver);
/* _safe variant: fore200e_shutdown presumably frees the entry we iterate on */
2863 list_for_each_entry_safe(fore200e, next, &fore200e_boards, entry) {
2864 fore200e_shutdown(fore200e);
2867 DPRINTK(1, "module being removed\n");
/* /proc read hook for the ATM layer. Called repeatedly with increasing
 * *pos; each "page" of output is one section (board identity, buffer
 * counts, CP heartbeat, firmware/media info, monitor state, PHY/OC-3/ATM/
 * AAL0/AAL3-4/AAL5/AUX statistics, carrier state, then one VCC per call).
 * Returns the number of bytes written into 'page'.
 * NOTE(review): the 'if (left-- == 0)' section selectors between the
 * sprintf blocks are not visible in this chunk. */
2872 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2874 struct fore200e* fore200e = FORE200E_DEV(dev);
2875 struct fore200e_vcc* fore200e_vcc;
2876 struct atm_vcc* vcc;
2877 int i, len, left = *pos;
2878 unsigned long flags;
/* refresh the on-board statistics snapshot before printing anything */
2882 if (fore200e_getstats(fore200e) < 0)
2885 len = sprintf(page,"\n"
2887 " internal name:\t\t%s\n", fore200e->name);
2889 /* print bus-specific information */
2890 if (fore200e->bus->proc_read)
2891 len += fore200e->bus->proc_read(fore200e, page + len);
2893 len += sprintf(page + len,
2894 " interrupt line:\t\t%s\n"
2895 " physical base address:\t0x%p\n"
2896 " virtual base address:\t0x%p\n"
2897 " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n"
2898 " board serial number:\t\t%d\n\n",
2899 fore200e_irq_itoa(fore200e->irq),
2900 (void*)fore200e->phys_base,
2901 fore200e->virt_base,
2902 fore200e->esi[0], fore200e->esi[1], fore200e->esi[2],
2903 fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
/* serial number convention: last two ESI bytes, big-endian */
2904 fore200e->esi[4] * 256 + fore200e->esi[5]);
/* free receive-buffer counts for both buffer schemes */
2910 return sprintf(page,
2911 " free small bufs, scheme 1:\t%d\n"
2912 " free large bufs, scheme 1:\t%d\n"
2913 " free small bufs, scheme 2:\t%d\n"
2914 " free large bufs, scheme 2:\t%d\n",
2915 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2916 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2917 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2918 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
/* a heartbeat of 0xDEADxxxx in the high half signals a CP fatal error */
2921 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2923 len = sprintf(page,"\n\n"
2924 " cell processor:\n"
2925 " heartbeat state:\t\t");
2927 if (hb >> 16 != 0xDEAD)
2928 len += sprintf(page + len, "0x%08x\n", hb);
2930 len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2936 static const char* media_name[] = {
2937 "unshielded twisted pair",
2938 "multimode optical fiber ST",
2939 "multimode optical fiber SC",
2940 "single-mode optical fiber ST",
2941 "single-mode optical fiber SC",
2945 static const char* oc3_mode[] = {
2947 "diagnostic loopback",
2952 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2953 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2954 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2955 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
/* NOTE(review): media_index is u32, so 'media_index < 0' is always false;
 * only the '> 4' bound is effective. Declaring it signed (or dropping the
 * dead test) would be the cleaner fix — confirm against mainline. */
2958 if ((media_index < 0) || (media_index > 4))
2961 switch (fore200e->loop_mode) {
2962 case ATM_LM_NONE: oc3_index = 0;
2964 case ATM_LM_LOC_PHY: oc3_index = 1;
2966 case ATM_LM_RMT_PHY: oc3_index = 2;
2968 default: oc3_index = 3;
2971 return sprintf(page,
2972 " firmware release:\t\t%d.%d.%d\n"
2973 " monitor release:\t\t%d.%d\n"
2974 " media type:\t\t\t%s\n"
2975 " OC-3 revision:\t\t0x%x\n"
2976 " OC-3 mode:\t\t\t%s",
/* shift pairs extract the major/minor/patch byte fields of the releases */
2977 fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
2978 mon960_release >> 16, mon960_release << 16 >> 16,
2979 media_name[ media_index ],
2981 oc3_mode[ oc3_index ]);
/* i960 monitor version and boot status */
2985 struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2987 return sprintf(page,
2990 " version number:\t\t%d\n"
2991 " boot status word:\t\t0x%08x\n",
2992 fore200e->bus->read(&cp_monitor->mon_version),
2993 fore200e->bus->read(&cp_monitor->bstat));
/* PHY-layer statistics (board byte order -> host via fore200e_swap) */
2997 return sprintf(page,
2999 " device statistics:\n"
3001 " crc_header_errors:\t\t%10u\n"
3002 " framing_errors:\t\t%10u\n",
3003 fore200e_swap(fore200e->stats->phy.crc_header_errors),
3004 fore200e_swap(fore200e->stats->phy.framing_errors));
/* SONET/OC-3 section, line and path error counters */
3007 return sprintf(page, "\n"
3009 " section_bip8_errors:\t%10u\n"
3010 " path_bip8_errors:\t\t%10u\n"
3011 " line_bip24_errors:\t\t%10u\n"
3012 " line_febe_errors:\t\t%10u\n"
3013 " path_febe_errors:\t\t%10u\n"
3014 " corr_hcs_errors:\t\t%10u\n"
3015 " ucorr_hcs_errors:\t\t%10u\n",
3016 fore200e_swap(fore200e->stats->oc3.section_bip8_errors),
3017 fore200e_swap(fore200e->stats->oc3.path_bip8_errors),
3018 fore200e_swap(fore200e->stats->oc3.line_bip24_errors),
3019 fore200e_swap(fore200e->stats->oc3.line_febe_errors),
3020 fore200e_swap(fore200e->stats->oc3.path_febe_errors),
3021 fore200e_swap(fore200e->stats->oc3.corr_hcs_errors),
3022 fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors));
/* ATM-layer cell counters */
3025 return sprintf(page,"\n"
3026 " ATM:\t\t\t\t cells\n"
3029 " vpi out of range:\t\t%10u\n"
3030 " vpi no conn:\t\t%10u\n"
3031 " vci out of range:\t\t%10u\n"
3032 " vci no conn:\t\t%10u\n",
3033 fore200e_swap(fore200e->stats->atm.cells_transmitted),
3034 fore200e_swap(fore200e->stats->atm.cells_received),
3035 fore200e_swap(fore200e->stats->atm.vpi_bad_range),
3036 fore200e_swap(fore200e->stats->atm.vpi_no_conn),
3037 fore200e_swap(fore200e->stats->atm.vci_bad_range),
3038 fore200e_swap(fore200e->stats->atm.vci_no_conn));
/* AAL0 cell counters */
3041 return sprintf(page,"\n"
3042 " AAL0:\t\t\t cells\n"
3045 " dropped:\t\t\t%10u\n",
3046 fore200e_swap(fore200e->stats->aal0.cells_transmitted),
3047 fore200e_swap(fore200e->stats->aal0.cells_received),
3048 fore200e_swap(fore200e->stats->aal0.cells_dropped));
/* AAL3/4 SAR and CS sublayer counters */
3051 return sprintf(page,"\n"
3053 " SAR sublayer:\t\t cells\n"
3056 " dropped:\t\t\t%10u\n"
3057 " CRC errors:\t\t%10u\n"
3058 " protocol errors:\t\t%10u\n\n"
3059 " CS sublayer:\t\t PDUs\n"
3062 " dropped:\t\t\t%10u\n"
3063 " protocol errors:\t\t%10u\n",
3064 fore200e_swap(fore200e->stats->aal34.cells_transmitted),
3065 fore200e_swap(fore200e->stats->aal34.cells_received),
3066 fore200e_swap(fore200e->stats->aal34.cells_dropped),
3067 fore200e_swap(fore200e->stats->aal34.cells_crc_errors),
3068 fore200e_swap(fore200e->stats->aal34.cells_protocol_errors),
3069 fore200e_swap(fore200e->stats->aal34.cspdus_transmitted),
3070 fore200e_swap(fore200e->stats->aal34.cspdus_received),
3071 fore200e_swap(fore200e->stats->aal34.cspdus_dropped),
3072 fore200e_swap(fore200e->stats->aal34.cspdus_protocol_errors));
/* AAL5 SAR and CS sublayer counters */
3075 return sprintf(page,"\n"
3077 " SAR sublayer:\t\t cells\n"
3080 " dropped:\t\t\t%10u\n"
3081 " congestions:\t\t%10u\n\n"
3082 " CS sublayer:\t\t PDUs\n"
3085 " dropped:\t\t\t%10u\n"
3086 " CRC errors:\t\t%10u\n"
3087 " protocol errors:\t\t%10u\n",
3088 fore200e_swap(fore200e->stats->aal5.cells_transmitted),
3089 fore200e_swap(fore200e->stats->aal5.cells_received),
3090 fore200e_swap(fore200e->stats->aal5.cells_dropped),
3091 fore200e_swap(fore200e->stats->aal5.congestion_experienced),
3092 fore200e_swap(fore200e->stats->aal5.cspdus_transmitted),
3093 fore200e_swap(fore200e->stats->aal5.cspdus_received),
3094 fore200e_swap(fore200e->stats->aal5.cspdus_dropped),
3095 fore200e_swap(fore200e->stats->aal5.cspdus_crc_errors),
3096 fore200e_swap(fore200e->stats->aal5.cspdus_protocol_errors));
/* auxiliary counters: host-side allocation failures */
3099 return sprintf(page,"\n"
3100 " AUX:\t\t allocation failures\n"
3101 " small b1:\t\t\t%10u\n"
3102 " large b1:\t\t\t%10u\n"
3103 " small b2:\t\t\t%10u\n"
3104 " large b2:\t\t\t%10u\n"
3105 " RX PDUs:\t\t\t%10u\n"
3106 " TX PDUs:\t\t\t%10lu\n",
3107 fore200e_swap(fore200e->stats->aux.small_b1_failed),
3108 fore200e_swap(fore200e->stats->aux.large_b1_failed),
3109 fore200e_swap(fore200e->stats->aux.small_b2_failed),
3110 fore200e_swap(fore200e->stats->aux.large_b2_failed),
3111 fore200e_swap(fore200e->stats->aux.rpd_alloc_failed),
3115 return sprintf(page,"\n"
3116 " receive carrier:\t\t\t%s\n",
3117 fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
/* VCC table header, then one ready VCC per subsequent call (driven by left) */
3120 return sprintf(page,"\n"
3121 " VCCs:\n address VPI VCI AAL "
3122 "TX PDUs TX min/max size RX PDUs RX min/max size\n");
3125 for (i = 0; i < NBR_CONNECT; i++) {
3127 vcc = fore200e->vc_map[i].vcc;
/* hold q_lock while dereferencing the vcc so close() can't free it under us */
3132 spin_lock_irqsave(&fore200e->q_lock, flags);
3134 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
3136 fore200e_vcc = FORE200E_VCC(vcc);
3137 ASSERT(fore200e_vcc);
3140 " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
3141 (u32)(unsigned long)vcc,
3142 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
3143 fore200e_vcc->tx_pdu,
/* min-pdu fields start above 0xFFFF as "no PDU seen yet"; print 0 then */
3144 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
3145 fore200e_vcc->tx_max_pdu,
3146 fore200e_vcc->rx_pdu,
3147 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
3148 fore200e_vcc->rx_max_pdu);
3150 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3154 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3160 module_init(fore200e_module_init);
3161 module_exit(fore200e_module_cleanup);
/* ATM device operations exported to the Linux ATM layer; every board
 * registered by fore200e_register() uses this single ops table. */
3164 static const struct atmdev_ops fore200e_ops =
3166 .open = fore200e_open,
3167 .close = fore200e_close,
3168 .ioctl = fore200e_ioctl,
3169 .getsockopt = fore200e_getsockopt,
3170 .setsockopt = fore200e_setsockopt,
3171 .send = fore200e_send,
3172 .change_qos = fore200e_change_qos,
3173 .proc_read = fore200e_proc_read,
3174 .owner = THIS_MODULE
/* Firmware images are linked into the module as raw data by the build
 * system; one (data, size) pair per supported bus variant. */
3178 #ifdef CONFIG_ATM_FORE200E_PCA
3179 extern const unsigned char _fore200e_pca_fw_data[];
3180 extern const unsigned int _fore200e_pca_fw_size;
3182 #ifdef CONFIG_ATM_FORE200E_SBA
3183 extern const unsigned char _fore200e_sba_fw_data[];
3184 extern const unsigned int _fore200e_sba_fw_size;
/* Per-bus operations table: one entry per supported adapter family
 * (PCI PCA-200E, sbus SBA-200E), terminated by a NULL model_name.
 * NOTE(review): several designator-less slots (detect, map, read/write
 * accessors, irq hooks) are not visible in this chunk; the entries below
 * are positional initializers, so field order must match struct
 * fore200e_bus exactly. */
3187 static const struct fore200e_bus fore200e_bus[] = {
3188 #ifdef CONFIG_ATM_FORE200E_PCA
3189 { "PCA-200E", "pca200e", 32, 4, 32,
3190 _fore200e_pca_fw_data, &_fore200e_pca_fw_size,
3193 fore200e_pca_dma_map,
3194 fore200e_pca_dma_unmap,
3195 fore200e_pca_dma_sync_for_cpu,
3196 fore200e_pca_dma_sync_for_device,
3197 fore200e_pca_dma_chunk_alloc,
3198 fore200e_pca_dma_chunk_free,
3200 fore200e_pca_configure,
3203 fore200e_pca_prom_read,
3206 fore200e_pca_irq_check,
3207 fore200e_pca_irq_ack,
3208 fore200e_pca_proc_read,
3211 #ifdef CONFIG_ATM_FORE200E_SBA
3212 { "SBA-200E", "sba200e", 32, 64, 32,
3213 _fore200e_sba_fw_data, &_fore200e_sba_fw_size,
3216 fore200e_sba_dma_map,
3217 fore200e_sba_dma_unmap,
3218 fore200e_sba_dma_sync_for_cpu,
3219 fore200e_sba_dma_sync_for_device,
3220 fore200e_sba_dma_chunk_alloc,
3221 fore200e_sba_dma_chunk_free,
3222 fore200e_sba_detect,
3223 fore200e_sba_configure,
3226 fore200e_sba_prom_read,
3228 fore200e_sba_irq_enable,
3229 fore200e_sba_irq_check,
3230 fore200e_sba_irq_ack,
3231 fore200e_sba_proc_read,
3237 #ifdef MODULE_LICENSE
3238 MODULE_LICENSE("GPL");