2 * Copyright (C) 2006-2009 Freescale Semiconductor, Inc. All rights reserved.
4 * Author: Shlomi Gridish <gridish@freescale.com>
5 * Li Yang <leoli@freescale.com>
8 * QE UCC Gigabit Ethernet Driver
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
15 #include <linux/kernel.h>
16 #include <linux/init.h>
17 #include <linux/errno.h>
18 #include <linux/slab.h>
19 #include <linux/stddef.h>
20 #include <linux/module.h>
21 #include <linux/interrupt.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/spinlock.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/mii.h>
29 #include <linux/phy.h>
30 #include <linux/workqueue.h>
31 #include <linux/of_mdio.h>
32 #include <linux/of_net.h>
33 #include <linux/of_platform.h>
35 #include <asm/uaccess.h>
38 #include <asm/immap_qe.h>
41 #include <asm/ucc_fast.h>
42 #include <asm/machdep.h>
48 #define ugeth_printk(level, format, arg...) \
49 printk(level format "\n", ## arg)
51 #define ugeth_dbg(format, arg...) \
52 ugeth_printk(KERN_DEBUG , format , ## arg)
53 #define ugeth_err(format, arg...) \
54 ugeth_printk(KERN_ERR , format , ## arg)
55 #define ugeth_info(format, arg...) \
56 ugeth_printk(KERN_INFO , format , ## arg)
57 #define ugeth_warn(format, arg...) \
58 ugeth_printk(KERN_WARNING , format , ## arg)
60 #ifdef UGETH_VERBOSE_DEBUG
61 #define ugeth_vdbg ugeth_dbg
63 #define ugeth_vdbg(fmt, args...) do { } while (0)
64 #endif /* UGETH_VERBOSE_DEBUG */
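/* Default message-enable mask: every message class up to and including
 * NETIF_MSG_IFUP (with the standard NETIF_MSG_* bit layout this works out
 * to DRV, PROBE, LINK, TIMER, IFDOWN and IFUP, i.e. 0x3f).
 */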
65 #define UGETH_MSG_DEFAULT (((NETIF_MSG_IFUP) << 1) - 1)
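/* Protects the software queues of address containers (group/individual
 * hash lists) that are manipulated through dequeue() below.
 */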
68 static DEFINE_SPINLOCK(ugeth_lock);
74 module_param_named(debug, debug.msg_enable, int, 0);
75 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");
77 static struct ucc_geth_info ugeth_primary_info = {
79 .bd_mem_part = MEM_PART_SYSTEM,
80 .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
81 .max_rx_buf_length = 1536,
82 /* adjusted at startup if max-speed 1000 */
83 .urfs = UCC_GETH_URFS_INIT,
84 .urfet = UCC_GETH_URFET_INIT,
85 .urfset = UCC_GETH_URFSET_INIT,
86 .utfs = UCC_GETH_UTFS_INIT,
87 .utfet = UCC_GETH_UTFET_INIT,
88 .utftt = UCC_GETH_UTFTT_INIT,
90 .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
91 .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
92 .tenc = UCC_FAST_TX_ENCODING_NRZ,
93 .renc = UCC_FAST_RX_ENCODING_NRZ,
94 .tcrc = UCC_FAST_16_BIT_CRC,
95 .synl = UCC_FAST_SYNC_LEN_NOT_USED,
99 .extendedFilteringChainPointer = ((uint32_t) NULL),
100 .typeorlen = 3072, /* 1536 */
101 .nonBackToBackIfgPart1 = 0x40,
102 .nonBackToBackIfgPart2 = 0x60,
103 .miminumInterFrameGapEnforcement = 0x50,
104 .backToBackInterFrameGap = 0x60,
108 .strictpriorityq = 0xff,
109 .altBebTruncation = 0xa,
111 .maxRetransmission = 0xf,
112 .collisionWindow = 0x37,
113 .receiveFlowControl = 1,
114 .transmitFlowControl = 1,
115 .maxGroupAddrInHash = 4,
116 .maxIndAddrInHash = 4,
118 .maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */
119 .minFrameLength = 64,
120 .maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */
121 .maxD2Length = 1520+16, /* Add extra bytes for VLANs etc. */
123 .ecamptr = ((uint32_t) NULL),
124 .eventRegMask = UCCE_OTHER,
125 .pausePeriod = 0xf000,
126 .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
147 .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
148 .largestexternallookupkeysize =
149 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
150 .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
151 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
152 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX,
153 .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
154 .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
155 .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
156 .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
157 .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
158 .numThreadsTx = UCC_GETH_NUM_OF_THREADS_1,
159 .numThreadsRx = UCC_GETH_NUM_OF_THREADS_1,
160 .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
161 .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
164 static struct ucc_geth_info ugeth_info[8];
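/* Debug helper: hex-dump a memory region, 16 bytes per row, then any
 * remaining 32-bit words, then any trailing bytes.
 */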
167 static void mem_disp(u8 *addr, int size)
170 int size16Aling = (size >> 4) << 4;
171 int size4Aling = (size >> 2) << 2;
176 for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16)
177 printk("0x%08x: %08x %08x %08x %08x\r\n",
181 *((u32 *) (i + 8)), *((u32 *) (i + 12)));
183 printk("0x%08x: ", (u32) i);
184 for (; (u32) i < (u32) addr + size4Aling; i += 4)
185 printk("%08x ", *((u32 *) (i)));
186 for (; (u32) i < (u32) addr + size; i++)
187 printk("%02x", *((i)));
193 static struct list_head *dequeue(struct list_head *lh)
197 spin_lock_irqsave(&ugeth_lock, flags);
198 if (!list_empty(lh)) {
199 struct list_head *node = lh->next;
201 spin_unlock_irqrestore(&ugeth_lock, flags);
204 spin_unlock_irqrestore(&ugeth_lock, flags);
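/* Allocate a new receive skb, reserve enough headroom to align its data
 * to UCC_GETH_RX_DATA_BUF_ALIGNMENT, DMA-map the buffer and attach it to
 * the given receive buffer descriptor.
 */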
209 static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
214 skb = netdev_alloc_skb(ugeth->ndev,
215 ugeth->ug_info->uf_info.max_rx_buf_length +
216 UCC_GETH_RX_DATA_BUF_ALIGNMENT);
220 /* We need the data buffer to be aligned properly. We will reserve
221 * as many bytes as needed to align the data properly
224 UCC_GETH_RX_DATA_BUF_ALIGNMENT -
225 (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
228 out_be32(&((struct qe_bd __iomem *)bd)->buf,
229 dma_map_single(ugeth->dev,
231 ugeth->ug_info->uf_info.max_rx_buf_length +
232 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
235 out_be32((u32 __iomem *)bd,
236 (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W)));
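/* Attach a freshly allocated skb to every descriptor of the rxQ BD ring,
 * walking the ring until the wrap (R_W) descriptor has been handled.
 */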
241 static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
248 bd = ugeth->p_rx_bd_ring[rxQ];
252 bd_status = in_be32((u32 __iomem *)bd);
253 skb = get_new_skb(ugeth, bd);
255 if (!skb) /* If a data buffer cannot be allocated,
256 abort; cleanup will be done elsewhere */
259 ugeth->rx_skbuff[rxQ][i] = skb;
261 /* advance the BD pointer */
262 bd += sizeof(struct qe_bd);
264 } while (!(bd_status & R_W));
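/* For each init-enet entry: acquire a free SNUM and (except for the first
 * Rx entry, which has no page) a MURAM area for the thread parameter page,
 * then encode both into the entry.
 */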
269 static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
273 u32 thread_alignment,
275 int skip_page_for_first_entry)
277 u32 init_enet_offset;
281 for (i = 0; i < num_entries; i++) {
282 if ((snum = qe_get_snum()) < 0) {
283 if (netif_msg_ifup(ugeth))
284 ugeth_err("fill_init_enet_entries: Can not get SNUM.");
287 if ((i == 0) && skip_page_for_first_entry)
288 /* First entry of Rx does not have page */
289 init_enet_offset = 0;
292 qe_muram_alloc(thread_size, thread_alignment);
293 if (IS_ERR_VALUE(init_enet_offset)) {
294 if (netif_msg_ifup(ugeth))
295 ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory.");
296 qe_put_snum((u8) snum);
301 ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
308 static int return_init_enet_entries(struct ucc_geth_private *ugeth,
312 int skip_page_for_first_entry)
314 u32 init_enet_offset;
318 for (i = 0; i < num_entries; i++) {
321 /* Check that this entry was actually valid --
322 needed in case an allocation failed */
323 if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
325 (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
326 ENET_INIT_PARAM_SNUM_SHIFT;
327 qe_put_snum((u8) snum);
328 if (!((i == 0) && skip_page_for_first_entry)) {
329 /* First entry of Rx does not have page */
331 (val & ENET_INIT_PARAM_PTR_MASK);
332 qe_muram_free(init_enet_offset);
342 static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
343 u32 __iomem *p_start,
347 int skip_page_for_first_entry)
349 u32 init_enet_offset;
353 for (i = 0; i < num_entries; i++) {
354 u32 val = in_be32(p_start);
356 /* Check that this entry was actually valid --
357 needed in case an allocation failed */
358 if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
360 (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
361 ENET_INIT_PARAM_SNUM_SHIFT;
362 qe_put_snum((u8) snum);
363 if (!((i == 0) && skip_page_for_first_entry)) {
364 /* First entry of Rx does not have page */
367 ENET_INIT_PARAM_PTR_MASK);
368 ugeth_info("Init enet entry %d:", i);
369 ugeth_info("Base address: 0x%08x",
371 qe_muram_addr(init_enet_offset));
372 mem_disp(qe_muram_addr(init_enet_offset),
383 static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
385 kfree(enet_addr_cont);
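/* Write a MAC address into the 16-bit address registers; the bytes are
 * stored in reversed order, lowest-order bytes first.
 */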
388 static void set_mac_addr(__be16 __iomem *reg, u8 *mac)
390 out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]);
391 out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]);
392 out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]);
395 static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
397 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
399 if (!(paddr_num < NUM_OF_PADDRS)) {
400 ugeth_warn("%s: Illagel paddr_num.", __func__);
405 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
408 /* Writing address ff.ff.ff.ff.ff.ff disables address
409 recognition for this register */
410 out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
411 out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
412 out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);
417 static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
420 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
424 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
428 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
430 /* Ethernet frames are defined in little-endian byte order,
431 so to insert the address */
432 /* into the hash (big-endian mode), we reverse the bytes. */
434 set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);
436 qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
437 QE_CR_PROTOCOL_ETHERNET, 0);
440 static inline int compare_addr(u8 **addr1, u8 **addr2)
442 return memcmp(addr1, addr2, ETH_ALEN);
446 static void get_statistics(struct ucc_geth_private *ugeth,
447 struct ucc_geth_tx_firmware_statistics *
448 tx_firmware_statistics,
449 struct ucc_geth_rx_firmware_statistics *
450 rx_firmware_statistics,
451 struct ucc_geth_hardware_statistics *hardware_statistics)
453 struct ucc_fast __iomem *uf_regs;
454 struct ucc_geth __iomem *ug_regs;
455 struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
456 struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
458 ug_regs = ugeth->ug_regs;
459 uf_regs = (struct ucc_fast __iomem *) ug_regs;
460 p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
461 p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
463 /* Tx firmware only if user handed pointer and driver actually
464 gathers Tx firmware statistics */
465 if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
466 tx_firmware_statistics->sicoltx =
467 in_be32(&p_tx_fw_statistics_pram->sicoltx);
468 tx_firmware_statistics->mulcoltx =
469 in_be32(&p_tx_fw_statistics_pram->mulcoltx);
470 tx_firmware_statistics->latecoltxfr =
471 in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
472 tx_firmware_statistics->frabortduecol =
473 in_be32(&p_tx_fw_statistics_pram->frabortduecol);
474 tx_firmware_statistics->frlostinmactxer =
475 in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
476 tx_firmware_statistics->carriersenseertx =
477 in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
478 tx_firmware_statistics->frtxok =
479 in_be32(&p_tx_fw_statistics_pram->frtxok);
480 tx_firmware_statistics->txfrexcessivedefer =
481 in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
482 tx_firmware_statistics->txpkts256 =
483 in_be32(&p_tx_fw_statistics_pram->txpkts256);
484 tx_firmware_statistics->txpkts512 =
485 in_be32(&p_tx_fw_statistics_pram->txpkts512);
486 tx_firmware_statistics->txpkts1024 =
487 in_be32(&p_tx_fw_statistics_pram->txpkts1024);
488 tx_firmware_statistics->txpktsjumbo =
489 in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
492 /* Rx firmware only if user handed pointer and driver actually
493 * gathers Rx firmware statistics */
494 if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
496 rx_firmware_statistics->frrxfcser =
497 in_be32(&p_rx_fw_statistics_pram->frrxfcser);
498 rx_firmware_statistics->fraligner =
499 in_be32(&p_rx_fw_statistics_pram->fraligner);
500 rx_firmware_statistics->inrangelenrxer =
501 in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
502 rx_firmware_statistics->outrangelenrxer =
503 in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
504 rx_firmware_statistics->frtoolong =
505 in_be32(&p_rx_fw_statistics_pram->frtoolong);
506 rx_firmware_statistics->runt =
507 in_be32(&p_rx_fw_statistics_pram->runt);
508 rx_firmware_statistics->verylongevent =
509 in_be32(&p_rx_fw_statistics_pram->verylongevent);
510 rx_firmware_statistics->symbolerror =
511 in_be32(&p_rx_fw_statistics_pram->symbolerror);
512 rx_firmware_statistics->dropbsy =
513 in_be32(&p_rx_fw_statistics_pram->dropbsy);
514 for (i = 0; i < 0x8; i++)
515 rx_firmware_statistics->res0[i] =
516 p_rx_fw_statistics_pram->res0[i];
517 rx_firmware_statistics->mismatchdrop =
518 in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
519 rx_firmware_statistics->underpkts =
520 in_be32(&p_rx_fw_statistics_pram->underpkts);
521 rx_firmware_statistics->pkts256 =
522 in_be32(&p_rx_fw_statistics_pram->pkts256);
523 rx_firmware_statistics->pkts512 =
524 in_be32(&p_rx_fw_statistics_pram->pkts512);
525 rx_firmware_statistics->pkts1024 =
526 in_be32(&p_rx_fw_statistics_pram->pkts1024);
527 rx_firmware_statistics->pktsjumbo =
528 in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
529 rx_firmware_statistics->frlossinmacer =
530 in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
531 rx_firmware_statistics->pausefr =
532 in_be32(&p_rx_fw_statistics_pram->pausefr);
533 for (i = 0; i < 0x4; i++)
534 rx_firmware_statistics->res1[i] =
535 p_rx_fw_statistics_pram->res1[i];
536 rx_firmware_statistics->removevlan =
537 in_be32(&p_rx_fw_statistics_pram->removevlan);
538 rx_firmware_statistics->replacevlan =
539 in_be32(&p_rx_fw_statistics_pram->replacevlan);
540 rx_firmware_statistics->insertvlan =
541 in_be32(&p_rx_fw_statistics_pram->insertvlan);
544 /* Hardware only if user handed pointer and driver actually
545 gathers hardware statistics */
546 if (hardware_statistics &&
547 (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) {
548 hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
549 hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
550 hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
551 hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
552 hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
553 hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
554 hardware_statistics->txok = in_be32(&ug_regs->txok);
555 hardware_statistics->txcf = in_be16(&ug_regs->txcf);
556 hardware_statistics->tmca = in_be32(&ug_regs->tmca);
557 hardware_statistics->tbca = in_be32(&ug_regs->tbca);
558 hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
559 hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
560 hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
561 hardware_statistics->rmca = in_be32(&ug_regs->rmca);
562 hardware_statistics->rbca = in_be32(&ug_regs->rbca);
566 static void dump_bds(struct ucc_geth_private *ugeth)
571 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
572 if (ugeth->p_tx_bd_ring[i]) {
574 (ugeth->ug_info->bdRingLenTx[i] *
575 sizeof(struct qe_bd));
576 ugeth_info("TX BDs[%d]", i);
577 mem_disp(ugeth->p_tx_bd_ring[i], length);
580 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
581 if (ugeth->p_rx_bd_ring[i]) {
583 (ugeth->ug_info->bdRingLenRx[i] *
584 sizeof(struct qe_bd));
585 ugeth_info("RX BDs[%d]", i);
586 mem_disp(ugeth->p_rx_bd_ring[i], length);
591 static void dump_regs(struct ucc_geth_private *ugeth)
595 ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num + 1);
596 ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);
598 ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x",
599 (u32) & ugeth->ug_regs->maccfg1,
600 in_be32(&ugeth->ug_regs->maccfg1));
601 ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x",
602 (u32) & ugeth->ug_regs->maccfg2,
603 in_be32(&ugeth->ug_regs->maccfg2));
604 ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x",
605 (u32) & ugeth->ug_regs->ipgifg,
606 in_be32(&ugeth->ug_regs->ipgifg));
607 ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x",
608 (u32) & ugeth->ug_regs->hafdup,
609 in_be32(&ugeth->ug_regs->hafdup));
610 ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x",
611 (u32) & ugeth->ug_regs->ifctl,
612 in_be32(&ugeth->ug_regs->ifctl));
613 ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x",
614 (u32) & ugeth->ug_regs->ifstat,
615 in_be32(&ugeth->ug_regs->ifstat));
616 ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
617 (u32) & ugeth->ug_regs->macstnaddr1,
618 in_be32(&ugeth->ug_regs->macstnaddr1));
619 ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
620 (u32) & ugeth->ug_regs->macstnaddr2,
621 in_be32(&ugeth->ug_regs->macstnaddr2));
622 ugeth_info("uempr : addr - 0x%08x, val - 0x%08x",
623 (u32) & ugeth->ug_regs->uempr,
624 in_be32(&ugeth->ug_regs->uempr));
625 ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x",
626 (u32) & ugeth->ug_regs->utbipar,
627 in_be32(&ugeth->ug_regs->utbipar));
628 ugeth_info("uescr : addr - 0x%08x, val - 0x%04x",
629 (u32) & ugeth->ug_regs->uescr,
630 in_be16(&ugeth->ug_regs->uescr));
631 ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x",
632 (u32) & ugeth->ug_regs->tx64,
633 in_be32(&ugeth->ug_regs->tx64));
634 ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x",
635 (u32) & ugeth->ug_regs->tx127,
636 in_be32(&ugeth->ug_regs->tx127));
637 ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x",
638 (u32) & ugeth->ug_regs->tx255,
639 in_be32(&ugeth->ug_regs->tx255));
640 ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x",
641 (u32) & ugeth->ug_regs->rx64,
642 in_be32(&ugeth->ug_regs->rx64));
643 ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x",
644 (u32) & ugeth->ug_regs->rx127,
645 in_be32(&ugeth->ug_regs->rx127));
646 ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x",
647 (u32) & ugeth->ug_regs->rx255,
648 in_be32(&ugeth->ug_regs->rx255));
649 ugeth_info("txok : addr - 0x%08x, val - 0x%08x",
650 (u32) & ugeth->ug_regs->txok,
651 in_be32(&ugeth->ug_regs->txok));
652 ugeth_info("txcf : addr - 0x%08x, val - 0x%04x",
653 (u32) & ugeth->ug_regs->txcf,
654 in_be16(&ugeth->ug_regs->txcf));
655 ugeth_info("tmca : addr - 0x%08x, val - 0x%08x",
656 (u32) & ugeth->ug_regs->tmca,
657 in_be32(&ugeth->ug_regs->tmca));
658 ugeth_info("tbca : addr - 0x%08x, val - 0x%08x",
659 (u32) & ugeth->ug_regs->tbca,
660 in_be32(&ugeth->ug_regs->tbca));
661 ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x",
662 (u32) & ugeth->ug_regs->rxfok,
663 in_be32(&ugeth->ug_regs->rxfok));
664 ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x",
665 (u32) & ugeth->ug_regs->rxbok,
666 in_be32(&ugeth->ug_regs->rxbok));
667 ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x",
668 (u32) & ugeth->ug_regs->rbyt,
669 in_be32(&ugeth->ug_regs->rbyt));
670 ugeth_info("rmca : addr - 0x%08x, val - 0x%08x",
671 (u32) & ugeth->ug_regs->rmca,
672 in_be32(&ugeth->ug_regs->rmca));
673 ugeth_info("rbca : addr - 0x%08x, val - 0x%08x",
674 (u32) & ugeth->ug_regs->rbca,
675 in_be32(&ugeth->ug_regs->rbca));
676 ugeth_info("scar : addr - 0x%08x, val - 0x%08x",
677 (u32) & ugeth->ug_regs->scar,
678 in_be32(&ugeth->ug_regs->scar));
679 ugeth_info("scam : addr - 0x%08x, val - 0x%08x",
680 (u32) & ugeth->ug_regs->scam,
681 in_be32(&ugeth->ug_regs->scam));
683 if (ugeth->p_thread_data_tx) {
684 int numThreadsTxNumerical;
685 switch (ugeth->ug_info->numThreadsTx) {
686 case UCC_GETH_NUM_OF_THREADS_1:
687 numThreadsTxNumerical = 1;
689 case UCC_GETH_NUM_OF_THREADS_2:
690 numThreadsTxNumerical = 2;
692 case UCC_GETH_NUM_OF_THREADS_4:
693 numThreadsTxNumerical = 4;
695 case UCC_GETH_NUM_OF_THREADS_6:
696 numThreadsTxNumerical = 6;
698 case UCC_GETH_NUM_OF_THREADS_8:
699 numThreadsTxNumerical = 8;
702 numThreadsTxNumerical = 0;
706 ugeth_info("Thread data TXs:");
707 ugeth_info("Base address: 0x%08x",
708 (u32) ugeth->p_thread_data_tx);
709 for (i = 0; i < numThreadsTxNumerical; i++) {
710 ugeth_info("Thread data TX[%d]:", i);
711 ugeth_info("Base address: 0x%08x",
712 (u32) & ugeth->p_thread_data_tx[i]);
713 mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
714 sizeof(struct ucc_geth_thread_data_tx));
717 if (ugeth->p_thread_data_rx) {
718 int numThreadsRxNumerical;
719 switch (ugeth->ug_info->numThreadsRx) {
720 case UCC_GETH_NUM_OF_THREADS_1:
721 numThreadsRxNumerical = 1;
723 case UCC_GETH_NUM_OF_THREADS_2:
724 numThreadsRxNumerical = 2;
726 case UCC_GETH_NUM_OF_THREADS_4:
727 numThreadsRxNumerical = 4;
729 case UCC_GETH_NUM_OF_THREADS_6:
730 numThreadsRxNumerical = 6;
732 case UCC_GETH_NUM_OF_THREADS_8:
733 numThreadsRxNumerical = 8;
736 numThreadsRxNumerical = 0;
740 ugeth_info("Thread data RX:");
741 ugeth_info("Base address: 0x%08x",
742 (u32) ugeth->p_thread_data_rx);
743 for (i = 0; i < numThreadsRxNumerical; i++) {
744 ugeth_info("Thread data RX[%d]:", i);
745 ugeth_info("Base address: 0x%08x",
746 (u32) & ugeth->p_thread_data_rx[i]);
747 mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
748 sizeof(struct ucc_geth_thread_data_rx));
751 if (ugeth->p_exf_glbl_param) {
752 ugeth_info("EXF global param:");
753 ugeth_info("Base address: 0x%08x",
754 (u32) ugeth->p_exf_glbl_param);
755 mem_disp((u8 *) ugeth->p_exf_glbl_param,
756 sizeof(*ugeth->p_exf_glbl_param));
758 if (ugeth->p_tx_glbl_pram) {
759 ugeth_info("TX global param:");
760 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
761 ugeth_info("temoder : addr - 0x%08x, val - 0x%04x",
762 (u32) & ugeth->p_tx_glbl_pram->temoder,
763 in_be16(&ugeth->p_tx_glbl_pram->temoder));
764 ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x",
765 (u32) & ugeth->p_tx_glbl_pram->sqptr,
766 in_be32(&ugeth->p_tx_glbl_pram->sqptr));
767 ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
768 (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer,
769 in_be32(&ugeth->p_tx_glbl_pram->
770 schedulerbasepointer));
771 ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
772 (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr,
773 in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
774 ugeth_info("tstate : addr - 0x%08x, val - 0x%08x",
775 (u32) & ugeth->p_tx_glbl_pram->tstate,
776 in_be32(&ugeth->p_tx_glbl_pram->tstate));
777 ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x",
778 (u32) & ugeth->p_tx_glbl_pram->iphoffset[0],
779 ugeth->p_tx_glbl_pram->iphoffset[0]);
780 ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x",
781 (u32) & ugeth->p_tx_glbl_pram->iphoffset[1],
782 ugeth->p_tx_glbl_pram->iphoffset[1]);
783 ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x",
784 (u32) & ugeth->p_tx_glbl_pram->iphoffset[2],
785 ugeth->p_tx_glbl_pram->iphoffset[2]);
786 ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x",
787 (u32) & ugeth->p_tx_glbl_pram->iphoffset[3],
788 ugeth->p_tx_glbl_pram->iphoffset[3]);
789 ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x",
790 (u32) & ugeth->p_tx_glbl_pram->iphoffset[4],
791 ugeth->p_tx_glbl_pram->iphoffset[4]);
792 ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x",
793 (u32) & ugeth->p_tx_glbl_pram->iphoffset[5],
794 ugeth->p_tx_glbl_pram->iphoffset[5]);
795 ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x",
796 (u32) & ugeth->p_tx_glbl_pram->iphoffset[6],
797 ugeth->p_tx_glbl_pram->iphoffset[6]);
798 ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x",
799 (u32) & ugeth->p_tx_glbl_pram->iphoffset[7],
800 ugeth->p_tx_glbl_pram->iphoffset[7]);
801 ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x",
802 (u32) & ugeth->p_tx_glbl_pram->vtagtable[0],
803 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
804 ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x",
805 (u32) & ugeth->p_tx_glbl_pram->vtagtable[1],
806 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
807 ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x",
808 (u32) & ugeth->p_tx_glbl_pram->vtagtable[2],
809 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
810 ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x",
811 (u32) & ugeth->p_tx_glbl_pram->vtagtable[3],
812 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
813 ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x",
814 (u32) & ugeth->p_tx_glbl_pram->vtagtable[4],
815 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
816 ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x",
817 (u32) & ugeth->p_tx_glbl_pram->vtagtable[5],
818 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
819 ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x",
820 (u32) & ugeth->p_tx_glbl_pram->vtagtable[6],
821 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
822 ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x",
823 (u32) & ugeth->p_tx_glbl_pram->vtagtable[7],
824 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
825 ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x",
826 (u32) & ugeth->p_tx_glbl_pram->tqptr,
827 in_be32(&ugeth->p_tx_glbl_pram->tqptr));
829 if (ugeth->p_rx_glbl_pram) {
830 ugeth_info("RX global param:");
831 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
832 ugeth_info("remoder : addr - 0x%08x, val - 0x%08x",
833 (u32) & ugeth->p_rx_glbl_pram->remoder,
834 in_be32(&ugeth->p_rx_glbl_pram->remoder));
835 ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x",
836 (u32) & ugeth->p_rx_glbl_pram->rqptr,
837 in_be32(&ugeth->p_rx_glbl_pram->rqptr));
838 ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x",
839 (u32) & ugeth->p_rx_glbl_pram->typeorlen,
840 in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
841 ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x",
842 (u32) & ugeth->p_rx_glbl_pram->rxgstpack,
843 ugeth->p_rx_glbl_pram->rxgstpack);
844 ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x",
845 (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr,
846 in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
847 ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
848 (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr,
849 in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
850 ugeth_info("rstate : addr - 0x%08x, val - 0x%02x",
851 (u32) & ugeth->p_rx_glbl_pram->rstate,
852 ugeth->p_rx_glbl_pram->rstate);
853 ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x",
854 (u32) & ugeth->p_rx_glbl_pram->mrblr,
855 in_be16(&ugeth->p_rx_glbl_pram->mrblr));
856 ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x",
857 (u32) & ugeth->p_rx_glbl_pram->rbdqptr,
858 in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
859 ugeth_info("mflr : addr - 0x%08x, val - 0x%04x",
860 (u32) & ugeth->p_rx_glbl_pram->mflr,
861 in_be16(&ugeth->p_rx_glbl_pram->mflr));
862 ugeth_info("minflr : addr - 0x%08x, val - 0x%04x",
863 (u32) & ugeth->p_rx_glbl_pram->minflr,
864 in_be16(&ugeth->p_rx_glbl_pram->minflr));
865 ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x",
866 (u32) & ugeth->p_rx_glbl_pram->maxd1,
867 in_be16(&ugeth->p_rx_glbl_pram->maxd1));
868 ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x",
869 (u32) & ugeth->p_rx_glbl_pram->maxd2,
870 in_be16(&ugeth->p_rx_glbl_pram->maxd2));
871 ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x",
872 (u32) & ugeth->p_rx_glbl_pram->ecamptr,
873 in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
874 ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x",
875 (u32) & ugeth->p_rx_glbl_pram->l2qt,
876 in_be32(&ugeth->p_rx_glbl_pram->l2qt));
877 ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x",
878 (u32) & ugeth->p_rx_glbl_pram->l3qt[0],
879 in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
880 ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x",
881 (u32) & ugeth->p_rx_glbl_pram->l3qt[1],
882 in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
883 ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x",
884 (u32) & ugeth->p_rx_glbl_pram->l3qt[2],
885 in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
886 ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x",
887 (u32) & ugeth->p_rx_glbl_pram->l3qt[3],
888 in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
889 ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x",
890 (u32) & ugeth->p_rx_glbl_pram->l3qt[4],
891 in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
892 ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x",
893 (u32) & ugeth->p_rx_glbl_pram->l3qt[5],
894 in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
895 ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x",
896 (u32) & ugeth->p_rx_glbl_pram->l3qt[6],
897 in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
898 ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x",
899 (u32) & ugeth->p_rx_glbl_pram->l3qt[7],
900 in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
901 ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x",
902 (u32) & ugeth->p_rx_glbl_pram->vlantype,
903 in_be16(&ugeth->p_rx_glbl_pram->vlantype));
904 ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x",
905 (u32) & ugeth->p_rx_glbl_pram->vlantci,
906 in_be16(&ugeth->p_rx_glbl_pram->vlantci));
907 for (i = 0; i < 64; i++)
909 ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
911 (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i],
912 ugeth->p_rx_glbl_pram->addressfiltering[i]);
913 ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x",
914 (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam,
915 in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
917 if (ugeth->p_send_q_mem_reg) {
918 ugeth_info("Send Q memory registers:");
919 ugeth_info("Base address: 0x%08x",
920 (u32) ugeth->p_send_q_mem_reg);
921 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
922 ugeth_info("SQQD[%d]:", i);
923 ugeth_info("Base address: 0x%08x",
924 (u32) & ugeth->p_send_q_mem_reg->sqqd[i]);
925 mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
926 sizeof(struct ucc_geth_send_queue_qd));
929 if (ugeth->p_scheduler) {
930 ugeth_info("Scheduler:");
931 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
932 mem_disp((u8 *) ugeth->p_scheduler,
933 sizeof(*ugeth->p_scheduler));
935 if (ugeth->p_tx_fw_statistics_pram) {
936 ugeth_info("TX FW statistics pram:");
937 ugeth_info("Base address: 0x%08x",
938 (u32) ugeth->p_tx_fw_statistics_pram);
939 mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
940 sizeof(*ugeth->p_tx_fw_statistics_pram));
942 if (ugeth->p_rx_fw_statistics_pram) {
943 ugeth_info("RX FW statistics pram:");
944 ugeth_info("Base address: 0x%08x",
945 (u32) ugeth->p_rx_fw_statistics_pram);
946 mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
947 sizeof(*ugeth->p_rx_fw_statistics_pram));
949 if (ugeth->p_rx_irq_coalescing_tbl) {
950 ugeth_info("RX IRQ coalescing tables:");
951 ugeth_info("Base address: 0x%08x",
952 (u32) ugeth->p_rx_irq_coalescing_tbl);
953 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
954 ugeth_info("RX IRQ coalescing table entry[%d]:", i);
955 ugeth_info("Base address: 0x%08x",
956 (u32) & ugeth->p_rx_irq_coalescing_tbl->
959 ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
960 (u32) & ugeth->p_rx_irq_coalescing_tbl->
961 coalescingentry[i].interruptcoalescingmaxvalue,
962 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
964 interruptcoalescingmaxvalue));
966 ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
967 (u32) & ugeth->p_rx_irq_coalescing_tbl->
968 coalescingentry[i].interruptcoalescingcounter,
969 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
971 interruptcoalescingcounter));
974 if (ugeth->p_rx_bd_qs_tbl) {
975 ugeth_info("RX BD QS tables:");
976 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
977 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
978 ugeth_info("RX BD QS table[%d]:", i);
979 ugeth_info("Base address: 0x%08x",
980 (u32) & ugeth->p_rx_bd_qs_tbl[i]);
982 ("bdbaseptr : addr - 0x%08x, val - 0x%08x",
983 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
984 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
986 ("bdptr : addr - 0x%08x, val - 0x%08x",
987 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr,
988 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
990 ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
991 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
992 in_be32(&ugeth->p_rx_bd_qs_tbl[i].
995 ("externalbdptr : addr - 0x%08x, val - 0x%08x",
996 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
997 in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
998 ugeth_info("ucode RX Prefetched BDs:");
999 ugeth_info("Base address: 0x%08x",
1001 qe_muram_addr(in_be32
1002 (&ugeth->p_rx_bd_qs_tbl[i].
1005 qe_muram_addr(in_be32
1006 (&ugeth->p_rx_bd_qs_tbl[i].
1008 sizeof(struct ucc_geth_rx_prefetched_bds));
1011 if (ugeth->p_init_enet_param_shadow) {
1013 ugeth_info("Init enet param shadow:");
1014 ugeth_info("Base address: 0x%08x",
1015 (u32) ugeth->p_init_enet_param_shadow);
1016 mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
1017 sizeof(*ugeth->p_init_enet_param_shadow));
1019 size = sizeof(struct ucc_geth_thread_rx_pram);
1020 if (ugeth->ug_info->rxExtendedFiltering) {
1022 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
1023 if (ugeth->ug_info->largestexternallookupkeysize ==
1024 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
1026 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
1027 if (ugeth->ug_info->largestexternallookupkeysize ==
1028 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
1030 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
1033 dump_init_enet_entries(ugeth,
1034 &(ugeth->p_init_enet_param_shadow->
1036 ENET_INIT_PARAM_MAX_ENTRIES_TX,
1037 sizeof(struct ucc_geth_thread_tx_pram),
1038 ugeth->ug_info->riscTx, 0);
1039 dump_init_enet_entries(ugeth,
1040 &(ugeth->p_init_enet_param_shadow->
1042 ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
1043 ugeth->ug_info->riscRx, 1);
1048 static void init_default_reg_vals(u32 __iomem *upsmr_register,
1049 u32 __iomem *maccfg1_register,
1050 u32 __iomem *maccfg2_register)
1052 out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
1053 out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
1054 out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
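/* Validate the half-duplex parameters against their hardware maxima and
 * pack them into the HAFDUP register.
 */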
1057 static int init_half_duplex_params(int alt_beb,
1058 int back_pressure_no_backoff,
1061 u8 alt_beb_truncation,
1062 u8 max_retransmissions,
1063 u8 collision_window,
1064 u32 __iomem *hafdup_register)
1068 if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
1069 (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
1070 (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
1073 value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);
1076 value |= HALFDUP_ALT_BEB;
1077 if (back_pressure_no_backoff)
1078 value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
1080 value |= HALFDUP_NO_BACKOFF;
1082 value |= HALFDUP_EXCESSIVE_DEFER;
1084 value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);
1086 value |= collision_window;
1088 out_be32(hafdup_register, value);
1092 static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
1096 u32 __iomem *ipgifg_register)
1100 /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back IPG part 2 */
1102 if (non_btb_cs_ipg > non_btb_ipg)
1105 if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
1106 (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
1107 /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
1108 (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
1112 ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
1113 IPGIFG_NBTB_CS_IPG_MASK);
1115 ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
1116 IPGIFG_NBTB_IPG_MASK);
1118 ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
1119 IPGIFG_MIN_IFG_MASK);
1120 value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);
1122 out_be32(ipgifg_register, value);
1126 int init_flow_control_params(u32 automatic_flow_control_mode,
1127 int rx_flow_control_enable,
1128 int tx_flow_control_enable,
1130 u16 extension_field,
1131 u32 __iomem *upsmr_register,
1132 u32 __iomem *uempr_register,
1133 u32 __iomem *maccfg1_register)
1137 /* Set UEMPR register */
1138 value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
1139 value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
1140 out_be32(uempr_register, value);
1142 /* Set UPSMR register */
1143 setbits32(upsmr_register, automatic_flow_control_mode);
1145 value = in_be32(maccfg1_register);
1146 if (rx_flow_control_enable)
1147 value |= MACCFG1_FLOW_RX;
1148 if (tx_flow_control_enable)
1149 value |= MACCFG1_FLOW_TX;
1150 out_be32(maccfg1_register, value);
1155 static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
1156 int auto_zero_hardware_statistics,
1157 u32 __iomem *upsmr_register,
1158 u16 __iomem *uescr_register)
1160 u16 uescr_value = 0;
1162 /* Enable hardware statistics gathering if requested */
1163 if (enable_hardware_statistics)
1164 setbits32(upsmr_register, UCC_GETH_UPSMR_HSE);
1166 /* Clear hardware statistics counters */
1167 uescr_value = in_be16(uescr_register);
1168 uescr_value |= UESCR_CLRCNT;
1169 /* Automatically zero hardware statistics counters on read,
1171 if (auto_zero_hardware_statistics)
1172 uescr_value |= UESCR_AUTOZ;
1173 out_be16(uescr_register, uescr_value);
1178 static int init_firmware_statistics_gathering_mode(int
1179 enable_tx_firmware_statistics,
1180 int enable_rx_firmware_statistics,
1181 u32 __iomem *tx_rmon_base_ptr,
1182 u32 tx_firmware_statistics_structure_address,
1183 u32 __iomem *rx_rmon_base_ptr,
1184 u32 rx_firmware_statistics_structure_address,
1185 u16 __iomem *temoder_register,
1186 u32 __iomem *remoder_register)
1188 /* Note: this function does not check if */
1189 /* the parameters it receives are NULL */
1191 if (enable_tx_firmware_statistics) {
1192 out_be32(tx_rmon_base_ptr,
1193 tx_firmware_statistics_structure_address);
1194 setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE);
1197 if (enable_rx_firmware_statistics) {
1198 out_be32(rx_rmon_base_ptr,
1199 rx_firmware_statistics_structure_address);
1200 setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE);
1206 static int init_mac_station_addr_regs(u8 address_byte_0,
1212 u32 __iomem *macstnaddr1_register,
1213 u32 __iomem *macstnaddr2_register)
1217 /* Example: for a station address of 0x12345678ABCD, */
1218 /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */
1220 /* MACSTNADDR1 Register: */
1223 /* station address byte 5 station address byte 4 */
1225 /* station address byte 3 station address byte 2 */
1226 value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
1227 value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
1228 value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
1229 value |= (u32) ((address_byte_5 << 24) & 0xFF000000);
1231 out_be32(macstnaddr1_register, value);
1233 /* MACSTNADDR2 Register: */
1236 /* station address byte 1 station address byte 0 */
1238 /* reserved reserved */
1240 value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
1241 value |= (u32) ((address_byte_1 << 24) & 0xFF000000);
1243 out_be32(macstnaddr2_register, value);
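/* Using the example above (station address 0x12345678ABCD) this yields
 * MACSTNADDR1 = 0xCDAB7856 and MACSTNADDR2 = 0x34120000.
 */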
1248 static int init_check_frame_length_mode(int length_check,
1249 u32 __iomem *maccfg2_register)
1253 value = in_be32(maccfg2_register);
1256 value |= MACCFG2_LC;
1258 value &= ~MACCFG2_LC;
1260 out_be32(maccfg2_register, value);
1264 static int init_preamble_length(u8 preamble_length,
1265 u32 __iomem *maccfg2_register)
1267 if ((preamble_length < 3) || (preamble_length > 7))
1270 clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
1271 preamble_length << MACCFG2_PREL_SHIFT);
1276 static int init_rx_parameters(int reject_broadcast,
1277 int receive_short_frames,
1278 int promiscuous, u32 __iomem *upsmr_register)
1282 value = in_be32(upsmr_register);
1284 if (reject_broadcast)
1285 value |= UCC_GETH_UPSMR_BRO;
1287 value &= ~UCC_GETH_UPSMR_BRO;
1289 if (receive_short_frames)
1290 value |= UCC_GETH_UPSMR_RSH;
1292 value &= ~UCC_GETH_UPSMR_RSH;
1295 value |= UCC_GETH_UPSMR_PRO;
1297 value &= ~UCC_GETH_UPSMR_PRO;
1299 out_be32(upsmr_register, value);
1304 static int init_max_rx_buff_len(u16 max_rx_buf_len,
1305 u16 __iomem *mrblr_register)
1307 /* max_rx_buf_len value must be a multiple of 128 */
1308 if ((max_rx_buf_len == 0) ||
1309 (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
1312 out_be16(mrblr_register, max_rx_buf_len);
1316 static int init_min_frame_len(u16 min_frame_length,
1317 u16 __iomem *minflr_register,
1318 u16 __iomem *mrblr_register)
1320 u16 mrblr_value = 0;
1322 mrblr_value = in_be16(mrblr_register);
1323 if (min_frame_length >= (mrblr_value - 4))
1326 out_be16(minflr_register, min_frame_length);
1330 static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1332 struct ucc_geth_info *ug_info;
1333 struct ucc_geth __iomem *ug_regs;
1334 struct ucc_fast __iomem *uf_regs;
1339 ugeth_vdbg("%s: IN", __func__);
1341 ug_info = ugeth->ug_info;
1342 ug_regs = ugeth->ug_regs;
1343 uf_regs = ugeth->uccf->uf_regs;
1346 maccfg2 = in_be32(&ug_regs->maccfg2);
1347 maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
1348 if ((ugeth->max_speed == SPEED_10) ||
1349 (ugeth->max_speed == SPEED_100))
1350 maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
1351 else if (ugeth->max_speed == SPEED_1000)
1352 maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
1353 maccfg2 |= ug_info->padAndCrc;
1354 out_be32(&ug_regs->maccfg2, maccfg2);
1357 upsmr = in_be32(&uf_regs->upsmr);
1358 upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
1359 UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
1360 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
1361 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
1362 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1363 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1364 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
1365 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1366 if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII)
1367 upsmr |= UCC_GETH_UPSMR_RPM;
1368 switch (ugeth->max_speed) {
1370 upsmr |= UCC_GETH_UPSMR_R10M;
1373 if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
1374 upsmr |= UCC_GETH_UPSMR_RMM;
1377 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
1378 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1379 upsmr |= UCC_GETH_UPSMR_TBIM;
1381 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII))
1382 upsmr |= UCC_GETH_UPSMR_SGMM;
1384 out_be32(&uf_regs->upsmr, upsmr);
1386 /* Disable autonegotiation in tbi mode, because by default it
1387 comes up in autonegotiation mode. */
1388 /* Note that this depends on proper setting in utbipar register. */
1389 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
1390 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1391 struct ucc_geth_info *ug_info = ugeth->ug_info;
1392 struct phy_device *tbiphy;
1394 if (!ug_info->tbi_node)
1395 ugeth_warn("TBI mode requires that the device "
1396 "tree specify a tbi-handle\n");
1398 tbiphy = of_phy_find_device(ug_info->tbi_node);
1400 ugeth_warn("Could not get TBI device\n");
1402 value = phy_read(tbiphy, ENET_TBI_MII_CR);
1403 value &= ~0x1000; /* Turn off autonegotiation */
1404 phy_write(tbiphy, ENET_TBI_MII_CR, value);
1407 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
1409 ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
1411 if (netif_msg_probe(ugeth))
1412 ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.",
1420 static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
1422 struct ucc_fast_private *uccf;
1429 /* Mask GRACEFUL STOP TX interrupt bit and clear it */
1430 clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA);
1431 out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA); /* clear by writing 1 */
1433 /* Issue host command */
1435 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1436 qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
1437 QE_CR_PROTOCOL_ETHERNET, 0);
1439 /* Wait for command to complete */
1442 temp = in_be32(uccf->p_ucce);
1443 } while (!(temp & UCC_GETH_UCCE_GRA) && --i);
1445 uccf->stopped_tx = 1;
1450 static int ugeth_graceful_stop_rx(struct ucc_geth_private *ugeth)
1452 struct ucc_fast_private *uccf;
1459 /* Clear acknowledge bit */
1460 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
1461 temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
1462 out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp);
1464 /* Keep issuing command and checking acknowledge bit until
1465 it is asserted, according to spec */
1467 /* Issue host command */
1469 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
1471 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
1472 QE_CR_PROTOCOL_ETHERNET, 0);
1474 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
1475 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i);
1477 uccf->stopped_rx = 1;
1482 static int ugeth_restart_tx(struct ucc_geth_private *ugeth)
1484 struct ucc_fast_private *uccf;
1490 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1491 qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0);
1492 uccf->stopped_tx = 0;
1497 static int ugeth_restart_rx(struct ucc_geth_private *ugeth)
1499 struct ucc_fast_private *uccf;
1505 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1506 qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
1508 uccf->stopped_rx = 0;
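/* Enable the UCC in the requested direction(s); if either direction had
 * been gracefully stopped and is not currently enabled, it is restarted
 * first.
 */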
1513 static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
1515 struct ucc_fast_private *uccf;
1516 int enabled_tx, enabled_rx;
1520 /* check if the UCC number is in range. */
1521 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1522 if (netif_msg_probe(ugeth))
1523 ugeth_err("%s: ucc_num out of range.", __func__);
1527 enabled_tx = uccf->enabled_tx;
1528 enabled_rx = uccf->enabled_rx;
1530 /* Get Tx and Rx going again, in case this channel was actively
1532 if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
1533 ugeth_restart_tx(ugeth);
1534 if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
1535 ugeth_restart_rx(ugeth);
1537 ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */
1543 static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
1545 struct ucc_fast_private *uccf;
1549 /* check if the UCC number is in range. */
1550 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1551 if (netif_msg_probe(ugeth))
1552 ugeth_err("%s: ucc_num out of range.", __func__);
1556 /* Stop any transmissions */
1557 if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
1558 ugeth_graceful_stop_tx(ugeth);
1560 /* Stop any receptions */
1561 if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
1562 ugeth_graceful_stop_rx(ugeth);
1564 ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */
1569 static void ugeth_quiesce(struct ucc_geth_private *ugeth)
1571 /* Prevent any further xmits, plus detach the device. */
1572 netif_device_detach(ugeth->ndev);
1574 /* Wait for any current xmits to finish. */
1575 netif_tx_disable(ugeth->ndev);
1577 /* Disable the interrupt to avoid NAPI rescheduling. */
1578 disable_irq(ugeth->ug_info->uf_info.irq);
1580 /* Stop NAPI, and possibly wait for its completion. */
1581 napi_disable(&ugeth->napi);
1584 static void ugeth_activate(struct ucc_geth_private *ugeth)
1586 napi_enable(&ugeth->napi);
1587 enable_irq(ugeth->ug_info->uf_info.irq);
1588 netif_device_attach(ugeth->ndev);
1591 /* Called every time the controller might need to be made
1592 * aware of new link state. The PHY code conveys this
1593 * information through variables in the ugeth structure, and this
1594 * function converts those variables into the appropriate
1595 * register values, and can bring down the device if needed.
1598 static void adjust_link(struct net_device *dev)
1600 struct ucc_geth_private *ugeth = netdev_priv(dev);
1601 struct ucc_geth __iomem *ug_regs;
1602 struct ucc_fast __iomem *uf_regs;
1603 struct phy_device *phydev = ugeth->phydev;
1606 ug_regs = ugeth->ug_regs;
1607 uf_regs = ugeth->uccf->uf_regs;
1610 u32 tempval = in_be32(&ug_regs->maccfg2);
1611 u32 upsmr = in_be32(&uf_regs->upsmr);
1612 /* Now we make sure that we can be in full duplex mode.
1613 * If not, we operate in half-duplex mode. */
1614 if (phydev->duplex != ugeth->oldduplex) {
1616 if (!(phydev->duplex))
1617 tempval &= ~(MACCFG2_FDX);
1619 tempval |= MACCFG2_FDX;
1620 ugeth->oldduplex = phydev->duplex;
1623 if (phydev->speed != ugeth->oldspeed) {
1625 switch (phydev->speed) {
1627 tempval = ((tempval &
1628 ~(MACCFG2_INTERFACE_MODE_MASK)) |
1629 MACCFG2_INTERFACE_MODE_BYTE);
1633 tempval = ((tempval &
1634 ~(MACCFG2_INTERFACE_MODE_MASK)) |
1635 MACCFG2_INTERFACE_MODE_NIBBLE);
1636 /* if reduced mode, re-set UPSMR.R10M */
1637 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
1638 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
1639 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1640 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1641 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
1642 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1643 if (phydev->speed == SPEED_10)
1644 upsmr |= UCC_GETH_UPSMR_R10M;
1646 upsmr &= ~UCC_GETH_UPSMR_R10M;
1650 if (netif_msg_link(ugeth))
1652 "%s: Ack! Speed (%d) is not 10/100/1000!",
1653 dev->name, phydev->speed);
1656 ugeth->oldspeed = phydev->speed;
1659 if (!ugeth->oldlink) {
1666 * To change the MAC configuration we need to disable
1667 * the controller. To do so, we have to either grab
1668 * ugeth->lock, which is a bad idea since 'graceful
1669 * stop' commands might take quite a while, or we can
1670 * quiesce driver's activity.
1672 ugeth_quiesce(ugeth);
1673 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
1675 out_be32(&ug_regs->maccfg2, tempval);
1676 out_be32(&uf_regs->upsmr, upsmr);
1678 ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
1679 ugeth_activate(ugeth);
1681 } else if (ugeth->oldlink) {
1684 ugeth->oldspeed = 0;
1685 ugeth->oldduplex = -1;
1688 if (new_state && netif_msg_link(ugeth))
1689 phy_print_status(phydev);
1692 /* Initialize TBI PHY interface for communicating with the
1693 * SERDES lynx PHY on the chip. We communicate with this PHY
1694 * through the MDIO bus on each controller, treating it as a
1695 * "normal" PHY at the address found in the UTBIPA register. We assume
1696 * that the UTBIPA register is valid. Either the MDIO bus code will set
1697 * it to a value that doesn't conflict with other PHYs on the bus, or the
1698 * value doesn't matter, as there are no other PHYs on the bus.
1700 static void uec_configure_serdes(struct net_device *dev)
1702 struct ucc_geth_private *ugeth = netdev_priv(dev);
1703 struct ucc_geth_info *ug_info = ugeth->ug_info;
1704 struct phy_device *tbiphy;
1706 if (!ug_info->tbi_node) {
1707 dev_warn(&dev->dev, "SGMII mode requires that the device "
1708 "tree specify a tbi-handle\n");
1712 tbiphy = of_phy_find_device(ug_info->tbi_node);
1714 dev_err(&dev->dev, "error: Could not get TBI device\n");
1719 * If the link is already up, we must already be ok, and don't need to
1720 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1721 * everything for us? Resetting it takes the link down and requires
1722 * several seconds for it to come back.
1724 if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
1727 /* Single clock mode, MII mode off (for SerDes communication) */
1728 phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
1730 phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
1732 phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
1735 /* Configure the PHY for dev.
1736 * Returns 0 on success, -1 on failure.
1738 static int init_phy(struct net_device *dev)
1740 struct ucc_geth_private *priv = netdev_priv(dev);
1741 struct ucc_geth_info *ug_info = priv->ug_info;
1742 struct phy_device *phydev;
1746 priv->oldduplex = -1;
1748 phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
1749 priv->phy_interface);
1751 phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1752 priv->phy_interface);
1754 dev_err(&dev->dev, "Could not attach to PHY\n");
1758 if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
1759 uec_configure_serdes(dev);
1761 phydev->supported &= (SUPPORTED_MII |
1763 SUPPORTED_10baseT_Half |
1764 SUPPORTED_10baseT_Full |
1765 SUPPORTED_100baseT_Half |
1766 SUPPORTED_100baseT_Full);
1768 if (priv->max_speed == SPEED_1000)
1769 phydev->supported |= SUPPORTED_1000baseT_Full;
1771 phydev->advertising = phydev->supported;
1773 priv->phydev = phydev;
1778 static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
1781 ucc_fast_dump_regs(ugeth->uccf);
1787 static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *
1792 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
1793 struct ucc_fast_private *uccf;
1794 enum comm_dir comm_dir;
1795 struct list_head *p_lh;
1797 u32 __iomem *addr_h;
1798 u32 __iomem *addr_l;
1804 (struct ucc_geth_82xx_address_filtering_pram __iomem *)
1805 ugeth->p_rx_glbl_pram->addressfiltering;
1807 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
1808 addr_h = &(p_82xx_addr_filt->gaddr_h);
1809 addr_l = &(p_82xx_addr_filt->gaddr_l);
1810 p_lh = &ugeth->group_hash_q;
1811 p_counter = &(ugeth->numGroupAddrInHash);
1812 } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
1813 addr_h = &(p_82xx_addr_filt->iaddr_h);
1814 addr_l = &(p_82xx_addr_filt->iaddr_l);
1815 p_lh = &ugeth->ind_hash_q;
1816 p_counter = &(ugeth->numIndAddrInHash);
1821 if (uccf->enabled_tx)
1822 comm_dir |= COMM_DIR_TX;
1823 if (uccf->enabled_rx)
1824 comm_dir |= COMM_DIR_RX;
1826 ugeth_disable(ugeth, comm_dir);
1828 /* Clear the hash table. */
1829 out_be32(addr_h, 0x00000000);
1830 out_be32(addr_l, 0x00000000);
1837 /* Delete all remaining CQ elements */
1838 for (i = 0; i < num; i++)
1839 put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
1844 ugeth_enable(ugeth, comm_dir);
1849 static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth,
1852 ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
1853 return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
1856 static void ucc_geth_free_rx(struct ucc_geth_private *ugeth)
1858 struct ucc_geth_info *ug_info;
1859 struct ucc_fast_info *uf_info;
1864 ug_info = ugeth->ug_info;
1865 uf_info = &ug_info->uf_info;
1867 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
1868 if (ugeth->p_rx_bd_ring[i]) {
1869 /* Return existing data buffers in ring */
1870 bd = ugeth->p_rx_bd_ring[i];
1871 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
1872 if (ugeth->rx_skbuff[i][j]) {
1873 dma_unmap_single(ugeth->dev,
1874 in_be32(&((struct qe_bd __iomem *)bd)->buf),
1876 uf_info.max_rx_buf_length +
1877 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
1880 ugeth->rx_skbuff[i][j]);
1881 ugeth->rx_skbuff[i][j] = NULL;
1883 bd += sizeof(struct qe_bd);
1886 kfree(ugeth->rx_skbuff[i]);
1888 if (ugeth->ug_info->uf_info.bd_mem_part ==
1890 kfree((void *)ugeth->rx_bd_ring_offset[i]);
1891 else if (ugeth->ug_info->uf_info.bd_mem_part ==
1893 qe_muram_free(ugeth->rx_bd_ring_offset[i]);
1894 ugeth->p_rx_bd_ring[i] = NULL;
1900 static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
1902 struct ucc_geth_info *ug_info;
1903 struct ucc_fast_info *uf_info;
1907 ug_info = ugeth->ug_info;
1908 uf_info = &ug_info->uf_info;
1910 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
1911 bd = ugeth->p_tx_bd_ring[i];
1914 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
1915 if (ugeth->tx_skbuff[i][j]) {
1916 dma_unmap_single(ugeth->dev,
1917 in_be32(&((struct qe_bd __iomem *)bd)->buf),
1918 (in_be32((u32 __iomem *)bd) &
1921 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
1922 ugeth->tx_skbuff[i][j] = NULL;
1926 kfree(ugeth->tx_skbuff[i]);
1928 if (ugeth->p_tx_bd_ring[i]) {
1929 if (ugeth->ug_info->uf_info.bd_mem_part ==
1931 kfree((void *)ugeth->tx_bd_ring_offset[i]);
1932 else if (ugeth->ug_info->uf_info.bd_mem_part ==
1934 qe_muram_free(ugeth->tx_bd_ring_offset[i]);
1935 ugeth->p_tx_bd_ring[i] = NULL;
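/* Free every resource acquired during initialization: the fast UCC, all
 * MURAM parameter areas, the init-enet SNUMs/pages, the Tx/Rx BD rings
 * and skbs, the hash-queue containers and the register mapping.
 */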
1941 static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
1947 ucc_fast_free(ugeth->uccf);
1951 if (ugeth->p_thread_data_tx) {
1952 qe_muram_free(ugeth->thread_dat_tx_offset);
1953 ugeth->p_thread_data_tx = NULL;
1955 if (ugeth->p_thread_data_rx) {
1956 qe_muram_free(ugeth->thread_dat_rx_offset);
1957 ugeth->p_thread_data_rx = NULL;
1959 if (ugeth->p_exf_glbl_param) {
1960 qe_muram_free(ugeth->exf_glbl_param_offset);
1961 ugeth->p_exf_glbl_param = NULL;
1963 if (ugeth->p_rx_glbl_pram) {
1964 qe_muram_free(ugeth->rx_glbl_pram_offset);
1965 ugeth->p_rx_glbl_pram = NULL;
1967 if (ugeth->p_tx_glbl_pram) {
1968 qe_muram_free(ugeth->tx_glbl_pram_offset);
1969 ugeth->p_tx_glbl_pram = NULL;
1971 if (ugeth->p_send_q_mem_reg) {
1972 qe_muram_free(ugeth->send_q_mem_reg_offset);
1973 ugeth->p_send_q_mem_reg = NULL;
1975 if (ugeth->p_scheduler) {
1976 qe_muram_free(ugeth->scheduler_offset);
1977 ugeth->p_scheduler = NULL;
1979 if (ugeth->p_tx_fw_statistics_pram) {
1980 qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
1981 ugeth->p_tx_fw_statistics_pram = NULL;
1983 if (ugeth->p_rx_fw_statistics_pram) {
1984 qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
1985 ugeth->p_rx_fw_statistics_pram = NULL;
1987 if (ugeth->p_rx_irq_coalescing_tbl) {
1988 qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
1989 ugeth->p_rx_irq_coalescing_tbl = NULL;
1991 if (ugeth->p_rx_bd_qs_tbl) {
1992 qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
1993 ugeth->p_rx_bd_qs_tbl = NULL;
1995 if (ugeth->p_init_enet_param_shadow) {
1996 return_init_enet_entries(ugeth,
1997 &(ugeth->p_init_enet_param_shadow->
1999 ENET_INIT_PARAM_MAX_ENTRIES_RX,
2000 ugeth->ug_info->riscRx, 1);
2001 return_init_enet_entries(ugeth,
2002 &(ugeth->p_init_enet_param_shadow->
2004 ENET_INIT_PARAM_MAX_ENTRIES_TX,
2005 ugeth->ug_info->riscTx, 0);
2006 kfree(ugeth->p_init_enet_param_shadow);
2007 ugeth->p_init_enet_param_shadow = NULL;
2009 ucc_geth_free_tx(ugeth);
2010 ucc_geth_free_rx(ugeth);
2011 while (!list_empty(&ugeth->group_hash_q))
2012 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2013 (dequeue(&ugeth->group_hash_q)));
2014 while (!list_empty(&ugeth->ind_hash_q))
2015 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2016 (dequeue(&ugeth->ind_hash_q)));
2017 if (ugeth->ug_regs) {
2018 iounmap(ugeth->ug_regs);
2019 ugeth->ug_regs = NULL;
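/*
 * Rx filtering policy, as implemented below: IFF_PROMISC just sets the
 * promiscuous bit in UPSMR; otherwise the 82xx-style address filtering
 * PRAM is used, where IFF_ALLMULTI writes all-ones to the group address
 * hash (gaddr_h/gaddr_l) and the normal case clears the hash and asks the
 * QE to hash in each entry of the multicast list via
 * hw_add_addr_in_hash().
 */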
2023 static void ucc_geth_set_multi(struct net_device *dev)
2025 struct ucc_geth_private *ugeth;
2026 struct netdev_hw_addr *ha;
2027 struct ucc_fast __iomem *uf_regs;
2028 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
2030 ugeth = netdev_priv(dev);
2032 uf_regs = ugeth->uccf->uf_regs;
2034 if (dev->flags & IFF_PROMISC) {
2035 setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
2037 clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
p_82xx_addr_filt =
2040 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
2041 p_rx_glbl_pram->addressfiltering;
2043 if (dev->flags & IFF_ALLMULTI) {
2044 /* Catch all multicast addresses, so set the
2045 * filter to all 1's.
2047 out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
2048 out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
2050 /* Clear filter and add the addresses in the list.
2052 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
2053 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
2055 netdev_for_each_mc_addr(ha, dev) {
2056 /* Ask CPM to run CRC and set bit in
2059 hw_add_addr_in_hash(ugeth, ha->addr);
2065 static void ucc_geth_stop(struct ucc_geth_private *ugeth)
2067 struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
2068 struct phy_device *phydev = ugeth->phydev;
2070 ugeth_vdbg("%s: IN", __func__);
2073 * Tell the kernel the link is down.
2074 * Must be done before disabling the controller
2075 * or deadlock may happen.
*/
phy_stop(phydev);
2079 /* Disable the controller */
2080 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
2082 /* Mask all interrupts */
2083 out_be32(ugeth->uccf->p_uccm, 0x00000000);
2085 /* Clear all interrupts */
2086 out_be32(ugeth->uccf->p_ucce, 0xffffffff);
2088 /* Disable Rx and Tx */
2089 clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
2091 ucc_geth_memclean(ugeth);
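/*
 * ucc_struct_init() below only validates the configuration (memory
 * partition, BD ring lengths and alignment, queue counts, priority
 * tables, filtering options), builds the interrupt mask, brings up the
 * fast UCC layer and maps the register window; the heavy lifting of
 * parameter RAM and BD ring setup happens later in ucc_geth_startup().
 */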
2094 static int ucc_struct_init(struct ucc_geth_private *ugeth)
2096 struct ucc_geth_info *ug_info;
2097 struct ucc_fast_info *uf_info;
2100 ug_info = ugeth->ug_info;
2101 uf_info = &ug_info->uf_info;
2103 if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
2104 (uf_info->bd_mem_part == MEM_PART_MURAM))) {
2105 if (netif_msg_probe(ugeth))
2106 ugeth_err("%s: Bad memory partition value.",
2112 for (i = 0; i < ug_info->numQueuesRx; i++) {
2113 if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
2114 (ug_info->bdRingLenRx[i] %
2115 UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
2116 if (netif_msg_probe(ugeth))
2118 ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.",
2125 for (i = 0; i < ug_info->numQueuesTx; i++) {
2126 if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
2127 if (netif_msg_probe(ugeth))
2129 ("%s: Tx BD ring length must be no smaller than 2.",
2136 if ((uf_info->max_rx_buf_length == 0) ||
2137 (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
2138 if (netif_msg_probe(ugeth))
2140 ("%s: max_rx_buf_length must be non-zero multiple of 128.",
2146 if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
2147 if (netif_msg_probe(ugeth))
2148 ugeth_err("%s: number of tx queues too large.", __func__);
2153 if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
2154 if (netif_msg_probe(ugeth))
2155 ugeth_err("%s: number of rx queues too large.", __func__);
2160 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
2161 if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
2162 if (netif_msg_probe(ugeth))
2164 ("%s: VLAN priority table entry must not be"
2165 " larger than number of Rx queues.",
2172 for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
2173 if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
2174 if (netif_msg_probe(ugeth))
2176 ("%s: IP priority table entry must not be"
2177 " larger than number of Rx queues.",
2183 if (ug_info->cam && !ug_info->ecamptr) {
2184 if (netif_msg_probe(ugeth))
2185 ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
2190 if ((ug_info->numStationAddresses !=
2191 UCC_GETH_NUM_OF_STATION_ADDRESSES_1) &&
2192 ug_info->rxExtendedFiltering) {
2193 if (netif_msg_probe(ugeth))
2194 ugeth_err("%s: Number of station addresses greater than 1 "
2195 "not allowed in extended parsing mode.",
2200 /* Generate uccm_mask for receive */
2201 uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
2202 for (i = 0; i < ug_info->numQueuesRx; i++)
2203 uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i);
2205 for (i = 0; i < ug_info->numQueuesTx; i++)
2206 uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i);
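/*
 * The RXF and TXB event bits are laid out one per queue, so shifting the
 * queue-0 bit left by the queue index enables exactly the events for the
 * queues that are actually configured; error events come from
 * eventRegMask masked with UCCE_OTHER above.
 */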
2207 /* Initialize the general fast UCC block. */
2208 if (ucc_fast_init(uf_info, &ugeth->uccf)) {
2209 if (netif_msg_probe(ugeth))
2210 ugeth_err("%s: Failed to init uccf.", __func__);
2214 /* read the number of risc engines, update the riscTx and riscRx
2215 * if there are 4 riscs in QE
2217 if (qe_get_num_of_risc() == 4) {
2218 ug_info->riscTx = QE_RISC_ALLOCATION_FOUR_RISCS;
2219 ug_info->riscRx = QE_RISC_ALLOCATION_FOUR_RISCS;
2222 ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs));
2223 if (!ugeth->ug_regs) {
2224 if (netif_msg_probe(ugeth))
2225 ugeth_err("%s: Failed to ioremap regs.", __func__);
2232 static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
2234 struct ucc_geth_info *ug_info;
2235 struct ucc_fast_info *uf_info;
2240 ug_info = ugeth->ug_info;
2241 uf_info = &ug_info->uf_info;
2243 /* Allocate Tx bds */
2244 for (j = 0; j < ug_info->numQueuesTx; j++) {
2245 /* Allocate in multiple of
2246 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
2247 according to spec */
2248 length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
2249 / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2250 * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2251 if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
2252 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2253 length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2254 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
2256 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
2257 align = UCC_GETH_TX_BD_RING_ALIGNMENT;
2258 ugeth->tx_bd_ring_offset[j] =
2259 (u32) kmalloc((u32) (length + align), GFP_KERNEL);
2261 if (ugeth->tx_bd_ring_offset[j] != 0)
2262 ugeth->p_tx_bd_ring[j] =
2263 (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
2264 align) & ~(align - 1));
2265 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2266 ugeth->tx_bd_ring_offset[j] =
2267 qe_muram_alloc(length,
2268 UCC_GETH_TX_BD_RING_ALIGNMENT);
2269 if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
2270 ugeth->p_tx_bd_ring[j] =
2271 (u8 __iomem *) qe_muram_addr(ugeth->
2272 tx_bd_ring_offset[j]);
2274 if (!ugeth->p_tx_bd_ring[j]) {
2275 if (netif_msg_ifup(ugeth))
2277 ("%s: Can not allocate memory for Tx bd rings.",
2281 /* Zero unused end of bd ring, according to spec */
2282 memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
2283 ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
2284 length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
2288 for (j = 0; j < ug_info->numQueuesTx; j++) {
2289 /* Setup the skbuff rings */
2290 ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
2291 ugeth->ug_info->bdRingLenTx[j],
2294 if (ugeth->tx_skbuff[j] == NULL) {
2295 if (netif_msg_ifup(ugeth))
2296 ugeth_err("%s: Could not allocate tx_skbuff",
2301 for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
2302 ugeth->tx_skbuff[j][i] = NULL;
2304 ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
2305 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
2306 for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
2307 /* clear bd buffer */
2308 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
2309 /* set bd status and length */
2310 out_be32((u32 __iomem *)bd, 0);
2311 bd += sizeof(struct qe_bd);
2313 bd -= sizeof(struct qe_bd);
2314 /* set bd status and length */
2315 out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
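/*
 * Only the last descriptor carries the wrap (T_W) bit, so both the
 * hardware and the driver treat the ring as circular: whoever reaches a
 * BD with the wrap bit set goes back to p_tx_bd_ring[j] instead of
 * advancing by sizeof(struct qe_bd) (see ucc_geth_start_xmit() and
 * ucc_geth_tx() below).
 */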
2321 static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
2323 struct ucc_geth_info *ug_info;
2324 struct ucc_fast_info *uf_info;
2329 ug_info = ugeth->ug_info;
2330 uf_info = &ug_info->uf_info;
2332 /* Allocate Rx bds */
2333 for (j = 0; j < ug_info->numQueuesRx; j++) {
2334 length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
2335 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
2337 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
2338 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
2339 ugeth->rx_bd_ring_offset[j] =
2340 (u32) kmalloc((u32) (length + align), GFP_KERNEL);
2341 if (ugeth->rx_bd_ring_offset[j] != 0)
2342 ugeth->p_rx_bd_ring[j] =
2343 (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
2344 align) & ~(align - 1));
2345 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2346 ugeth->rx_bd_ring_offset[j] =
2347 qe_muram_alloc(length,
2348 UCC_GETH_RX_BD_RING_ALIGNMENT);
2349 if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
2350 ugeth->p_rx_bd_ring[j] =
2351 (u8 __iomem *) qe_muram_addr(ugeth->
2352 rx_bd_ring_offset[j]);
2354 if (!ugeth->p_rx_bd_ring[j]) {
2355 if (netif_msg_ifup(ugeth))
2357 ("%s: Can not allocate memory for Rx bd rings.",
2364 for (j = 0; j < ug_info->numQueuesRx; j++) {
2365 /* Setup the skbuff rings */
2366 ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
2367 ugeth->ug_info->bdRingLenRx[j],
2370 if (ugeth->rx_skbuff[j] == NULL) {
2371 if (netif_msg_ifup(ugeth))
2372 ugeth_err("%s: Could not allocate rx_skbuff",
2377 for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
2378 ugeth->rx_skbuff[j][i] = NULL;
2380 ugeth->skb_currx[j] = 0;
2381 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
2382 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
2383 /* set bd status and length */
2384 out_be32((u32 __iomem *)bd, R_I);
2385 /* clear bd buffer */
2386 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
2387 bd += sizeof(struct qe_bd);
2389 bd -= sizeof(struct qe_bd);
2390 /* set bd status and length */
2391 out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
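/*
 * Rx BDs start out with only R_I (interrupt on completion) and, on the
 * last one, R_W (wrap); the empty/ownership bit is presumably set later
 * when rx_bd_buffer_set() attaches an skb to each descriptor during
 * ucc_geth_startup().
 */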
2397 static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2399 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
2400 struct ucc_geth_init_pram __iomem *p_init_enet_pram;
2401 struct ucc_fast_private *uccf;
2402 struct ucc_geth_info *ug_info;
2403 struct ucc_fast_info *uf_info;
2404 struct ucc_fast __iomem *uf_regs;
2405 struct ucc_geth __iomem *ug_regs;
2406 int ret_val = -EINVAL;
2407 u32 remoder = UCC_GETH_REMODER_INIT;
2408 u32 init_enet_pram_offset, cecr_subblock, command;
2409 u32 ifstat, i, j, size, l2qt, l3qt;
2410 u16 temoder = UCC_GETH_TEMODER_INIT;
u16 test;
2412 u8 function_code = 0;
2413 u8 __iomem *endOfRing;
2414 u8 numThreadsRxNumerical, numThreadsTxNumerical;
2416 ugeth_vdbg("%s: IN", __func__);
2418 ug_info = ugeth->ug_info;
2419 uf_info = &ug_info->uf_info;
uccf = ugeth->uccf;
2420 uf_regs = uccf->uf_regs;
2421 ug_regs = ugeth->ug_regs;
2423 switch (ug_info->numThreadsRx) {
2424 case UCC_GETH_NUM_OF_THREADS_1:
2425 numThreadsRxNumerical = 1;
2427 case UCC_GETH_NUM_OF_THREADS_2:
2428 numThreadsRxNumerical = 2;
2430 case UCC_GETH_NUM_OF_THREADS_4:
2431 numThreadsRxNumerical = 4;
2433 case UCC_GETH_NUM_OF_THREADS_6:
2434 numThreadsRxNumerical = 6;
2436 case UCC_GETH_NUM_OF_THREADS_8:
2437 numThreadsRxNumerical = 8;
2440 if (netif_msg_ifup(ugeth))
2441 ugeth_err("%s: Bad number of Rx threads value.",
2447 switch (ug_info->numThreadsTx) {
2448 case UCC_GETH_NUM_OF_THREADS_1:
2449 numThreadsTxNumerical = 1;
2451 case UCC_GETH_NUM_OF_THREADS_2:
2452 numThreadsTxNumerical = 2;
2454 case UCC_GETH_NUM_OF_THREADS_4:
2455 numThreadsTxNumerical = 4;
2457 case UCC_GETH_NUM_OF_THREADS_6:
2458 numThreadsTxNumerical = 6;
2460 case UCC_GETH_NUM_OF_THREADS_8:
2461 numThreadsTxNumerical = 8;
2464 if (netif_msg_ifup(ugeth))
2465 ugeth_err("%s: Bad number of Tx threads value.",
2471 /* Calculate rx_extended_features */
2472 ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
2473 ug_info->ipAddressAlignment ||
2474 (ug_info->numStationAddresses !=
2475 UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
2477 ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
2478 (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) ||
2479 (ug_info->vlanOperationNonTagged !=
2480 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
2482 init_default_reg_vals(&uf_regs->upsmr,
2483 &ug_regs->maccfg1, &ug_regs->maccfg2);
2486 /* For more details see the hardware spec. */
2487 init_rx_parameters(ug_info->bro,
2488 ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
2490 /* We're going to ignore other registers for now, */
2491 /* except as needed to get up and running */
2494 /* For more details see the hardware spec. */
2495 init_flow_control_params(ug_info->aufc,
2496 ug_info->receiveFlowControl,
2497 ug_info->transmitFlowControl,
2498 ug_info->pausePeriod,
2499 ug_info->extensionField,
2501 &ug_regs->uempr, &ug_regs->maccfg1);
2503 setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
2506 /* For more details see the hardware spec. */
2507 ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
2508 ug_info->nonBackToBackIfgPart2,
2510 miminumInterFrameGapEnforcement,
2511 ug_info->backToBackInterFrameGap,
2514 if (netif_msg_ifup(ugeth))
2515 ugeth_err("%s: IPGIFG initialization parameter too large.",
2521 /* For more details see the hardware spec. */
2522 ret_val = init_half_duplex_params(ug_info->altBeb,
2523 ug_info->backPressureNoBackoff,
2525 ug_info->excessDefer,
2526 ug_info->altBebTruncation,
2527 ug_info->maxRetransmission,
2528 ug_info->collisionWindow,
2531 if (netif_msg_ifup(ugeth))
2532 ugeth_err("%s: Half Duplex initialization parameter too large.",
2538 /* For more details see the hardware spec. */
2539 /* Read only - resets upon read */
2540 ifstat = in_be32(&ug_regs->ifstat);
2543 /* For more details see the hardware spec. */
2544 out_be32(&ug_regs->uempr, 0);
2547 /* For more details see the hardware spec. */
2548 init_hw_statistics_gathering_mode((ug_info->statisticsMode &
2549 UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
2550 0, &uf_regs->upsmr, &ug_regs->uescr);
2552 ret_val = ucc_geth_alloc_tx(ugeth);
2556 ret_val = ucc_geth_alloc_rx(ugeth);
2563 /* Tx global PRAM */
2564 /* Allocate global tx parameter RAM page */
2565 ugeth->tx_glbl_pram_offset =
2566 qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
2567 UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
2568 if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
2569 if (netif_msg_ifup(ugeth))
2571 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
2575 ugeth->p_tx_glbl_pram =
2576 (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth->
2577 tx_glbl_pram_offset);
2578 /* Zero out p_tx_glbl_pram */
2579 memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
2581 /* Fill global PRAM */
2584 /* Size varies with number of Tx threads */
2585 ugeth->thread_dat_tx_offset =
2586 qe_muram_alloc(numThreadsTxNumerical *
2587 sizeof(struct ucc_geth_thread_data_tx) +
2588 32 * (numThreadsTxNumerical == 1),
2589 UCC_GETH_THREAD_DATA_ALIGNMENT);
2590 if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) {
2591 if (netif_msg_ifup(ugeth))
2593 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
2598 ugeth->p_thread_data_tx =
2599 (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth->
2600 thread_dat_tx_offset);
2601 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
2604 for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
2605 out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
2606 ug_info->vtagtable[i]);
2609 for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
2610 out_8(&ugeth->p_tx_glbl_pram->iphoffset[i],
2611 ug_info->iphoffset[i]);
2614 /* Size varies with number of Tx queues */
2615 ugeth->send_q_mem_reg_offset =
2616 qe_muram_alloc(ug_info->numQueuesTx *
2617 sizeof(struct ucc_geth_send_queue_qd),
2618 UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
2619 if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
2620 if (netif_msg_ifup(ugeth))
2622 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
2627 ugeth->p_send_q_mem_reg =
2628 (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth->
2629 send_q_mem_reg_offset);
2630 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
2632 /* Setup the table */
2633 /* Assume BD rings are already established */
2634 for (i = 0; i < ug_info->numQueuesTx; i++) {
endOfRing =
2636 ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
2637 1) * sizeof(struct qe_bd);
2638 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
2639 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
2640 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
2641 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
2642 last_bd_completed_address,
2643 (u32) virt_to_phys(endOfRing));
2644 } else if (ugeth->ug_info->uf_info.bd_mem_part ==
2646 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
2647 (u32) immrbar_virt_to_phys(ugeth->
2649 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
2650 last_bd_completed_address,
2651 (u32) immrbar_virt_to_phys(endOfRing));
2655 /* schedulerbasepointer */
2657 if (ug_info->numQueuesTx > 1) {
2658 /* scheduler exists only if more than 1 tx queue */
2659 ugeth->scheduler_offset =
2660 qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
2661 UCC_GETH_SCHEDULER_ALIGNMENT);
2662 if (IS_ERR_VALUE(ugeth->scheduler_offset)) {
2663 if (netif_msg_ifup(ugeth))
2665 ("%s: Can not allocate DPRAM memory for p_scheduler.",
2670 ugeth->p_scheduler =
2671 (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth->
2673 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
2674 ugeth->scheduler_offset);
2675 /* Zero out p_scheduler */
2676 memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
2678 /* Set values in scheduler */
2679 out_be32(&ugeth->p_scheduler->mblinterval,
2680 ug_info->mblinterval);
2681 out_be16(&ugeth->p_scheduler->nortsrbytetime,
2682 ug_info->nortsrbytetime);
2683 out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz);
2684 out_8(&ugeth->p_scheduler->strictpriorityq,
2685 ug_info->strictpriorityq);
2686 out_8(&ugeth->p_scheduler->txasap, ug_info->txasap);
2687 out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw);
2688 for (i = 0; i < NUM_TX_QUEUES; i++)
2689 out_8(&ugeth->p_scheduler->weightfactor[i],
2690 ug_info->weightfactor[i]);
2692 /* Set pointers to cpucount registers in scheduler */
2693 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
2694 ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
2695 ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
2696 ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
2697 ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
2698 ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
2699 ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
2700 ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
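/*
 * The cpucount registers in the scheduler PRAM are how the driver kicks
 * the QE when the scheduler is in use: ucc_geth_start_xmit() increments a
 * per-queue software counter and writes it to p_cpucount[txQ], telling
 * the microcode how many Tx BDs have been made ready so far.
 */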
2703 /* schedulerbasepointer */
2704 /* TxRMON_PTR (statistics) */
if (ug_info->
2706 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
2707 ugeth->tx_fw_statistics_pram_offset =
2708 qe_muram_alloc(sizeof
2709 (struct ucc_geth_tx_firmware_statistics_pram),
2710 UCC_GETH_TX_STATISTICS_ALIGNMENT);
2711 if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) {
2712 if (netif_msg_ifup(ugeth))
2714 ("%s: Can not allocate DPRAM memory for"
2715 " p_tx_fw_statistics_pram.",
2719 ugeth->p_tx_fw_statistics_pram =
2720 (struct ucc_geth_tx_firmware_statistics_pram __iomem *)
2721 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
2722 /* Zero out p_tx_fw_statistics_pram */
2723 memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram,
2724 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
2728 /* Already has speed set */
2730 if (ug_info->numQueuesTx > 1)
2731 temoder |= TEMODER_SCHEDULER_ENABLE;
2732 if (ug_info->ipCheckSumGenerate)
2733 temoder |= TEMODER_IP_CHECKSUM_GENERATE;
2734 temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
2735 out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
2737 test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
2739 /* Function code register value to be used later */
2740 function_code = UCC_BMR_BO_BE | UCC_BMR_GBL;
2741 /* Required for QE */
2743 /* function code register */
2744 out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
2746 /* Rx global PRAM */
2747 /* Allocate global rx parameter RAM page */
2748 ugeth->rx_glbl_pram_offset =
2749 qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
2750 UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
2751 if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
2752 if (netif_msg_ifup(ugeth))
2754 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
2758 ugeth->p_rx_glbl_pram =
2759 (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth->
2760 rx_glbl_pram_offset);
2761 /* Zero out p_rx_glbl_pram */
2762 memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
2764 /* Fill global PRAM */
2767 /* Size varies with number of Rx threads */
2768 ugeth->thread_dat_rx_offset =
2769 qe_muram_alloc(numThreadsRxNumerical *
2770 sizeof(struct ucc_geth_thread_data_rx),
2771 UCC_GETH_THREAD_DATA_ALIGNMENT);
2772 if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) {
2773 if (netif_msg_ifup(ugeth))
2775 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
2780 ugeth->p_thread_data_rx =
2781 (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth->
2782 thread_dat_rx_offset);
2783 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
2786 out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
2788 /* rxrmonbaseptr (statistics) */
if (ug_info->
2790 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
2791 ugeth->rx_fw_statistics_pram_offset =
2792 qe_muram_alloc(sizeof
2793 (struct ucc_geth_rx_firmware_statistics_pram),
2794 UCC_GETH_RX_STATISTICS_ALIGNMENT);
2795 if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) {
2796 if (netif_msg_ifup(ugeth))
2798 ("%s: Can not allocate DPRAM memory for"
2799 " p_rx_fw_statistics_pram.", __func__);
2802 ugeth->p_rx_fw_statistics_pram =
2803 (struct ucc_geth_rx_firmware_statistics_pram __iomem *)
2804 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
2805 /* Zero out p_rx_fw_statistics_pram */
2806 memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0,
2807 sizeof(struct ucc_geth_rx_firmware_statistics_pram));
2810 /* intCoalescingPtr */
2812 /* Size varies with number of Rx queues */
2813 ugeth->rx_irq_coalescing_tbl_offset =
2814 qe_muram_alloc(ug_info->numQueuesRx *
2815 sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
2816 + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
2817 if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
2818 if (netif_msg_ifup(ugeth))
2820 ("%s: Can not allocate DPRAM memory for"
2821 " p_rx_irq_coalescing_tbl.", __func__);
2825 ugeth->p_rx_irq_coalescing_tbl =
2826 (struct ucc_geth_rx_interrupt_coalescing_table __iomem *)
2827 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
2828 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
2829 ugeth->rx_irq_coalescing_tbl_offset);
2831 /* Fill interrupt coalescing table */
2832 for (i = 0; i < ug_info->numQueuesRx; i++) {
2833 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
2834 interruptcoalescingmaxvalue,
2835 ug_info->interruptcoalescingmaxvalue[i]);
2836 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
2837 interruptcoalescingcounter,
2838 ug_info->interruptcoalescingmaxvalue[i]);
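/*
 * Each coalescing entry holds a threshold (interruptcoalescingmaxvalue)
 * and a running counter; both fields are written with the configured
 * maximum here, i.e. the counter starts preloaded at its threshold,
 * presumably so the first received frames can interrupt promptly before
 * coalescing takes effect.
 */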
2842 init_max_rx_buff_len(uf_info->max_rx_buf_length,
2843 &ugeth->p_rx_glbl_pram->mrblr);
2845 out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
2847 init_min_frame_len(ug_info->minFrameLength,
2848 &ugeth->p_rx_glbl_pram->minflr,
2849 &ugeth->p_rx_glbl_pram->mrblr);
2851 out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
2853 out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
2857 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
2858 l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
2859 out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
2862 for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
2864 for (i = 0; i < 8; i++)
2865 l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
2866 out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt);
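/*
 * l2qt and l3qt are packed lookup tables: each 32-bit word holds eight
 * 4-bit Rx queue numbers, indexed by VLAN priority (l2qt) or by IP
 * precedence/TOS value (l3qt), hence the "<< (28 - 4 * i)" packing above;
 * for example, l2qt[0] ends up in bits 31-28 of the first word.
 */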
2870 out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
2873 out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
2876 out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
2879 /* Size varies with number of Rx queues */
2880 ugeth->rx_bd_qs_tbl_offset =
2881 qe_muram_alloc(ug_info->numQueuesRx *
2882 (sizeof(struct ucc_geth_rx_bd_queues_entry) +
2883 sizeof(struct ucc_geth_rx_prefetched_bds)),
2884 UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
2885 if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) {
2886 if (netif_msg_ifup(ugeth))
2888 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
2893 ugeth->p_rx_bd_qs_tbl =
2894 (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
2895 rx_bd_qs_tbl_offset);
2896 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
2897 /* Zero out p_rx_bd_qs_tbl */
2898 memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl,
2900 ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
2901 sizeof(struct ucc_geth_rx_prefetched_bds)));
2903 /* Setup the table */
2904 /* Assume BD rings are already established */
2905 for (i = 0; i < ug_info->numQueuesRx; i++) {
2906 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
2907 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
2908 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
2909 } else if (ugeth->ug_info->uf_info.bd_mem_part ==
2911 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
2912 (u32) immrbar_virt_to_phys(ugeth->
2915 /* rest of fields handled by QE */
2919 /* Already has speed set */
2921 if (ugeth->rx_extended_features)
2922 remoder |= REMODER_RX_EXTENDED_FEATURES;
2923 if (ug_info->rxExtendedFiltering)
2924 remoder |= REMODER_RX_EXTENDED_FILTERING;
2925 if (ug_info->dynamicMaxFrameLength)
2926 remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
2927 if (ug_info->dynamicMinFrameLength)
2928 remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
remoder |=
2930 ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
remoder |= ug_info->
2933 vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
2934 remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
2935 remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
2936 if (ug_info->ipCheckSumCheck)
2937 remoder |= REMODER_IP_CHECKSUM_CHECK;
2938 if (ug_info->ipAddressAlignment)
2939 remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
2940 out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
2942 /* Note that this function must be called */
2943 /* ONLY AFTER p_tx_fw_statistics_pram */
2944 /* and p_UccGethRxFirmwareStatisticsPram are allocated! */
2945 init_firmware_statistics_gathering_mode((ug_info->
statisticsMode &
2947 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
2948 (ug_info->statisticsMode &
2949 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
2950 &ugeth->p_tx_glbl_pram->txrmonbaseptr,
2951 ugeth->tx_fw_statistics_pram_offset,
2952 &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
2953 ugeth->rx_fw_statistics_pram_offset,
2954 &ugeth->p_tx_glbl_pram->temoder,
2955 &ugeth->p_rx_glbl_pram->remoder);
2957 /* function code register */
2958 out_8(&ugeth->p_rx_glbl_pram->rstate, function_code);
2960 /* initialize extended filtering */
2961 if (ug_info->rxExtendedFiltering) {
2962 if (!ug_info->extendedFilteringChainPointer) {
2963 if (netif_msg_ifup(ugeth))
2964 ugeth_err("%s: Null Extended Filtering Chain Pointer.",
2969 /* Allocate memory for extended filtering Mode Global
2971 ugeth->exf_glbl_param_offset =
2972 qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram),
2973 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
2974 if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) {
2975 if (netif_msg_ifup(ugeth))
2977 ("%s: Can not allocate DPRAM memory for"
2978 " p_exf_glbl_param.", __func__);
2982 ugeth->p_exf_glbl_param =
2983 (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth->
2984 exf_glbl_param_offset);
2985 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
2986 ugeth->exf_glbl_param_offset);
2987 out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
2988 (u32) ug_info->extendedFilteringChainPointer);
2990 } else { /* initialize 82xx style address filtering */
2992 /* Init individual address recognition registers to disabled */
2994 for (j = 0; j < NUM_OF_PADDRS; j++)
2995 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
p_82xx_addr_filt =
2998 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
2999 p_rx_glbl_pram->addressfiltering;
3001 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3002 ENET_ADDR_TYPE_GROUP);
3003 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3004 ENET_ADDR_TYPE_INDIVIDUAL);
3008 * Initialize UCC at QE level
3011 command = QE_INIT_TX_RX;
3013 /* Allocate shadow InitEnet command parameter structure.
3014 * This is needed because after the InitEnet command is executed,
3015 * the structure in DPRAM is released, because DPRAM is a premium resource.
3017 * This shadow structure keeps a copy of what was done so that the
3018 * allocated resources can be released when the channel is freed.
3020 if (!(ugeth->p_init_enet_param_shadow =
3021 kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
3022 if (netif_msg_ifup(ugeth))
3024 ("%s: Can not allocate memory for"
3025 " p_UccInitEnetParamShadows.", __func__);
3028 /* Zero out *p_init_enet_param_shadow */
3029 memset((char *)ugeth->p_init_enet_param_shadow,
3030 0, sizeof(struct ucc_geth_init_pram));
3032 /* Fill shadow InitEnet command parameter structure */
3034 ugeth->p_init_enet_param_shadow->resinit1 =
3035 ENET_INIT_PARAM_MAGIC_RES_INIT1;
3036 ugeth->p_init_enet_param_shadow->resinit2 =
3037 ENET_INIT_PARAM_MAGIC_RES_INIT2;
3038 ugeth->p_init_enet_param_shadow->resinit3 =
3039 ENET_INIT_PARAM_MAGIC_RES_INIT3;
3040 ugeth->p_init_enet_param_shadow->resinit4 =
3041 ENET_INIT_PARAM_MAGIC_RES_INIT4;
3042 ugeth->p_init_enet_param_shadow->resinit5 =
3043 ENET_INIT_PARAM_MAGIC_RES_INIT5;
3044 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3045 ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
3046 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3047 ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
3049 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3050 ugeth->rx_glbl_pram_offset | ug_info->riscRx;
3051 if ((ug_info->largestexternallookupkeysize !=
3052 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) &&
3053 (ug_info->largestexternallookupkeysize !=
3054 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) &&
3055 (ug_info->largestexternallookupkeysize !=
3056 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
3057 if (netif_msg_ifup(ugeth))
3058 ugeth_err("%s: Invalid largest External Lookup Key Size.",
3062 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
3063 ug_info->largestexternallookupkeysize;
3064 size = sizeof(struct ucc_geth_thread_rx_pram);
3065 if (ug_info->rxExtendedFiltering) {
3066 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
3067 if (ug_info->largestexternallookupkeysize ==
3068 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3070 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
3071 if (ug_info->largestexternallookupkeysize ==
3072 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
3074 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
3077 if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
3078 p_init_enet_param_shadow->rxthread[0]),
3079 (u8) (numThreadsRxNumerical + 1)
3080 /* Rx needs one extra for terminator */
3081 , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
3082 ug_info->riscRx, 1)) != 0) {
3083 if (netif_msg_ifup(ugeth))
3084 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3089 ugeth->p_init_enet_param_shadow->txglobal =
3090 ugeth->tx_glbl_pram_offset | ug_info->riscTx;
if ((ret_val =
3092 fill_init_enet_entries(ugeth,
3093 &(ugeth->p_init_enet_param_shadow->
3094 txthread[0]), numThreadsTxNumerical,
3095 sizeof(struct ucc_geth_thread_tx_pram),
3096 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
3097 ug_info->riscTx, 0)) != 0) {
3098 if (netif_msg_ifup(ugeth))
3099 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3104 /* Load Rx bds with buffers */
3105 for (i = 0; i < ug_info->numQueuesRx; i++) {
3106 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
3107 if (netif_msg_ifup(ugeth))
3108 ugeth_err("%s: Can not fill Rx bds with buffers.",
3114 /* Allocate InitEnet command parameter structure */
3115 init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
3116 if (IS_ERR_VALUE(init_enet_pram_offset)) {
3117 if (netif_msg_ifup(ugeth))
3119 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
p_init_enet_pram =
3124 (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset);
3126 /* Copy shadow InitEnet command parameter structure into PRAM */
3127 out_8(&p_init_enet_pram->resinit1,
3128 ugeth->p_init_enet_param_shadow->resinit1);
3129 out_8(&p_init_enet_pram->resinit2,
3130 ugeth->p_init_enet_param_shadow->resinit2);
3131 out_8(&p_init_enet_pram->resinit3,
3132 ugeth->p_init_enet_param_shadow->resinit3);
3133 out_8(&p_init_enet_pram->resinit4,
3134 ugeth->p_init_enet_param_shadow->resinit4);
3135 out_be16(&p_init_enet_pram->resinit5,
3136 ugeth->p_init_enet_param_shadow->resinit5);
3137 out_8(&p_init_enet_pram->largestexternallookupkeysize,
3138 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize);
3139 out_be32(&p_init_enet_pram->rgftgfrxglobal,
3140 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
3141 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
3142 out_be32(&p_init_enet_pram->rxthread[i],
3143 ugeth->p_init_enet_param_shadow->rxthread[i]);
3144 out_be32(&p_init_enet_pram->txglobal,
3145 ugeth->p_init_enet_param_shadow->txglobal);
3146 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
3147 out_be32(&p_init_enet_pram->txthread[i],
3148 ugeth->p_init_enet_param_shadow->txthread[i]);
3150 /* Issue QE command */
cecr_subblock =
3152 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
3153 qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
3154 init_enet_pram_offset);
3156 /* Free InitEnet command parameter */
3157 qe_muram_free(init_enet_pram_offset);
3162 /* This is called by the kernel when a frame is ready for transmission. */
3163 /* It is pointed to by the dev->hard_start_xmit function pointer */
3164 static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3166 struct ucc_geth_private *ugeth = netdev_priv(dev);
3167 #ifdef CONFIG_UGETH_TX_ON_DEMAND
3168 struct ucc_fast_private *uccf;
#endif
3170 u8 __iomem *bd; /* BD pointer */
u32 bd_status;
u8 txQ = 0;
3173 unsigned long flags;
3175 ugeth_vdbg("%s: IN", __func__);
3177 spin_lock_irqsave(&ugeth->lock, flags);
3179 dev->stats.tx_bytes += skb->len;
3181 /* Start from the next BD that should be filled */
3182 bd = ugeth->txBd[txQ];
3183 bd_status = in_be32((u32 __iomem *)bd);
3184 /* Save the skb pointer so we can free it later */
3185 ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
3187 /* Update the current skb pointer (wrapping if this was the last) */
3188 ugeth->skb_curtx[txQ] =
3189 (ugeth->skb_curtx[txQ] +
3190 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3192 /* set up the buffer descriptor */
3193 out_be32(&((struct qe_bd __iomem *)bd)->buf,
3194 dma_map_single(ugeth->dev, skb->data,
3195 skb->len, DMA_TO_DEVICE));
3197 /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
3199 bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
3201 /* set bd status and length */
3202 out_be32((u32 __iomem *)bd, bd_status);
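/*
 * A QE buffer descriptor is two 32-bit words: the status/length word
 * written above (flags such as T_R "ready", T_W "wrap", T_I "interrupt"
 * and T_L "last" in the high half, frame length in the low half, hence
 * "| skb->len"), followed by the buffer pointer filled in with the DMA
 * address a few lines earlier.
 */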
3204 /* Move to next BD in the ring */
3205 if (!(bd_status & T_W))
3206 bd += sizeof(struct qe_bd);
3208 bd = ugeth->p_tx_bd_ring[txQ];
3210 /* If the next BD still needs to be cleaned up, then the BDs
3211 are full. We need to tell the kernel to stop sending us frames. */
3212 if (bd == ugeth->confBd[txQ]) {
3213 if (!netif_queue_stopped(dev))
3214 netif_stop_queue(dev);
3217 ugeth->txBd[txQ] = bd;
3219 skb_tx_timestamp(skb);
3221 if (ugeth->p_scheduler) {
3222 ugeth->cpucount[txQ]++;
3223 /* Indicate to QE that there are more Tx bds ready for
3225 /* This is done by writing a running counter of the bd
3226 count to the scheduler PRAM. */
3227 out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
3230 #ifdef CONFIG_UGETH_TX_ON_DEMAND
uccf = ugeth->uccf;
3232 out_be16(uccf->p_utodr, UCC_FAST_TOD);
#endif
3234 spin_unlock_irqrestore(&ugeth->lock, flags);
3236 return NETDEV_TX_OK;
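/*
 * Rx path: ucc_geth_rx() below runs from NAPI context. It walks the Rx BD
 * ring for one queue until it hits an empty (R_E) descriptor or exhausts
 * the work limit, drops frames with fatal error bits or missing F/L
 * flags, pushes good frames up with netif_receive_skb(), and immediately
 * rearms each consumed descriptor with a fresh skb from get_new_skb().
 */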
3239 static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
3241 struct sk_buff *skb;
u8 __iomem *bd;
3243 u16 length, howmany = 0;
u32 bd_status;
u8 *bdBuffer;
3246 struct net_device *dev;
3248 ugeth_vdbg("%s: IN", __func__);
dev = ugeth->ndev;
3252 /* collect received buffers */
3253 bd = ugeth->rxBd[rxQ];
3255 bd_status = in_be32((u32 __iomem *)bd);
3257 /* while there are received buffers and BD is full (~R_E) */
3258 while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
3259 bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf);
3260 length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
3261 skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
3263 /* determine whether buffer is first, last, first and last
3264 (single buffer frame) or middle (not first and not last) */
if (!skb ||
3266 (!(bd_status & (R_F | R_L))) ||
3267 (bd_status & R_ERRORS_FATAL)) {
3268 if (netif_msg_rx_err(ugeth))
3269 ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
3270 __func__, __LINE__, (u32) skb);
3273 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
3274 dev->stats.rx_dropped++;
3276 dev->stats.rx_packets++;
3279 /* Prep the skb for the packet */
3280 skb_put(skb, length);
3282 /* Tell the skb what kind of packet this is */
3283 skb->protocol = eth_type_trans(skb, ugeth->ndev);
3285 dev->stats.rx_bytes += length;
3286 /* Send the packet up the stack */
3287 netif_receive_skb(skb);
3290 skb = get_new_skb(ugeth, bd);
3292 if (netif_msg_rx_err(ugeth))
3293 ugeth_warn("%s: No Rx Data Buffer", __func__);
3294 dev->stats.rx_dropped++;
3298 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
3300 /* update to point at the next skb */
3301 ugeth->skb_currx[rxQ] =
3302 (ugeth->skb_currx[rxQ] +
3303 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
3305 if (bd_status & R_W)
3306 bd = ugeth->p_rx_bd_ring[rxQ];
3308 bd += sizeof(struct qe_bd);
3310 bd_status = in_be32((u32 __iomem *)bd);
3313 ugeth->rxBd[rxQ] = bd;
3317 static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3319 /* Start from the next BD that should be filled */
3320 struct ucc_geth_private *ugeth = netdev_priv(dev);
3321 u8 __iomem *bd; /* BD pointer */
3324 bd = ugeth->confBd[txQ];
3325 bd_status = in_be32((u32 __iomem *)bd);
3327 /* Normal processing. */
3328 while ((bd_status & T_R) == 0) {
3329 struct sk_buff *skb;
3331 /* BD contains already transmitted buffer. */
3332 /* Handle the transmitted buffer and release */
3333 /* the BD to be used with the current frame */
3335 skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
3339 dev->stats.tx_packets++;
3343 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
3344 ugeth->skb_dirtytx[txQ] =
3345 (ugeth->skb_dirtytx[txQ] +
3346 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3348 /* We freed a buffer, so now we can restart transmission */
3349 if (netif_queue_stopped(dev))
3350 netif_wake_queue(dev);
3352 /* Advance the confirmation BD pointer */
3353 if (!(bd_status & T_W))
3354 bd += sizeof(struct qe_bd);
3356 bd = ugeth->p_tx_bd_ring[txQ];
3357 bd_status = in_be32((u32 __iomem *)bd);
3359 ugeth->confBd[txQ] = bd;
3363 static int ucc_geth_poll(struct napi_struct *napi, int budget)
3365 struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi);
3366 struct ucc_geth_info *ug_info;
3369 ug_info = ugeth->ug_info;
3371 /* Tx event processing */
3372 spin_lock(&ugeth->lock);
3373 for (i = 0; i < ug_info->numQueuesTx; i++)
3374 ucc_geth_tx(ugeth->ndev, i);
3375 spin_unlock(&ugeth->lock);
3378 for (i = 0; i < ug_info->numQueuesRx; i++)
3379 howmany += ucc_geth_rx(ugeth, i, budget - howmany);
3381 if (howmany < budget) {
3382 napi_complete(napi);
3383 setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
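/*
 * Classic NAPI handshake: the hard interrupt handler masks the RX/TX
 * event bits in UCCM before scheduling the poll, and they are only
 * unmasked again here once a poll round finishes under budget, so no
 * further RX/TX interrupts fire while polling is in progress.
 */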
3389 static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
3391 struct net_device *dev = info;
3392 struct ucc_geth_private *ugeth = netdev_priv(dev);
3393 struct ucc_fast_private *uccf;
3394 struct ucc_geth_info *ug_info;
u32 ucce, uccm;
3398 ugeth_vdbg("%s: IN", __func__);
uccf = ugeth->uccf;
3401 ug_info = ugeth->ug_info;
3403 /* read and clear events */
3404 ucce = (u32) in_be32(uccf->p_ucce);
3405 uccm = (u32) in_be32(uccf->p_uccm);
3407 out_be32(uccf->p_ucce, ucce);
3409 /* check for receive events that require processing */
3410 if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) {
3411 if (napi_schedule_prep(&ugeth->napi)) {
3412 uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS);
3413 out_be32(uccf->p_uccm, uccm);
3414 __napi_schedule(&ugeth->napi);
3418 /* Errors and other events */
3419 if (ucce & UCCE_OTHER) {
3420 if (ucce & UCC_GETH_UCCE_BSY)
3421 dev->stats.rx_errors++;
3422 if (ucce & UCC_GETH_UCCE_TXE)
3423 dev->stats.tx_errors++;
3429 #ifdef CONFIG_NET_POLL_CONTROLLER
3431 * Polling 'interrupt' - used by things like netconsole to send skbs
3432 * without having to re-enable interrupts. It's not called while
3433 * the interrupt routine is executing.
3435 static void ucc_netpoll(struct net_device *dev)
3437 struct ucc_geth_private *ugeth = netdev_priv(dev);
3438 int irq = ugeth->ug_info->uf_info.irq;
3441 ucc_geth_irq_handler(irq, dev);
3444 #endif /* CONFIG_NET_POLL_CONTROLLER */
3446 static int ucc_geth_set_mac_addr(struct net_device *dev, void *p)
3448 struct ucc_geth_private *ugeth = netdev_priv(dev);
3449 struct sockaddr *addr = p;
3451 if (!is_valid_ether_addr(addr->sa_data))
3452 return -EADDRNOTAVAIL;
3454 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3457 * If device is not running, we will set mac addr register
3458 * when opening the device.
3460 if (!netif_running(dev))
3463 spin_lock_irq(&ugeth->lock);
3464 init_mac_station_addr_regs(dev->dev_addr[0],
3470 &ugeth->ug_regs->macstnaddr1,
3471 &ugeth->ug_regs->macstnaddr2);
3472 spin_unlock_irq(&ugeth->lock);
3477 static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
3479 struct net_device *dev = ugeth->ndev;
3482 err = ucc_struct_init(ugeth);
3484 if (netif_msg_ifup(ugeth))
3485 ugeth_err("%s: Cannot configure internal struct, "
3486 "aborting.", dev->name);
3490 err = ucc_geth_startup(ugeth);
3492 if (netif_msg_ifup(ugeth))
3493 ugeth_err("%s: Cannot configure net device, aborting.",
3498 err = adjust_enet_interface(ugeth);
3500 if (netif_msg_ifup(ugeth))
3501 ugeth_err("%s: Cannot configure net device, aborting.",
3506 /* Set MACSTNADDR1, MACSTNADDR2 */
3507 /* For more details see the hardware spec. */
3508 init_mac_station_addr_regs(dev->dev_addr[0],
3514 &ugeth->ug_regs->macstnaddr1,
3515 &ugeth->ug_regs->macstnaddr2);
3517 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
3519 if (netif_msg_ifup(ugeth))
3520 ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
3526 ucc_geth_stop(ugeth);
3530 /* Called when something needs to use the ethernet device */
3531 /* Returns 0 for success. */
3532 static int ucc_geth_open(struct net_device *dev)
3534 struct ucc_geth_private *ugeth = netdev_priv(dev);
3537 ugeth_vdbg("%s: IN", __func__);
3539 /* Test station address */
3540 if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
3541 if (netif_msg_ifup(ugeth))
3542 ugeth_err("%s: Multicast address used for station "
3543 "address - is this what you wanted?",
3548 err = init_phy(dev);
3550 if (netif_msg_ifup(ugeth))
3551 ugeth_err("%s: Cannot initialize PHY, aborting.",
3556 err = ucc_geth_init_mac(ugeth);
3558 if (netif_msg_ifup(ugeth))
3559 ugeth_err("%s: Cannot initialize MAC, aborting.",
3564 err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler,
3565 0, "UCC Geth", dev);
3567 if (netif_msg_ifup(ugeth))
3568 ugeth_err("%s: Cannot get IRQ for net device, aborting.",
3573 phy_start(ugeth->phydev);
3574 napi_enable(&ugeth->napi);
3575 netif_start_queue(dev);
3577 device_set_wakeup_capable(&dev->dev,
3578 qe_alive_during_sleep() || ugeth->phydev->irq);
3579 device_set_wakeup_enable(&dev->dev, ugeth->wol_en);
3584 ucc_geth_stop(ugeth);
3588 /* Stops the kernel queue, and halts the controller */
3589 static int ucc_geth_close(struct net_device *dev)
3591 struct ucc_geth_private *ugeth = netdev_priv(dev);
3593 ugeth_vdbg("%s: IN", __func__);
3595 napi_disable(&ugeth->napi);
3597 cancel_work_sync(&ugeth->timeout_work);
3598 ucc_geth_stop(ugeth);
3599 phy_disconnect(ugeth->phydev);
3600 ugeth->phydev = NULL;
3602 free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
3604 netif_stop_queue(dev);
3609 /* Reopen device. This will reset the MAC and PHY. */
3610 static void ucc_geth_timeout_work(struct work_struct *work)
3612 struct ucc_geth_private *ugeth;
3613 struct net_device *dev;
3615 ugeth = container_of(work, struct ucc_geth_private, timeout_work);
dev = ugeth->ndev;
3618 ugeth_vdbg("%s: IN", __func__);
3620 dev->stats.tx_errors++;
3622 ugeth_dump_regs(ugeth);
3624 if (dev->flags & IFF_UP) {
3626 * Must reset MAC *and* PHY. This is done by reopening
3629 netif_tx_stop_all_queues(dev);
3630 ucc_geth_stop(ugeth);
3631 ucc_geth_init_mac(ugeth);
3632 /* Must start PHY here */
3633 phy_start(ugeth->phydev);
3634 netif_tx_start_all_queues(dev);
3637 netif_tx_schedule_all(dev);
3641 * ucc_geth_timeout gets called when a packet has not been
3642 * transmitted after a set amount of time.
3644 static void ucc_geth_timeout(struct net_device *dev)
3646 struct ucc_geth_private *ugeth = netdev_priv(dev);
3648 schedule_work(&ugeth->timeout_work);
3654 static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
3656 struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
3657 struct ucc_geth_private *ugeth = netdev_priv(ndev);
3659 if (!netif_running(ndev))
3662 netif_device_detach(ndev);
3663 napi_disable(&ugeth->napi);
3666 * Disable the controller, otherwise we'll wakeup on any network
3669 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
3671 if (ugeth->wol_en & WAKE_MAGIC) {
3672 setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
3673 setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
3674 ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX);
3675 } else if (!(ugeth->wol_en & WAKE_PHY)) {
3676 phy_stop(ugeth->phydev);
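/*
 * Wake-on-LAN handling, as read from the code above: for WAKE_MAGIC the
 * MAC is left running with magic-packet detection enabled (MACCFG2_MPE)
 * and the magic-packet event (UCC_GETH_UCCE_MPD) unmasked; for WAKE_PHY
 * the PHY is kept alive so it can signal link changes; otherwise the PHY
 * is stopped as well. ucc_geth_resume() undoes these steps.
 */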
3682 static int ucc_geth_resume(struct platform_device *ofdev)
3684 struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
3685 struct ucc_geth_private *ugeth = netdev_priv(ndev);
3688 if (!netif_running(ndev))
3691 if (qe_alive_during_sleep()) {
3692 if (ugeth->wol_en & WAKE_MAGIC) {
3693 ucc_fast_disable(ugeth->uccf, COMM_DIR_RX_AND_TX);
3694 clrbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
3695 clrbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
3697 ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
3700 * Full reinitialization is required if QE shuts down
3703 ucc_geth_memclean(ugeth);
3705 err = ucc_geth_init_mac(ugeth);
3707 ugeth_err("%s: Cannot initialize MAC, aborting.",
3714 ugeth->oldspeed = 0;
3715 ugeth->oldduplex = -1;
3717 phy_stop(ugeth->phydev);
3718 phy_start(ugeth->phydev);
3720 napi_enable(&ugeth->napi);
3721 netif_device_attach(ndev);
3727 #define ucc_geth_suspend NULL
3728 #define ucc_geth_resume NULL
3731 static phy_interface_t to_phy_interface(const char *phy_connection_type)
3733 if (strcasecmp(phy_connection_type, "mii") == 0)
3734 return PHY_INTERFACE_MODE_MII;
3735 if (strcasecmp(phy_connection_type, "gmii") == 0)
3736 return PHY_INTERFACE_MODE_GMII;
3737 if (strcasecmp(phy_connection_type, "tbi") == 0)
3738 return PHY_INTERFACE_MODE_TBI;
3739 if (strcasecmp(phy_connection_type, "rmii") == 0)
3740 return PHY_INTERFACE_MODE_RMII;
3741 if (strcasecmp(phy_connection_type, "rgmii") == 0)
3742 return PHY_INTERFACE_MODE_RGMII;
3743 if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
3744 return PHY_INTERFACE_MODE_RGMII_ID;
3745 if (strcasecmp(phy_connection_type, "rgmii-txid") == 0)
3746 return PHY_INTERFACE_MODE_RGMII_TXID;
3747 if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0)
3748 return PHY_INTERFACE_MODE_RGMII_RXID;
3749 if (strcasecmp(phy_connection_type, "rtbi") == 0)
3750 return PHY_INTERFACE_MODE_RTBI;
3751 if (strcasecmp(phy_connection_type, "sgmii") == 0)
3752 return PHY_INTERFACE_MODE_SGMII;
3754 return PHY_INTERFACE_MODE_MII;
3757 static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3759 struct ucc_geth_private *ugeth = netdev_priv(dev);
3761 if (!netif_running(dev))
3767 return phy_mii_ioctl(ugeth->phydev, rq, cmd);
3770 static const struct net_device_ops ucc_geth_netdev_ops = {
3771 .ndo_open = ucc_geth_open,
3772 .ndo_stop = ucc_geth_close,
3773 .ndo_start_xmit = ucc_geth_start_xmit,
3774 .ndo_validate_addr = eth_validate_addr,
3775 .ndo_set_mac_address = ucc_geth_set_mac_addr,
3776 .ndo_change_mtu = eth_change_mtu,
3777 .ndo_set_rx_mode = ucc_geth_set_multi,
3778 .ndo_tx_timeout = ucc_geth_timeout,
3779 .ndo_do_ioctl = ucc_geth_ioctl,
3780 #ifdef CONFIG_NET_POLL_CONTROLLER
3781 .ndo_poll_controller = ucc_netpoll,
3785 static int ucc_geth_probe(struct platform_device* ofdev)
3787 struct device *device = &ofdev->dev;
3788 struct device_node *np = ofdev->dev.of_node;
3789 struct net_device *dev = NULL;
3790 struct ucc_geth_private *ugeth = NULL;
3791 struct ucc_geth_info *ug_info;
3792 struct resource res;
3793 int err, ucc_num, max_speed = 0;
3794 const unsigned int *prop;
3796 const void *mac_addr;
3797 phy_interface_t phy_interface;
3798 static const int enet_to_speed[] = {
3799 SPEED_10, SPEED_10, SPEED_10,
3800 SPEED_100, SPEED_100, SPEED_100,
3801 SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
3803 static const phy_interface_t enet_to_phy_interface[] = {
3804 PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
3805 PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
3806 PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
3807 PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
3808 PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
3809 PHY_INTERFACE_MODE_SGMII,
3812 ugeth_vdbg("%s: IN", __func__);
3814 prop = of_get_property(np, "cell-index", NULL);
3816 prop = of_get_property(np, "device-id", NULL);
3821 ucc_num = *prop - 1;
3822 if ((ucc_num < 0) || (ucc_num > 7))
3825 ug_info = &ugeth_info[ucc_num];
3826 if (ug_info == NULL) {
3827 if (netif_msg_probe(&debug))
3828 ugeth_err("%s: [%d] Missing additional data!",
3833 ug_info->uf_info.ucc_num = ucc_num;
3835 sprop = of_get_property(np, "rx-clock-name", NULL);
3837 ug_info->uf_info.rx_clock = qe_clock_source(sprop);
3838 if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) ||
3839 (ug_info->uf_info.rx_clock > QE_CLK24)) {
3841 "ucc_geth: invalid rx-clock-name property\n");
3845 prop = of_get_property(np, "rx-clock", NULL);
3847 /* If both rx-clock-name and rx-clock are missing,
3848 we want to tell people to use rx-clock-name. */
3850 "ucc_geth: missing rx-clock-name property\n");
3853 if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
3855 "ucc_geth: invalid rx-clock propperty\n");
3858 ug_info->uf_info.rx_clock = *prop;
3861 sprop = of_get_property(np, "tx-clock-name", NULL);
3863 ug_info->uf_info.tx_clock = qe_clock_source(sprop);
3864 if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) ||
3865 (ug_info->uf_info.tx_clock > QE_CLK24)) {
3867 "ucc_geth: invalid tx-clock-name property\n");
3871 prop = of_get_property(np, "tx-clock", NULL);
3874 "ucc_geth: missing tx-clock-name property\n");
3877 if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
3879 "ucc_geth: invalid tx-clock property\n");
3882 ug_info->uf_info.tx_clock = *prop;
3885 err = of_address_to_resource(np, 0, &res);
3889 ug_info->uf_info.regs = res.start;
3890 ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
3892 ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
3894 /* Find the TBI PHY node. If it's not there, we don't support SGMII */
3895 ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
3897 /* get the phy interface type, or default to MII */
3898 prop = of_get_property(np, "phy-connection-type", NULL);
3900 /* handle interface property present in old trees */
3901 prop = of_get_property(ug_info->phy_node, "interface", NULL);
3903 phy_interface = enet_to_phy_interface[*prop];
3904 max_speed = enet_to_speed[*prop];
3906 phy_interface = PHY_INTERFACE_MODE_MII;
3908 phy_interface = to_phy_interface((const char *)prop);
3911 /* get speed, or derive from PHY interface */
3913 switch (phy_interface) {
3914 case PHY_INTERFACE_MODE_GMII:
3915 case PHY_INTERFACE_MODE_RGMII:
3916 case PHY_INTERFACE_MODE_RGMII_ID:
3917 case PHY_INTERFACE_MODE_RGMII_RXID:
3918 case PHY_INTERFACE_MODE_RGMII_TXID:
3919 case PHY_INTERFACE_MODE_TBI:
3920 case PHY_INTERFACE_MODE_RTBI:
3921 case PHY_INTERFACE_MODE_SGMII:
3922 max_speed = SPEED_1000;
3925 max_speed = SPEED_100;
3929 if (max_speed == SPEED_1000) {
3930 unsigned int snums = qe_get_num_of_snums();
3932 /* configure muram FIFOs for gigabit operation */
3933 ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
3934 ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
3935 ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
3936 ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT;
3937 ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT;
3938 ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
3939 ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4;
3941 /* If the QE provides 46 or 76 snums, we need to support
3942 * 4 UECs at 1000Base-T simultaneously, so allocate
3943 * more threads to Rx.
3945 if ((snums == 76) || (snums == 46))
3946 ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6;
3948 ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
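/*
 * Summary of the gigabit adjustments above: the *_GIGA_INIT FIFO
 * thresholds replace the defaults, Tx gets four threads, and Rx gets
 * four or six threads depending on how many snum resources this QE
 * provides.
 */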
3951 if (netif_msg_probe(&debug))
3952 printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
3953 ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
3954 ug_info->uf_info.irq);
3956 /* Create an ethernet device instance */
3957 dev = alloc_etherdev(sizeof(*ugeth));
3962 ugeth = netdev_priv(dev);
3963 spin_lock_init(&ugeth->lock);
3965 /* Create CQs for hash tables */
3966 INIT_LIST_HEAD(&ugeth->group_hash_q);
3967 INIT_LIST_HEAD(&ugeth->ind_hash_q);
3969 dev_set_drvdata(device, dev);
3971 /* Set the dev->base_addr to the UCC register region */
3972 dev->base_addr = (unsigned long)(ug_info->uf_info.regs);
3974 SET_NETDEV_DEV(dev, device);
3976 /* Fill in the dev structure */
3977 uec_set_ethtool_ops(dev);
3978 dev->netdev_ops = &ucc_geth_netdev_ops;
3979 dev->watchdog_timeo = TX_TIMEOUT;
3980 INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
3981 netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
3984 ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
3985 ugeth->phy_interface = phy_interface;
3986 ugeth->max_speed = max_speed;
3988 err = register_netdev(dev);
3990 if (netif_msg_probe(ugeth))
3991 ugeth_err("%s: Cannot register net device, aborting.",
3997 mac_addr = of_get_mac_address(np);
3999 memcpy(dev->dev_addr, mac_addr, 6);
4001 ugeth->ug_info = ug_info;
4002 ugeth->dev = device;
4009 static int ucc_geth_remove(struct platform_device* ofdev)
4011 struct device *device = &ofdev->dev;
4012 struct net_device *dev = dev_get_drvdata(device);
4013 struct ucc_geth_private *ugeth = netdev_priv(dev);
4015 unregister_netdev(dev);
4017 ucc_geth_memclean(ugeth);
4018 dev_set_drvdata(device, NULL);
4023 static struct of_device_id ucc_geth_match[] = {
4026 .compatible = "ucc_geth",
4031 MODULE_DEVICE_TABLE(of, ucc_geth_match);
4033 static struct platform_driver ucc_geth_driver = {
4036 .owner = THIS_MODULE,
4037 .of_match_table = ucc_geth_match,
4039 .probe = ucc_geth_probe,
4040 .remove = ucc_geth_remove,
4041 .suspend = ucc_geth_suspend,
4042 .resume = ucc_geth_resume,
4045 static int __init ucc_geth_init(void)
4049 if (netif_msg_drv(&debug))
4050 printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
4051 for (i = 0; i < 8; i++)
4052 memcpy(&(ugeth_info[i]), &ugeth_primary_info,
4053 sizeof(ugeth_primary_info));
4055 ret = platform_driver_register(&ucc_geth_driver);
4060 static void __exit ucc_geth_exit(void)
4062 platform_driver_unregister(&ucc_geth_driver);
4065 module_init(ucc_geth_init);
4066 module_exit(ucc_geth_exit);
4068 MODULE_AUTHOR("Freescale Semiconductor, Inc");
4069 MODULE_DESCRIPTION(DRV_DESC);
4070 MODULE_VERSION(DRV_VERSION);
4071 MODULE_LICENSE("GPL");