1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 ************************************************************************/
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
58 #include <linux/if_vlan.h>
60 #include <asm/system.h>
61 #include <asm/uaccess.h>
66 #include "s2io-regs.h"
68 /* S2io Driver name & version. */
/* Driver identification strings (see the banner comment at the top of the
 * file); presumably reported through ethtool — confirm against the
 * get_drvinfo handler, which is not visible here. */
69 static char s2io_driver_name[] = "Neterion";
70 static char s2io_driver_version[] = "Version 1.7.7";
/* Non-zero when this Rx descriptor has been handed back by the hardware:
 * the XENA ownership bit is clear and the driver's marker is no longer in
 * Control_2.
 * NOTE(review): the opening brace, 'ret' declaration and return statement
 * are elided in this dump — the body is incomplete as shown. */
72 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
76 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
77 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
/*
 * Cards with the following subsystem IDs have a link-state indication
 * problem: 600B, 600C, 600D, 640B, 640C and 640D (Xframe I only).
 * Evaluates to 1 for such a card, 0 otherwise.
 * Both arguments and the whole expansion are parenthesized so the macro
 * composes safely inside larger expressions (the old form left
 * 'dev_type' and the outer ?: unparenthesized).
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
	(((dev_type) == XFRAME_I_DEVICE) ? \
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)
/* True when the adapter status reports neither a remote nor a local RMAC
 * fault.  The argument is parenthesized so an expression argument
 * (e.g. a | b) cannot mis-associate with the bitwise-AND. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically tests and sets bit 0 of sp->tasklet_status; evaluates non-zero
 * if the tasklet was already marked in use.  Relies on a variable named
 * 'sp' (nic_t *) being in scope at every expansion site. */
94 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* Classifies the fill level of receive ring @ring given @rxb_size
 * descriptors currently outstanding.  Presumably returns an urgency code
 * to the refill path — the return statements are elided in this dump, so
 * the exact values cannot be confirmed here. */
97 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
100 mac_info_t *mac_control;
102 mac_control = &sp->mac_control;
/* More than 16 descriptors short of the ring's packet capacity. */
103 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
105 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
113 /* Ethtool related variables and Macros. */
/* Names of the ethtool self-tests (ETH_SS_TEST).  Order presumably matches
 * the order the tests are run in — confirm against the self-test code,
 * which is not visible here.
 * NOTE(review): the closing brace of this array is elided in this dump. */
114 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
115 "Register test\t(offline)",
116 "Eeprom test\t(offline)",
117 "Link test\t(online)",
118 "RLDRAM test\t(offline)",
119 "BIST Test\t(offline)"
/* Names of the ethtool statistics (ETH_SS_STATS).  The order presumably
 * mirrors the layout of the hardware statistics block plus the
 * driver-maintained counters after the "DRIVER STATISTICS" separator —
 * confirm against the get_ethtool_stats handler (not visible here).
 * NOTE(review): several entries and the closing brace are elided. */
122 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
124 {"tmac_data_octets"},
128 {"tmac_pause_ctrl_frms"},
129 {"tmac_any_err_frms"},
130 {"tmac_vld_ip_octets"},
138 {"rmac_data_octets"},
139 {"rmac_fcs_err_frms"},
141 {"rmac_vld_mcst_frms"},
142 {"rmac_vld_bcst_frms"},
143 {"rmac_in_rng_len_err_frms"},
145 {"rmac_pause_ctrl_frms"},
146 {"rmac_discarded_frms"},
147 {"rmac_usized_frms"},
148 {"rmac_osized_frms"},
150 {"rmac_jabber_frms"},
158 {"rmac_err_drp_udp"},
160 {"rmac_accepted_ip"},
162 {"\n DRIVER STATISTICS"},
163 {"single_bit_ecc_errs"},
164 {"double_bit_ecc_errs"},
/* Counts of ethtool statistics / self-test entries and the byte sizes of
 * their string tables.  Each expansion is fully parenthesized: the old
 * forms expanded to bare 'a / b' and 'a * b', which mis-associate when the
 * macro is used inside a larger arithmetic expression. */
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
/*
 * Initialize @timer to invoke @handle(@arg) and arm it to expire @exp
 * jiffies from now.  Wrapped in do { } while (0) so the macro behaves as a
 * single statement after if/else: the old multi-statement expansion only
 * guarded init_timer() under a bare 'if'.  Callers still terminate the
 * invocation with a semicolon, as before.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
	do {							\
		init_timer(&timer);				\
		timer.function = handle;			\
		timer.data = (unsigned long)(arg);		\
		mod_timer(&timer, (jiffies + (exp)));		\
	} while (0)
/* VLAN acceleration hook: records the new vlan_group for this adapter.
 * Runs under tx_lock so the transmit path never observes a half-updated
 * group pointer.
 * NOTE(review): the assignment (presumably nic->vlgrp = grp) and the
 * 'flags' declaration are elided in this dump — verify against the full
 * source. */
180 static void s2io_vlan_rx_register(struct net_device *dev,
181 struct vlan_group *grp)
183 nic_t *nic = dev->priv;
186 spin_lock_irqsave(&nic->tx_lock, flags);
188 spin_unlock_irqrestore(&nic->tx_lock, flags);
191 /* Unregister the vlan */
/* Clears the vlan_devices entry for @vid under tx_lock so the transmit
 * path stops seeing the VLAN device.
 * NOTE(review): nic->vlgrp is dereferenced without a NULL check — confirm
 * the networking core never invokes this hook before rx_register has set
 * the group.  The 'flags' declaration is elided in this dump. */
192 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
194 nic_t *nic = dev->priv;
197 spin_lock_irqsave(&nic->tx_lock, flags);
199 nic->vlgrp->vlan_devices[vid] = NULL;
200 spin_unlock_irqrestore(&nic->tx_lock, flags);
204 * Constants to be programmed into the Xena's registers, to configure
/* Sentinel value inside the DTX/MDIO configuration tables: when the writer
 * loop encounters it, it switches to writing the other register (see the
 * XAUI configuration comment in init_nic()). */
208 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
/* DTX configuration write sequence for the Herc (Xframe II) device,
 * consumed by init_nic() until END_SIGN is reached.
 * NOTE(review): the END_SIGN terminator and closing brace are elided in
 * this dump. */
211 static u64 herc_act_dtx_cfg[] = {
213 0x80000515BA750000ULL, 0x80000515BA7500E0ULL,
215 0x80000515BA750004ULL, 0x80000515BA7500E4ULL,
217 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
219 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
221 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
223 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
/* MDIO configuration write sequence for the Xena (Xframe I) device,
 * written to mdio_control by init_nic() until END_SIGN.
 * NOTE(review): terminator and closing brace elided in this dump. */
228 static u64 xena_mdio_cfg[] = {
230 0xC001010000000000ULL, 0xC0010100000000E0ULL,
231 0xC0010100008000E4ULL,
232 /* Remove Reset from PMA PLL */
233 0xC001010000000000ULL, 0xC0010100000000E0ULL,
234 0xC0010100000000E4ULL,
/* DTX configuration write sequence for the Xena (Xframe I) device,
 * written to dtx_control by init_nic(); SWITCH_SIGN entries (not visible
 * here) flip writing over to xena_mdio_cfg, and END_SIGN terminates.
 * NOTE(review): terminator and closing brace elided in this dump. */
238 static u64 xena_dtx_cfg[] = {
239 0x8000051500000000ULL, 0x80000515000000E0ULL,
240 0x80000515D93500E4ULL, 0x8001051500000000ULL,
241 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
242 0x8002051500000000ULL, 0x80020515000000E0ULL,
243 0x80020515F21000E4ULL,
244 /* Set PADLOOPBACKN */
245 0x8002051500000000ULL, 0x80020515000000E0ULL,
246 0x80020515B20000E4ULL, 0x8003051500000000ULL,
247 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
248 0x8004051500000000ULL, 0x80040515000000E0ULL,
249 0x80040515B20000E4ULL, 0x8005051500000000ULL,
250 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
252 /* Remove PADLOOPBACKN */
253 0x8002051500000000ULL, 0x80020515000000E0ULL,
254 0x80020515F20000E4ULL, 0x8003051500000000ULL,
255 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
256 0x8004051500000000ULL, 0x80040515000000E0ULL,
257 0x80040515F20000E4ULL, 0x8005051500000000ULL,
258 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
263 * Constants for Fixing the MacAddress problem seen mostly on
/* Register write sequence applied to work around the MAC-address problem
 * described in the comment above.
 * NOTE(review): the terminator and closing brace are elided in this dump. */
266 static u64 fix_mac[] = {
267 0x0060000000000000ULL, 0x0060600000000000ULL,
268 0x0040600000000000ULL, 0x0000600000000000ULL,
269 0x0020600000000000ULL, 0x0060600000000000ULL,
270 0x0020600000000000ULL, 0x0060600000000000ULL,
271 0x0020600000000000ULL, 0x0060600000000000ULL,
272 0x0020600000000000ULL, 0x0060600000000000ULL,
273 0x0020600000000000ULL, 0x0060600000000000ULL,
274 0x0020600000000000ULL, 0x0060600000000000ULL,
275 0x0020600000000000ULL, 0x0060600000000000ULL,
276 0x0020600000000000ULL, 0x0060600000000000ULL,
277 0x0020600000000000ULL, 0x0060600000000000ULL,
278 0x0020600000000000ULL, 0x0060600000000000ULL,
279 0x0020600000000000ULL, 0x0000600000000000ULL,
280 0x0040600000000000ULL, 0x0060600000000000ULL,
284 /* Module Loadable parameters. */
/* See the banner comment at the top of the file for the meaning of the
 * FIFO/ring parameters.  A value of 0 in the per-FIFO/per-ring arrays is
 * presumably replaced with a default during initialization — TODO confirm
 * (the init code that consumes these is not all visible here). */
285 static unsigned int tx_fifo_num = 1;
286 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
287 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
288 static unsigned int rx_ring_num = 1;
289 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
290 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
291 static unsigned int rts_frm_len[MAX_RX_RINGS] =
292 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
293 static unsigned int use_continuous_tx_intrs = 1;
294 static unsigned int rmac_pause_time = 65535;
295 static unsigned int mc_pause_threshold_q0q3 = 187;
296 static unsigned int mc_pause_threshold_q4q7 = 187;
297 static unsigned int shared_splits;
/* tmac/rmac_util_period feed the link-utilization sampling rate programmed
 * in init_nic() (mac_link_util register). */
298 static unsigned int tmac_util_period = 5;
299 static unsigned int rmac_util_period = 5;
300 #ifndef CONFIG_S2IO_NAPI
/* Only meaningful without NAPI; the matching #endif is elided in this dump. */
301 static unsigned int indicate_max_pkts;
306 * This table lists all the devices that this driver supports.
/* PCI IDs claimed by this driver (Xframe I WIN/UNI and Herc WIN/UNI).
 * NOTE(review): the terminating all-zero entry appears to be elided in
 * this dump — the real table must end with one. */
308 static struct pci_device_id s2io_tbl[] __devinitdata = {
309 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
310 PCI_ANY_ID, PCI_ANY_ID},
311 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
312 PCI_ANY_ID, PCI_ANY_ID},
313 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
314 PCI_ANY_ID, PCI_ANY_ID},
315 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
316 PCI_ANY_ID, PCI_ANY_ID},
320 MODULE_DEVICE_TABLE(pci, s2io_tbl);
/* PCI driver glue: probe/remove entry points for every ID in s2io_tbl.
 * NOTE(review): the .name initializer and closing brace are elided in
 * this dump. */
322 static struct pci_driver s2io_driver = {
324 .id_table = s2io_tbl,
325 .probe = s2io_init_nic,
326 .remove = __devexit_p(s2io_rem_nic),
329 /* A simplifier macro used both by init and free shared_mem Fns(). */
/* Number of whole pages needed to hold @len descriptors at @per_each
 * descriptors per page (ceiling division).  Used by both init and free of
 * the shared memory.  Arguments are parenthesized so expression arguments
 * expand correctly (the old form broke for e.g. a '+'-expression in
 * per_each). */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
333 * init_shared_mem - Allocation and Initialization of Memory
334 * @nic: Device private variable.
335 * Description: The function allocates all the memory areas shared
336 * between the NIC and the driver. This includes Tx descriptors,
337 * Rx descriptors and the statistics block.
/*
 * init_shared_mem - allocate and initialize all memory shared between the
 * NIC and the driver: per-FIFO Tx descriptor-list pages, per-ring Rx
 * descriptor blocks (circularly interlinked), optional 2-buffer-mode
 * address storage, and the statistics block.
 * On any allocation failure the caller is expected to invoke
 * free_shared_mem() to release whatever was allocated (per the inline
 * comments below).
 * NOTE(review): this dump elides many lines (error returns, some braces,
 * #else/#endif branches, several declarations) — the body is incomplete
 * as shown.
 */
340 static int init_shared_mem(struct s2io_nic *nic)
343 void *tmp_v_addr, *tmp_v_addr_next;
344 dma_addr_t tmp_p_addr, tmp_p_addr_next;
345 RxD_block_t *pre_rxd_blk = NULL;
346 int i, j, blk_cnt, rx_sz, tx_sz;
347 int lst_size, lst_per_page;
348 struct net_device *dev = nic->dev;
349 #ifdef CONFIG_2BUFF_MODE
354 mac_info_t *mac_control;
355 struct config_param *config;
357 mac_control = &nic->mac_control;
358 config = &nic->config;
361 /* Allocation and initialization of TXDLs in FIFOs */
/* Total TxDs requested across all FIFOs must not exceed the hardware limit. */
363 for (i = 0; i < config->tx_fifo_num; i++) {
364 size += config->tx_cfg[i].fifo_len;
366 if (size > MAX_AVAILABLE_TXDS) {
367 DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
369 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
370 DBG_PRINT(ERR_DBG, "that can be used\n");
374 lst_size = (sizeof(TxD_t) * config->max_txds);
375 tx_sz = lst_size * size;
376 lst_per_page = PAGE_SIZE / lst_size;
/* Per-FIFO bookkeeping array: one list_info_hold_t per descriptor list. */
378 for (i = 0; i < config->tx_fifo_num; i++) {
379 int fifo_len = config->tx_cfg[i].fifo_len;
380 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
381 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
383 if (!mac_control->fifos[i].list_info) {
385 "Malloc failed for list_info\n");
388 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
/* Carve each FIFO's descriptor lists out of DMA-consistent pages. */
390 for (i = 0; i < config->tx_fifo_num; i++) {
391 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
393 mac_control->fifos[i].tx_curr_put_info.offset = 0;
394 mac_control->fifos[i].tx_curr_put_info.fifo_len =
395 config->tx_cfg[i].fifo_len - 1;
396 mac_control->fifos[i].tx_curr_get_info.offset = 0;
397 mac_control->fifos[i].tx_curr_get_info.fifo_len =
398 config->tx_cfg[i].fifo_len - 1;
399 mac_control->fifos[i].fifo_no = i;
400 mac_control->fifos[i].nic = nic;
401 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
403 for (j = 0; j < page_num; j++) {
407 tmp_v = pci_alloc_consistent(nic->pdev,
411 "pci_alloc_consistent ");
412 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
/* Record virtual/physical address of every list within the page. */
415 while (k < lst_per_page) {
416 int l = (j * lst_per_page) + k;
417 if (l == config->tx_cfg[i].fifo_len)
419 mac_control->fifos[i].list_info[l].list_virt_addr =
420 tmp_v + (k * lst_size);
421 mac_control->fifos[i].list_info[l].list_phy_addr =
422 tmp_p + (k * lst_size);
428 /* Allocation and initialization of RXDs in Rings */
430 for (i = 0; i < config->rx_ring_num; i++) {
431 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
432 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
433 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
435 DBG_PRINT(ERR_DBG, "RxDs per Block");
438 size += config->rx_cfg[i].num_rxd;
439 mac_control->rings[i].block_count =
440 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
/* One RxD per block is sacrificed to the block-link descriptor. */
441 mac_control->rings[i].pkt_cnt =
442 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
444 size = (size * (sizeof(RxD_t)));
447 for (i = 0; i < config->rx_ring_num; i++) {
448 mac_control->rings[i].rx_curr_get_info.block_index = 0;
449 mac_control->rings[i].rx_curr_get_info.offset = 0;
450 mac_control->rings[i].rx_curr_get_info.ring_len =
451 config->rx_cfg[i].num_rxd - 1;
452 mac_control->rings[i].rx_curr_put_info.block_index = 0;
453 mac_control->rings[i].rx_curr_put_info.offset = 0;
454 mac_control->rings[i].rx_curr_put_info.ring_len =
455 config->rx_cfg[i].num_rxd - 1;
456 mac_control->rings[i].nic = nic;
457 mac_control->rings[i].ring_no = i;
460 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
461 /* Allocating all the Rx blocks */
462 for (j = 0; j < blk_cnt; j++) {
463 #ifndef CONFIG_2BUFF_MODE
464 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
466 size = SIZE_OF_BLOCK;
468 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
470 if (tmp_v_addr == NULL) {
472 * In case of failure, free_shared_mem()
473 * is called, which should free any
474 * memory that was alloced till the
477 mac_control->rings[i].rx_blocks[j].block_virt_addr =
481 memset(tmp_v_addr, 0, size);
482 mac_control->rings[i].rx_blocks[j].block_virt_addr =
484 mac_control->rings[i].rx_blocks[j].block_dma_addr =
487 /* Interlinking all Rx Blocks */
488 for (j = 0; j < blk_cnt; j++) {
490 mac_control->rings[i].rx_blocks[j].block_virt_addr;
/* Next pointers wrap around: (j + 1) % blk_cnt makes the ring circular. */
492 mac_control->rings[i].rx_blocks[(j + 1) %
493 blk_cnt].block_virt_addr;
495 mac_control->rings[i].rx_blocks[j].block_dma_addr;
497 mac_control->rings[i].rx_blocks[(j + 1) %
498 blk_cnt].block_dma_addr;
500 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
501 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
504 #ifndef CONFIG_2BUFF_MODE
505 pre_rxd_blk->reserved_2_pNext_RxD_block =
506 (unsigned long) tmp_v_addr_next;
508 pre_rxd_blk->pNext_RxD_Blk_physical =
509 (u64) tmp_p_addr_next;
513 #ifdef CONFIG_2BUFF_MODE
515 * Allocation of Storages for buffer addresses in 2BUFF mode
516 * and the buffers as well.
518 for (i = 0; i < config->rx_ring_num; i++) {
520 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
521 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
523 if (!mac_control->rings[i].ba)
525 for (j = 0; j < blk_cnt; j++) {
527 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
528 (MAX_RXDS_PER_BLOCK + 1)),
530 if (!mac_control->rings[i].ba[j])
532 while (k != MAX_RXDS_PER_BLOCK) {
533 ba = &mac_control->rings[i].ba[j][k];
/* Over-allocate by ALIGN_SIZE so the usable pointer can be aligned. */
535 ba->ba_0_org = (void *) kmalloc
536 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
539 tmp = (u64) ba->ba_0_org;
541 tmp &= ~((u64) ALIGN_SIZE);
542 ba->ba_0 = (void *) tmp;
544 ba->ba_1_org = (void *) kmalloc
545 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
548 tmp = (u64) ba->ba_1_org;
550 tmp &= ~((u64) ALIGN_SIZE);
551 ba->ba_1 = (void *) tmp;
558 /* Allocation and initialization of Statistics block */
559 size = sizeof(StatInfo_t);
560 mac_control->stats_mem = pci_alloc_consistent
561 (nic->pdev, size, &mac_control->stats_mem_phy);
563 if (!mac_control->stats_mem) {
565 * In case of failure, free_shared_mem() is called, which
566 * should free any memory that was alloced till the
571 mac_control->stats_mem_sz = size;
573 tmp_v_addr = mac_control->stats_mem;
574 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
575 memset(tmp_v_addr, 0, size);
576 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
577 (unsigned long long) tmp_p_addr);
583 * free_shared_mem - Free the allocated Memory
584 * @nic: Device private variable.
585 * Description: This function is to free all memory locations allocated by
586 * the init_shared_mem() function and return it to the kernel.
/*
 * free_shared_mem - undo init_shared_mem(): release the Tx descriptor-list
 * pages and per-FIFO list_info arrays, the Rx descriptor blocks, the
 * 2-buffer-mode storage and the statistics block.  Per init_shared_mem()'s
 * comments it is also the cleanup path for a partially failed init, so it
 * tolerates NULL entries.
 * NOTE(review): many lines are elided in this dump — the body is
 * incomplete as shown.
 */
589 static void free_shared_mem(struct s2io_nic *nic)
591 int i, j, blk_cnt, size;
593 dma_addr_t tmp_p_addr;
594 mac_info_t *mac_control;
595 struct config_param *config;
596 int lst_size, lst_per_page;
602 mac_control = &nic->mac_control;
603 config = &nic->config;
605 lst_size = (sizeof(TxD_t) * config->max_txds);
606 lst_per_page = PAGE_SIZE / lst_size;
608 for (i = 0; i < config->tx_fifo_num; i++) {
609 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
611 for (j = 0; j < page_num; j++) {
/* Only the first list of each page holds the page's base address. */
612 int mem_blks = (j * lst_per_page);
613 if (!mac_control->fifos[i].list_info[mem_blks].
616 pci_free_consistent(nic->pdev, PAGE_SIZE,
617 mac_control->fifos[i].
620 mac_control->fifos[i].
624 kfree(mac_control->fifos[i].list_info);
627 #ifndef CONFIG_2BUFF_MODE
628 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
630 size = SIZE_OF_BLOCK;
632 for (i = 0; i < config->rx_ring_num; i++) {
633 blk_cnt = mac_control->rings[i].block_count;
634 for (j = 0; j < blk_cnt; j++) {
635 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
637 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
639 if (tmp_v_addr == NULL)
641 pci_free_consistent(nic->pdev, size,
642 tmp_v_addr, tmp_p_addr);
646 #ifdef CONFIG_2BUFF_MODE
647 /* Freeing buffer storage addresses in 2BUFF mode. */
648 for (i = 0; i < config->rx_ring_num; i++) {
650 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
651 for (j = 0; j < blk_cnt; j++) {
653 if (!mac_control->rings[i].ba[j])
655 while (k != MAX_RXDS_PER_BLOCK) {
656 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
661 kfree(mac_control->rings[i].ba[j]);
/* NOTE(review): the NULL guard below is redundant — kfree(NULL) is a no-op. */
663 if (mac_control->rings[i].ba)
664 kfree(mac_control->rings[i].ba);
668 if (mac_control->stats_mem) {
669 pci_free_consistent(nic->pdev,
670 mac_control->stats_mem_sz,
671 mac_control->stats_mem,
672 mac_control->stats_mem_phy);
677 * s2io_verify_pci_mode -
/* Reads the pci_mode register and returns -1 when the hardware reports an
 * unknown PCI mode.
 * NOTE(review): the declaration of 'mode' and the success return path are
 * elided in this dump — presumably the decoded mode is returned; confirm
 * against the full source. */
680 static int s2io_verify_pci_mode(nic_t *nic)
682 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
683 register u64 val64 = 0;
686 val64 = readq(&bar0->pci_mode);
687 mode = (u8)GET_PCI_MODE(val64);
689 if ( val64 & PCI_MODE_UNKNOWN_MODE)
690 return -1; /* Unknown PCI mode */
696 * s2io_print_pci_mode -
/* Decodes the pci_mode register, logs the detected bus width and speed,
 * and records the effective bus speed in config->bus_speed.  Returns -1
 * for an unknown mode or unsupported speed.
 * NOTE(review): the 'mode' declaration, switch header, break statements
 * and closing braces are elided in this dump. */
698 static int s2io_print_pci_mode(nic_t *nic)
700 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
701 register u64 val64 = 0;
703 struct config_param *config = &nic->config;
705 val64 = readq(&bar0->pci_mode);
706 mode = (u8)GET_PCI_MODE(val64);
708 if ( val64 & PCI_MODE_UNKNOWN_MODE)
709 return -1; /* Unknown PCI mode */
711 if (val64 & PCI_MODE_32_BITS) {
712 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
714 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
718 case PCI_MODE_PCI_33:
719 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
720 config->bus_speed = 33;
722 case PCI_MODE_PCI_66:
723 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
/* NOTE(review): 133 here looks inconsistent with the printed 66MHz —
 * confirm whether this is intentional. */
724 config->bus_speed = 133;
726 case PCI_MODE_PCIX_M1_66:
727 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
728 config->bus_speed = 133; /* Herc doubles the clock rate */
730 case PCI_MODE_PCIX_M1_100:
731 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
732 config->bus_speed = 200;
734 case PCI_MODE_PCIX_M1_133:
735 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
736 config->bus_speed = 266;
738 case PCI_MODE_PCIX_M2_66:
739 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
740 config->bus_speed = 133;
742 case PCI_MODE_PCIX_M2_100:
743 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
744 config->bus_speed = 200;
746 case PCI_MODE_PCIX_M2_133:
747 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
748 config->bus_speed = 266;
751 return -1; /* Unsupported bus speed */
758 * init_nic - Initialization of hardware
759 * @nic: device peivate variable
760 * Description: The function sequentially configures every block
761 * of the H/W from their reset values.
762 * Return Value: SUCCESS on success and
763 * '-1' on failure (endian settings incorrect).
766 static int init_nic(struct s2io_nic *nic)
768 XENA_dev_config_t __iomem *bar0 = nic->bar0;
769 struct net_device *dev = nic->dev;
770 register u64 val64 = 0;
774 mac_info_t *mac_control;
775 struct config_param *config;
776 int mdio_cnt = 0, dtx_cnt = 0;
777 unsigned long long mem_share;
780 mac_control = &nic->mac_control;
781 config = &nic->config;
783 /* to set the swapper controle on the card */
784 if(s2io_set_swapper(nic)) {
785 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
790 * Herc requires EOI to be removed from reset before XGXS, so..
792 if (nic->device_type & XFRAME_II_DEVICE) {
793 val64 = 0xA500000000ULL;
794 writeq(val64, &bar0->sw_reset);
796 val64 = readq(&bar0->sw_reset);
799 /* Remove XGXS from reset state */
801 writeq(val64, &bar0->sw_reset);
803 val64 = readq(&bar0->sw_reset);
805 /* Enable Receiving broadcasts */
806 add = &bar0->mac_cfg;
807 val64 = readq(&bar0->mac_cfg);
808 val64 |= MAC_RMAC_BCAST_ENABLE;
809 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
810 writel((u32) val64, add);
811 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
812 writel((u32) (val64 >> 32), (add + 4));
814 /* Read registers in all blocks */
815 val64 = readq(&bar0->mac_int_mask);
816 val64 = readq(&bar0->mc_int_mask);
817 val64 = readq(&bar0->xgxs_int_mask);
821 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
824 * Configuring the XAUI Interface of Xena.
825 * ***************************************
826 * To Configure the Xena's XAUI, one has to write a series
827 * of 64 bit values into two registers in a particular
828 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
829 * which will be defined in the array of configuration values
830 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
831 * to switch writing from one regsiter to another. We continue
832 * writing these values until we encounter the 'END_SIGN' macro.
833 * For example, After making a series of 21 writes into
834 * dtx_control register the 'SWITCH_SIGN' appears and hence we
835 * start writing into mdio_control until we encounter END_SIGN.
837 if (nic->device_type & XFRAME_II_DEVICE) {
838 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
839 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
840 &bar0->dtx_control, UF);
842 msleep(1); /* Necessary!! */
848 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
849 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
853 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
854 &bar0->dtx_control, UF);
855 val64 = readq(&bar0->dtx_control);
859 while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
860 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
864 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
865 &bar0->mdio_control, UF);
866 val64 = readq(&bar0->mdio_control);
869 if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
870 (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
878 /* Tx DMA Initialization */
880 writeq(val64, &bar0->tx_fifo_partition_0);
881 writeq(val64, &bar0->tx_fifo_partition_1);
882 writeq(val64, &bar0->tx_fifo_partition_2);
883 writeq(val64, &bar0->tx_fifo_partition_3);
886 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
888 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
889 13) | vBIT(config->tx_cfg[i].fifo_priority,
892 if (i == (config->tx_fifo_num - 1)) {
899 writeq(val64, &bar0->tx_fifo_partition_0);
903 writeq(val64, &bar0->tx_fifo_partition_1);
907 writeq(val64, &bar0->tx_fifo_partition_2);
911 writeq(val64, &bar0->tx_fifo_partition_3);
916 /* Enable Tx FIFO partition 0. */
917 val64 = readq(&bar0->tx_fifo_partition_0);
918 val64 |= BIT(0); /* To enable the FIFO partition. */
919 writeq(val64, &bar0->tx_fifo_partition_0);
922 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
923 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
925 if ((nic->device_type == XFRAME_I_DEVICE) &&
926 (get_xena_rev_id(nic->pdev) < 4))
927 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
929 val64 = readq(&bar0->tx_fifo_partition_0);
930 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
931 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
934 * Initialization of Tx_PA_CONFIG register to ignore packet
935 * integrity checking.
937 val64 = readq(&bar0->tx_pa_cfg);
938 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
939 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
940 writeq(val64, &bar0->tx_pa_cfg);
942 /* Rx DMA intialization. */
944 for (i = 0; i < config->rx_ring_num; i++) {
946 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
949 writeq(val64, &bar0->rx_queue_priority);
952 * Allocating equal share of memory to all the
956 if (nic->device_type & XFRAME_II_DEVICE)
961 for (i = 0; i < config->rx_ring_num; i++) {
964 mem_share = (mem_size / config->rx_ring_num +
965 mem_size % config->rx_ring_num);
966 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
969 mem_share = (mem_size / config->rx_ring_num);
970 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
973 mem_share = (mem_size / config->rx_ring_num);
974 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
977 mem_share = (mem_size / config->rx_ring_num);
978 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
981 mem_share = (mem_size / config->rx_ring_num);
982 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
985 mem_share = (mem_size / config->rx_ring_num);
986 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
989 mem_share = (mem_size / config->rx_ring_num);
990 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
993 mem_share = (mem_size / config->rx_ring_num);
994 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
998 writeq(val64, &bar0->rx_queue_cfg);
1001 * Filling Tx round robin registers
1002 * as per the number of FIFOs
1004 switch (config->tx_fifo_num) {
1006 val64 = 0x0000000000000000ULL;
1007 writeq(val64, &bar0->tx_w_round_robin_0);
1008 writeq(val64, &bar0->tx_w_round_robin_1);
1009 writeq(val64, &bar0->tx_w_round_robin_2);
1010 writeq(val64, &bar0->tx_w_round_robin_3);
1011 writeq(val64, &bar0->tx_w_round_robin_4);
1014 val64 = 0x0000010000010000ULL;
1015 writeq(val64, &bar0->tx_w_round_robin_0);
1016 val64 = 0x0100000100000100ULL;
1017 writeq(val64, &bar0->tx_w_round_robin_1);
1018 val64 = 0x0001000001000001ULL;
1019 writeq(val64, &bar0->tx_w_round_robin_2);
1020 val64 = 0x0000010000010000ULL;
1021 writeq(val64, &bar0->tx_w_round_robin_3);
1022 val64 = 0x0100000000000000ULL;
1023 writeq(val64, &bar0->tx_w_round_robin_4);
1026 val64 = 0x0001000102000001ULL;
1027 writeq(val64, &bar0->tx_w_round_robin_0);
1028 val64 = 0x0001020000010001ULL;
1029 writeq(val64, &bar0->tx_w_round_robin_1);
1030 val64 = 0x0200000100010200ULL;
1031 writeq(val64, &bar0->tx_w_round_robin_2);
1032 val64 = 0x0001000102000001ULL;
1033 writeq(val64, &bar0->tx_w_round_robin_3);
1034 val64 = 0x0001020000000000ULL;
1035 writeq(val64, &bar0->tx_w_round_robin_4);
1038 val64 = 0x0001020300010200ULL;
1039 writeq(val64, &bar0->tx_w_round_robin_0);
1040 val64 = 0x0100000102030001ULL;
1041 writeq(val64, &bar0->tx_w_round_robin_1);
1042 val64 = 0x0200010000010203ULL;
1043 writeq(val64, &bar0->tx_w_round_robin_2);
1044 val64 = 0x0001020001000001ULL;
1045 writeq(val64, &bar0->tx_w_round_robin_3);
1046 val64 = 0x0203000100000000ULL;
1047 writeq(val64, &bar0->tx_w_round_robin_4);
1050 val64 = 0x0001000203000102ULL;
1051 writeq(val64, &bar0->tx_w_round_robin_0);
1052 val64 = 0x0001020001030004ULL;
1053 writeq(val64, &bar0->tx_w_round_robin_1);
1054 val64 = 0x0001000203000102ULL;
1055 writeq(val64, &bar0->tx_w_round_robin_2);
1056 val64 = 0x0001020001030004ULL;
1057 writeq(val64, &bar0->tx_w_round_robin_3);
1058 val64 = 0x0001000000000000ULL;
1059 writeq(val64, &bar0->tx_w_round_robin_4);
1062 val64 = 0x0001020304000102ULL;
1063 writeq(val64, &bar0->tx_w_round_robin_0);
1064 val64 = 0x0304050001020001ULL;
1065 writeq(val64, &bar0->tx_w_round_robin_1);
1066 val64 = 0x0203000100000102ULL;
1067 writeq(val64, &bar0->tx_w_round_robin_2);
1068 val64 = 0x0304000102030405ULL;
1069 writeq(val64, &bar0->tx_w_round_robin_3);
1070 val64 = 0x0001000200000000ULL;
1071 writeq(val64, &bar0->tx_w_round_robin_4);
1074 val64 = 0x0001020001020300ULL;
1075 writeq(val64, &bar0->tx_w_round_robin_0);
1076 val64 = 0x0102030400010203ULL;
1077 writeq(val64, &bar0->tx_w_round_robin_1);
1078 val64 = 0x0405060001020001ULL;
1079 writeq(val64, &bar0->tx_w_round_robin_2);
1080 val64 = 0x0304050000010200ULL;
1081 writeq(val64, &bar0->tx_w_round_robin_3);
1082 val64 = 0x0102030000000000ULL;
1083 writeq(val64, &bar0->tx_w_round_robin_4);
1086 val64 = 0x0001020300040105ULL;
1087 writeq(val64, &bar0->tx_w_round_robin_0);
1088 val64 = 0x0200030106000204ULL;
1089 writeq(val64, &bar0->tx_w_round_robin_1);
1090 val64 = 0x0103000502010007ULL;
1091 writeq(val64, &bar0->tx_w_round_robin_2);
1092 val64 = 0x0304010002060500ULL;
1093 writeq(val64, &bar0->tx_w_round_robin_3);
1094 val64 = 0x0103020400000000ULL;
1095 writeq(val64, &bar0->tx_w_round_robin_4);
1099 /* Filling the Rx round robin registers as per the
1100 * number of Rings and steering based on QoS.
1102 switch (config->rx_ring_num) {
1104 val64 = 0x8080808080808080ULL;
1105 writeq(val64, &bar0->rts_qos_steering);
1108 val64 = 0x0000010000010000ULL;
1109 writeq(val64, &bar0->rx_w_round_robin_0);
1110 val64 = 0x0100000100000100ULL;
1111 writeq(val64, &bar0->rx_w_round_robin_1);
1112 val64 = 0x0001000001000001ULL;
1113 writeq(val64, &bar0->rx_w_round_robin_2);
1114 val64 = 0x0000010000010000ULL;
1115 writeq(val64, &bar0->rx_w_round_robin_3);
1116 val64 = 0x0100000000000000ULL;
1117 writeq(val64, &bar0->rx_w_round_robin_4);
1119 val64 = 0x8080808040404040ULL;
1120 writeq(val64, &bar0->rts_qos_steering);
1123 val64 = 0x0001000102000001ULL;
1124 writeq(val64, &bar0->rx_w_round_robin_0);
1125 val64 = 0x0001020000010001ULL;
1126 writeq(val64, &bar0->rx_w_round_robin_1);
1127 val64 = 0x0200000100010200ULL;
1128 writeq(val64, &bar0->rx_w_round_robin_2);
1129 val64 = 0x0001000102000001ULL;
1130 writeq(val64, &bar0->rx_w_round_robin_3);
1131 val64 = 0x0001020000000000ULL;
1132 writeq(val64, &bar0->rx_w_round_robin_4);
1134 val64 = 0x8080804040402020ULL;
1135 writeq(val64, &bar0->rts_qos_steering);
1138 val64 = 0x0001020300010200ULL;
1139 writeq(val64, &bar0->rx_w_round_robin_0);
1140 val64 = 0x0100000102030001ULL;
1141 writeq(val64, &bar0->rx_w_round_robin_1);
1142 val64 = 0x0200010000010203ULL;
1143 writeq(val64, &bar0->rx_w_round_robin_2);
1144 val64 = 0x0001020001000001ULL;
1145 writeq(val64, &bar0->rx_w_round_robin_3);
1146 val64 = 0x0203000100000000ULL;
1147 writeq(val64, &bar0->rx_w_round_robin_4);
1149 val64 = 0x8080404020201010ULL;
1150 writeq(val64, &bar0->rts_qos_steering);
1153 val64 = 0x0001000203000102ULL;
1154 writeq(val64, &bar0->rx_w_round_robin_0);
1155 val64 = 0x0001020001030004ULL;
1156 writeq(val64, &bar0->rx_w_round_robin_1);
1157 val64 = 0x0001000203000102ULL;
1158 writeq(val64, &bar0->rx_w_round_robin_2);
1159 val64 = 0x0001020001030004ULL;
1160 writeq(val64, &bar0->rx_w_round_robin_3);
1161 val64 = 0x0001000000000000ULL;
1162 writeq(val64, &bar0->rx_w_round_robin_4);
1164 val64 = 0x8080404020201008ULL;
1165 writeq(val64, &bar0->rts_qos_steering);
1168 val64 = 0x0001020304000102ULL;
1169 writeq(val64, &bar0->rx_w_round_robin_0);
1170 val64 = 0x0304050001020001ULL;
1171 writeq(val64, &bar0->rx_w_round_robin_1);
1172 val64 = 0x0203000100000102ULL;
1173 writeq(val64, &bar0->rx_w_round_robin_2);
1174 val64 = 0x0304000102030405ULL;
1175 writeq(val64, &bar0->rx_w_round_robin_3);
1176 val64 = 0x0001000200000000ULL;
1177 writeq(val64, &bar0->rx_w_round_robin_4);
1179 val64 = 0x8080404020100804ULL;
1180 writeq(val64, &bar0->rts_qos_steering);
1183 val64 = 0x0001020001020300ULL;
1184 writeq(val64, &bar0->rx_w_round_robin_0);
1185 val64 = 0x0102030400010203ULL;
1186 writeq(val64, &bar0->rx_w_round_robin_1);
1187 val64 = 0x0405060001020001ULL;
1188 writeq(val64, &bar0->rx_w_round_robin_2);
1189 val64 = 0x0304050000010200ULL;
1190 writeq(val64, &bar0->rx_w_round_robin_3);
1191 val64 = 0x0102030000000000ULL;
1192 writeq(val64, &bar0->rx_w_round_robin_4);
1194 val64 = 0x8080402010080402ULL;
1195 writeq(val64, &bar0->rts_qos_steering);
1198 val64 = 0x0001020300040105ULL;
1199 writeq(val64, &bar0->rx_w_round_robin_0);
1200 val64 = 0x0200030106000204ULL;
1201 writeq(val64, &bar0->rx_w_round_robin_1);
1202 val64 = 0x0103000502010007ULL;
1203 writeq(val64, &bar0->rx_w_round_robin_2);
1204 val64 = 0x0304010002060500ULL;
1205 writeq(val64, &bar0->rx_w_round_robin_3);
1206 val64 = 0x0103020400000000ULL;
1207 writeq(val64, &bar0->rx_w_round_robin_4);
1209 val64 = 0x8040201008040201ULL;
1210 writeq(val64, &bar0->rts_qos_steering);
1216 for (i = 0; i < 8; i++)
1217 writeq(val64, &bar0->rts_frm_len_n[i]);
1219 /* Set the default rts frame length for the rings configured */
1220 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1221 for (i = 0 ; i < config->rx_ring_num ; i++)
1222 writeq(val64, &bar0->rts_frm_len_n[i]);
1224 /* Set the frame length for the configured rings
1225 * desired by the user
1227 for (i = 0; i < config->rx_ring_num; i++) {
1228 /* If rts_frm_len[i] == 0 then it is assumed that user not
1229 * specified frame length steering.
1230 * If the user provides the frame length then program
1231 * the rts_frm_len register for those values or else
1232 * leave it as it is.
1234 if (rts_frm_len[i] != 0) {
1235 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1236 &bar0->rts_frm_len_n[i]);
1240 /* Program statistics memory */
1241 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1243 if (nic->device_type == XFRAME_II_DEVICE) {
1244 val64 = STAT_BC(0x320);
1245 writeq(val64, &bar0->stat_byte_cnt);
1249 * Initializing the sampling rate for the device to calculate the
1250 * bandwidth utilization.
1252 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1253 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1254 writeq(val64, &bar0->mac_link_util);
1258 * Initializing the Transmit and Receive Traffic Interrupt
1262 * TTI Initialization. Default Tx timer gets us about
1263 * 250 interrupts per sec. Continuous interrupts are enabled
1266 if (nic->device_type == XFRAME_II_DEVICE) {
1267 int count = (nic->config.bus_speed * 125)/2;
1268 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1271 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1273 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1274 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1275 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1276 if (use_continuous_tx_intrs)
1277 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1278 writeq(val64, &bar0->tti_data1_mem);
1280 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1281 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1282 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1283 writeq(val64, &bar0->tti_data2_mem);
1285 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1286 writeq(val64, &bar0->tti_command_mem);
1289 * Once the operation completes, the Strobe bit of the command
1290 * register will be reset. We poll for this particular condition
1291 * We wait for a maximum of 500ms for the operation to complete,
1292 * if it's not complete by then we return error.
1296 val64 = readq(&bar0->tti_command_mem);
1297 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1301 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1310 /* RTI Initialization */
1311 if (nic->device_type == XFRAME_II_DEVICE) {
1313 * Programmed to generate Apprx 500 Intrs per
1316 int count = (nic->config.bus_speed * 125)/4;
1317 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1319 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1321 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1322 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1323 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1325 writeq(val64, &bar0->rti_data1_mem);
1327 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1328 RTI_DATA2_MEM_RX_UFC_B(0x2) |
1329 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1330 writeq(val64, &bar0->rti_data2_mem);
1332 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
1333 writeq(val64, &bar0->rti_command_mem);
1336 * Once the operation completes, the Strobe bit of the
1337 * command register will be reset. We poll for this
1338 * particular condition. We wait for a maximum of 500ms
1339 * for the operation to complete, if it's not complete
1340 * by then we return error.
1344 val64 = readq(&bar0->rti_command_mem);
1345 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1349 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1358 * Initializing proper values as Pause threshold into all
1359 * the 8 Queues on Rx side.
1361 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1362 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1364 /* Disable RMAC PAD STRIPPING */
1365 add = (void *) &bar0->mac_cfg;
1366 val64 = readq(&bar0->mac_cfg);
1367 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1368 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1369 writel((u32) (val64), add);
1370 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1371 writel((u32) (val64 >> 32), (add + 4));
1372 val64 = readq(&bar0->mac_cfg);
1375 * Set the time value to be inserted in the pause frame
1376 * generated by xena.
1378 val64 = readq(&bar0->rmac_pause_cfg);
1379 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1380 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1381 writeq(val64, &bar0->rmac_pause_cfg);
1384 * Set the Threshold Limit for Generating the pause frame
1385 * If the amount of data in any Queue exceeds ratio of
1386 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1387 * pause frame is generated
1390 for (i = 0; i < 4; i++) {
1392 (((u64) 0xFF00 | nic->mac_control.
1393 mc_pause_threshold_q0q3)
1396 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1399 for (i = 0; i < 4; i++) {
1401 (((u64) 0xFF00 | nic->mac_control.
1402 mc_pause_threshold_q4q7)
1405 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1408 * TxDMA will stop Read request if the number of read split has
1409 * exceeded the limit pointed by shared_splits
1411 val64 = readq(&bar0->pic_control);
1412 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1413 writeq(val64, &bar0->pic_control);
1416 * Programming the Herc to split every write transaction
1417 * that does not start on an ADB to reduce disconnects.
1419 if (nic->device_type == XFRAME_II_DEVICE) {
1420 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1421 writeq(val64, &bar0->wreq_split_mask);
1428 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1429 * @nic: device private variable,
1430 * @mask: A mask indicating which Intr block must be modified and,
1431 * @flag: A flag indicating whether to enable or disable the Intrs.
1432 * Description: This function will either disable or enable the interrupts
1433 * depending on the flag argument. The mask argument can be used to
1434 * enable/disable any Intr block.
1435 * Return Value: NONE.
1438 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1440 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1441 register u64 val64 = 0, temp64 = 0;
1443 /* Top level interrupt classification */
1444 /* PIC Interrupts */
1445 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1446 /* Enable PIC Intrs in the general intr mask register */
1447 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1448 if (flag == ENABLE_INTRS) {
1449 temp64 = readq(&bar0->general_int_mask);
1450 temp64 &= ~((u64) val64);
1451 writeq(temp64, &bar0->general_int_mask);
1453 * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1454 * interrupts for now.
1457 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1459 * No MSI Support is available presently, so TTI and
1460 * RTI interrupts are also disabled.
1462 } else if (flag == DISABLE_INTRS) {
1464 * Disable PIC Intrs in the general
1465 * intr mask register
1467 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1468 temp64 = readq(&bar0->general_int_mask);
1470 writeq(val64, &bar0->general_int_mask);
1474 /* DMA Interrupts */
1475 /* Enabling/Disabling Tx DMA interrupts */
1476 if (mask & TX_DMA_INTR) {
1477 /* Enable TxDMA Intrs in the general intr mask register */
1478 val64 = TXDMA_INT_M;
1479 if (flag == ENABLE_INTRS) {
1480 temp64 = readq(&bar0->general_int_mask);
1481 temp64 &= ~((u64) val64);
1482 writeq(temp64, &bar0->general_int_mask);
1484 * Keep all interrupts other than PFC interrupt
1485 * and PCC interrupt disabled in DMA level.
1487 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1489 writeq(val64, &bar0->txdma_int_mask);
1491 * Enable only the MISC error 1 interrupt in PFC block
1493 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1494 writeq(val64, &bar0->pfc_err_mask);
1496 * Enable only the FB_ECC error interrupt in PCC block
1498 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1499 writeq(val64, &bar0->pcc_err_mask);
1500 } else if (flag == DISABLE_INTRS) {
1502 * Disable TxDMA Intrs in the general intr mask
1505 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1506 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1507 temp64 = readq(&bar0->general_int_mask);
1509 writeq(val64, &bar0->general_int_mask);
1513 /* Enabling/Disabling Rx DMA interrupts */
1514 if (mask & RX_DMA_INTR) {
1515 /* Enable RxDMA Intrs in the general intr mask register */
1516 val64 = RXDMA_INT_M;
1517 if (flag == ENABLE_INTRS) {
1518 temp64 = readq(&bar0->general_int_mask);
1519 temp64 &= ~((u64) val64);
1520 writeq(temp64, &bar0->general_int_mask);
1522 * All RxDMA block interrupts are disabled for now
1525 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1526 } else if (flag == DISABLE_INTRS) {
1528 * Disable RxDMA Intrs in the general intr mask
1531 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1532 temp64 = readq(&bar0->general_int_mask);
1534 writeq(val64, &bar0->general_int_mask);
1538 /* MAC Interrupts */
1539 /* Enabling/Disabling MAC interrupts */
1540 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1541 val64 = TXMAC_INT_M | RXMAC_INT_M;
1542 if (flag == ENABLE_INTRS) {
1543 temp64 = readq(&bar0->general_int_mask);
1544 temp64 &= ~((u64) val64);
1545 writeq(temp64, &bar0->general_int_mask);
1547 * All MAC block error interrupts are disabled for now
1548 * except the link status change interrupt.
1551 val64 = MAC_INT_STATUS_RMAC_INT;
1552 temp64 = readq(&bar0->mac_int_mask);
1553 temp64 &= ~((u64) val64);
1554 writeq(temp64, &bar0->mac_int_mask);
1556 val64 = readq(&bar0->mac_rmac_err_mask);
1557 val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1558 writeq(val64, &bar0->mac_rmac_err_mask);
1559 } else if (flag == DISABLE_INTRS) {
1561 * Disable MAC Intrs in the general intr mask register
1563 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1564 writeq(DISABLE_ALL_INTRS,
1565 &bar0->mac_rmac_err_mask);
1567 temp64 = readq(&bar0->general_int_mask);
1569 writeq(val64, &bar0->general_int_mask);
1573 /* XGXS Interrupts */
1574 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1575 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1576 if (flag == ENABLE_INTRS) {
1577 temp64 = readq(&bar0->general_int_mask);
1578 temp64 &= ~((u64) val64);
1579 writeq(temp64, &bar0->general_int_mask);
1581 * All XGXS block error interrupts are disabled for now
1584 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1585 } else if (flag == DISABLE_INTRS) {
1587 * Disable MC Intrs in the general intr mask register
1589 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1590 temp64 = readq(&bar0->general_int_mask);
1592 writeq(val64, &bar0->general_int_mask);
1596 /* Memory Controller(MC) interrupts */
1597 if (mask & MC_INTR) {
1599 if (flag == ENABLE_INTRS) {
1600 temp64 = readq(&bar0->general_int_mask);
1601 temp64 &= ~((u64) val64);
1602 writeq(temp64, &bar0->general_int_mask);
1604 * Enable all MC Intrs.
1606 writeq(0x0, &bar0->mc_int_mask);
1607 writeq(0x0, &bar0->mc_err_mask);
1608 } else if (flag == DISABLE_INTRS) {
1610 * Disable MC Intrs in the general intr mask register
1612 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1613 temp64 = readq(&bar0->general_int_mask);
1615 writeq(val64, &bar0->general_int_mask);
1620 /* Tx traffic interrupts */
1621 if (mask & TX_TRAFFIC_INTR) {
1622 val64 = TXTRAFFIC_INT_M;
1623 if (flag == ENABLE_INTRS) {
1624 temp64 = readq(&bar0->general_int_mask);
1625 temp64 &= ~((u64) val64);
1626 writeq(temp64, &bar0->general_int_mask);
1628 * Enable all the Tx side interrupts
1629 * writing 0 Enables all 64 TX interrupt levels
1631 writeq(0x0, &bar0->tx_traffic_mask);
1632 } else if (flag == DISABLE_INTRS) {
1634 * Disable Tx Traffic Intrs in the general intr mask
1637 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1638 temp64 = readq(&bar0->general_int_mask);
1640 writeq(val64, &bar0->general_int_mask);
1644 /* Rx traffic interrupts */
1645 if (mask & RX_TRAFFIC_INTR) {
1646 val64 = RXTRAFFIC_INT_M;
1647 if (flag == ENABLE_INTRS) {
1648 temp64 = readq(&bar0->general_int_mask);
1649 temp64 &= ~((u64) val64);
1650 writeq(temp64, &bar0->general_int_mask);
1651 /* writing 0 Enables all 8 RX interrupt levels */
1652 writeq(0x0, &bar0->rx_traffic_mask);
1653 } else if (flag == DISABLE_INTRS) {
1655 * Disable Rx Traffic Intrs in the general intr mask
1658 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1659 temp64 = readq(&bar0->general_int_mask);
1661 writeq(val64, &bar0->general_int_mask);
1666 static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1670 if (flag == FALSE) {
1671 if ((!herc && (rev_id >= 4)) || herc) {
1672 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1673 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1674 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1678 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1679 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1680 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1685 if ((!herc && (rev_id >= 4)) || herc) {
1686 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1687 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1688 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1689 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1690 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1694 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1695 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1696 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1697 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1698 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1707 * verify_xena_quiescence - Checks whether the H/W is ready
1708 * @val64 : Value read from adapter status register.
1709 * @flag : indicates if the adapter enable bit was ever written once
1711 * Description: Returns whether the H/W is ready to go or not. Depending
1712 * on whether adapter enable bit was written or not the comparison
1713 * differs and the calling function passes the input argument flag to
1715 * Return: 1 if Xena is quiescent
1716 * 0 if Xena is not quiescent
1719 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1722 u64 tmp64 = ~((u64) val64);
1723 int rev_id = get_xena_rev_id(sp->pdev);
1725 herc = (sp->device_type == XFRAME_II_DEVICE);
1728 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1729 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1730 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1731 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1732 ADAPTER_STATUS_P_PLL_LOCK))) {
1733 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1740 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1741 * @sp: Pointer to device specific structure
1743 * New procedure to clear mac address reading problems on Alpha platforms
1747 void fix_mac_address(nic_t * sp)
1749 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1753 while (fix_mac[i] != END_SIGN) {
1754 writeq(fix_mac[i++], &bar0->gpio_control);
1756 val64 = readq(&bar0->gpio_control);
1761 * start_nic - Turns the device on
1762 * @nic : device private variable.
1764 * This function actually turns the device on. Before this function is
1765 * called,all Registers are configured from their reset states
1766 * and shared memory is allocated but the NIC is still quiescent. On
1767 * calling this function, the device interrupts are cleared and the NIC is
1768 * literally switched on by writing into the adapter control register.
1770 * SUCCESS on success and -1 on failure.
1773 static int start_nic(struct s2io_nic *nic)
1775 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1776 struct net_device *dev = nic->dev;
1777 register u64 val64 = 0;
1780 mac_info_t *mac_control;
1781 struct config_param *config;
1783 mac_control = &nic->mac_control;
1784 config = &nic->config;
1786 /* PRC Initialization and configuration */
1787 for (i = 0; i < config->rx_ring_num; i++) {
1788 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1789 &bar0->prc_rxd0_n[i]);
1791 val64 = readq(&bar0->prc_ctrl_n[i]);
1792 #ifndef CONFIG_2BUFF_MODE
1793 val64 |= PRC_CTRL_RC_ENABLED;
1795 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1797 writeq(val64, &bar0->prc_ctrl_n[i]);
1800 #ifdef CONFIG_2BUFF_MODE
1801 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1802 val64 = readq(&bar0->rx_pa_cfg);
1803 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1804 writeq(val64, &bar0->rx_pa_cfg);
1808 * Enabling MC-RLDRAM. After enabling the device, we timeout
1809 * for around 100ms, which is approximately the time required
1810 * for the device to be ready for operation.
1812 val64 = readq(&bar0->mc_rldram_mrs);
1813 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1814 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1815 val64 = readq(&bar0->mc_rldram_mrs);
1817 msleep(100); /* Delay by around 100 ms. */
1819 /* Enabling ECC Protection. */
1820 val64 = readq(&bar0->adapter_control);
1821 val64 &= ~ADAPTER_ECC_EN;
1822 writeq(val64, &bar0->adapter_control);
1825 * Clearing any possible Link state change interrupts that
1826 * could have popped up just before Enabling the card.
1828 val64 = readq(&bar0->mac_rmac_err_reg);
1830 writeq(val64, &bar0->mac_rmac_err_reg);
1833 * Verify if the device is ready to be enabled, if so enable
1836 val64 = readq(&bar0->adapter_status);
1837 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1838 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1839 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1840 (unsigned long long) val64);
1844 /* Enable select interrupts */
1845 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1846 RX_MAC_INTR | MC_INTR;
1847 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1850 * With some switches, link might be already up at this point.
1851 * Because of this weird behavior, when we enable laser,
1852 * we may not get link. We need to handle this. We cannot
1853 * figure out which switch is misbehaving. So we are forced to
1854 * make a global change.
1857 /* Enabling Laser. */
1858 val64 = readq(&bar0->adapter_control);
1859 val64 |= ADAPTER_EOI_TX_ON;
1860 writeq(val64, &bar0->adapter_control);
1862 /* SXE-002: Initialize link and activity LED */
1863 subid = nic->pdev->subsystem_device;
1864 if (((subid & 0xFF) >= 0x07) &&
1865 (nic->device_type == XFRAME_I_DEVICE)) {
1866 val64 = readq(&bar0->gpio_control);
1867 val64 |= 0x0000800000000000ULL;
1868 writeq(val64, &bar0->gpio_control);
1869 val64 = 0x0411040400000000ULL;
1870 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1874 * Don't see link state interrupts on certain switches, so
1875 * directly scheduling a link state task from here.
1877 schedule_work(&nic->set_link_task);
1883 * free_tx_buffers - Free all queued Tx buffers
1884 * @nic : device private variable.
1886 * Free all queued Tx buffers.
1887 * Return Value: void
1890 static void free_tx_buffers(struct s2io_nic *nic)
1892 struct net_device *dev = nic->dev;
1893 struct sk_buff *skb;
1896 mac_info_t *mac_control;
1897 struct config_param *config;
1898 int cnt = 0, frg_cnt;
1900 mac_control = &nic->mac_control;
1901 config = &nic->config;
1903 for (i = 0; i < config->tx_fifo_num; i++) {
1904 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1905 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1908 (struct sk_buff *) ((unsigned long) txdp->
1911 memset(txdp, 0, sizeof(TxD_t) *
1915 frg_cnt = skb_shinfo(skb)->nr_frags;
1916 pci_unmap_single(nic->pdev, (dma_addr_t)
1917 txdp->Buffer_Pointer,
1918 skb->len - skb->data_len,
1924 for (j = 0; j < frg_cnt; j++, txdp++) {
1926 &skb_shinfo(skb)->frags[j];
1927 pci_unmap_page(nic->pdev,
1937 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
1941 "%s:forcibly freeing %d skbs on FIFO%d\n",
1943 mac_control->fifos[i].tx_curr_get_info.offset = 0;
1944 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1949 * stop_nic - To stop the nic
1950 * @nic : device private variable.
1952 * This function does exactly the opposite of what the start_nic()
1953 * function does. This function is called to stop the device.
1958 static void stop_nic(struct s2io_nic *nic)
1960 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1961 register u64 val64 = 0;
1962 u16 interruptible, i;
1963 mac_info_t *mac_control;
1964 struct config_param *config;
1966 mac_control = &nic->mac_control;
1967 config = &nic->config;
1969 /* Disable all interrupts */
1970 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1971 RX_MAC_INTR | MC_INTR;
1972 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1975 for (i = 0; i < config->rx_ring_num; i++) {
1976 val64 = readq(&bar0->prc_ctrl_n[i]);
1977 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1978 writeq(val64, &bar0->prc_ctrl_n[i]);
1983 * fill_rx_buffers - Allocates the Rx side skbs
1984 * @nic: device private variable
1985 * @ring_no: ring number
1987 * The function allocates Rx side skbs and puts the physical
1988 * address of these buffers into the RxD buffer pointers, so that the NIC
1989 * can DMA the received frame into these locations.
1990 * The NIC supports 3 receive modes, viz
1992 * 2. three buffer and
1993 * 3. Five buffer modes.
1994 * Each mode defines how many fragments the received frame will be split
1995 * up into by the NIC. The frame is split into L3 header, L4 Header,
1996 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
1997 * is split into 3 fragments. As of now only single buffer mode is
2000 * SUCCESS on success or an appropriate -ve value on failure.
2003 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2005 struct net_device *dev = nic->dev;
2006 struct sk_buff *skb;
2008 int off, off1, size, block_no, block_no1;
2009 int offset, offset1;
2012 mac_info_t *mac_control;
2013 struct config_param *config;
2014 #ifdef CONFIG_2BUFF_MODE
2019 dma_addr_t rxdpphys;
2021 #ifndef CONFIG_S2IO_NAPI
2022 unsigned long flags;
2025 mac_control = &nic->mac_control;
2026 config = &nic->config;
2027 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2028 atomic_read(&nic->rx_bufs_left[ring_no]);
2029 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2030 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2032 while (alloc_tab < alloc_cnt) {
2033 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2035 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
2037 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2038 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2039 #ifndef CONFIG_2BUFF_MODE
2040 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
2041 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
2043 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
2044 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
2047 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2048 block_virt_addr + off;
2049 if ((offset == offset1) && (rxdp->Host_Control)) {
2050 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
2051 DBG_PRINT(INTR_DBG, " info equated\n");
2054 #ifndef CONFIG_2BUFF_MODE
2055 if (rxdp->Control_1 == END_OF_BLOCK) {
2056 mac_control->rings[ring_no].rx_curr_put_info.
2058 mac_control->rings[ring_no].rx_curr_put_info.
2059 block_index %= mac_control->rings[ring_no].block_count;
2060 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2063 off %= (MAX_RXDS_PER_BLOCK + 1);
2064 mac_control->rings[ring_no].rx_curr_put_info.offset =
2066 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
2067 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2070 #ifndef CONFIG_S2IO_NAPI
2071 spin_lock_irqsave(&nic->put_lock, flags);
2072 mac_control->rings[ring_no].put_pos =
2073 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
2074 spin_unlock_irqrestore(&nic->put_lock, flags);
2077 if (rxdp->Host_Control == END_OF_BLOCK) {
2078 mac_control->rings[ring_no].rx_curr_put_info.
2080 mac_control->rings[ring_no].rx_curr_put_info.block_index
2081 %= mac_control->rings[ring_no].block_count;
2082 block_no = mac_control->rings[ring_no].rx_curr_put_info
2085 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
2086 dev->name, block_no,
2087 (unsigned long long) rxdp->Control_1);
2088 mac_control->rings[ring_no].rx_curr_put_info.offset =
2090 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2093 #ifndef CONFIG_S2IO_NAPI
2094 spin_lock_irqsave(&nic->put_lock, flags);
2095 mac_control->rings[ring_no].put_pos = (block_no *
2096 (MAX_RXDS_PER_BLOCK + 1)) + off;
2097 spin_unlock_irqrestore(&nic->put_lock, flags);
2101 #ifndef CONFIG_2BUFF_MODE
2102 if (rxdp->Control_1 & RXD_OWN_XENA)
2104 if (rxdp->Control_2 & BIT(0))
2107 mac_control->rings[ring_no].rx_curr_put_info.
2111 #ifdef CONFIG_2BUFF_MODE
2113 * RxDs Spanning cache lines will be replenished only
2114 * if the succeeding RxD is also owned by Host. It
2115 * will always be the ((8*i)+3) and ((8*i)+6)
2116 * descriptors for the 48 byte descriptor. The offending
2117 * decsriptor is of-course the 3rd descriptor.
2119 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
2120 block_dma_addr + (off * sizeof(RxD_t));
2121 if (((u64) (rxdpphys)) % 128 > 80) {
2122 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
2123 block_virt_addr + (off + 1);
2124 if (rxdpnext->Host_Control == END_OF_BLOCK) {
2125 nextblk = (block_no + 1) %
2126 (mac_control->rings[ring_no].block_count);
2127 rxdpnext = mac_control->rings[ring_no].rx_blocks
2128 [nextblk].block_virt_addr;
2130 if (rxdpnext->Control_2 & BIT(0))
2135 #ifndef CONFIG_2BUFF_MODE
2136 skb = dev_alloc_skb(size + NET_IP_ALIGN);
2138 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
2141 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2142 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2145 #ifndef CONFIG_2BUFF_MODE
2146 skb_reserve(skb, NET_IP_ALIGN);
2147 memset(rxdp, 0, sizeof(RxD_t));
2148 rxdp->Buffer0_ptr = pci_map_single
2149 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2150 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
2151 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
2152 rxdp->Host_Control = (unsigned long) (skb);
2153 rxdp->Control_1 |= RXD_OWN_XENA;
2155 off %= (MAX_RXDS_PER_BLOCK + 1);
2156 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2158 ba = &mac_control->rings[ring_no].ba[block_no][off];
2159 skb_reserve(skb, BUF0_LEN);
2160 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
2162 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
2164 memset(rxdp, 0, sizeof(RxD_t));
2165 rxdp->Buffer2_ptr = pci_map_single
2166 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
2167 PCI_DMA_FROMDEVICE);
2169 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2170 PCI_DMA_FROMDEVICE);
2172 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2173 PCI_DMA_FROMDEVICE);
2175 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
2176 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
2177 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
2178 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
2179 rxdp->Host_Control = (u64) ((unsigned long) (skb));
2180 rxdp->Control_1 |= RXD_OWN_XENA;
2182 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2184 rxdp->Control_2 |= SET_RXD_MARKER;
2186 atomic_inc(&nic->rx_bufs_left[ring_no]);
2195 * free_rx_buffers - Frees all Rx buffers
2196 * @sp: device private variable.
2198 * This function will free all Rx buffers allocated by host.
2203 static void free_rx_buffers(struct s2io_nic *sp)
2205 struct net_device *dev = sp->dev;
2206 int i, j, blk = 0, off, buf_cnt = 0;
2208 struct sk_buff *skb;
2209 mac_info_t *mac_control;
2210 struct config_param *config;
2211 #ifdef CONFIG_2BUFF_MODE
2215 mac_control = &sp->mac_control;
2216 config = &sp->config;
2218 for (i = 0; i < config->rx_ring_num; i++) {
2219 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2220 off = j % (MAX_RXDS_PER_BLOCK + 1);
2221 rxdp = mac_control->rings[i].rx_blocks[blk].
2222 block_virt_addr + off;
2224 #ifndef CONFIG_2BUFF_MODE
2225 if (rxdp->Control_1 == END_OF_BLOCK) {
2227 (RxD_t *) ((unsigned long) rxdp->
2233 if (rxdp->Host_Control == END_OF_BLOCK) {
2239 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2240 memset(rxdp, 0, sizeof(RxD_t));
2245 (struct sk_buff *) ((unsigned long) rxdp->
2248 #ifndef CONFIG_2BUFF_MODE
2249 pci_unmap_single(sp->pdev, (dma_addr_t)
2252 HEADER_ETHERNET_II_802_3_SIZE
2253 + HEADER_802_2_SIZE +
2255 PCI_DMA_FROMDEVICE);
2257 ba = &mac_control->rings[i].ba[blk][off];
2258 pci_unmap_single(sp->pdev, (dma_addr_t)
2261 PCI_DMA_FROMDEVICE);
2262 pci_unmap_single(sp->pdev, (dma_addr_t)
2265 PCI_DMA_FROMDEVICE);
2266 pci_unmap_single(sp->pdev, (dma_addr_t)
2268 dev->mtu + BUF0_LEN + 4,
2269 PCI_DMA_FROMDEVICE);
2272 atomic_dec(&sp->rx_bufs_left[i]);
2275 memset(rxdp, 0, sizeof(RxD_t));
2277 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2278 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2279 mac_control->rings[i].rx_curr_put_info.offset = 0;
2280 mac_control->rings[i].rx_curr_get_info.offset = 0;
2281 atomic_set(&sp->rx_bufs_left[i], 0);
2282 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2283 dev->name, buf_cnt, i);
2288 * s2io_poll - Rx interrupt handler for NAPI support
2289 * @dev : pointer to the device structure.
2290 * @budget : The number of packets that were budgeted to be processed
2291 * during one pass through the 'Poll" function.
2293 * Comes into picture only if NAPI support has been incorporated. It does
2294 * the same thing that rx_intr_handler does, but not in an interrupt context.
2295 * also It will process only a given number of packets.
2297 * 0 on success and 1 if there are No Rx packets to be processed.
2300 #if defined(CONFIG_S2IO_NAPI)
2301 static int s2io_poll(struct net_device *dev, int *budget)
2303 nic_t *nic = dev->priv;
2304 int pkt_cnt = 0, org_pkts_to_process;
2305 mac_info_t *mac_control;
2306 struct config_param *config;
2307 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2311 atomic_inc(&nic->isr_cnt);
2312 mac_control = &nic->mac_control;
2313 config = &nic->config;
2315 nic->pkts_to_process = *budget;
2316 if (nic->pkts_to_process > dev->quota)
2317 nic->pkts_to_process = dev->quota;
2318 org_pkts_to_process = nic->pkts_to_process;
2320 val64 = readq(&bar0->rx_traffic_int);
2321 writeq(val64, &bar0->rx_traffic_int);
2323 for (i = 0; i < config->rx_ring_num; i++) {
2324 rx_intr_handler(&mac_control->rings[i]);
2325 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2326 if (!nic->pkts_to_process) {
2327 /* Quota for the current iteration has been met */
2334 dev->quota -= pkt_cnt;
2336 netif_rx_complete(dev);
2338 for (i = 0; i < config->rx_ring_num; i++) {
2339 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2340 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2341 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2345 /* Re enable the Rx interrupts. */
2346 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2347 atomic_dec(&nic->isr_cnt);
2351 dev->quota -= pkt_cnt;
2354 for (i = 0; i < config->rx_ring_num; i++) {
2355 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2356 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2357 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2361 atomic_dec(&nic->isr_cnt);
2367 * rx_intr_handler - Rx interrupt handler
2368 * @nic: device private variable.
2370 * If the interrupt is because of a received frame or if the
2371 * receive ring contains fresh as yet un-processed frames,this function is
2372 * called. It picks out the RxD at which place the last Rx processing had
2373 * stopped and sends the skb to the OSM's Rx handler and then increments
/*
 * rx_intr_handler() - service completed Rx descriptors on one ring.
 * Walks from the ring's "get" position towards the "put" position,
 * DMA-unmaps each up-to-date RxD's buffer(s) and hands the descriptor to
 * rx_osm_handler().  Under NAPI it decrements nic->pkts_to_process so the
 * poll loop can honour its quota.
 * NOTE(review): this extract is missing some original source lines
 * (closing braces / else arms), so only the visible flow is commented.
 */
2378 static void rx_intr_handler(ring_info_t *ring_data)
2380 nic_t *nic = ring_data->nic;
2381 struct net_device *dev = (struct net_device *) nic->dev;
2382 int get_block, get_offset, put_block, put_offset, ring_bufs;
2383 rx_curr_get_info_t get_info, put_info;
2385 struct sk_buff *skb;
2386 #ifndef CONFIG_S2IO_NAPI
/* Bail out early if the adapter is being reset; rx_lock serialises
 * this path against the reset/teardown code. */
2389 spin_lock(&nic->rx_lock);
2390 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2391 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2392 __FUNCTION__, dev->name);
2393 spin_unlock(&nic->rx_lock);
/* Snapshot consumer ("get") and producer ("put") positions. */
2396 get_info = ring_data->rx_curr_get_info;
2397 get_block = get_info.block_index;
2398 put_info = ring_data->rx_curr_put_info;
2399 put_block = put_info.block_index;
2400 ring_bufs = get_info.ring_len+1;
2401 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2403 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2405 #ifndef CONFIG_S2IO_NAPI
/* Non-NAPI: the producer index is shared with the refill path, so it is
 * read under put_lock. */
2406 spin_lock(&nic->put_lock);
2407 put_offset = ring_data->put_pos;
2408 spin_unlock(&nic->put_lock);
2410 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
/* Consume host-owned descriptors until we catch up with "put". */
2413 while (RXD_IS_UP2DT(rxdp) &&
2414 (((get_offset + 1) % ring_bufs) != put_offset)) {
2415 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2417 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2419 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2420 spin_unlock(&nic->rx_lock);
2423 #ifndef CONFIG_2BUFF_MODE
/* Single-buffer mode: one DMA mapping per descriptor. */
2424 pci_unmap_single(nic->pdev, (dma_addr_t)
2427 HEADER_ETHERNET_II_802_3_SIZE +
2430 PCI_DMA_FROMDEVICE);
/* Two-buffer mode: header and payload buffers were mapped separately
 * and must be unmapped separately. */
2432 pci_unmap_single(nic->pdev, (dma_addr_t)
2434 BUF0_LEN, PCI_DMA_FROMDEVICE);
2435 pci_unmap_single(nic->pdev, (dma_addr_t)
2437 BUF1_LEN, PCI_DMA_FROMDEVICE);
2438 pci_unmap_single(nic->pdev, (dma_addr_t)
2440 dev->mtu + BUF0_LEN + 4,
2441 PCI_DMA_FROMDEVICE);
2443 rx_osm_handler(ring_data, rxdp);
2445 ring_data->rx_curr_get_info.offset =
2447 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
/* Wrap to the next descriptor block once the offset passes the
 * per-block boundary. */
2449 if (get_info.offset &&
2450 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2451 get_info.offset = 0;
2452 ring_data->rx_curr_get_info.offset
2455 get_block %= ring_data->block_count;
2456 ring_data->rx_curr_get_info.block_index
2458 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2461 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2463 #ifdef CONFIG_S2IO_NAPI
/* NAPI accounting: stop once the poll quota is exhausted. */
2464 nic->pkts_to_process -= 1;
2465 if (!nic->pkts_to_process)
/* Non-NAPI bound on work per invocation, from module parameter. */
2469 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2473 spin_unlock(&nic->rx_lock);
2477 * tx_intr_handler - Transmit interrupt handler
2478 * @nic : device private variable
2480 * If an interrupt was raised to indicate DMA complete of the
2481 * Tx packet, this function is called. It identifies the last TxD
2482 * whose buffer was freed and frees all skbs whose data have already
2483 * DMA'ed into the NICs internal memory.
/*
 * tx_intr_handler() - reclaim Tx descriptors whose DMA has completed.
 * Walks one FIFO from its "get" offset, unmapping the linear part and all
 * page fragments of each finished skb, freeing the skb, and finally waking
 * the transmit queue if it was stopped.
 * NOTE(review): some original lines are missing from this extract.
 */
2488 static void tx_intr_handler(fifo_info_t *fifo_data)
2490 nic_t *nic = fifo_data->nic;
2491 struct net_device *dev = (struct net_device *) nic->dev;
2492 tx_curr_get_info_t get_info, put_info;
2493 struct sk_buff *skb;
2497 get_info = fifo_data->tx_curr_get_info;
2498 put_info = fifo_data->tx_curr_put_info;
2499 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
/* Process only descriptors the NIC has handed back (ownership bit
 * cleared) and that carry a valid skb pointer. */
2501 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2502 (get_info.offset != put_info.offset) &&
2503 (txdlp->Host_Control)) {
2504 /* Check for TxD errors */
2505 if (txdlp->Control_1 & TXD_T_CODE) {
2506 unsigned long long err;
2507 err = txdlp->Control_1 & TXD_T_CODE;
2508 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
/* Recover the skb stashed in Host_Control at xmit time. */
2512 skb = (struct sk_buff *) ((unsigned long)
2513 txdlp->Host_Control);
2515 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2517 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2521 frg_cnt = skb_shinfo(skb)->nr_frags;
2522 nic->tx_pkt_count++;
/* Unmap the linear (non-paged) portion of the skb first. */
2524 pci_unmap_single(nic->pdev, (dma_addr_t)
2525 txdlp->Buffer_Pointer,
2526 skb->len - skb->data_len,
/* Then unmap every page fragment; one TxD per fragment. */
2532 for (j = 0; j < frg_cnt; j++, txdlp++) {
2534 &skb_shinfo(skb)->frags[j];
2535 pci_unmap_page(nic->pdev,
2545 (sizeof(TxD_t) * fifo_data->max_txds));
2547 /* Updating the statistics block */
2548 nic->stats.tx_bytes += skb->len;
2549 dev_kfree_skb_irq(skb);
/* Advance the consumer offset with wrap-around. */
2552 get_info.offset %= get_info.fifo_len + 1;
2553 txdlp = (TxD_t *) fifo_data->list_info
2554 [get_info.offset].list_virt_addr;
2555 fifo_data->tx_curr_get_info.offset =
/* Descriptors were freed; restart the queue if xmit had stopped it. */
2559 spin_lock(&nic->tx_lock);
2560 if (netif_queue_stopped(dev))
2561 netif_wake_queue(dev);
2562 spin_unlock(&nic->tx_lock);
2566 * alarm_intr_handler - Alarm Interrupt handler
2567 * @nic: device private variable
2568 * Description: If the interrupt was neither because of Rx packet or Tx
2569 * complete, this function is called. If the interrupt was to indicate
2570 * a loss of link, the OSM link status handler is invoked for any other
2571 * alarm interrupt the block that raised the interrupt is displayed
2572 * and a H/W reset is issued.
/*
 * alarm_intr_handler() - handle alarm (non-traffic) interrupts:
 * link-state changes, ECC errors, serious internal errors and the
 * PCC_FB_ECC errata case.  Error registers are write-one-to-clear, so
 * each is read and the same value written back to acknowledge it.
 */
2577 static void alarm_intr_handler(struct s2io_nic *nic)
2579 struct net_device *dev = (struct net_device *) nic->dev;
2580 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2581 register u64 val64 = 0, err_reg = 0;
2583 /* Handling link status change error Intr */
2584 err_reg = readq(&bar0->mac_rmac_err_reg);
2585 writeq(err_reg, &bar0->mac_rmac_err_reg);
2586 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
/* Link handling needs process context, so defer to a work item. */
2587 schedule_work(&nic->set_link_task);
2590 /* Handling Ecc errors */
2591 val64 = readq(&bar0->mc_err_reg);
2592 writeq(val64, &bar0->mc_err_reg);
2593 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2594 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
/* Double-bit ECC is unrecoverable: stop traffic and schedule
 * a device reset from the reset timer task. */
2595 nic->mac_control.stats_info->sw_stat.
2597 DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2599 DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2600 netif_stop_queue(dev);
2601 schedule_work(&nic->rst_timer_task);
2603 nic->mac_control.stats_info->sw_stat.
2608 /* In case of a serious error, the device will be Reset. */
2609 val64 = readq(&bar0->serr_source);
2610 if (val64 & SERR_SOURCE_ANY) {
2611 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2612 DBG_PRINT(ERR_DBG, "serious error!!\n");
2613 netif_stop_queue(dev);
2614 schedule_work(&nic->rst_timer_task);
2618 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2619 * Error occurs, the adapter will be recycled by disabling the
2620 * adapter enable bit and enabling it again after the device
2621 * becomes Quiescent.
2623 val64 = readq(&bar0->pcc_err_reg);
2624 writeq(val64, &bar0->pcc_err_reg);
2625 if (val64 & PCC_FB_ECC_DB_ERR) {
2626 u64 ac = readq(&bar0->adapter_control);
2627 ac &= ~(ADAPTER_CNTL_EN);
2628 writeq(ac, &bar0->adapter_control);
/* Read back to flush the write; set_link_task re-enables later. */
2629 ac = readq(&bar0->adapter_control);
2630 schedule_work(&nic->set_link_task);
2633 /* Other type of interrupts are not being handled now, TODO */
2637 * wait_for_cmd_complete - waits for a command to complete.
2638 * @sp : private member of the device structure, which is a pointer to the
2639 * s2io_nic structure.
2640 * Description: Function that waits for a command to Write into RMAC
2641 * ADDR DATA registers to be completed and returns either success or
2642 * error depending on whether the command was complete or not.
2644 * SUCCESS on success and FAILURE on failure.
/*
 * wait_for_cmd_complete() - poll rmac_addr_cmd_mem until the strobe
 * "executing" bit clears, then report SUCCESS or FAILURE.
 * NOTE(review): the polling loop's retry/delay and return statements are
 * missing from this extract; only the head of the function is visible.
 */
2647 int wait_for_cmd_complete(nic_t * sp)
2649 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2650 int ret = FAILURE, cnt = 0;
2654 val64 = readq(&bar0->rmac_addr_cmd_mem);
2655 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2668 * s2io_reset - Resets the card.
2669 * @sp : private member of the device structure.
2670 * Description: Function to Reset the card. This function then also
2671 * restores the previously saved PCI configuration space registers as
2672 * the card reset also resets the configuration space.
/*
 * s2io_reset() - software-reset the adapter via the sw_reset register,
 * restore PCI config space (Xframe-I), re-program the swapper, clear
 * stale PCI/PCI-X error state and device statistics, and apply LED and
 * ECC errata workarounds.
 */
2677 void s2io_reset(nic_t * sp)
2679 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2683 val64 = SW_RESET_ALL;
2684 writeq(val64, &bar0->sw_reset);
2687 * At this stage, if the PCI write is indeed completed, the
2688 * card is reset and so is the PCI Config space of the device.
2689 * So a read cannot be issued at this stage on any of the
2690 * registers to ensure the write into "sw_reset" register
2692 * Question: Is there any system call that will explicitly force
2693 * all the write commands still pending on the bus to be pushed
2695 * As of now I am just giving a 250ms delay and hoping that the
2696 * PCI write to sw_reset register is done by this time.
2700 if (!(sp->device_type & XFRAME_II_DEVICE)) {
2701 /* Restore the PCI state saved during initialization. */
2702 pci_restore_state(sp->pdev);
2704 pci_set_master(sp->pdev);
2710 /* Set swapper to enable I/O register access */
2711 s2io_set_swapper(sp);
2713 /* Clear certain PCI/PCI-X fields after reset */
2714 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
2715 pci_cmd &= 0x7FFF; /* Clear parity err detect bit */
2716 pci_write_config_word(sp->pdev, PCI_COMMAND, pci_cmd);
2718 val64 = readq(&bar0->txpic_int_reg);
2719 val64 &= ~BIT(62); /* Clearing PCI_STATUS error reflected here */
2720 writeq(val64, &bar0->txpic_int_reg);
2722 /* Clearing PCIX Ecc status register */
2723 pci_write_config_dword(sp->pdev, 0x68, 0);
2725 /* Reset device statistics maintained by OS */
2726 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2728 /* SXE-002: Configure link and activity LED to turn it off */
2729 subid = sp->pdev->subsystem_device;
2730 if (((subid & 0xFF) >= 0x07) &&
2731 (sp->device_type == XFRAME_I_DEVICE)) {
2732 val64 = readq(&bar0->gpio_control);
2733 val64 |= 0x0000800000000000ULL;
2734 writeq(val64, &bar0->gpio_control);
/* Magic LED configuration value written at offset 0x2700 per the
 * SXE-002 workaround; exact register semantics not visible here. */
2735 val64 = 0x0411040400000000ULL;
2736 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2740 * Clear spurious ECC interrupts that would have occurred on
2741 * XFRAME II cards after reset.
2743 if (sp->device_type == XFRAME_II_DEVICE) {
2744 val64 = readq(&bar0->pcc_err_reg);
2745 writeq(val64, &bar0->pcc_err_reg);
2748 sp->device_enabled_once = FALSE;
2752 * s2io_set_swapper - to set the swapper control on the card
2753 * @sp : private member of the device structure,
2754 * pointer to the s2io_nic structure.
2755 * Description: Function to set the swapper control on the card
2756 * correctly depending on the 'endianness' of the system.
2758 * SUCCESS on success and FAILURE on failure.
/*
 * s2io_set_swapper() - program the byte-swapper so register accesses work
 * for this host's endianness.  The known pattern 0x0123456789ABCDEF is
 * read back from the PIF feedback register (and written/read through
 * xmsi_address) to probe candidate swapper settings, then the final
 * swapper_ctrl value is composed and verified.
 */
2761 int s2io_set_swapper(nic_t * sp)
2763 struct net_device *dev = sp->dev;
2764 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2765 u64 val64, valt, valr;
2768 * Set proper endian settings and verify the same by reading
2769 * the PIF Feed-back register.
2772 val64 = readq(&bar0->pif_rd_swapper_fb);
2773 if (val64 != 0x0123456789ABCDEFULL) {
/* Try each FE/SE (full/single swap enable) combination until the
 * feedback register reads back the expected pattern. */
2775 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2776 0x8100008181000081ULL, /* FE=1, SE=0 */
2777 0x4200004242000042ULL, /* FE=0, SE=1 */
2778 0}; /* FE=0, SE=0 */
2781 writeq(value[i], &bar0->swapper_ctrl);
2782 val64 = readq(&bar0->pif_rd_swapper_fb);
2783 if (val64 == 0x0123456789ABCDEFULL)
2788 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2790 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2791 (unsigned long long) val64);
/* Keep the read-path bits that already work while probing writes. */
2796 valr = readq(&bar0->swapper_ctrl);
2799 valt = 0x0123456789ABCDEFULL;
2800 writeq(valt, &bar0->xmsi_address);
2801 val64 = readq(&bar0->xmsi_address);
/* Write path needs its own probe: same idea via xmsi_address. */
2805 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2806 0x0081810000818100ULL, /* FE=1, SE=0 */
2807 0x0042420000424200ULL, /* FE=0, SE=1 */
2808 0}; /* FE=0, SE=0 */
2811 writeq((value[i] | valr), &bar0->swapper_ctrl);
2812 writeq(valt, &bar0->xmsi_address);
2813 val64 = readq(&bar0->xmsi_address);
2819 unsigned long long x = val64;
2820 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2821 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
/* Preserve the probed top bits, then OR in the per-block enables. */
2825 val64 = readq(&bar0->swapper_ctrl);
2826 val64 &= 0xFFFF000000000000ULL;
2830 * The device by default set to a big endian format, so a
2831 * big endian driver need not set anything.
2833 val64 |= (SWAPPER_CTRL_TXP_FE |
2834 SWAPPER_CTRL_TXP_SE |
2835 SWAPPER_CTRL_TXD_R_FE |
2836 SWAPPER_CTRL_TXD_W_FE |
2837 SWAPPER_CTRL_TXF_R_FE |
2838 SWAPPER_CTRL_RXD_R_FE |
2839 SWAPPER_CTRL_RXD_W_FE |
2840 SWAPPER_CTRL_RXF_W_FE |
2841 SWAPPER_CTRL_XMSI_FE |
2842 SWAPPER_CTRL_XMSI_SE |
2843 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2844 writeq(val64, &bar0->swapper_ctrl);
2847 * Initially we enable all bits to make it accessible by the
2848 * driver, then we selectively enable only those bits that
/* Little-endian variant: SE bits are enabled as well.  (The #if/#else
 * that selects between the two lists is not visible in this extract.) */
2851 val64 |= (SWAPPER_CTRL_TXP_FE |
2852 SWAPPER_CTRL_TXP_SE |
2853 SWAPPER_CTRL_TXD_R_FE |
2854 SWAPPER_CTRL_TXD_R_SE |
2855 SWAPPER_CTRL_TXD_W_FE |
2856 SWAPPER_CTRL_TXD_W_SE |
2857 SWAPPER_CTRL_TXF_R_FE |
2858 SWAPPER_CTRL_RXD_R_FE |
2859 SWAPPER_CTRL_RXD_R_SE |
2860 SWAPPER_CTRL_RXD_W_FE |
2861 SWAPPER_CTRL_RXD_W_SE |
2862 SWAPPER_CTRL_RXF_W_FE |
2863 SWAPPER_CTRL_XMSI_FE |
2864 SWAPPER_CTRL_XMSI_SE |
2865 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2866 writeq(val64, &bar0->swapper_ctrl);
2868 val64 = readq(&bar0->swapper_ctrl);
2871 * Verifying if endian settings are accurate by reading a
2872 * feedback register.
2874 val64 = readq(&bar0->pif_rd_swapper_fb);
2875 if (val64 != 0x0123456789ABCDEFULL) {
2876 /* Endian settings are incorrect, calls for another dekko. */
2877 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2879 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2880 (unsigned long long) val64);
2887 /* ********************************************************* *
2888 * Functions defined below concern the OS part of the driver *
2889 * ********************************************************* */
2892 * s2io_open - open entry point of the driver
2893 * @dev : pointer to the device structure.
2895 * This function is the open entry point of the driver. It mainly calls a
2896 * function to allocate Rx buffers and inserts them into the buffer
2897 * descriptors and then enables the Rx part of the NIC.
2899 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * s2io_open() - net_device open entry point.  Brings the hardware up
 * (s2io_card_up), registers the shared ISR, programs the MAC address and
 * starts the transmit queue.  Error paths unwind in reverse order via
 * the labels at the bottom.
 */
2903 int s2io_open(struct net_device *dev)
2905 nic_t *sp = dev->priv;
2909 * Make sure you have link off by default every time
2910 * Nic is initialized
2912 netif_carrier_off(dev);
2913 sp->last_link_state = 0; /* Unknown link state */
2915 /* Initialize H/W and enable interrupts */
2916 if (s2io_card_up(sp)) {
2917 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2920 goto hw_init_failed;
2923 /* After proper initialization of H/W, register ISR */
2924 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2927 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2929 goto isr_registration_failed;
2932 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2933 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2935 goto setting_mac_address_failed;
2938 netif_start_queue(dev);
/* Unwind: free the IRQ if MAC programming failed, and stop the alarm
 * timer if the ISR was never registered. */
2941 setting_mac_address_failed:
2942 free_irq(sp->pdev->irq, dev);
2943 isr_registration_failed:
2944 del_timer_sync(&sp->alarm_timer);
2951 * s2io_close -close entry point of the driver
2952 * @dev : device pointer.
2954 * This is the stop entry point of the driver. It needs to undo exactly
2955 * whatever was done by the open entry point, thus it's usually referred to
2956 * as the close function.Among other things this function mainly stops the
2957 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2959 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * s2io_close() - net_device stop entry point.  Flushes deferred work,
 * stops the Tx queue, shuts the card down and releases the IRQ.
 */
2963 int s2io_close(struct net_device *dev)
2965 nic_t *sp = dev->priv;
/* Wait for set_link/reset work items before tearing the device down. */
2966 flush_scheduled_work();
2967 netif_stop_queue(dev);
2968 /* Reset card, kill tasklet and free Tx and Rx buffers. */
2971 free_irq(sp->pdev->irq, dev);
2972 sp->device_close_flag = TRUE; /* Device is shut down. */
2977 * s2io_xmit - Tx entry point of the driver
2978 * @skb : the socket buffer containing the Tx data.
2979 * @dev : device pointer.
2981 * This function is the Tx entry point of the driver. S2IO NIC supports
2982 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
2983 * NOTE: when the device can't queue the pkt, just the trans_start variable will
2986 * 0 on success & 1 on failure.
/*
 * s2io_xmit() - hard_start_xmit entry point.  Selects a Tx FIFO (by VLAN
 * priority when VLAN is active), builds a TxD list for the skb's linear
 * part and page fragments (with optional LSO and checksum offload), then
 * kicks the FIFO doorbell and advances the "put" offset.
 * NOTE(review): several original lines are missing from this extract.
 */
2989 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2991 nic_t *sp = dev->priv;
2992 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2995 TxFIFO_element_t __iomem *tx_fifo;
2996 unsigned long flags;
3001 int vlan_priority = 0;
3002 mac_info_t *mac_control;
3003 struct config_param *config;
3005 mac_control = &sp->mac_control;
3006 config = &sp->config;
3008 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3009 spin_lock_irqsave(&sp->tx_lock, flags);
3010 if (atomic_read(&sp->card_state) == CARD_DOWN) {
3011 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3013 spin_unlock_irqrestore(&sp->tx_lock, flags);
3020 /* Get Fifo number to Transmit based on vlan priority */
3021 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3022 vlan_tag = vlan_tx_tag_get(skb);
/* 802.1p priority lives in the top 3 bits of the VLAN tag. */
3023 vlan_priority = vlan_tag >> 13;
3024 queue = config->fifo_mapping[vlan_priority];
3027 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3028 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3029 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3032 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3033 /* Avoid "put" pointer going beyond "get" pointer */
3034 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
3035 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
3036 netif_stop_queue(dev);
3038 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* LSO: pass the MSS to the NIC for hardware segmentation. */
3042 mss = skb_shinfo(skb)->tso_size;
3044 txdp->Control_1 |= TXD_TCP_LSO_EN;
3045 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3049 frg_cnt = skb_shinfo(skb)->nr_frags;
3050 frg_len = skb->len - skb->data_len;
/* Map the linear part; the skb pointer is stashed in Host_Control so
 * tx_intr_handler can free it on completion. */
3052 txdp->Buffer_Pointer = pci_map_single
3053 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3054 txdp->Host_Control = (unsigned long) skb;
3055 if (skb->ip_summed == CHECKSUM_HW) {
3057 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3061 txdp->Control_2 |= config->tx_intr_type;
3063 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3064 txdp->Control_2 |= TXD_VLAN_ENABLE;
3065 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3068 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
3069 TXD_GATHER_CODE_FIRST);
3070 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3072 /* For fragmented SKB. */
3073 for (i = 0; i < frg_cnt; i++) {
3074 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3076 txdp->Buffer_Pointer = (u64) pci_map_page
3077 (sp->pdev, frag->page, frag->page_offset,
3078 frag->size, PCI_DMA_TODEVICE);
3079 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
3081 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
/* Doorbell: hand the TxD list's physical address to the FIFO. */
3083 tx_fifo = mac_control->tx_FIFO_start[queue];
3084 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3085 writeq(val64, &tx_fifo->TxDL_Pointer);
3089 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3094 val64 |= TX_FIFO_SPECIAL_FUNC;
3096 writeq(val64, &tx_fifo->List_Control);
/* Advance "put" with wrap-around. */
3099 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3100 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
3102 /* Avoid "put" pointer going beyond "get" pointer */
3103 if (((put_off + 1) % queue_len) == get_off) {
3105 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3107 netif_stop_queue(dev);
3110 dev->trans_start = jiffies;
3111 spin_unlock_irqrestore(&sp->tx_lock, flags);
/*
 * s2io_alarm_handle() - periodic timer callback: run the alarm interrupt
 * handler and re-arm itself to fire again in half a second.
 */
3117 s2io_alarm_handle(unsigned long data)
3119 nic_t *sp = (nic_t *)data;
3121 alarm_intr_handler(sp);
3122 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3126 * s2io_isr - ISR handler of the device .
3127 * @irq: the irq of the device.
3128 * @dev_id: a void pointer to the dev structure of the NIC.
3129 * @pt_regs: pointer to the registers pushed on the stack.
3130 * Description: This function is the ISR handler of the device. It
3131 * identifies the reason for the interrupt and calls the relevant
3132 * service routines. As a contingency measure, this ISR allocates the
3133 * recv buffers, if their numbers are below the panic value which is
3134 * presently set to 25% of the original number of rcv buffers allocated.
3136 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
3137 * IRQ_NONE: will be returned if interrupt is not from our device
/*
 * s2io_isr() - shared interrupt handler.  Reads general_int_status to
 * find the cause, then dispatches: Rx traffic (scheduled to NAPI poll or
 * handled inline), Tx completions, and — in the non-NAPI build — refills
 * Rx buffers directly when they fall to panic levels, else via tasklet.
 */
3139 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
3141 struct net_device *dev = (struct net_device *) dev_id;
3142 nic_t *sp = dev->priv;
3143 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3145 u64 reason = 0, val64;
3146 mac_info_t *mac_control;
3147 struct config_param *config;
/* isr_cnt lets teardown code wait for in-flight ISRs. */
3149 atomic_inc(&sp->isr_cnt);
3150 mac_control = &sp->mac_control;
3151 config = &sp->config;
3154 * Identify the cause for interrupt and call the appropriate
3155 * interrupt handler. Causes for the interrupt could be;
3159 * 4. Error in any functional blocks of the NIC.
3161 reason = readq(&bar0->general_int_status);
3164 /* The interrupt was not raised by Xena. */
3165 atomic_dec(&sp->isr_cnt);
3169 #ifdef CONFIG_S2IO_NAPI
/* NAPI: mask Rx interrupts and let the poll routine do the work. */
3170 if (reason & GEN_INTR_RXTRAFFIC) {
3171 if (netif_rx_schedule_prep(dev)) {
3172 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
3174 __netif_rx_schedule(dev);
3178 /* If Intr is because of Rx Traffic */
3179 if (reason & GEN_INTR_RXTRAFFIC) {
3181 * rx_traffic_int reg is an R1 register, writing all 1's
3182 * will ensure that the actual interrupt causing bit get's
3183 * cleared and hence a read can be avoided.
3185 val64 = 0xFFFFFFFFFFFFFFFFULL;
3186 writeq(val64, &bar0->rx_traffic_int);
3187 for (i = 0; i < config->rx_ring_num; i++) {
3188 rx_intr_handler(&mac_control->rings[i]);
3193 /* If Intr is because of Tx Traffic */
3194 if (reason & GEN_INTR_TXTRAFFIC) {
3196 * tx_traffic_int reg is an R1 register, writing all 1's
3197 * will ensure that the actual interrupt causing bit get's
3198 * cleared and hence a read can be avoided.
3200 val64 = 0xFFFFFFFFFFFFFFFFULL;
3201 writeq(val64, &bar0->tx_traffic_int);
3203 for (i = 0; i < config->tx_fifo_num; i++)
3204 tx_intr_handler(&mac_control->fifos[i]);
3208 * If the Rx buffer count is below the panic threshold then
3209 * reallocate the buffers from the interrupt handler itself,
3210 * else schedule a tasklet to reallocate the buffers.
3212 #ifndef CONFIG_S2IO_NAPI
3213 for (i = 0; i < config->rx_ring_num; i++) {
3215 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3216 int level = rx_buffer_level(sp, rxb_size, i);
3218 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3219 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3220 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3221 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3222 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3224 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3225 clear_bit(0, (&sp->tasklet_status));
3226 atomic_dec(&sp->isr_cnt);
3229 clear_bit(0, (&sp->tasklet_status));
3230 } else if (level == LOW) {
/* Not critical yet: defer the refill to the tasklet. */
3231 tasklet_schedule(&sp->task);
3236 atomic_dec(&sp->isr_cnt);
/*
 * s2io_updt_stats() - trigger a one-shot hardware statistics DMA into the
 * stats block and poll stat_cfg until the strobe bit clears (the loop's
 * bound/delay lines are not visible in this extract).
 */
3243 static void s2io_updt_stats(nic_t *sp)
3245 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3249 if (atomic_read(&sp->card_state) == CARD_UP) {
3250 /* Apprx 30us on a 133 MHz bus */
3251 val64 = SET_UPDT_CLICKS(10) |
3252 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3253 writeq(val64, &bar0->stat_cfg);
3256 val64 = readq(&bar0->stat_cfg);
3257 if (!(val64 & BIT(0)))
3261 break; /* Updt failed */
3267 * s2io_get_stats - Updates the device statistics structure.
3268 * @dev : pointer to the device structure.
3270 * This function updates the device statistics structure in the s2io_nic
3271 * structure and returns a pointer to the same.
3273 * pointer to the updated net_device_stats structure.
/*
 * s2io_get_stats() - net_device get_stats entry point.  Forces an
 * immediate hardware stats update, copies selected little-endian MAC
 * counters into sp->stats, and returns a pointer to it.
 */
3276 struct net_device_stats *s2io_get_stats(struct net_device *dev)
3278 nic_t *sp = dev->priv;
3279 mac_info_t *mac_control;
3280 struct config_param *config;
3283 mac_control = &sp->mac_control;
3284 config = &sp->config;
3286 /* Configure Stats for immediate updt */
3287 s2io_updt_stats(sp);
/* Hardware counters are little-endian in the stats block. */
3289 sp->stats.tx_packets =
3290 le32_to_cpu(mac_control->stats_info->tmac_frms);
3291 sp->stats.tx_errors =
3292 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3293 sp->stats.rx_errors =
3294 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3295 sp->stats.multicast =
3296 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3297 sp->stats.rx_length_errors =
3298 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3300 return (&sp->stats);
3304 * s2io_set_multicast - entry point for multicast address enable/disable.
3305 * @dev : pointer to the device structure
3307 * This function is a driver entry point which gets called by the kernel
3308 * whenever multicast addresses must be enabled/disabled. This also gets
3309 * called to set/reset promiscuous mode. Depending on the device flag, we
3310 * determine, if multicast address must be enabled or if promiscuous mode
3311 * is to be disabled etc.
/*
 * s2io_set_multicast() - net_device set_multicast_list entry point.
 * Programs the RMAC address-filter memory for all-multi on/off and
 * promiscuous on/off, then rewrites the individual multicast filter list
 * from dev->mc_list.  Each filter write is a data0/data1 + command-memory
 * strobe followed by wait_for_cmd_complete().
 */
3316 static void s2io_set_multicast(struct net_device *dev)
3319 struct dev_mc_list *mclist;
3320 nic_t *sp = dev->priv;
3321 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3322 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3324 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3327 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3328 /* Enable all Multicast addresses */
3329 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3330 &bar0->rmac_addr_data0_mem);
3331 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3332 &bar0->rmac_addr_data1_mem);
3333 val64 = RMAC_ADDR_CMD_MEM_WE |
3334 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3335 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3336 writeq(val64, &bar0->rmac_addr_cmd_mem);
3337 /* Wait till command completes */
3338 wait_for_cmd_complete(sp);
3341 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3342 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3343 /* Disable all Multicast addresses */
3344 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3345 &bar0->rmac_addr_data0_mem);
3346 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3347 &bar0->rmac_addr_data1_mem);
3348 val64 = RMAC_ADDR_CMD_MEM_WE |
3349 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3350 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3351 writeq(val64, &bar0->rmac_addr_cmd_mem);
3352 /* Wait till command completes */
3353 wait_for_cmd_complete(sp);
3356 sp->all_multi_pos = 0;
3359 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3360 /* Put the NIC into promiscuous mode */
3361 add = &bar0->mac_cfg;
3362 val64 = readq(&bar0->mac_cfg);
3363 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: each 32-bit half-write must be preceded
 * by writing the unlock key to rmac_cfg_key. */
3365 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3366 writel((u32) val64, add);
3367 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3368 writel((u32) (val64 >> 32), (add + 4));
3370 val64 = readq(&bar0->mac_cfg);
3371 sp->promisc_flg = 1;
3372 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3374 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3375 /* Remove the NIC from promiscuous mode */
3376 add = &bar0->mac_cfg;
3377 val64 = readq(&bar0->mac_cfg);
3378 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3380 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3381 writel((u32) val64, add);
3382 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3383 writel((u32) (val64 >> 32), (add + 4));
3385 val64 = readq(&bar0->mac_cfg);
3386 sp->promisc_flg = 0;
3387 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3391 /* Update individual M_CAST address list */
3392 if ((!sp->m_cast_flg) && dev->mc_count) {
3394 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3395 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3397 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3398 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3402 prev_cnt = sp->mc_addr_count;
3403 sp->mc_addr_count = dev->mc_count;
3405 /* Clear out the previous list of Mc in the H/W. */
3406 for (i = 0; i < prev_cnt; i++) {
3407 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3408 &bar0->rmac_addr_data0_mem);
3409 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3410 &bar0->rmac_addr_data1_mem);
3411 val64 = RMAC_ADDR_CMD_MEM_WE |
3412 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3413 RMAC_ADDR_CMD_MEM_OFFSET
3414 (MAC_MC_ADDR_START_OFFSET + i);
3415 writeq(val64, &bar0->rmac_addr_cmd_mem);
3417 /* Wait for command completes */
3418 if (wait_for_cmd_complete(sp)) {
3419 DBG_PRINT(ERR_DBG, "%s: Adding ",
3421 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3426 /* Create the new Rx filter list and update the same in H/W. */
3427 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3428 i++, mclist = mclist->next) {
3429 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6-byte MAC into a u64 for the data0 register. */
3431 for (j = 0; j < ETH_ALEN; j++) {
3432 mac_addr |= mclist->dmi_addr[j];
3436 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3437 &bar0->rmac_addr_data0_mem);
3438 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3439 &bar0->rmac_addr_data1_mem);
3440 val64 = RMAC_ADDR_CMD_MEM_WE |
3441 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3442 RMAC_ADDR_CMD_MEM_OFFSET
3443 (i + MAC_MC_ADDR_START_OFFSET);
3444 writeq(val64, &bar0->rmac_addr_cmd_mem);
3446 /* Wait for command completes */
3447 if (wait_for_cmd_complete(sp)) {
3448 DBG_PRINT(ERR_DBG, "%s: Adding ",
3450 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3458 * s2io_set_mac_addr - Programs the Xframe mac address
3459 * @dev : pointer to the device structure.
3460 * @addr: a uchar pointer to the new mac address which is to be set.
3461 * Description : This procedure will program the Xframe to receive
3462 * frames with new Mac Address
3463 * Return value: SUCCESS on success and an appropriate (-)ve integer
3464 * as defined in errno.h file on failure.
/*
 * s2io_set_mac_addr() - program a new unicast MAC address into the RMAC
 * address filter at offset 0 and wait for the command-memory strobe to
 * complete.  Returns via wait_for_cmd_complete()'s failure path if the
 * hardware does not acknowledge.
 */
3467 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3469 nic_t *sp = dev->priv;
3470 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3471 register u64 val64, mac_addr = 0;
3475 * Set the new MAC address as the new unicast filter and reflect this
3476 * change on the device address registered with the OS. It will be
/* Pack the 6-byte address into a u64 for the data0 register. */
3479 for (i = 0; i < ETH_ALEN; i++) {
3481 mac_addr |= addr[i];
3484 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3485 &bar0->rmac_addr_data0_mem);
3488 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3489 RMAC_ADDR_CMD_MEM_OFFSET(0);
3490 writeq(val64, &bar0->rmac_addr_cmd_mem);
3491 /* Wait till command completes */
3492 if (wait_for_cmd_complete(sp)) {
3493 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3501 * s2io_ethtool_sset - Sets different link parameters.
3502 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
3503 * @info: pointer to the structure with parameters given by ethtool to set
3506 * The function sets different link parameters provided by the user onto
/*
 * s2io_ethtool_sset() - ethtool set_settings.  The hardware only supports
 * 10 Gb/s full duplex with autoneg off, so any other request is rejected;
 * otherwise the device is bounced (close/open) to apply the settings.
 * NOTE(review): the reject-return and reopen lines are missing from this
 * extract.
 */
3512 static int s2io_ethtool_sset(struct net_device *dev,
3513 struct ethtool_cmd *info)
3515 nic_t *sp = dev->priv;
3516 if ((info->autoneg == AUTONEG_ENABLE) ||
3517 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3520 s2io_close(sp->dev);
3528 * s2io_ethtool_gset - Return link specific information.
3529 * @sp : private member of the device structure, pointer to the
3530 * s2io_nic structure.
3531 * @info : pointer to the structure with parameters given by ethtool
3532 * to return link information.
3534 * Returns link specific information like speed, duplex etc.. to ethtool.
3536 * return 0 on success.
/*
 * s2io_ethtool_gset() - ethtool get_settings.  Reports the fixed
 * 10GbE fibre capabilities; speed/duplex are filled in only while the
 * carrier is up.  Autonegotiation is always disabled.
 */
3539 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3541 nic_t *sp = dev->priv;
3542 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3543 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3544 info->port = PORT_FIBRE;
3545 /* info->transceiver?? TODO */
3547 if (netif_carrier_ok(sp->dev)) {
3548 info->speed = 10000;
3549 info->duplex = DUPLEX_FULL;
3555 info->autoneg = AUTONEG_DISABLE;
3560 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3561 * @sp : private member of the device structure, which is a pointer to the
3562 * s2io_nic structure.
3563 * @info : pointer to the structure with parameters given by ethtool to
3564 * return driver information.
3566 * Returns driver specific information like name, version etc.. to ethtool.
/*
 * s2io_ethtool_gdrvinfo() - ethtool get_drvinfo.  Fills in driver name,
 * version, bus info and the sizes of the register/EEPROM dumps and the
 * self-test/statistics tables.
 */
3571 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3572 struct ethtool_drvinfo *info)
3574 nic_t *sp = dev->priv;
3576 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3577 strncpy(info->version, s2io_driver_version,
3578 sizeof(s2io_driver_version));
/* No separate firmware image, so fw_version is left empty. */
3579 strncpy(info->fw_version, "", 32);
3580 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3581 info->regdump_len = XENA_REG_SPACE;
3582 info->eedump_len = XENA_EEPROM_SPACE;
3583 info->testinfo_len = S2IO_TEST_LEN;
3584 info->n_stats = S2IO_STAT_LEN;
3588 * s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
3589 * @sp: private member of the device structure, which is a pointer to the
3590 * s2io_nic structure.
3591 * @regs : pointer to the structure with parameters given by ethtool for
3592 * dumping the registers.
3593 * @reg_space: The input argument into which all the registers are dumped.
3595 * Dumps the entire register space of xFrame NIC into the user given
3601 static void s2io_ethtool_gregs(struct net_device *dev,
3602 struct ethtool_regs *regs, void *space)
3606 u8 *reg_space = (u8 *) space;
3607 nic_t *sp = dev->priv;
3609 regs->len = XENA_REG_SPACE;
3610 regs->version = sp->pdev->subsystem_device;
3612 for (i = 0; i < regs->len; i += 8) {
3613 reg = readq(sp->bar0 + i);
3614 memcpy((reg_space + i), ®, 8);
3619 * s2io_phy_id - timer function that alternates adapter LED.
3620 * @data : address of the private member of the device structure, which
3621 * is a pointer to the s2io_nic structure, provided as an u32.
3622 * Description: This is actually the timer function that alternates the
3623 * adapter LED bit of the adapter control bit to set/reset every time on
3624 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
3625 * once every second.
3627 static void s2io_phy_id(unsigned long data)
3629 nic_t *sp = (nic_t *) data;
3630 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3634 subid = sp->pdev->subsystem_device;
3635 if ((sp->device_type == XFRAME_II_DEVICE) ||
3636 ((subid & 0xFF) >= 0x07)) {
3637 val64 = readq(&bar0->gpio_control);
3638 val64 ^= GPIO_CTRL_GPIO_0;
3639 writeq(val64, &bar0->gpio_control);
3641 val64 = readq(&bar0->adapter_control);
3642 val64 ^= ADAPTER_LED_ON;
3643 writeq(val64, &bar0->adapter_control);
3646 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3650 * s2io_ethtool_idnic - To physically identify the nic on the system.
3651 * @sp : private member of the device structure, which is a pointer to the
3652 * s2io_nic structure.
3653 * @id : pointer to the structure with identification parameters given by
3655 * Description: Used to physically identify the NIC on the system.
3656 * The Link LED will blink for a time specified by the user for
3658 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3659 * identification is possible only if it's link is up.
3661 * int , returns 0 on success
3664 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3666 u64 val64 = 0, last_gpio_ctrl_val;
3667 nic_t *sp = dev->priv;
3668 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3671 subid = sp->pdev->subsystem_device;
3672 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3673 if ((sp->device_type == XFRAME_I_DEVICE) &&
3674 ((subid & 0xFF) < 0x07)) {
3675 val64 = readq(&bar0->adapter_control);
3676 if (!(val64 & ADAPTER_CNTL_EN)) {
3678 "Adapter Link down, cannot blink LED\n");
3682 if (sp->id_timer.function == NULL) {
3683 init_timer(&sp->id_timer);
3684 sp->id_timer.function = s2io_phy_id;
3685 sp->id_timer.data = (unsigned long) sp;
3687 mod_timer(&sp->id_timer, jiffies);
3689 msleep_interruptible(data * HZ);
3691 msleep_interruptible(MAX_FLICKER_TIME);
3692 del_timer_sync(&sp->id_timer);
3694 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
3695 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3696 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3703 * s2io_ethtool_getpause_data - Pause frame generation and reception.
3704 * @sp : private member of the device structure, which is a pointer to the
3705 * s2io_nic structure.
3706 * @ep : pointer to the structure with pause parameters given by ethtool.
3708 * Returns the Pause frame generation and reception capability of the NIC.
3712 static void s2io_ethtool_getpause_data(struct net_device *dev,
3713 struct ethtool_pauseparam *ep)
3716 nic_t *sp = dev->priv;
3717 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3719 val64 = readq(&bar0->rmac_pause_cfg);
3720 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3721 ep->tx_pause = TRUE;
3722 if (val64 & RMAC_PAUSE_RX_ENABLE)
3723 ep->rx_pause = TRUE;
3724 ep->autoneg = FALSE;
3728 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3729 * @sp : private member of the device structure, which is a pointer to the
3730 * s2io_nic structure.
3731 * @ep : pointer to the structure with pause parameters given by ethtool.
3733 * It can be used to set or reset Pause frame generation or reception
3734 * support of the NIC.
3736 * int, returns 0 on Success
3739 static int s2io_ethtool_setpause_data(struct net_device *dev,
3740 struct ethtool_pauseparam *ep)
3743 nic_t *sp = dev->priv;
3744 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3746 val64 = readq(&bar0->rmac_pause_cfg);
3748 val64 |= RMAC_PAUSE_GEN_ENABLE;
3750 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3752 val64 |= RMAC_PAUSE_RX_ENABLE;
3754 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3755 writeq(val64, &bar0->rmac_pause_cfg);
3760 * read_eeprom - reads 4 bytes of data from user given offset.
3761 * @sp : private member of the device structure, which is a pointer to the
3762 * s2io_nic structure.
3763 * @off : offset at which the data must be written
3764 * @data : Its an output parameter where the data read at the given
3767 * Will read 4 bytes of data from the user given offset and return the
3769 * NOTE: Will allow to read only part of the EEPROM visible through the
3772 * -1 on failure and 0 on success.
3775 #define S2IO_DEV_ID 5
3776 static int read_eeprom(nic_t * sp, int off, u32 * data)
3781 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3783 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3784 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3785 I2C_CONTROL_CNTL_START;
3786 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3788 while (exit_cnt < 5) {
3789 val64 = readq(&bar0->i2c_control);
3790 if (I2C_CONTROL_CNTL_END(val64)) {
3791 *data = I2C_CONTROL_GET_DATA(val64);
3803 * write_eeprom - actually writes the relevant part of the data value.
3804 * @sp : private member of the device structure, which is a pointer to the
3805 * s2io_nic structure.
3806 * @off : offset at which the data must be written
3807 * @data : The data that is to be written
3808 * @cnt : Number of bytes of the data that are actually to be written into
3809 * the Eeprom. (max of 3)
3811 * Actually writes the relevant part of the data value into the Eeprom
3812 * through the I2C bus.
3814 * 0 on success, -1 on failure.
3817 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3819 int exit_cnt = 0, ret = -1;
3821 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3823 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3824 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3825 I2C_CONTROL_CNTL_START;
3826 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3828 while (exit_cnt < 5) {
3829 val64 = readq(&bar0->i2c_control);
3830 if (I2C_CONTROL_CNTL_END(val64)) {
3831 if (!(val64 & I2C_CONTROL_NACK))
3843 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
3844 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3845 * @eeprom : pointer to the user level structure provided by ethtool,
3846 * containing all relevant information.
3847 * @data_buf : user defined value to be written into Eeprom.
3848 * Description: Reads the values stored in the Eeprom at given offset
3849 * for a given length. Stores these values int the input argument data
3850 * buffer 'data_buf' and returns these to the caller (ethtool.)
3855 static int s2io_ethtool_geeprom(struct net_device *dev,
3856 struct ethtool_eeprom *eeprom, u8 * data_buf)
3859 nic_t *sp = dev->priv;
3861 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3863 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3864 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3866 for (i = 0; i < eeprom->len; i += 4) {
3867 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3868 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3872 memcpy((data_buf + i), &valid, 4);
3878 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3879 * @sp : private member of the device structure, which is a pointer to the
3880 * s2io_nic structure.
3881 * @eeprom : pointer to the user level structure provided by ethtool,
3882 * containing all relevant information.
3883 * @data_buf ; user defined value to be written into Eeprom.
3885 * Tries to write the user provided value in the Eeprom, at the offset
3886 * given by the user.
3888 * 0 on success, -EFAULT on failure.
3891 static int s2io_ethtool_seeprom(struct net_device *dev,
3892 struct ethtool_eeprom *eeprom,
3895 int len = eeprom->len, cnt = 0;
3896 u32 valid = 0, data;
3897 nic_t *sp = dev->priv;
3899 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3901 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3902 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
3908 data = (u32) data_buf[cnt] & 0x000000FF;
3910 valid = (u32) (data << 24);
3914 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3916 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3918 "write into the specified offset\n");
3929 * s2io_register_test - reads and writes into all clock domains.
3930 * @sp : private member of the device structure, which is a pointer to the
3931 * s2io_nic structure.
3932 * @data : variable that returns the result of each of the test conducted b
3935 * Read and write into all clock domains. The NIC has 3 clock domains,
3936 * see that registers in all the three regions are accessible.
3941 static int s2io_register_test(nic_t * sp, uint64_t * data)
3943 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3947 val64 = readq(&bar0->pif_rd_swapper_fb);
3948 if (val64 != 0x123456789abcdefULL) {
3950 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3953 val64 = readq(&bar0->rmac_pause_cfg);
3954 if (val64 != 0xc000ffff00000000ULL) {
3956 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3959 val64 = readq(&bar0->rx_queue_cfg);
3960 if (val64 != 0x0808080808080808ULL) {
3962 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3965 val64 = readq(&bar0->xgxs_efifo_cfg);
3966 if (val64 != 0x000000001923141EULL) {
3968 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
3971 val64 = 0x5A5A5A5A5A5A5A5AULL;
3972 writeq(val64, &bar0->xmsi_data);
3973 val64 = readq(&bar0->xmsi_data);
3974 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3976 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3979 val64 = 0xA5A5A5A5A5A5A5A5ULL;
3980 writeq(val64, &bar0->xmsi_data);
3981 val64 = readq(&bar0->xmsi_data);
3982 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3984 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3992 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
3993 * @sp : private member of the device structure, which is a pointer to the
3994 * s2io_nic structure.
3995 * @data:variable that returns the result of each of the test conducted by
3998 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
4004 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
4009 /* Test Write Error at offset 0 */
4010 if (!write_eeprom(sp, 0, 0, 3))
4013 /* Test Write at offset 4f0 */
4014 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
4016 if (read_eeprom(sp, 0x4F0, &ret_data))
4019 if (ret_data != 0x01234567)
4022 /* Reset the EEPROM data go FFFF */
4023 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
4025 /* Test Write Request Error at offset 0x7c */
4026 if (!write_eeprom(sp, 0x07C, 0, 3))
4029 /* Test Write Request at offset 0x7fc */
4030 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
4032 if (read_eeprom(sp, 0x7FC, &ret_data))
4035 if (ret_data != 0x01234567)
4038 /* Reset the EEPROM data go FFFF */
4039 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
4041 /* Test Write Error at offset 0x80 */
4042 if (!write_eeprom(sp, 0x080, 0, 3))
4045 /* Test Write Error at offset 0xfc */
4046 if (!write_eeprom(sp, 0x0FC, 0, 3))
4049 /* Test Write Error at offset 0x100 */
4050 if (!write_eeprom(sp, 0x100, 0, 3))
4053 /* Test Write Error at offset 4ec */
4054 if (!write_eeprom(sp, 0x4EC, 0, 3))
4062 * s2io_bist_test - invokes the MemBist test of the card .
4063 * @sp : private member of the device structure, which is a pointer to the
4064 * s2io_nic structure.
4065 * @data:variable that returns the result of each of the test conducted by
4068 * This invokes the MemBist test of the card. We give around
4069 * 2 secs time for the Test to complete. If it's still not complete
4070 * within this period, we consider that the test failed.
4072 * 0 on success and -1 on failure.
4075 static int s2io_bist_test(nic_t * sp, uint64_t * data)
4078 int cnt = 0, ret = -1;
4080 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4081 bist |= PCI_BIST_START;
4082 pci_write_config_word(sp->pdev, PCI_BIST, bist);
4085 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4086 if (!(bist & PCI_BIST_START)) {
4087 *data = (bist & PCI_BIST_CODE_MASK);
4099 * s2io_link_test - verifies the link state of the nic
4100 * @sp ; private member of the device structure, which is a pointer to the
4101 * s2io_nic structure.
4102 * @data: variable that returns the result of each of the test conducted by
4105 * The function verifies the link state of the NIC and updates the input
4106 * argument 'data' appropriately.
4111 static int s2io_link_test(nic_t * sp, uint64_t * data)
4113 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4116 val64 = readq(&bar0->adapter_status);
4117 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
4124 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
4125 * @sp - private member of the device structure, which is a pointer to the
4126 * s2io_nic structure.
4127 * @data - variable that returns the result of each of the test
4128 * conducted by the driver.
4130 * This is one of the offline test that tests the read and write
4131 * access to the RldRam chip on the NIC.
/* Offline test of the RLDRAM chip: drives three 64-bit data patterns
 * (inverted on the second iteration) through the mc_rldram test
 * registers, then polls for write- and read-phase completion.
 * NOTE(review): this listing is gapped — declarations, loop-closing
 * lines and the final *data/return are not visible here. */
4136 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4138 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
4140 	int cnt, iteration = 0, test_pass = 0;
/* ECC reporting is disabled while test patterns are driven. */
4142 	val64 = readq(&bar0->adapter_control);
4143 	val64 &= ~ADAPTER_ECC_EN;
4144 	writeq(val64, &bar0->adapter_control);
4146 	val64 = readq(&bar0->mc_rldram_test_ctrl);
4147 	val64 |= MC_RLDRAM_TEST_MODE;
4148 	writeq(val64, &bar0->mc_rldram_test_ctrl);
4150 	val64 = readq(&bar0->mc_rldram_mrs);
4151 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
4152 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4154 	val64 |= MC_RLDRAM_MRS_ENABLE;
4155 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
/* Two passes; the second pass inverts the top 48 bits of each pattern. */
4157 	while (iteration < 2) {
4158 		val64 = 0x55555555aaaa0000ULL;
4159 		if (iteration == 1) {
4160 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
4162 		writeq(val64, &bar0->mc_rldram_test_d0);
4164 		val64 = 0xaaaa5a5555550000ULL;
4165 		if (iteration == 1) {
4166 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
4168 		writeq(val64, &bar0->mc_rldram_test_d1);
4170 		val64 = 0x55aaaaaaaa5a0000ULL;
4171 		if (iteration == 1) {
4172 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
4174 		writeq(val64, &bar0->mc_rldram_test_d2);
4176 		val64 = (u64) (0x0000003fffff0000ULL);
4177 		writeq(val64, &bar0->mc_rldram_test_add);
/* Write phase: arm test mode, then start the write pass. */
4180 		val64 = MC_RLDRAM_TEST_MODE;
4181 		writeq(val64, &bar0->mc_rldram_test_ctrl);
4184 		    MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
4186 		writeq(val64, &bar0->mc_rldram_test_ctrl);
4188 		for (cnt = 0; cnt < 5; cnt++) {
4189 			val64 = readq(&bar0->mc_rldram_test_ctrl);
4190 			if (val64 & MC_RLDRAM_TEST_DONE)
/* Read phase: re-arm test mode, then start the read-back pass. */
4198 		val64 = MC_RLDRAM_TEST_MODE;
4199 		writeq(val64, &bar0->mc_rldram_test_ctrl);
4201 		val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
4202 		writeq(val64, &bar0->mc_rldram_test_ctrl);
4204 		for (cnt = 0; cnt < 5; cnt++) {
4205 			val64 = readq(&bar0->mc_rldram_test_ctrl);
4206 			if (val64 & MC_RLDRAM_TEST_DONE)
/* Pass/fail is reported by the TEST_PASS bit after the read phase. */
4214 		val64 = readq(&bar0->mc_rldram_test_ctrl);
4215 		if (val64 & MC_RLDRAM_TEST_PASS)
4230 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
4231 * @sp : private member of the device structure, which is a pointer to the
4232 * s2io_nic structure.
4233 * @ethtest : pointer to a ethtool command specific structure that will be
4234 * returned to the user.
4235 * @data : variable that returns the result of each of the test
4236 * conducted by the driver.
4238 * This function conducts 6 tests ( 4 offline and 2 online) to determine
4239 * the health of the card.
/* ethtool self-test entry: offline tests (register, RLDRAM, EEPROM,
 * BIST) require the interface to be taken down first; the link test is
 * run online. Each failing sub-test sets ETH_TEST_FL_FAILED.
 * NOTE(review): the listing is gapped — the interface-restore path and
 * the data[] initialisation for the skipped offline case are not
 * visible here. */
4244 static void s2io_ethtool_test(struct net_device *dev,
4245 struct ethtool_test *ethtest,
4248 	nic_t *sp = dev->priv;
/* Remember whether the interface was up so it can be restored later. */
4249 	int orig_state = netif_running(sp->dev);
4251 	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4252 		/* Offline Tests. */
4254 			s2io_close(sp->dev);
4256 		if (s2io_register_test(sp, &data[0]))
4257 			ethtest->flags |= ETH_TEST_FL_FAILED;
4261 		if (s2io_rldram_test(sp, &data[3]))
4262 			ethtest->flags |= ETH_TEST_FL_FAILED;
4266 		if (s2io_eeprom_test(sp, &data[1]))
4267 			ethtest->flags |= ETH_TEST_FL_FAILED;
4269 		if (s2io_bist_test(sp, &data[4]))
4270 			ethtest->flags |= ETH_TEST_FL_FAILED;
/* Online path: offline tests cannot run while the interface is up. */
4280 				  "%s: is not up, cannot run test\n",
4289 		if (s2io_link_test(sp, &data[2]))
4290 			ethtest->flags |= ETH_TEST_FL_FAILED;
/* ethtool statistics dump: refreshes the DMA'd hardware statistics
 * block, then copies each counter into tmp_stats[] in the order of
 * ethtool_stats_keys. 32-bit counters with a separate overflow word
 * are combined into a single 64-bit value as (oflow << 32) | low.
 * NOTE(review): the listing is gapped — several "tmp_stats[i++] ="
 * assignment prefixes and the declarations of i/tmp_stats are not
 * visible here. */
4299 static void s2io_get_ethtool_stats(struct net_device *dev,
4300 struct ethtool_stats *estats,
4304 	nic_t *sp = dev->priv;
4305 	StatInfo_t *stat_info = sp->mac_control.stats_info;
/* Pull a fresh statistics snapshot from the hardware. */
4307 	s2io_updt_stats(sp);
/* Transmit-side (TMAC) counters. */
4309 	    (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
4310 	    le32_to_cpu(stat_info->tmac_frms);
4312 	    (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
4313 	    le32_to_cpu(stat_info->tmac_data_octets);
4314 	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
4316 	    (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
4317 	    le32_to_cpu(stat_info->tmac_mcst_frms);
4319 	    (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
4320 	    le32_to_cpu(stat_info->tmac_bcst_frms);
4321 	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
4323 	    (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
4324 	    le32_to_cpu(stat_info->tmac_any_err_frms);
4325 	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
4327 	    (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
4328 	    le32_to_cpu(stat_info->tmac_vld_ip);
4330 	    (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
4331 	    le32_to_cpu(stat_info->tmac_drop_ip);
4333 	    (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
4334 	    le32_to_cpu(stat_info->tmac_icmp);
4336 	    (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
4337 	    le32_to_cpu(stat_info->tmac_rst_tcp);
4338 	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
4339 	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
4340 	    le32_to_cpu(stat_info->tmac_udp);
/* Receive-side (RMAC) counters. */
4342 	    (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
4343 	    le32_to_cpu(stat_info->rmac_vld_frms);
4345 	    (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
4346 	    le32_to_cpu(stat_info->rmac_data_octets);
4347 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4348 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
4350 	    (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
4351 	    le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4353 	    (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
4354 	    le32_to_cpu(stat_info->rmac_vld_bcst_frms);
4355 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4356 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4357 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
4359 	    (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
4360 	    le32_to_cpu(stat_info->rmac_discarded_frms);
4362 	    (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
4363 	    le32_to_cpu(stat_info->rmac_usized_frms);
4365 	    (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
4366 	    le32_to_cpu(stat_info->rmac_osized_frms);
4368 	    (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
4369 	    le32_to_cpu(stat_info->rmac_frag_frms);
4371 	    (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
4372 	    le32_to_cpu(stat_info->rmac_jabber_frms);
4373 	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
4374 	    le32_to_cpu(stat_info->rmac_ip);
4375 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4376 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
4377 	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
4378 	    le32_to_cpu(stat_info->rmac_drop_ip);
4379 	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
4380 	    le32_to_cpu(stat_info->rmac_icmp);
4381 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
4382 	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
4383 	    le32_to_cpu(stat_info->rmac_udp);
4385 	    (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
4386 	    le32_to_cpu(stat_info->rmac_err_drp_udp);
4388 	    (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
4389 	    le32_to_cpu(stat_info->rmac_pause_cnt);
4391 	    (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
4392 	    le32_to_cpu(stat_info->rmac_accepted_ip);
4393 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
/* Driver-maintained software counters (host-endian, no conversion). */
4395 	tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4396 	tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
4399 int s2io_ethtool_get_regs_len(struct net_device *dev)
4401 return (XENA_REG_SPACE);
4405 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
4407 nic_t *sp = dev->priv;
4409 return (sp->rx_csum);
4411 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
4413 nic_t *sp = dev->priv;
4422 int s2io_get_eeprom_len(struct net_device *dev)
4424 return (XENA_EEPROM_SPACE);
4427 int s2io_ethtool_self_test_count(struct net_device *dev)
4429 return (S2IO_TEST_LEN);
4431 void s2io_ethtool_get_strings(struct net_device *dev,
4432 u32 stringset, u8 * data)
4434 switch (stringset) {
4436 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4439 memcpy(data, ðtool_stats_keys,
4440 sizeof(ethtool_stats_keys));
4443 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4445 return (S2IO_STAT_LEN);
4448 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4451 dev->features |= NETIF_F_IP_CSUM;
4453 dev->features &= ~NETIF_F_IP_CSUM;
/* ethtool operations table wired into the netdev; tx-csum/sg/tso get/set
 * use the generic ethtool_op_* helpers, everything else is driver-local. */
4459 static struct ethtool_ops netdev_ethtool_ops = {
4460 	.get_settings = s2io_ethtool_gset,
4461 	.set_settings = s2io_ethtool_sset,
4462 	.get_drvinfo = s2io_ethtool_gdrvinfo,
4463 	.get_regs_len = s2io_ethtool_get_regs_len,
4464 	.get_regs = s2io_ethtool_gregs,
4465 	.get_link = ethtool_op_get_link,
4466 	.get_eeprom_len = s2io_get_eeprom_len,
4467 	.get_eeprom = s2io_ethtool_geeprom,
4468 	.set_eeprom = s2io_ethtool_seeprom,
4469 	.get_pauseparam = s2io_ethtool_getpause_data,
4470 	.set_pauseparam = s2io_ethtool_setpause_data,
4471 	.get_rx_csum = s2io_ethtool_get_rx_csum,
4472 	.set_rx_csum = s2io_ethtool_set_rx_csum,
4473 	.get_tx_csum = ethtool_op_get_tx_csum,
4474 	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
4475 	.get_sg = ethtool_op_get_sg,
4476 	.set_sg = ethtool_op_set_sg,
4478 	.get_tso = ethtool_op_get_tso,
4479 	.set_tso = ethtool_op_set_tso,
4481 	.self_test_count = s2io_ethtool_self_test_count,
4482 	.self_test = s2io_ethtool_test,
4483 	.get_strings = s2io_ethtool_get_strings,
4484 	.phys_id = s2io_ethtool_idnic,
4485 	.get_stats_count = s2io_ethtool_get_stats_count,
4486 	.get_ethtool_stats = s2io_get_ethtool_stats
4490 * s2io_ioctl - Entry point for the Ioctl
4491 * @dev : Device pointer.
4492 * @ifr : An IOCTL specific structure, that can contain a pointer to
4493 * a proprietary structure used to pass information to the driver.
4494 * @cmd : This is used to distinguish between the different commands that
4495 * can be passed to the IOCTL functions.
4497 * Currently there are no special functionality supported in IOCTL, hence
4498 * function always return EOPNOTSUPPORTED
4501 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4507 * s2io_change_mtu - entry point to change MTU size for the device.
4508 * @dev : device pointer.
4509 * @new_mtu : the new MTU size for the device.
4510 * Description: A driver entry point to change MTU size for the device.
4511 * Before changing the MTU the device must be stopped.
4513 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4517 int s2io_change_mtu(struct net_device *dev, int new_mtu)
4519 nic_t *sp = dev->priv;
4521 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4522 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4528 if (netif_running(dev)) {
4530 netif_stop_queue(dev);
4531 if (s2io_card_up(sp)) {
4532 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4535 if (netif_queue_stopped(dev))
4536 netif_wake_queue(dev);
4537 } else { /* Device is down */
4538 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4539 u64 val64 = new_mtu;
4541 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4548 * s2io_tasklet - Bottom half of the ISR.
4549 * @dev_adr : address of the device structure in dma_addr_t format.
4551 * This is the tasklet or the bottom half of the ISR. This is
4552 * an extension of the ISR which is scheduled by the scheduler to be run
4553 * when the load on the CPU is low. All low priority tasks of the ISR can
4554 * be pushed into the tasklet. For now the tasklet is used only to
4555 * replenish the Rx buffers in the Rx buffer descriptors.
4560 static void s2io_tasklet(unsigned long dev_addr)
4562 struct net_device *dev = (struct net_device *) dev_addr;
4563 nic_t *sp = dev->priv;
4565 mac_info_t *mac_control;
4566 struct config_param *config;
4568 mac_control = &sp->mac_control;
4569 config = &sp->config;
4571 if (!TASKLET_IN_USE) {
4572 for (i = 0; i < config->rx_ring_num; i++) {
4573 ret = fill_rx_buffers(sp, i);
4574 if (ret == -ENOMEM) {
4575 DBG_PRINT(ERR_DBG, "%s: Out of ",
4577 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4579 } else if (ret == -EFILL) {
4581 "%s: Rx Ring %d is full\n",
4586 clear_bit(0, (&sp->tasklet_status));
4591 * s2io_set_link - Set the LInk status
4592 * @data: long pointer to device private structue
4593 * Description: Sets the link status for the adapter
/* Deferred link-state worker: after verifying the adapter is quiescent,
 * enables the adapter/LED (and GPIO 0 on cards with faulty indicators)
 * when the link is up, or tears the link state down otherwise. The
 * link_state bit serializes this against a concurrent card reset.
 * NOTE(review): the listing is gapped — some early-return, delay and
 * brace lines are not visible here. */
4596 static void s2io_set_link(unsigned long data)
4598 	nic_t *nic = (nic_t *) data;
4599 	struct net_device *dev = nic->dev;
4600 	XENA_dev_config_t __iomem *bar0 = nic->bar0;
/* Bit already set means a reset is in progress — do nothing. */
4604 	if (test_and_set_bit(0, &(nic->link_state))) {
4605 		/* The card is being reset, no point doing anything */
4609 	subid = nic->pdev->subsystem_device;
4611 	 * Allow a small delay for the NICs self initiated
4612 	 * cleanup to complete.
4616 	val64 = readq(&bar0->adapter_status);
4617 	if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4618 		if (LINK_IS_UP(val64)) {
4619 			val64 = readq(&bar0->adapter_control);
4620 			val64 |= ADAPTER_CNTL_EN;
4621 			writeq(val64, &bar0->adapter_control);
4622 			if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4624 				val64 = readq(&bar0->gpio_control);
4625 				val64 |= GPIO_CTRL_GPIO_0;
4626 				writeq(val64, &bar0->gpio_control);
4627 				val64 = readq(&bar0->gpio_control);
4629 				val64 |= ADAPTER_LED_ON;
4630 				writeq(val64, &bar0->adapter_control);
/* Re-check: the link may have dropped while we enabled the adapter. */
4632 			val64 = readq(&bar0->adapter_status);
4633 			if (!LINK_IS_UP(val64)) {
4634 				DBG_PRINT(ERR_DBG, "%s:", dev->name);
4635 				DBG_PRINT(ERR_DBG, " Link down");
4636 				DBG_PRINT(ERR_DBG, "after ");
4637 				DBG_PRINT(ERR_DBG, "enabling ");
4638 				DBG_PRINT(ERR_DBG, "device \n");
4640 			if (nic->device_enabled_once == FALSE) {
4641 				nic->device_enabled_once = TRUE;
4643 			s2io_link(nic, LINK_UP);
/* Link down: drop GPIO 0 on faulty-indicator cards, then report down. */
4645 			if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4647 				val64 = readq(&bar0->gpio_control);
4648 				val64 &= ~GPIO_CTRL_GPIO_0;
4649 				writeq(val64, &bar0->gpio_control);
4650 				val64 = readq(&bar0->gpio_control);
4652 			s2io_link(nic, LINK_DOWN);
4654 	} else { /* NIC is not Quiescent. */
4655 		DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4656 		DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4657 		netif_stop_queue(dev);
/* Release the serialization bit taken at entry. */
4659 	clear_bit(0, &(nic->link_state));
/* Takes the card down: stops the alarm timer, waits out a concurrent
 * s2io_set_link, disables traffic, kills the tasklet, verifies
 * quiescence (then resets), waits for in-flight ISRs, and frees all Tx
 * and Rx buffers under their respective locks.
 * NOTE(review): the listing is gapped — the traffic-stop call, the
 * reset call and several wait-loop lines are not visible here. */
4662 static void s2io_card_down(nic_t * sp)
4665 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
4666 	unsigned long flags;
4667 	register u64 val64 = 0;
4669 	del_timer_sync(&sp->alarm_timer);
4670 	/* If s2io_set_link task is executing, wait till it completes. */
4671 	while (test_and_set_bit(0, &(sp->link_state))) {
4674 	atomic_set(&sp->card_state, CARD_DOWN);
4676 	/* disable Tx and Rx traffic on the NIC */
4680 	tasklet_kill(&sp->task);
4682 	/* Check if the device is Quiescent and then Reset the NIC */
4684 		val64 = readq(&bar0->adapter_status);
4685 		if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4693 				  "s2io_close:Device not Quiescent ");
4694 			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
4695 				  (unsigned long long) val64);
4701 	/* Waiting till all Interrupt handlers are complete */
4705 		if (!atomic_read(&sp->isr_cnt))
4710 	spin_lock_irqsave(&sp->tx_lock, flags);
4711 	/* Free all Tx buffers */
4712 	free_tx_buffers(sp);
4713 	spin_unlock_irqrestore(&sp->tx_lock, flags);
4715 	/* Free all Rx buffers */
4716 	spin_lock_irqsave(&sp->rx_lock, flags);
4717 	free_rx_buffers(sp);
4718 	spin_unlock_irqrestore(&sp->rx_lock, flags);
/* Allow s2io_set_link to run again. */
4720 	clear_bit(0, &(sp->link_state));
/* Brings the card up: programs the H/W registers, pre-fills every Rx
 * ring, sets the receive mode, initializes the replenish tasklet,
 * starts the NIC and arms the alarm timer. On any failure the Rx
 * buffers allocated so far are released.
 * NOTE(review): the listing is gapped — the error-return codes and the
 * final success return are not visible here. */
4723 static int s2io_card_up(nic_t * sp)
4726 	mac_info_t *mac_control;
4727 	struct config_param *config;
4728 	struct net_device *dev = (struct net_device *) sp->dev;
4730 	/* Initialize the H/W I/O registers */
4731 	if (init_nic(sp) != 0) {
4732 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4738 	 * Initializing the Rx buffers. For now we are considering only 1
4739 	 * Rx ring and initializing buffers into 30 Rx blocks
4741 	mac_control = &sp->mac_control;
4742 	config = &sp->config;
4744 	for (i = 0; i < config->rx_ring_num; i++) {
4745 		if ((ret = fill_rx_buffers(sp, i))) {
4746 			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4749 			free_rx_buffers(sp);
4752 		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4753 			  atomic_read(&sp->rx_bufs_left[i]));
4756 	/* Setting its receive mode */
4757 	s2io_set_multicast(dev);
4759 	/* Enable tasklet for the device */
4760 	tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4762 	/* Enable Rx Traffic and interrupts on the NIC */
4763 	if (start_nic(sp)) {
4764 		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4765 		tasklet_kill(&sp->task);
4767 		free_irq(dev->irq, dev);
4768 		free_rx_buffers(sp);
/* Arm the periodic alarm handler (every HZ/2). */
4772 	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
4774 	atomic_set(&sp->card_state, CARD_UP);
4779 * s2io_restart_nic - Resets the NIC.
4780 * @data : long pointer to the device private structure
4782 * This function is scheduled to be run by the s2io_tx_watchdog
4783 * function after 0.5 secs to reset the NIC. The idea is to reduce
4784 * the run time of the watch dog routine which is run holding a
4788 static void s2io_restart_nic(unsigned long data)
4790 struct net_device *dev = (struct net_device *) data;
4791 nic_t *sp = dev->priv;
4794 if (s2io_card_up(sp)) {
4795 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4798 netif_wake_queue(dev);
4799 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4805 * s2io_tx_watchdog - Watchdog for transmit side.
4806 * @dev : Pointer to net device structure
4808 * This function is triggered if the Tx Queue is stopped
4809 * for a pre-defined amount of time when the Interface is still up.
4810 * If the Interface is jammed in such a situation, the hardware is
4811 * reset (by s2io_close) and restarted again (by s2io_open) to
4812 * overcome any problem that might have been caused in the hardware.
4817 static void s2io_tx_watchdog(struct net_device *dev)
4819 nic_t *sp = dev->priv;
4821 if (netif_carrier_ok(dev)) {
4822 schedule_work(&sp->rst_timer_task);
4827 * rx_osm_handler - To perform some OS related operations on SKB.
4828 * @sp: private member of the device structure,pointer to s2io_nic structure.
4829 * @skb : the socket buffer pointer.
4830 * @len : length of the packet
4831 * @cksum : FCS checksum of the frame.
4832 * @ring_no : the ring from which this RxD was extracted.
4834 * This function is called by the Tx interrupt service routine to perform
4835 * some OS related operations on the SKB before passing it to the upper
4836 * layers. It mainly checks if the checksum is OK, if so adds it to the
4837 * SKBs cksum variable, increments the Rx packet count and passes the SKB
4838 * to the upper layer. If the checksum is wrong, it increments the Rx
4839 * packet error count, frees the SKB and returns error.
4841 * SUCCESS on success and -1 on failure.
4843 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
4845 nic_t *sp = ring_data->nic;
4846 struct net_device *dev = (struct net_device *) sp->dev;
4847 struct sk_buff *skb = (struct sk_buff *)
4848 ((unsigned long) rxdp->Host_Control);
4849 int ring_no = ring_data->ring_no;
4850 u16 l3_csum, l4_csum;
4851 #ifdef CONFIG_2BUFF_MODE
4852 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4853 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4854 int get_block = ring_data->rx_curr_get_info.block_index;
4855 int get_off = ring_data->rx_curr_get_info.offset;
4856 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
4857 unsigned char *buff;
4859 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);;
4862 if (rxdp->Control_1 & RXD_T_CODE) {
4863 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4864 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4867 sp->stats.rx_crc_errors++;
4868 atomic_dec(&sp->rx_bufs_left[ring_no]);
4869 rxdp->Host_Control = 0;
4873 /* Updating statistics */
4874 rxdp->Host_Control = 0;
4876 sp->stats.rx_packets++;
4877 #ifndef CONFIG_2BUFF_MODE
4878 sp->stats.rx_bytes += len;
4880 sp->stats.rx_bytes += buf0_len + buf2_len;
4883 #ifndef CONFIG_2BUFF_MODE
4886 buff = skb_push(skb, buf0_len);
4887 memcpy(buff, ba->ba_0, buf0_len);
4888 skb_put(skb, buf2_len);
4891 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
4893 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4894 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4895 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4897 * NIC verifies if the Checksum of the received
4898 * frame is Ok or not and accordingly returns
4899 * a flag in the RxD.
4901 skb->ip_summed = CHECKSUM_UNNECESSARY;
4904 * Packet with erroneous checksum, let the
4905 * upper layers deal with it.
4907 skb->ip_summed = CHECKSUM_NONE;
4910 skb->ip_summed = CHECKSUM_NONE;
4913 skb->protocol = eth_type_trans(skb, dev);
4914 #ifdef CONFIG_S2IO_NAPI
4915 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
4916 /* Queueing the vlan frame to the upper layer */
4917 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
4918 RXD_GET_VLAN_TAG(rxdp->Control_2));
4920 netif_receive_skb(skb);
4923 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
4924 /* Queueing the vlan frame to the upper layer */
4925 vlan_hwaccel_rx(skb, sp->vlgrp,
4926 RXD_GET_VLAN_TAG(rxdp->Control_2));
4931 dev->last_rx = jiffies;
4932 atomic_dec(&sp->rx_bufs_left[ring_no]);
4937 * s2io_link - stops/starts the Tx queue.
4938 * @sp : private member of the device structure, which is a pointer to the
4939 * s2io_nic structure.
4940 * @link : indicates whether link is UP/DOWN.
4942 * This function stops/starts the Tx queue depending on whether the link
4943 * status of the NIC is down or up. This is called by the Alarm
4944 * interrupt handler whenever a link change interrupt comes up.
4949 void s2io_link(nic_t * sp, int link)
4951 struct net_device *dev = (struct net_device *) sp->dev;
/* Only act on an actual state transition; repeated notifications of the
 * same link state are ignored. */
4953 if (link != sp->last_link_state) {
4954 if (link == LINK_DOWN) {
4955 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4956 netif_carrier_off(dev);
4958 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4959 netif_carrier_on(dev);
/* Remember the state so the next interrupt can detect a change. */
4962 sp->last_link_state = link;
4966 * get_xena_rev_id - to identify revision ID of xena.
4967 * @pdev : PCI Dev structure
4969 * Function to identify the Revision ID of xena.
4971 * returns the revision ID of the device.
4974 int get_xena_rev_id(struct pci_dev *pdev)
/* Read the 8-bit PCI_REVISION_ID register from config space into 'id'.
 * NOTE(review): the return code of pci_read_config_byte() ('ret')
 * appears to be stored but never checked. */
4978 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
4983 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
4984 * @sp : private member of the device structure, which is a pointer to the
4985 * s2io_nic structure.
4987 * This function initializes a few of the PCI and PCI-X configuration registers
4988 * with recommended values.
4993 static void s2io_init_pci(nic_t * sp)
4995 u16 pci_cmd = 0, pcix_cmd = 0;
4997 /* Enable Data Parity Error Recovery in PCI-X command register. */
4998 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* Read-modify-write, then read back to confirm/flush the new setting. */
5000 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5002 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5005 /* Set the PErr Response bit in PCI command register. */
5006 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5007 pci_write_config_word(sp->pdev, PCI_COMMAND,
5008 (pci_cmd | PCI_COMMAND_PARITY));
5009 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5011 /* Forcibly disabling relaxed ordering capability of the card. */
5013 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5015 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* Module metadata and loadable parameters (see the header comment of
 * this file for a description of each tunable). */
5019 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
5020 MODULE_LICENSE("GPL");
5021 module_param(tx_fifo_num, int, 0);
5022 module_param(rx_ring_num, int, 0);
5023 module_param_array(tx_fifo_len, uint, NULL, 0);
5024 module_param_array(rx_ring_sz, uint, NULL, 0);
5025 module_param_array(rts_frm_len, uint, NULL, 0);
/* NOTE(review): the perm argument '1' below is not a valid sysfs mode
 * (should be 0 or an octal mode such as 0444) - confirm intent. */
5026 module_param(use_continuous_tx_intrs, int, 1);
5027 module_param(rmac_pause_time, int, 0);
5028 module_param(mc_pause_threshold_q0q3, int, 0);
5029 module_param(mc_pause_threshold_q4q7, int, 0);
5030 module_param(shared_splits, int, 0);
5031 module_param(tmac_util_period, int, 0);
5032 module_param(rmac_util_period, int, 0);
5033 #ifndef CONFIG_S2IO_NAPI
5034 module_param(indicate_max_pkts, int, 0);
5038 * s2io_init_nic - Initialization of the adapter .
5039 * @pdev : structure containing the PCI related information of the device.
5040 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
5042 * The function initializes an adapter identified by the pci_dev structure.
5043 * All OS related initialization including memory and device structure and
5044 * initialization of the device private variable is done. Also the swapper
5045 * control register is initialized to enable read and write into the I/O
5046 * registers of the device.
5048 * returns 0 on success and negative on failure.
5051 static int __devinit
5052 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5055 struct net_device *dev;
5057 int dma_flag = FALSE;
5058 u32 mac_up, mac_down;
5059 u64 val64 = 0, tmp64 = 0;
5060 XENA_dev_config_t __iomem *bar0 = NULL;
5062 mac_info_t *mac_control;
5063 struct config_param *config;
5066 #ifdef CONFIG_S2IO_NAPI
5067 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
5070 if ((ret = pci_enable_device(pdev))) {
5072 "s2io_init_nic: pci_enable_device failed\n");
/* Prefer 64-bit DMA; fall back to 32-bit, else bail out entirely. */
5076 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
5077 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
5079 if (pci_set_consistent_dma_mask
5080 (pdev, DMA_64BIT_MASK)) {
/* NOTE(review): the backslash-continued string literal below embeds the
 * source indentation whitespace into the printed message. */
5082 "Unable to obtain 64bit DMA for \
5083 consistent allocations\n");
5084 pci_disable_device(pdev);
5087 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
5088 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
5090 pci_disable_device(pdev);
5094 if (pci_request_regions(pdev, s2io_driver_name)) {
/* NOTE(review): trailing ',' instead of ';' on the next line chains the
 * following call via the comma operator - works, but should be ';'. */
5095 DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
5096 pci_disable_device(pdev);
5100 dev = alloc_etherdev(sizeof(nic_t));
5102 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
5103 pci_disable_device(pdev);
5104 pci_release_regions(pdev);
5108 pci_set_master(pdev);
5109 pci_set_drvdata(pdev, dev);
5110 SET_MODULE_OWNER(dev);
5111 SET_NETDEV_DEV(dev, &pdev->dev);
5113 /* Private member variable initialized to s2io NIC structure */
5115 memset(sp, 0, sizeof(nic_t));
5118 sp->high_dma_flag = dma_flag;
5119 sp->device_enabled_once = FALSE;
/* Distinguish Xframe II (Herc) from Xframe I by the PCI device ID. */
5121 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
5122 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
5123 sp->device_type = XFRAME_II_DEVICE;
5125 sp->device_type = XFRAME_I_DEVICE;
5127 /* Initialize some PCI/PCI-X fields of the NIC. */
5131 * Setting the device configuration parameters.
5132 * Most of these parameters can be specified by the user during
5133 * module insertion as they are module loadable parameters. If
5134 * these parameters are not specified during load time, they
5135 * are initialized with default values.
5137 mac_control = &sp->mac_control;
5138 config = &sp->config;
5140 /* Tx side parameters. */
5141 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
5142 config->tx_fifo_num = tx_fifo_num;
5143 for (i = 0; i < MAX_TX_FIFOS; i++) {
5144 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
5145 config->tx_cfg[i].fifo_priority = i;
5148 /* mapping the QoS priority to the configured fifos */
5149 for (i = 0; i < MAX_TX_FIFOS; i++)
5150 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
/* Short FIFOs (< 65 TxDs) force per-list interrupts instead of the
 * utilization-based interrupt type. */
5152 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
5153 for (i = 0; i < config->tx_fifo_num; i++) {
5154 config->tx_cfg[i].f_no_snoop =
5155 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
5156 if (config->tx_cfg[i].fifo_len < 65) {
5157 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
5161 config->max_txds = MAX_SKB_FRAGS;
5163 /* Rx side parameters. */
5164 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
5165 config->rx_ring_num = rx_ring_num;
5166 for (i = 0; i < MAX_RX_RINGS; i++) {
5167 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
5168 (MAX_RXDS_PER_BLOCK + 1);
5169 config->rx_cfg[i].ring_priority = i;
5172 for (i = 0; i < rx_ring_num; i++) {
5173 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
5174 config->rx_cfg[i].f_no_snoop =
5175 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
5178 /* Setting Mac Control parameters */
5179 mac_control->rmac_pause_time = rmac_pause_time;
5180 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
5181 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
5184 /* Initialize Ring buffer parameters. */
5185 for (i = 0; i < config->rx_ring_num; i++)
5186 atomic_set(&sp->rx_bufs_left[i], 0);
5188 /* Initialize the number of ISRs currently running */
5189 atomic_set(&sp->isr_cnt, 0);
5191 /* initialize the shared memory used by the NIC and the host */
5192 if (init_shared_mem(sp)) {
5193 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
5196 goto mem_alloc_failed;
/* Map BAR0 (device config registers) and BAR1 (Tx FIFO doorbells). */
5199 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
5200 pci_resource_len(pdev, 0));
5202 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
5205 goto bar0_remap_failed;
5208 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
5209 pci_resource_len(pdev, 2));
5211 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
5214 goto bar1_remap_failed;
5217 dev->irq = pdev->irq;
5218 dev->base_addr = (unsigned long) sp->bar0;
5220 /* Initializing the BAR1 address as the start of the FIFO pointer. */
5221 for (j = 0; j < MAX_TX_FIFOS; j++) {
5222 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
5223 (sp->bar1 + (j * 0x00020000));
5226 /* Driver entry points */
5227 dev->open = &s2io_open;
5228 dev->stop = &s2io_close;
5229 dev->hard_start_xmit = &s2io_xmit;
5230 dev->get_stats = &s2io_get_stats;
5231 dev->set_multicast_list = &s2io_set_multicast;
5232 dev->do_ioctl = &s2io_ioctl;
5233 dev->change_mtu = &s2io_change_mtu;
5234 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
5235 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5236 dev->vlan_rx_register = s2io_vlan_rx_register;
5237 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
5240 * will use eth_mac_addr() for dev->set_mac_address
5241 * mac address will be set every time dev->open() is called
5243 #if defined(CONFIG_S2IO_NAPI)
5244 dev->poll = s2io_poll;
5248 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
5249 if (sp->high_dma_flag == TRUE)
5250 dev->features |= NETIF_F_HIGHDMA;
5252 dev->features |= NETIF_F_TSO;
5255 dev->tx_timeout = &s2io_tx_watchdog;
5256 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
5257 INIT_WORK(&sp->rst_timer_task,
5258 (void (*)(void *)) s2io_restart_nic, dev);
5259 INIT_WORK(&sp->set_link_task,
5260 (void (*)(void *)) s2io_set_link, sp);
5262 if (!(sp->device_type & XFRAME_II_DEVICE)) {
5263 pci_save_state(sp->pdev);
5266 /* Setting swapper control on the NIC, for proper reset operation */
5267 if (s2io_set_swapper(sp)) {
5268 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
5271 goto set_swap_failed;
5274 /* Verify if the Herc works on the slot its placed into */
5275 if (sp->device_type & XFRAME_II_DEVICE) {
5276 mode = s2io_verify_pci_mode(sp);
5278 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
5279 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
5281 goto set_swap_failed;
5285 /* Not needed for Herc */
5286 if (sp->device_type & XFRAME_I_DEVICE) {
5288 * Fix for all "FFs" MAC address problems observed on
5291 fix_mac_address(sp);
5296 * MAC address initialization.
5297 * For now only one mac address will be read and used.
5300 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5301 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
5302 writeq(val64, &bar0->rmac_addr_cmd_mem);
5303 wait_for_cmd_complete(sp);
5305 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5306 mac_down = (u32) tmp64;
5307 mac_up = (u32) (tmp64 >> 32);
/* NOTE(review): sizeof(ETH_ALEN) is sizeof(int) (typically 4), not
 * ETH_ALEN (6) - this memset clears only part of the MAC buffer.
 * Should be: memset(..., 0, ETH_ALEN). */
5309 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
5311 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
5312 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
5313 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
5314 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
5315 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
5316 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
5318 /* Set the factory defined MAC address initially */
5319 dev->addr_len = ETH_ALEN;
5320 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
5323 * Initialize the tasklet status and link state flags
5324 * and the card state parameter
5326 atomic_set(&(sp->card_state), 0);
5327 sp->tasklet_status = 0;
5330 /* Initialize spinlocks */
5331 spin_lock_init(&sp->tx_lock);
5332 #ifndef CONFIG_S2IO_NAPI
5333 spin_lock_init(&sp->put_lock);
5335 spin_lock_init(&sp->rx_lock);
5338 * SXE-002: Configure link and activity LED to init state
5341 subid = sp->pdev->subsystem_device;
5342 if ((subid & 0xFF) >= 0x07) {
5343 val64 = readq(&bar0->gpio_control);
5344 val64 |= 0x0000800000000000ULL;
5345 writeq(val64, &bar0->gpio_control);
5346 val64 = 0x0411040400000000ULL;
5347 writeq(val64, (void __iomem *) bar0 + 0x2700);
5348 val64 = readq(&bar0->gpio_control);
5351 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
5353 if (register_netdev(dev)) {
5354 DBG_PRINT(ERR_DBG, "Device registration failed\n");
5356 goto register_failed;
5359 if (sp->device_type & XFRAME_II_DEVICE) {
5360 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
5362 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5363 get_xena_rev_id(sp->pdev),
5364 s2io_driver_version);
5365 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5366 sp->def_mac_addr[0].mac_addr[0],
5367 sp->def_mac_addr[0].mac_addr[1],
5368 sp->def_mac_addr[0].mac_addr[2],
5369 sp->def_mac_addr[0].mac_addr[3],
5370 sp->def_mac_addr[0].mac_addr[4],
5371 sp->def_mac_addr[0].mac_addr[5]);
/* NOTE(review): declaration after statements (C90 violation; the kernel
 * of this era built with -std=gnu89 and warns on this).  Also note this
 * failure path jumps to set_swap_failed AFTER register_netdev()
 * succeeded - verify the netdev is unregistered on that path. */
5372 int mode = s2io_print_pci_mode(sp);
5374 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
5376 goto set_swap_failed;
5379 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
5381 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5382 get_xena_rev_id(sp->pdev),
5383 s2io_driver_version);
5384 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5385 sp->def_mac_addr[0].mac_addr[0],
5386 sp->def_mac_addr[0].mac_addr[1],
5387 sp->def_mac_addr[0].mac_addr[2],
5388 sp->def_mac_addr[0].mac_addr[3],
5389 sp->def_mac_addr[0].mac_addr[4],
5390 sp->def_mac_addr[0].mac_addr[5]);
5393 /* Initialize device name */
5394 strcpy(sp->name, dev->name);
5395 if (sp->device_type & XFRAME_II_DEVICE)
5396 strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
5398 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
5401 * Make Link state as off at this point, when the Link change
5402 * interrupt comes the state will be automatically changed to
5405 netif_carrier_off(dev);
/* Common error-unwind tail (goto targets above land here). */
5416 free_shared_mem(sp);
5417 pci_disable_device(pdev);
5418 pci_release_regions(pdev);
5419 pci_set_drvdata(pdev, NULL);
5426 * s2io_rem_nic - Free the PCI device
5427 * @pdev: structure containing the PCI related information of the device.
5428 * Description: This function is called by the Pci subsystem to release a
5429 * PCI device and free up all resource held up by the device. This could
5430 * be in response to a Hot plug event or when the driver is to be removed
5434 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
5436 struct net_device *dev =
5437 (struct net_device *) pci_get_drvdata(pdev);
/* Guard against missing drvdata (should not happen in practice). */
5441 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
/* Tear down in reverse order of s2io_init_nic: netdev first, then the
 * shared DMA memory, then the PCI resources. */
5446 unregister_netdev(dev);
5448 free_shared_mem(sp);
5451 pci_disable_device(pdev);
5452 pci_release_regions(pdev);
5453 pci_set_drvdata(pdev, NULL);
5458 * s2io_starter - Entry point for the driver
5459 * Description: This function is the entry point for the driver. It verifies
5460 * the module loadable parameters and initializes PCI configuration space.
5463 int __init s2io_starter(void)
/* Register the PCI driver; probing of matching devices happens via
 * s2io_init_nic() as listed in s2io_driver. */
5465 return pci_module_init(&s2io_driver);
5469 * s2io_closer - Cleanup routine for the driver
5470 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
5473 void s2io_closer(void)
5475 pci_unregister_driver(&s2io_driver);
5476 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Module entry/exit hooks. */
5479 module_init(s2io_starter);
5480 module_exit(s2io_closer);