1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regaring the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explaination of all the variables.
30 * rx_ring_num : This can be used to program the number of receive rings used
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2 and 3.
36 * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 1(MSI), 2(MSI_X). Default value is '0(INTA)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
45 * napi: This parameter used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
53 ************************************************************************/
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
74 #include <linux/tcp.h>
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
80 #include <asm/div64.h>
85 #include "s2io-regs.h"
87 #define DRV_VERSION "2.0.16.1"
89 /* S2io Driver name & version. */
90 static char s2io_driver_name[] = "Neterion";
91 static char s2io_driver_version[] = DRV_VERSION;
93 static int rxd_size[4] = {32,48,48,64};
94 static int rxd_count[4] = {127,85,85,63};
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
100 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
107 * Cards with following subsystem_id have a link state indication
108 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
109 * macro below identifies these cards given the subsystem_id.
111 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
112 (dev_type == XFRAME_I_DEVICE) ? \
113 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
114 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
116 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
117 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
118 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
123 struct mac_info *mac_control;
125 mac_control = &sp->mac_control;
126 if (rxb_size <= rxd_count[sp->rxd_mode])
128 else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
133 /* Ethtool related variables and Macros. */
134 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
135 "Register test\t(offline)",
136 "Eeprom test\t(offline)",
137 "Link test\t(online)",
138 "RLDRAM test\t(offline)",
139 "BIST Test\t(offline)"
142 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
144 {"tmac_data_octets"},
148 {"tmac_pause_ctrl_frms"},
152 {"tmac_any_err_frms"},
153 {"tmac_ttl_less_fb_octets"},
154 {"tmac_vld_ip_octets"},
162 {"rmac_data_octets"},
163 {"rmac_fcs_err_frms"},
165 {"rmac_vld_mcst_frms"},
166 {"rmac_vld_bcst_frms"},
167 {"rmac_in_rng_len_err_frms"},
168 {"rmac_out_rng_len_err_frms"},
170 {"rmac_pause_ctrl_frms"},
171 {"rmac_unsup_ctrl_frms"},
173 {"rmac_accepted_ucst_frms"},
174 {"rmac_accepted_nucst_frms"},
175 {"rmac_discarded_frms"},
176 {"rmac_drop_events"},
177 {"rmac_ttl_less_fb_octets"},
179 {"rmac_usized_frms"},
180 {"rmac_osized_frms"},
182 {"rmac_jabber_frms"},
183 {"rmac_ttl_64_frms"},
184 {"rmac_ttl_65_127_frms"},
185 {"rmac_ttl_128_255_frms"},
186 {"rmac_ttl_256_511_frms"},
187 {"rmac_ttl_512_1023_frms"},
188 {"rmac_ttl_1024_1518_frms"},
196 {"rmac_err_drp_udp"},
197 {"rmac_xgmii_err_sym"},
215 {"rmac_xgmii_data_err_cnt"},
216 {"rmac_xgmii_ctrl_err_cnt"},
217 {"rmac_accepted_ip"},
221 {"new_rd_req_rtry_cnt"},
223 {"wr_rtry_rd_ack_cnt"},
226 {"new_wr_req_rtry_cnt"},
229 {"rd_rtry_wr_ack_cnt"},
239 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
240 {"rmac_ttl_1519_4095_frms"},
241 {"rmac_ttl_4096_8191_frms"},
242 {"rmac_ttl_8192_max_frms"},
243 {"rmac_ttl_gt_max_frms"},
244 {"rmac_osized_alt_frms"},
245 {"rmac_jabber_alt_frms"},
246 {"rmac_gt_max_alt_frms"},
248 {"rmac_len_discard"},
249 {"rmac_fcs_discard"},
252 {"rmac_red_discard"},
253 {"rmac_rts_discard"},
254 {"rmac_ingm_full_discard"},
258 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
259 {"\n DRIVER STATISTICS"},
260 {"single_bit_ecc_errs"},
261 {"double_bit_ecc_errs"},
267 ("alarm_transceiver_temp_high"),
268 ("alarm_transceiver_temp_low"),
269 ("alarm_laser_bias_current_high"),
270 ("alarm_laser_bias_current_low"),
271 ("alarm_laser_output_power_high"),
272 ("alarm_laser_output_power_low"),
273 ("warn_transceiver_temp_high"),
274 ("warn_transceiver_temp_low"),
275 ("warn_laser_bias_current_high"),
276 ("warn_laser_bias_current_low"),
277 ("warn_laser_output_power_high"),
278 ("warn_laser_output_power_low"),
279 ("lro_aggregated_pkts"),
280 ("lro_flush_both_count"),
281 ("lro_out_of_sequence_pkts"),
282 ("lro_flush_due_to_max_pkts"),
283 ("lro_avg_aggr_pkts"),
286 #define S2IO_XENA_STAT_LEN sizeof(ethtool_xena_stats_keys)/ ETH_GSTRING_LEN
287 #define S2IO_ENHANCED_STAT_LEN sizeof(ethtool_enhanced_stats_keys)/ \
289 #define S2IO_DRIVER_STAT_LEN sizeof(ethtool_driver_stats_keys)/ ETH_GSTRING_LEN
291 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
292 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
294 #define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
295 #define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
297 #define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
298 #define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
300 #define S2IO_TIMER_CONF(timer, handle, arg, exp) \
301 init_timer(&timer); \
302 timer.function = handle; \
303 timer.data = (unsigned long) arg; \
304 mod_timer(&timer, (jiffies + exp)) \
307 static void s2io_vlan_rx_register(struct net_device *dev,
308 struct vlan_group *grp)
310 struct s2io_nic *nic = dev->priv;
313 spin_lock_irqsave(&nic->tx_lock, flags);
315 spin_unlock_irqrestore(&nic->tx_lock, flags);
318 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
321 /* Unregister the vlan */
322 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
324 struct s2io_nic *nic = dev->priv;
327 spin_lock_irqsave(&nic->tx_lock, flags);
329 nic->vlgrp->vlan_devices[vid] = NULL;
330 spin_unlock_irqrestore(&nic->tx_lock, flags);
334 * Constants to be programmed into the Xena's registers, to configure
339 static const u64 herc_act_dtx_cfg[] = {
341 0x8000051536750000ULL, 0x80000515367500E0ULL,
343 0x8000051536750004ULL, 0x80000515367500E4ULL,
345 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
347 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
349 0x801205150D440000ULL, 0x801205150D4400E0ULL,
351 0x801205150D440004ULL, 0x801205150D4400E4ULL,
353 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
355 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
360 static const u64 xena_dtx_cfg[] = {
362 0x8000051500000000ULL, 0x80000515000000E0ULL,
364 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
366 0x8001051500000000ULL, 0x80010515000000E0ULL,
368 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
370 0x8002051500000000ULL, 0x80020515000000E0ULL,
372 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
377 * Constants for Fixing the MacAddress problem seen mostly on
380 static const u64 fix_mac[] = {
381 0x0060000000000000ULL, 0x0060600000000000ULL,
382 0x0040600000000000ULL, 0x0000600000000000ULL,
383 0x0020600000000000ULL, 0x0060600000000000ULL,
384 0x0020600000000000ULL, 0x0060600000000000ULL,
385 0x0020600000000000ULL, 0x0060600000000000ULL,
386 0x0020600000000000ULL, 0x0060600000000000ULL,
387 0x0020600000000000ULL, 0x0060600000000000ULL,
388 0x0020600000000000ULL, 0x0060600000000000ULL,
389 0x0020600000000000ULL, 0x0060600000000000ULL,
390 0x0020600000000000ULL, 0x0060600000000000ULL,
391 0x0020600000000000ULL, 0x0060600000000000ULL,
392 0x0020600000000000ULL, 0x0060600000000000ULL,
393 0x0020600000000000ULL, 0x0000600000000000ULL,
394 0x0040600000000000ULL, 0x0060600000000000ULL,
398 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
399 MODULE_LICENSE("GPL");
400 MODULE_VERSION(DRV_VERSION);
403 /* Module Loadable parameters. */
404 S2IO_PARM_INT(tx_fifo_num, 1);
405 S2IO_PARM_INT(rx_ring_num, 1);
408 S2IO_PARM_INT(rx_ring_mode, 1);
409 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
410 S2IO_PARM_INT(rmac_pause_time, 0x100);
411 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
412 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
413 S2IO_PARM_INT(shared_splits, 0);
414 S2IO_PARM_INT(tmac_util_period, 5);
415 S2IO_PARM_INT(rmac_util_period, 5);
416 S2IO_PARM_INT(bimodal, 0);
417 S2IO_PARM_INT(l3l4hdr_size, 128);
418 /* Frequency of Rx desc syncs expressed as power of 2 */
419 S2IO_PARM_INT(rxsync_frequency, 3);
420 /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
421 S2IO_PARM_INT(intr_type, 0);
422 /* Large receive offload feature */
423 S2IO_PARM_INT(lro, 0);
424 /* Max pkts to be aggregated by LRO at one time. If not specified,
425 * aggregation happens until we hit max IP pkt size(64K)
427 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
428 S2IO_PARM_INT(indicate_max_pkts, 0);
430 S2IO_PARM_INT(napi, 1);
431 S2IO_PARM_INT(ufo, 0);
432 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
434 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
435 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
436 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
437 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
438 static unsigned int rts_frm_len[MAX_RX_RINGS] =
439 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
441 module_param_array(tx_fifo_len, uint, NULL, 0);
442 module_param_array(rx_ring_sz, uint, NULL, 0);
443 module_param_array(rts_frm_len, uint, NULL, 0);
447 * This table lists all the devices that this driver supports.
449 static struct pci_device_id s2io_tbl[] __devinitdata = {
450 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
451 PCI_ANY_ID, PCI_ANY_ID},
452 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
453 PCI_ANY_ID, PCI_ANY_ID},
454 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
455 PCI_ANY_ID, PCI_ANY_ID},
456 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
457 PCI_ANY_ID, PCI_ANY_ID},
461 MODULE_DEVICE_TABLE(pci, s2io_tbl);
463 static struct pci_driver s2io_driver = {
465 .id_table = s2io_tbl,
466 .probe = s2io_init_nic,
467 .remove = __devexit_p(s2io_rem_nic),
470 /* A simplifier macro used both by init and free shared_mem Fns(). */
471 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
474 * init_shared_mem - Allocation and Initialization of Memory
475 * @nic: Device private variable.
476 * Description: The function allocates all the memory areas shared
477 * between the NIC and the driver. This includes Tx descriptors,
478 * Rx descriptors and the statistics block.
481 static int init_shared_mem(struct s2io_nic *nic)
484 void *tmp_v_addr, *tmp_v_addr_next;
485 dma_addr_t tmp_p_addr, tmp_p_addr_next;
486 struct RxD_block *pre_rxd_blk = NULL;
488 int lst_size, lst_per_page;
489 struct net_device *dev = nic->dev;
493 struct mac_info *mac_control;
494 struct config_param *config;
496 mac_control = &nic->mac_control;
497 config = &nic->config;
500 /* Allocation and initialization of TXDLs in FIOFs */
502 for (i = 0; i < config->tx_fifo_num; i++) {
503 size += config->tx_cfg[i].fifo_len;
505 if (size > MAX_AVAILABLE_TXDS) {
506 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
507 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
511 lst_size = (sizeof(struct TxD) * config->max_txds);
512 lst_per_page = PAGE_SIZE / lst_size;
514 for (i = 0; i < config->tx_fifo_num; i++) {
515 int fifo_len = config->tx_cfg[i].fifo_len;
516 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
517 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
519 if (!mac_control->fifos[i].list_info) {
521 "Malloc failed for list_info\n");
524 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
526 for (i = 0; i < config->tx_fifo_num; i++) {
527 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
529 mac_control->fifos[i].tx_curr_put_info.offset = 0;
530 mac_control->fifos[i].tx_curr_put_info.fifo_len =
531 config->tx_cfg[i].fifo_len - 1;
532 mac_control->fifos[i].tx_curr_get_info.offset = 0;
533 mac_control->fifos[i].tx_curr_get_info.fifo_len =
534 config->tx_cfg[i].fifo_len - 1;
535 mac_control->fifos[i].fifo_no = i;
536 mac_control->fifos[i].nic = nic;
537 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
539 for (j = 0; j < page_num; j++) {
543 tmp_v = pci_alloc_consistent(nic->pdev,
547 "pci_alloc_consistent ");
548 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
551 /* If we got a zero DMA address(can happen on
552 * certain platforms like PPC), reallocate.
553 * Store virtual address of page we don't want,
557 mac_control->zerodma_virt_addr = tmp_v;
559 "%s: Zero DMA address for TxDL. ", dev->name);
561 "Virtual address %p\n", tmp_v);
562 tmp_v = pci_alloc_consistent(nic->pdev,
566 "pci_alloc_consistent ");
567 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
571 while (k < lst_per_page) {
572 int l = (j * lst_per_page) + k;
573 if (l == config->tx_cfg[i].fifo_len)
575 mac_control->fifos[i].list_info[l].list_virt_addr =
576 tmp_v + (k * lst_size);
577 mac_control->fifos[i].list_info[l].list_phy_addr =
578 tmp_p + (k * lst_size);
584 nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
585 if (!nic->ufo_in_band_v)
588 /* Allocation and initialization of RXDs in Rings */
590 for (i = 0; i < config->rx_ring_num; i++) {
591 if (config->rx_cfg[i].num_rxd %
592 (rxd_count[nic->rxd_mode] + 1)) {
593 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
594 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
596 DBG_PRINT(ERR_DBG, "RxDs per Block");
599 size += config->rx_cfg[i].num_rxd;
600 mac_control->rings[i].block_count =
601 config->rx_cfg[i].num_rxd /
602 (rxd_count[nic->rxd_mode] + 1 );
603 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
604 mac_control->rings[i].block_count;
606 if (nic->rxd_mode == RXD_MODE_1)
607 size = (size * (sizeof(struct RxD1)));
609 size = (size * (sizeof(struct RxD3)));
611 for (i = 0; i < config->rx_ring_num; i++) {
612 mac_control->rings[i].rx_curr_get_info.block_index = 0;
613 mac_control->rings[i].rx_curr_get_info.offset = 0;
614 mac_control->rings[i].rx_curr_get_info.ring_len =
615 config->rx_cfg[i].num_rxd - 1;
616 mac_control->rings[i].rx_curr_put_info.block_index = 0;
617 mac_control->rings[i].rx_curr_put_info.offset = 0;
618 mac_control->rings[i].rx_curr_put_info.ring_len =
619 config->rx_cfg[i].num_rxd - 1;
620 mac_control->rings[i].nic = nic;
621 mac_control->rings[i].ring_no = i;
623 blk_cnt = config->rx_cfg[i].num_rxd /
624 (rxd_count[nic->rxd_mode] + 1);
625 /* Allocating all the Rx blocks */
626 for (j = 0; j < blk_cnt; j++) {
627 struct rx_block_info *rx_blocks;
630 rx_blocks = &mac_control->rings[i].rx_blocks[j];
631 size = SIZE_OF_BLOCK; //size is always page size
632 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
634 if (tmp_v_addr == NULL) {
636 * In case of failure, free_shared_mem()
637 * is called, which should free any
638 * memory that was alloced till the
641 rx_blocks->block_virt_addr = tmp_v_addr;
644 memset(tmp_v_addr, 0, size);
645 rx_blocks->block_virt_addr = tmp_v_addr;
646 rx_blocks->block_dma_addr = tmp_p_addr;
647 rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
648 rxd_count[nic->rxd_mode],
650 if (!rx_blocks->rxds)
652 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
653 rx_blocks->rxds[l].virt_addr =
654 rx_blocks->block_virt_addr +
655 (rxd_size[nic->rxd_mode] * l);
656 rx_blocks->rxds[l].dma_addr =
657 rx_blocks->block_dma_addr +
658 (rxd_size[nic->rxd_mode] * l);
661 /* Interlinking all Rx Blocks */
662 for (j = 0; j < blk_cnt; j++) {
664 mac_control->rings[i].rx_blocks[j].block_virt_addr;
666 mac_control->rings[i].rx_blocks[(j + 1) %
667 blk_cnt].block_virt_addr;
669 mac_control->rings[i].rx_blocks[j].block_dma_addr;
671 mac_control->rings[i].rx_blocks[(j + 1) %
672 blk_cnt].block_dma_addr;
674 pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
675 pre_rxd_blk->reserved_2_pNext_RxD_block =
676 (unsigned long) tmp_v_addr_next;
677 pre_rxd_blk->pNext_RxD_Blk_physical =
678 (u64) tmp_p_addr_next;
681 if (nic->rxd_mode >= RXD_MODE_3A) {
683 * Allocation of Storages for buffer addresses in 2BUFF mode
684 * and the buffers as well.
686 for (i = 0; i < config->rx_ring_num; i++) {
687 blk_cnt = config->rx_cfg[i].num_rxd /
688 (rxd_count[nic->rxd_mode]+ 1);
689 mac_control->rings[i].ba =
690 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
692 if (!mac_control->rings[i].ba)
694 for (j = 0; j < blk_cnt; j++) {
696 mac_control->rings[i].ba[j] =
697 kmalloc((sizeof(struct buffAdd) *
698 (rxd_count[nic->rxd_mode] + 1)),
700 if (!mac_control->rings[i].ba[j])
702 while (k != rxd_count[nic->rxd_mode]) {
703 ba = &mac_control->rings[i].ba[j][k];
705 ba->ba_0_org = (void *) kmalloc
706 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
709 tmp = (unsigned long)ba->ba_0_org;
711 tmp &= ~((unsigned long) ALIGN_SIZE);
712 ba->ba_0 = (void *) tmp;
714 ba->ba_1_org = (void *) kmalloc
715 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
718 tmp = (unsigned long) ba->ba_1_org;
720 tmp &= ~((unsigned long) ALIGN_SIZE);
721 ba->ba_1 = (void *) tmp;
728 /* Allocation and initialization of Statistics block */
729 size = sizeof(struct stat_block);
730 mac_control->stats_mem = pci_alloc_consistent
731 (nic->pdev, size, &mac_control->stats_mem_phy);
733 if (!mac_control->stats_mem) {
735 * In case of failure, free_shared_mem() is called, which
736 * should free any memory that was alloced till the
741 mac_control->stats_mem_sz = size;
743 tmp_v_addr = mac_control->stats_mem;
744 mac_control->stats_info = (struct stat_block *) tmp_v_addr;
745 memset(tmp_v_addr, 0, size);
746 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
747 (unsigned long long) tmp_p_addr);
753 * free_shared_mem - Free the allocated Memory
754 * @nic: Device private variable.
755 * Description: This function is to free all memory locations allocated by
756 * the init_shared_mem() function and return it to the kernel.
759 static void free_shared_mem(struct s2io_nic *nic)
761 int i, j, blk_cnt, size;
763 dma_addr_t tmp_p_addr;
764 struct mac_info *mac_control;
765 struct config_param *config;
766 int lst_size, lst_per_page;
767 struct net_device *dev = nic->dev;
772 mac_control = &nic->mac_control;
773 config = &nic->config;
775 lst_size = (sizeof(struct TxD) * config->max_txds);
776 lst_per_page = PAGE_SIZE / lst_size;
778 for (i = 0; i < config->tx_fifo_num; i++) {
779 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
781 for (j = 0; j < page_num; j++) {
782 int mem_blks = (j * lst_per_page);
783 if (!mac_control->fifos[i].list_info)
785 if (!mac_control->fifos[i].list_info[mem_blks].
788 pci_free_consistent(nic->pdev, PAGE_SIZE,
789 mac_control->fifos[i].
792 mac_control->fifos[i].
796 /* If we got a zero DMA address during allocation,
799 if (mac_control->zerodma_virt_addr) {
800 pci_free_consistent(nic->pdev, PAGE_SIZE,
801 mac_control->zerodma_virt_addr,
804 "%s: Freeing TxDL with zero DMA addr. ",
806 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
807 mac_control->zerodma_virt_addr);
809 kfree(mac_control->fifos[i].list_info);
812 size = SIZE_OF_BLOCK;
813 for (i = 0; i < config->rx_ring_num; i++) {
814 blk_cnt = mac_control->rings[i].block_count;
815 for (j = 0; j < blk_cnt; j++) {
816 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
818 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
820 if (tmp_v_addr == NULL)
822 pci_free_consistent(nic->pdev, size,
823 tmp_v_addr, tmp_p_addr);
824 kfree(mac_control->rings[i].rx_blocks[j].rxds);
828 if (nic->rxd_mode >= RXD_MODE_3A) {
829 /* Freeing buffer storage addresses in 2BUFF mode. */
830 for (i = 0; i < config->rx_ring_num; i++) {
831 blk_cnt = config->rx_cfg[i].num_rxd /
832 (rxd_count[nic->rxd_mode] + 1);
833 for (j = 0; j < blk_cnt; j++) {
835 if (!mac_control->rings[i].ba[j])
837 while (k != rxd_count[nic->rxd_mode]) {
839 &mac_control->rings[i].ba[j][k];
844 kfree(mac_control->rings[i].ba[j]);
846 kfree(mac_control->rings[i].ba);
850 if (mac_control->stats_mem) {
851 pci_free_consistent(nic->pdev,
852 mac_control->stats_mem_sz,
853 mac_control->stats_mem,
854 mac_control->stats_mem_phy);
856 if (nic->ufo_in_band_v)
857 kfree(nic->ufo_in_band_v);
861 * s2io_verify_pci_mode -
864 static int s2io_verify_pci_mode(struct s2io_nic *nic)
866 struct XENA_dev_config __iomem *bar0 = nic->bar0;
867 register u64 val64 = 0;
870 val64 = readq(&bar0->pci_mode);
871 mode = (u8)GET_PCI_MODE(val64);
873 if ( val64 & PCI_MODE_UNKNOWN_MODE)
874 return -1; /* Unknown PCI mode */
878 #define NEC_VENID 0x1033
879 #define NEC_DEVID 0x0125
880 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
882 struct pci_dev *tdev = NULL;
883 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
884 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
885 if (tdev->bus == s2io_pdev->bus->parent)
893 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
895 * s2io_print_pci_mode -
897 static int s2io_print_pci_mode(struct s2io_nic *nic)
899 struct XENA_dev_config __iomem *bar0 = nic->bar0;
900 register u64 val64 = 0;
902 struct config_param *config = &nic->config;
904 val64 = readq(&bar0->pci_mode);
905 mode = (u8)GET_PCI_MODE(val64);
907 if ( val64 & PCI_MODE_UNKNOWN_MODE)
908 return -1; /* Unknown PCI mode */
910 config->bus_speed = bus_speed[mode];
912 if (s2io_on_nec_bridge(nic->pdev)) {
913 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
918 if (val64 & PCI_MODE_32_BITS) {
919 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
921 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
925 case PCI_MODE_PCI_33:
926 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
928 case PCI_MODE_PCI_66:
929 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
931 case PCI_MODE_PCIX_M1_66:
932 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
934 case PCI_MODE_PCIX_M1_100:
935 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
937 case PCI_MODE_PCIX_M1_133:
938 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
940 case PCI_MODE_PCIX_M2_66:
941 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
943 case PCI_MODE_PCIX_M2_100:
944 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
946 case PCI_MODE_PCIX_M2_133:
947 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
950 return -1; /* Unsupported bus speed */
957 * init_nic - Initialization of hardware
958 * @nic: device peivate variable
959 * Description: The function sequentially configures every block
960 * of the H/W from their reset values.
961 * Return Value: SUCCESS on success and
962 * '-1' on failure (endian settings incorrect).
965 static int init_nic(struct s2io_nic *nic)
967 struct XENA_dev_config __iomem *bar0 = nic->bar0;
968 struct net_device *dev = nic->dev;
969 register u64 val64 = 0;
973 struct mac_info *mac_control;
974 struct config_param *config;
976 unsigned long long mem_share;
979 mac_control = &nic->mac_control;
980 config = &nic->config;
982 /* to set the swapper controle on the card */
983 if(s2io_set_swapper(nic)) {
984 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
989 * Herc requires EOI to be removed from reset before XGXS, so..
991 if (nic->device_type & XFRAME_II_DEVICE) {
992 val64 = 0xA500000000ULL;
993 writeq(val64, &bar0->sw_reset);
995 val64 = readq(&bar0->sw_reset);
998 /* Remove XGXS from reset state */
1000 writeq(val64, &bar0->sw_reset);
1002 val64 = readq(&bar0->sw_reset);
1004 /* Enable Receiving broadcasts */
1005 add = &bar0->mac_cfg;
1006 val64 = readq(&bar0->mac_cfg);
1007 val64 |= MAC_RMAC_BCAST_ENABLE;
1008 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1009 writel((u32) val64, add);
1010 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1011 writel((u32) (val64 >> 32), (add + 4));
1013 /* Read registers in all blocks */
1014 val64 = readq(&bar0->mac_int_mask);
1015 val64 = readq(&bar0->mc_int_mask);
1016 val64 = readq(&bar0->xgxs_int_mask);
1020 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1022 if (nic->device_type & XFRAME_II_DEVICE) {
1023 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1024 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1025 &bar0->dtx_control, UF);
1027 msleep(1); /* Necessary!! */
1031 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1032 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1033 &bar0->dtx_control, UF);
1034 val64 = readq(&bar0->dtx_control);
1039 /* Tx DMA Initialization */
1041 writeq(val64, &bar0->tx_fifo_partition_0);
1042 writeq(val64, &bar0->tx_fifo_partition_1);
1043 writeq(val64, &bar0->tx_fifo_partition_2);
1044 writeq(val64, &bar0->tx_fifo_partition_3);
1047 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1049 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1050 13) | vBIT(config->tx_cfg[i].fifo_priority,
1053 if (i == (config->tx_fifo_num - 1)) {
1060 writeq(val64, &bar0->tx_fifo_partition_0);
1064 writeq(val64, &bar0->tx_fifo_partition_1);
1068 writeq(val64, &bar0->tx_fifo_partition_2);
1072 writeq(val64, &bar0->tx_fifo_partition_3);
1078 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1079 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1081 if ((nic->device_type == XFRAME_I_DEVICE) &&
1082 (get_xena_rev_id(nic->pdev) < 4))
1083 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1085 val64 = readq(&bar0->tx_fifo_partition_0);
1086 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1087 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1090 * Initialization of Tx_PA_CONFIG register to ignore packet
1091 * integrity checking.
1093 val64 = readq(&bar0->tx_pa_cfg);
1094 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1095 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1096 writeq(val64, &bar0->tx_pa_cfg);
1098 /* Rx DMA intialization. */
1100 for (i = 0; i < config->rx_ring_num; i++) {
1102 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1105 writeq(val64, &bar0->rx_queue_priority);
1108 * Allocating equal share of memory to all the
1112 if (nic->device_type & XFRAME_II_DEVICE)
1117 for (i = 0; i < config->rx_ring_num; i++) {
1120 mem_share = (mem_size / config->rx_ring_num +
1121 mem_size % config->rx_ring_num);
1122 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1125 mem_share = (mem_size / config->rx_ring_num);
1126 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1129 mem_share = (mem_size / config->rx_ring_num);
1130 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1133 mem_share = (mem_size / config->rx_ring_num);
1134 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1137 mem_share = (mem_size / config->rx_ring_num);
1138 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1141 mem_share = (mem_size / config->rx_ring_num);
1142 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1145 mem_share = (mem_size / config->rx_ring_num);
1146 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1149 mem_share = (mem_size / config->rx_ring_num);
1150 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1154 writeq(val64, &bar0->rx_queue_cfg);
1157 * Filling Tx round robin registers
1158 * as per the number of FIFOs
1160 switch (config->tx_fifo_num) {
1162 val64 = 0x0000000000000000ULL;
1163 writeq(val64, &bar0->tx_w_round_robin_0);
1164 writeq(val64, &bar0->tx_w_round_robin_1);
1165 writeq(val64, &bar0->tx_w_round_robin_2);
1166 writeq(val64, &bar0->tx_w_round_robin_3);
1167 writeq(val64, &bar0->tx_w_round_robin_4);
1170 val64 = 0x0000010000010000ULL;
1171 writeq(val64, &bar0->tx_w_round_robin_0);
1172 val64 = 0x0100000100000100ULL;
1173 writeq(val64, &bar0->tx_w_round_robin_1);
1174 val64 = 0x0001000001000001ULL;
1175 writeq(val64, &bar0->tx_w_round_robin_2);
1176 val64 = 0x0000010000010000ULL;
1177 writeq(val64, &bar0->tx_w_round_robin_3);
1178 val64 = 0x0100000000000000ULL;
1179 writeq(val64, &bar0->tx_w_round_robin_4);
1182 val64 = 0x0001000102000001ULL;
1183 writeq(val64, &bar0->tx_w_round_robin_0);
1184 val64 = 0x0001020000010001ULL;
1185 writeq(val64, &bar0->tx_w_round_robin_1);
1186 val64 = 0x0200000100010200ULL;
1187 writeq(val64, &bar0->tx_w_round_robin_2);
1188 val64 = 0x0001000102000001ULL;
1189 writeq(val64, &bar0->tx_w_round_robin_3);
1190 val64 = 0x0001020000000000ULL;
1191 writeq(val64, &bar0->tx_w_round_robin_4);
1194 val64 = 0x0001020300010200ULL;
1195 writeq(val64, &bar0->tx_w_round_robin_0);
1196 val64 = 0x0100000102030001ULL;
1197 writeq(val64, &bar0->tx_w_round_robin_1);
1198 val64 = 0x0200010000010203ULL;
1199 writeq(val64, &bar0->tx_w_round_robin_2);
1200 val64 = 0x0001020001000001ULL;
1201 writeq(val64, &bar0->tx_w_round_robin_3);
1202 val64 = 0x0203000100000000ULL;
1203 writeq(val64, &bar0->tx_w_round_robin_4);
1206 val64 = 0x0001000203000102ULL;
1207 writeq(val64, &bar0->tx_w_round_robin_0);
1208 val64 = 0x0001020001030004ULL;
1209 writeq(val64, &bar0->tx_w_round_robin_1);
1210 val64 = 0x0001000203000102ULL;
1211 writeq(val64, &bar0->tx_w_round_robin_2);
1212 val64 = 0x0001020001030004ULL;
1213 writeq(val64, &bar0->tx_w_round_robin_3);
1214 val64 = 0x0001000000000000ULL;
1215 writeq(val64, &bar0->tx_w_round_robin_4);
1218 val64 = 0x0001020304000102ULL;
1219 writeq(val64, &bar0->tx_w_round_robin_0);
1220 val64 = 0x0304050001020001ULL;
1221 writeq(val64, &bar0->tx_w_round_robin_1);
1222 val64 = 0x0203000100000102ULL;
1223 writeq(val64, &bar0->tx_w_round_robin_2);
1224 val64 = 0x0304000102030405ULL;
1225 writeq(val64, &bar0->tx_w_round_robin_3);
1226 val64 = 0x0001000200000000ULL;
1227 writeq(val64, &bar0->tx_w_round_robin_4);
1230 val64 = 0x0001020001020300ULL;
1231 writeq(val64, &bar0->tx_w_round_robin_0);
1232 val64 = 0x0102030400010203ULL;
1233 writeq(val64, &bar0->tx_w_round_robin_1);
1234 val64 = 0x0405060001020001ULL;
1235 writeq(val64, &bar0->tx_w_round_robin_2);
1236 val64 = 0x0304050000010200ULL;
1237 writeq(val64, &bar0->tx_w_round_robin_3);
1238 val64 = 0x0102030000000000ULL;
1239 writeq(val64, &bar0->tx_w_round_robin_4);
1242 val64 = 0x0001020300040105ULL;
1243 writeq(val64, &bar0->tx_w_round_robin_0);
1244 val64 = 0x0200030106000204ULL;
1245 writeq(val64, &bar0->tx_w_round_robin_1);
1246 val64 = 0x0103000502010007ULL;
1247 writeq(val64, &bar0->tx_w_round_robin_2);
1248 val64 = 0x0304010002060500ULL;
1249 writeq(val64, &bar0->tx_w_round_robin_3);
1250 val64 = 0x0103020400000000ULL;
1251 writeq(val64, &bar0->tx_w_round_robin_4);
1255 /* Enable all configured Tx FIFO partitions */
1256 val64 = readq(&bar0->tx_fifo_partition_0);
1257 val64 |= (TX_FIFO_PARTITION_EN);
1258 writeq(val64, &bar0->tx_fifo_partition_0);
1260 /* Filling the Rx round robin registers as per the
1261 * number of Rings and steering based on QoS.
1263 switch (config->rx_ring_num) {
1265 val64 = 0x8080808080808080ULL;
1266 writeq(val64, &bar0->rts_qos_steering);
1269 val64 = 0x0000010000010000ULL;
1270 writeq(val64, &bar0->rx_w_round_robin_0);
1271 val64 = 0x0100000100000100ULL;
1272 writeq(val64, &bar0->rx_w_round_robin_1);
1273 val64 = 0x0001000001000001ULL;
1274 writeq(val64, &bar0->rx_w_round_robin_2);
1275 val64 = 0x0000010000010000ULL;
1276 writeq(val64, &bar0->rx_w_round_robin_3);
1277 val64 = 0x0100000000000000ULL;
1278 writeq(val64, &bar0->rx_w_round_robin_4);
1280 val64 = 0x8080808040404040ULL;
1281 writeq(val64, &bar0->rts_qos_steering);
1284 val64 = 0x0001000102000001ULL;
1285 writeq(val64, &bar0->rx_w_round_robin_0);
1286 val64 = 0x0001020000010001ULL;
1287 writeq(val64, &bar0->rx_w_round_robin_1);
1288 val64 = 0x0200000100010200ULL;
1289 writeq(val64, &bar0->rx_w_round_robin_2);
1290 val64 = 0x0001000102000001ULL;
1291 writeq(val64, &bar0->rx_w_round_robin_3);
1292 val64 = 0x0001020000000000ULL;
1293 writeq(val64, &bar0->rx_w_round_robin_4);
1295 val64 = 0x8080804040402020ULL;
1296 writeq(val64, &bar0->rts_qos_steering);
1299 val64 = 0x0001020300010200ULL;
1300 writeq(val64, &bar0->rx_w_round_robin_0);
1301 val64 = 0x0100000102030001ULL;
1302 writeq(val64, &bar0->rx_w_round_robin_1);
1303 val64 = 0x0200010000010203ULL;
1304 writeq(val64, &bar0->rx_w_round_robin_2);
1305 val64 = 0x0001020001000001ULL;
1306 writeq(val64, &bar0->rx_w_round_robin_3);
1307 val64 = 0x0203000100000000ULL;
1308 writeq(val64, &bar0->rx_w_round_robin_4);
1310 val64 = 0x8080404020201010ULL;
1311 writeq(val64, &bar0->rts_qos_steering);
1314 val64 = 0x0001000203000102ULL;
1315 writeq(val64, &bar0->rx_w_round_robin_0);
1316 val64 = 0x0001020001030004ULL;
1317 writeq(val64, &bar0->rx_w_round_robin_1);
1318 val64 = 0x0001000203000102ULL;
1319 writeq(val64, &bar0->rx_w_round_robin_2);
1320 val64 = 0x0001020001030004ULL;
1321 writeq(val64, &bar0->rx_w_round_robin_3);
1322 val64 = 0x0001000000000000ULL;
1323 writeq(val64, &bar0->rx_w_round_robin_4);
1325 val64 = 0x8080404020201008ULL;
1326 writeq(val64, &bar0->rts_qos_steering);
1329 val64 = 0x0001020304000102ULL;
1330 writeq(val64, &bar0->rx_w_round_robin_0);
1331 val64 = 0x0304050001020001ULL;
1332 writeq(val64, &bar0->rx_w_round_robin_1);
1333 val64 = 0x0203000100000102ULL;
1334 writeq(val64, &bar0->rx_w_round_robin_2);
1335 val64 = 0x0304000102030405ULL;
1336 writeq(val64, &bar0->rx_w_round_robin_3);
1337 val64 = 0x0001000200000000ULL;
1338 writeq(val64, &bar0->rx_w_round_robin_4);
1340 val64 = 0x8080404020100804ULL;
1341 writeq(val64, &bar0->rts_qos_steering);
1344 val64 = 0x0001020001020300ULL;
1345 writeq(val64, &bar0->rx_w_round_robin_0);
1346 val64 = 0x0102030400010203ULL;
1347 writeq(val64, &bar0->rx_w_round_robin_1);
1348 val64 = 0x0405060001020001ULL;
1349 writeq(val64, &bar0->rx_w_round_robin_2);
1350 val64 = 0x0304050000010200ULL;
1351 writeq(val64, &bar0->rx_w_round_robin_3);
1352 val64 = 0x0102030000000000ULL;
1353 writeq(val64, &bar0->rx_w_round_robin_4);
1355 val64 = 0x8080402010080402ULL;
1356 writeq(val64, &bar0->rts_qos_steering);
1359 val64 = 0x0001020300040105ULL;
1360 writeq(val64, &bar0->rx_w_round_robin_0);
1361 val64 = 0x0200030106000204ULL;
1362 writeq(val64, &bar0->rx_w_round_robin_1);
1363 val64 = 0x0103000502010007ULL;
1364 writeq(val64, &bar0->rx_w_round_robin_2);
1365 val64 = 0x0304010002060500ULL;
1366 writeq(val64, &bar0->rx_w_round_robin_3);
1367 val64 = 0x0103020400000000ULL;
1368 writeq(val64, &bar0->rx_w_round_robin_4);
1370 val64 = 0x8040201008040201ULL;
1371 writeq(val64, &bar0->rts_qos_steering);
1377 for (i = 0; i < 8; i++)
1378 writeq(val64, &bar0->rts_frm_len_n[i]);
1380 /* Set the default rts frame length for the rings configured */
1381 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1382 for (i = 0 ; i < config->rx_ring_num ; i++)
1383 writeq(val64, &bar0->rts_frm_len_n[i]);
1385 /* Set the frame length for the configured rings
1386 * desired by the user
1388 for (i = 0; i < config->rx_ring_num; i++) {
1389 /* If rts_frm_len[i] == 0 then it is assumed that user not
1390 * specified frame length steering.
1391 * If the user provides the frame length then program
1392 * the rts_frm_len register for those values or else
1393 * leave it as it is.
1395 if (rts_frm_len[i] != 0) {
1396 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1397 &bar0->rts_frm_len_n[i]);
1401 /* Disable differentiated services steering logic */
1402 for (i = 0; i < 64; i++) {
1403 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1404 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1406 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1411 /* Program statistics memory */
1412 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1414 if (nic->device_type == XFRAME_II_DEVICE) {
1415 val64 = STAT_BC(0x320);
1416 writeq(val64, &bar0->stat_byte_cnt);
1420 * Initializing the sampling rate for the device to calculate the
1421 * bandwidth utilization.
1423 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1424 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1425 writeq(val64, &bar0->mac_link_util);
1429 * Initializing the Transmit and Receive Traffic Interrupt
1433 * TTI Initialization. Default Tx timer gets us about
1434 * 250 interrupts per sec. Continuous interrupts are enabled
1437 if (nic->device_type == XFRAME_II_DEVICE) {
1438 int count = (nic->config.bus_speed * 125)/2;
1439 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1442 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1444 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1445 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1446 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1447 if (use_continuous_tx_intrs)
1448 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1449 writeq(val64, &bar0->tti_data1_mem);
1451 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1452 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1453 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1454 writeq(val64, &bar0->tti_data2_mem);
1456 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1457 writeq(val64, &bar0->tti_command_mem);
1460 * Once the operation completes, the Strobe bit of the command
1461 * register will be reset. We poll for this particular condition
1462 * We wait for a maximum of 500ms for the operation to complete,
1463 * if it's not complete by then we return error.
1467 val64 = readq(&bar0->tti_command_mem);
1468 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1472 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1480 if (nic->config.bimodal) {
1482 for (k = 0; k < config->rx_ring_num; k++) {
1483 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1484 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1485 writeq(val64, &bar0->tti_command_mem);
1488 * Once the operation completes, the Strobe bit of the command
1489 * register will be reset. We poll for this particular condition
1490 * We wait for a maximum of 500ms for the operation to complete,
1491 * if it's not complete by then we return error.
1495 val64 = readq(&bar0->tti_command_mem);
1496 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1501 "%s: TTI init Failed\n",
1511 /* RTI Initialization */
1512 if (nic->device_type == XFRAME_II_DEVICE) {
1514 * Programmed to generate Apprx 500 Intrs per
1517 int count = (nic->config.bus_speed * 125)/4;
1518 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1520 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1522 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1523 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1524 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1526 writeq(val64, &bar0->rti_data1_mem);
1528 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1529 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1530 if (nic->intr_type == MSI_X)
1531 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1532 RTI_DATA2_MEM_RX_UFC_D(0x40));
1534 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1535 RTI_DATA2_MEM_RX_UFC_D(0x80));
1536 writeq(val64, &bar0->rti_data2_mem);
1538 for (i = 0; i < config->rx_ring_num; i++) {
1539 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1540 | RTI_CMD_MEM_OFFSET(i);
1541 writeq(val64, &bar0->rti_command_mem);
1544 * Once the operation completes, the Strobe bit of the
1545 * command register will be reset. We poll for this
1546 * particular condition. We wait for a maximum of 500ms
1547 * for the operation to complete, if it's not complete
1548 * by then we return error.
1552 val64 = readq(&bar0->rti_command_mem);
1553 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1557 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1568 * Initializing proper values as Pause threshold into all
1569 * the 8 Queues on Rx side.
1571 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1572 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1574 /* Disable RMAC PAD STRIPPING */
1575 add = &bar0->mac_cfg;
1576 val64 = readq(&bar0->mac_cfg);
1577 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1578 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1579 writel((u32) (val64), add);
1580 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1581 writel((u32) (val64 >> 32), (add + 4));
1582 val64 = readq(&bar0->mac_cfg);
1584 /* Enable FCS stripping by adapter */
1585 add = &bar0->mac_cfg;
1586 val64 = readq(&bar0->mac_cfg);
1587 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1588 if (nic->device_type == XFRAME_II_DEVICE)
1589 writeq(val64, &bar0->mac_cfg);
1591 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1592 writel((u32) (val64), add);
1593 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1594 writel((u32) (val64 >> 32), (add + 4));
1598 * Set the time value to be inserted in the pause frame
1599 * generated by xena.
1601 val64 = readq(&bar0->rmac_pause_cfg);
1602 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1603 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1604 writeq(val64, &bar0->rmac_pause_cfg);
1607 * Set the Threshold Limit for Generating the pause frame
1608 * If the amount of data in any Queue exceeds ratio of
1609 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1610 * pause frame is generated
1613 for (i = 0; i < 4; i++) {
1615 (((u64) 0xFF00 | nic->mac_control.
1616 mc_pause_threshold_q0q3)
1619 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1622 for (i = 0; i < 4; i++) {
1624 (((u64) 0xFF00 | nic->mac_control.
1625 mc_pause_threshold_q4q7)
1628 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1631 * TxDMA will stop Read request if the number of read split has
1632 * exceeded the limit pointed by shared_splits
1634 val64 = readq(&bar0->pic_control);
1635 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1636 writeq(val64, &bar0->pic_control);
1638 if (nic->config.bus_speed == 266) {
1639 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1640 writeq(0x0, &bar0->read_retry_delay);
1641 writeq(0x0, &bar0->write_retry_delay);
1645 * Programming the Herc to split every write transaction
1646 * that does not start on an ADB to reduce disconnects.
1648 if (nic->device_type == XFRAME_II_DEVICE) {
1649 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1650 MISC_LINK_STABILITY_PRD(3);
1651 writeq(val64, &bar0->misc_control);
1652 val64 = readq(&bar0->pic_control2);
1653 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1654 writeq(val64, &bar0->pic_control2);
1656 if (strstr(nic->product_name, "CX4")) {
1657 val64 = TMAC_AVG_IPG(0x17);
1658 writeq(val64, &bar0->tmac_avg_ipg);
1663 #define LINK_UP_DOWN_INTERRUPT 1
1664 #define MAC_RMAC_ERR_TIMER 2
1666 static int s2io_link_fault_indication(struct s2io_nic *nic)
1668 if (nic->intr_type != INTA)
1669 return MAC_RMAC_ERR_TIMER;
1670 if (nic->device_type == XFRAME_II_DEVICE)
1671 return LINK_UP_DOWN_INTERRUPT;
1673 return MAC_RMAC_ERR_TIMER;
1677 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1678 * @nic: device private variable,
1679 * @mask: A mask indicating which Intr block must be modified and,
1680 * @flag: A flag indicating whether to enable or disable the Intrs.
1681 * Description: This function will either disable or enable the interrupts
1682 * depending on the flag argument. The mask argument can be used to
1683 * enable/disable any Intr block.
1684 * Return Value: NONE.
1687 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1689 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1690 register u64 val64 = 0, temp64 = 0;
1692 /* Top level interrupt classification */
1693 /* PIC Interrupts */
1694 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1695 /* Enable PIC Intrs in the general intr mask register */
1696 val64 = TXPIC_INT_M;
1697 if (flag == ENABLE_INTRS) {
1698 temp64 = readq(&bar0->general_int_mask);
1699 temp64 &= ~((u64) val64);
1700 writeq(temp64, &bar0->general_int_mask);
1702 * If Hercules adapter enable GPIO otherwise
1703 * disable all PCIX, Flash, MDIO, IIC and GPIO
1704 * interrupts for now.
1707 if (s2io_link_fault_indication(nic) ==
1708 LINK_UP_DOWN_INTERRUPT ) {
1709 temp64 = readq(&bar0->pic_int_mask);
1710 temp64 &= ~((u64) PIC_INT_GPIO);
1711 writeq(temp64, &bar0->pic_int_mask);
1712 temp64 = readq(&bar0->gpio_int_mask);
1713 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1714 writeq(temp64, &bar0->gpio_int_mask);
1716 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1719 * No MSI Support is available presently, so TTI and
1720 * RTI interrupts are also disabled.
1722 } else if (flag == DISABLE_INTRS) {
1724 * Disable PIC Intrs in the general
1725 * intr mask register
1727 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1728 temp64 = readq(&bar0->general_int_mask);
1730 writeq(val64, &bar0->general_int_mask);
1734 /* MAC Interrupts */
1735 /* Enabling/Disabling MAC interrupts */
1736 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1737 val64 = TXMAC_INT_M | RXMAC_INT_M;
1738 if (flag == ENABLE_INTRS) {
1739 temp64 = readq(&bar0->general_int_mask);
1740 temp64 &= ~((u64) val64);
1741 writeq(temp64, &bar0->general_int_mask);
1743 * All MAC block error interrupts are disabled for now
1746 } else if (flag == DISABLE_INTRS) {
1748 * Disable MAC Intrs in the general intr mask register
1750 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1751 writeq(DISABLE_ALL_INTRS,
1752 &bar0->mac_rmac_err_mask);
1754 temp64 = readq(&bar0->general_int_mask);
1756 writeq(val64, &bar0->general_int_mask);
1760 /* Tx traffic interrupts */
1761 if (mask & TX_TRAFFIC_INTR) {
1762 val64 = TXTRAFFIC_INT_M;
1763 if (flag == ENABLE_INTRS) {
1764 temp64 = readq(&bar0->general_int_mask);
1765 temp64 &= ~((u64) val64);
1766 writeq(temp64, &bar0->general_int_mask);
1768 * Enable all the Tx side interrupts
1769 * writing 0 Enables all 64 TX interrupt levels
1771 writeq(0x0, &bar0->tx_traffic_mask);
1772 } else if (flag == DISABLE_INTRS) {
1774 * Disable Tx Traffic Intrs in the general intr mask
1777 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1778 temp64 = readq(&bar0->general_int_mask);
1780 writeq(val64, &bar0->general_int_mask);
1784 /* Rx traffic interrupts */
1785 if (mask & RX_TRAFFIC_INTR) {
1786 val64 = RXTRAFFIC_INT_M;
1787 if (flag == ENABLE_INTRS) {
1788 temp64 = readq(&bar0->general_int_mask);
1789 temp64 &= ~((u64) val64);
1790 writeq(temp64, &bar0->general_int_mask);
1791 /* writing 0 Enables all 8 RX interrupt levels */
1792 writeq(0x0, &bar0->rx_traffic_mask);
1793 } else if (flag == DISABLE_INTRS) {
1795 * Disable Rx Traffic Intrs in the general intr mask
1798 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1799 temp64 = readq(&bar0->general_int_mask);
1801 writeq(val64, &bar0->general_int_mask);
1807 * verify_pcc_quiescent- Checks for PCC quiescent state
1808 * Return: 1 If PCC is quiescence
1809 * 0 If PCC is not quiescence
1811 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1814 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1815 u64 val64 = readq(&bar0->adapter_status);
1817 herc = (sp->device_type == XFRAME_II_DEVICE);
1819 if (flag == FALSE) {
1820 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1821 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1824 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1828 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1829 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1830 ADAPTER_STATUS_RMAC_PCC_IDLE))
1833 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1834 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1842 * verify_xena_quiescence - Checks whether the H/W is ready
1843 * Description: Returns whether the H/W is ready to go or not. Depending
1844 * on whether adapter enable bit was written or not the comparison
1845 * differs and the calling function passes the input argument flag to
1847 * Return: 1 If xena is quiescence
1848 * 0 If Xena is not quiescence
1851 static int verify_xena_quiescence(struct s2io_nic *sp)
1854 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1855 u64 val64 = readq(&bar0->adapter_status);
1856 mode = s2io_verify_pci_mode(sp);
1858 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
1859 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
1862 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
1863 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
1866 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
1867 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
1870 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
1871 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
1874 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
1875 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
1878 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
1879 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
1882 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
1883 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
1886 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
1887 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
1892 * In PCI 33 mode, the P_PLL is not used, and therefore,
1893 * the the P_PLL_LOCK bit in the adapter_status register will
1896 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
1897 sp->device_type == XFRAME_II_DEVICE && mode !=
1899 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
1902 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1903 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1904 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
1911 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1912 * @sp: Pointer to device specifc structure
1914 * New procedure to clear mac address reading problems on Alpha platforms
1918 static void fix_mac_address(struct s2io_nic * sp)
1920 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1924 while (fix_mac[i] != END_SIGN) {
1925 writeq(fix_mac[i++], &bar0->gpio_control);
1927 val64 = readq(&bar0->gpio_control);
1932 * start_nic - Turns the device on
1933 * @nic : device private variable.
1935 * This function actually turns the device on. Before this function is
1936 * called,all Registers are configured from their reset states
1937 * and shared memory is allocated but the NIC is still quiescent. On
1938 * calling this function, the device interrupts are cleared and the NIC is
1939 * literally switched on by writing into the adapter control register.
1941 * SUCCESS on success and -1 on failure.
1944 static int start_nic(struct s2io_nic *nic)
1946 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1947 struct net_device *dev = nic->dev;
1948 register u64 val64 = 0;
1950 struct mac_info *mac_control;
1951 struct config_param *config;
1953 mac_control = &nic->mac_control;
1954 config = &nic->config;
1956 /* PRC Initialization and configuration */
1957 for (i = 0; i < config->rx_ring_num; i++) {
1958 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1959 &bar0->prc_rxd0_n[i]);
1961 val64 = readq(&bar0->prc_ctrl_n[i]);
1962 if (nic->config.bimodal)
1963 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1964 if (nic->rxd_mode == RXD_MODE_1)
1965 val64 |= PRC_CTRL_RC_ENABLED;
1967 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1968 if (nic->device_type == XFRAME_II_DEVICE)
1969 val64 |= PRC_CTRL_GROUP_READS;
1970 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
1971 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
1972 writeq(val64, &bar0->prc_ctrl_n[i]);
1975 if (nic->rxd_mode == RXD_MODE_3B) {
1976 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1977 val64 = readq(&bar0->rx_pa_cfg);
1978 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1979 writeq(val64, &bar0->rx_pa_cfg);
1982 if (vlan_tag_strip == 0) {
1983 val64 = readq(&bar0->rx_pa_cfg);
1984 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
1985 writeq(val64, &bar0->rx_pa_cfg);
1986 vlan_strip_flag = 0;
1990 * Enabling MC-RLDRAM. After enabling the device, we timeout
1991 * for around 100ms, which is approximately the time required
1992 * for the device to be ready for operation.
1994 val64 = readq(&bar0->mc_rldram_mrs);
1995 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1996 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1997 val64 = readq(&bar0->mc_rldram_mrs);
1999 msleep(100); /* Delay by around 100 ms. */
2001 /* Enabling ECC Protection. */
2002 val64 = readq(&bar0->adapter_control);
2003 val64 &= ~ADAPTER_ECC_EN;
2004 writeq(val64, &bar0->adapter_control);
2007 * Clearing any possible Link state change interrupts that
2008 * could have popped up just before Enabling the card.
2010 val64 = readq(&bar0->mac_rmac_err_reg);
2012 writeq(val64, &bar0->mac_rmac_err_reg);
2015 * Verify if the device is ready to be enabled, if so enable
2018 val64 = readq(&bar0->adapter_status);
2019 if (!verify_xena_quiescence(nic)) {
2020 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2021 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2022 (unsigned long long) val64);
2027 * With some switches, link might be already up at this point.
2028 * Because of this weird behavior, when we enable laser,
2029 * we may not get link. We need to handle this. We cannot
2030 * figure out which switch is misbehaving. So we are forced to
2031 * make a global change.
2034 /* Enabling Laser. */
2035 val64 = readq(&bar0->adapter_control);
2036 val64 |= ADAPTER_EOI_TX_ON;
2037 writeq(val64, &bar0->adapter_control);
2039 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2041 * Dont see link state interrupts initally on some switches,
2042 * so directly scheduling the link state task here.
2044 schedule_work(&nic->set_link_task);
2046 /* SXE-002: Initialize link and activity LED */
2047 subid = nic->pdev->subsystem_device;
2048 if (((subid & 0xFF) >= 0x07) &&
2049 (nic->device_type == XFRAME_I_DEVICE)) {
2050 val64 = readq(&bar0->gpio_control);
2051 val64 |= 0x0000800000000000ULL;
2052 writeq(val64, &bar0->gpio_control);
2053 val64 = 0x0411040400000000ULL;
2054 writeq(val64, (void __iomem *)bar0 + 0x2700);
2060 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2062 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2063 TxD *txdlp, int get_off)
2065 struct s2io_nic *nic = fifo_data->nic;
2066 struct sk_buff *skb;
2071 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2072 pci_unmap_single(nic->pdev, (dma_addr_t)
2073 txds->Buffer_Pointer, sizeof(u64),
2078 skb = (struct sk_buff *) ((unsigned long)
2079 txds->Host_Control);
2081 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2084 pci_unmap_single(nic->pdev, (dma_addr_t)
2085 txds->Buffer_Pointer,
2086 skb->len - skb->data_len,
2088 frg_cnt = skb_shinfo(skb)->nr_frags;
2091 for (j = 0; j < frg_cnt; j++, txds++) {
2092 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2093 if (!txds->Buffer_Pointer)
2095 pci_unmap_page(nic->pdev, (dma_addr_t)
2096 txds->Buffer_Pointer,
2097 frag->size, PCI_DMA_TODEVICE);
2100 memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2105 * free_tx_buffers - Free all queued Tx buffers
2106 * @nic : device private variable.
2108 * Free all queued Tx buffers.
2109 * Return Value: void
2112 static void free_tx_buffers(struct s2io_nic *nic)
2114 struct net_device *dev = nic->dev;
2115 struct sk_buff *skb;
2118 struct mac_info *mac_control;
2119 struct config_param *config;
2122 mac_control = &nic->mac_control;
2123 config = &nic->config;
2125 for (i = 0; i < config->tx_fifo_num; i++) {
2126 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2127 txdp = (struct TxD *) mac_control->fifos[i].list_info[j].
2129 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2136 "%s:forcibly freeing %d skbs on FIFO%d\n",
2138 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2139 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2144 * stop_nic - To stop the nic
2145 * @nic ; device private variable.
2147 * This function does exactly the opposite of what the start_nic()
2148 * function does. This function is called to stop the device.
2153 static void stop_nic(struct s2io_nic *nic)
2155 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2156 register u64 val64 = 0;
2158 struct mac_info *mac_control;
2159 struct config_param *config;
2161 mac_control = &nic->mac_control;
2162 config = &nic->config;
2164 /* Disable all interrupts */
2165 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2166 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2167 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2168 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2170 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2171 val64 = readq(&bar0->adapter_control);
2172 val64 &= ~(ADAPTER_CNTL_EN);
2173 writeq(val64, &bar0->adapter_control);
2176 static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
2179 struct net_device *dev = nic->dev;
2180 struct sk_buff *frag_list;
2183 /* Buffer-1 receives L3/L4 headers */
2184 ((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
2185 (nic->pdev, skb->data, l3l4hdr_size + 4,
2186 PCI_DMA_FROMDEVICE);
2188 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2189 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2190 if (skb_shinfo(skb)->frag_list == NULL) {
2191 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
2194 frag_list = skb_shinfo(skb)->frag_list;
2195 skb->truesize += frag_list->truesize;
2196 frag_list->next = NULL;
2197 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2198 frag_list->data = tmp;
2199 frag_list->tail = tmp;
2201 /* Buffer-2 receives L4 data payload */
2202 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2203 frag_list->data, dev->mtu,
2204 PCI_DMA_FROMDEVICE);
2205 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2206 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2212 * fill_rx_buffers - Allocates the Rx side skbs
2213 * @nic: device private variable
2214 * @ring_no: ring number
2216 * The function allocates Rx side skbs and puts the physical
2217 * address of these buffers into the RxD buffer pointers, so that the NIC
2218 * can DMA the received frame into these locations.
2219 * The NIC supports 3 receive modes, viz
2221 * 2. three buffer and
2222 * 3. Five buffer modes.
2223 * Each mode defines how many fragments the received frame will be split
2224 * up into by the NIC. The frame is split into L3 header, L4 Header,
2225 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2226 * is split into 3 fragments. As of now only single buffer mode is
2229 * SUCCESS on success or an appropriate -ve value on failure.
2232 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2234 struct net_device *dev = nic->dev;
2235 struct sk_buff *skb;
2237 int off, off1, size, block_no, block_no1;
2240 struct mac_info *mac_control;
2241 struct config_param *config;
2244 unsigned long flags;
2245 struct RxD_t *first_rxdp = NULL;
2247 mac_control = &nic->mac_control;
2248 config = &nic->config;
2249 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2250 atomic_read(&nic->rx_bufs_left[ring_no]);
2252 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2253 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2254 while (alloc_tab < alloc_cnt) {
2255 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2257 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2259 rxdp = mac_control->rings[ring_no].
2260 rx_blocks[block_no].rxds[off].virt_addr;
2262 if ((block_no == block_no1) && (off == off1) &&
2263 (rxdp->Host_Control)) {
2264 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2266 DBG_PRINT(INTR_DBG, " info equated\n");
2269 if (off && (off == rxd_count[nic->rxd_mode])) {
2270 mac_control->rings[ring_no].rx_curr_put_info.
2272 if (mac_control->rings[ring_no].rx_curr_put_info.
2273 block_index == mac_control->rings[ring_no].
2275 mac_control->rings[ring_no].rx_curr_put_info.
2277 block_no = mac_control->rings[ring_no].
2278 rx_curr_put_info.block_index;
2279 if (off == rxd_count[nic->rxd_mode])
2281 mac_control->rings[ring_no].rx_curr_put_info.
2283 rxdp = mac_control->rings[ring_no].
2284 rx_blocks[block_no].block_virt_addr;
2285 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2289 spin_lock_irqsave(&nic->put_lock, flags);
2290 mac_control->rings[ring_no].put_pos =
2291 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2292 spin_unlock_irqrestore(&nic->put_lock, flags);
2294 mac_control->rings[ring_no].put_pos =
2295 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2297 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2298 ((nic->rxd_mode >= RXD_MODE_3A) &&
2299 (rxdp->Control_2 & BIT(0)))) {
2300 mac_control->rings[ring_no].rx_curr_put_info.
2304 /* calculate size of skb based on ring mode */
2305 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2306 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2307 if (nic->rxd_mode == RXD_MODE_1)
2308 size += NET_IP_ALIGN;
2309 else if (nic->rxd_mode == RXD_MODE_3B)
2310 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2312 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
2315 skb = dev_alloc_skb(size);
2317 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2318 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2321 first_rxdp->Control_1 |= RXD_OWN_XENA;
2325 if (nic->rxd_mode == RXD_MODE_1) {
2326 /* 1 buffer mode - normal operation mode */
2327 memset(rxdp, 0, sizeof(struct RxD1));
2328 skb_reserve(skb, NET_IP_ALIGN);
2329 ((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
2330 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2331 PCI_DMA_FROMDEVICE);
2332 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2334 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2336 * 2 or 3 buffer mode -
2337 * Both 2 buffer mode and 3 buffer mode provides 128
2338 * byte aligned receive buffers.
2340 * 3 buffer mode provides header separation where in
2341 * skb->data will have L3/L4 headers where as
2342 * skb_shinfo(skb)->frag_list will have the L4 data
2346 memset(rxdp, 0, sizeof(struct RxD3));
2347 ba = &mac_control->rings[ring_no].ba[block_no][off];
2348 skb_reserve(skb, BUF0_LEN);
2349 tmp = (u64)(unsigned long) skb->data;
2352 skb->data = (void *) (unsigned long)tmp;
2353 skb->tail = (void *) (unsigned long)tmp;
2355 if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
2356 ((struct RxD3*)rxdp)->Buffer0_ptr =
2357 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2358 PCI_DMA_FROMDEVICE);
2360 pci_dma_sync_single_for_device(nic->pdev,
2361 (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
2362 BUF0_LEN, PCI_DMA_FROMDEVICE);
2363 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2364 if (nic->rxd_mode == RXD_MODE_3B) {
2365 /* Two buffer mode */
2368 * Buffer2 will have L3/L4 header plus
2371 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
2372 (nic->pdev, skb->data, dev->mtu + 4,
2373 PCI_DMA_FROMDEVICE);
2375 /* Buffer-1 will be dummy buffer. Not used */
2376 if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
2377 ((struct RxD3*)rxdp)->Buffer1_ptr =
2378 pci_map_single(nic->pdev,
2380 PCI_DMA_FROMDEVICE);
2382 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2383 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2387 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2388 dev_kfree_skb_irq(skb);
2391 first_rxdp->Control_1 |=
2397 rxdp->Control_2 |= BIT(0);
2399 rxdp->Host_Control = (unsigned long) (skb);
2400 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2401 rxdp->Control_1 |= RXD_OWN_XENA;
2403 if (off == (rxd_count[nic->rxd_mode] + 1))
2405 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2407 rxdp->Control_2 |= SET_RXD_MARKER;
2408 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2411 first_rxdp->Control_1 |= RXD_OWN_XENA;
2415 atomic_inc(&nic->rx_bufs_left[ring_no]);
2420 /* Transfer ownership of first descriptor to adapter just before
2421 * exiting. Before that, use memory barrier so that ownership
2422 * and other fields are seen by adapter correctly.
2426 first_rxdp->Control_1 |= RXD_OWN_XENA;
2432 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2434 struct net_device *dev = sp->dev;
2436 struct sk_buff *skb;
2438 struct mac_info *mac_control;
2441 mac_control = &sp->mac_control;
2442 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2443 rxdp = mac_control->rings[ring_no].
2444 rx_blocks[blk].rxds[j].virt_addr;
2445 skb = (struct sk_buff *)
2446 ((unsigned long) rxdp->Host_Control);
2450 if (sp->rxd_mode == RXD_MODE_1) {
2451 pci_unmap_single(sp->pdev, (dma_addr_t)
2452 ((struct RxD1*)rxdp)->Buffer0_ptr,
2454 HEADER_ETHERNET_II_802_3_SIZE
2455 + HEADER_802_2_SIZE +
2457 PCI_DMA_FROMDEVICE);
2458 memset(rxdp, 0, sizeof(struct RxD1));
2459 } else if(sp->rxd_mode == RXD_MODE_3B) {
2460 ba = &mac_control->rings[ring_no].
2462 pci_unmap_single(sp->pdev, (dma_addr_t)
2463 ((struct RxD3*)rxdp)->Buffer0_ptr,
2465 PCI_DMA_FROMDEVICE);
2466 pci_unmap_single(sp->pdev, (dma_addr_t)
2467 ((struct RxD3*)rxdp)->Buffer1_ptr,
2469 PCI_DMA_FROMDEVICE);
2470 pci_unmap_single(sp->pdev, (dma_addr_t)
2471 ((struct RxD3*)rxdp)->Buffer2_ptr,
2473 PCI_DMA_FROMDEVICE);
2474 memset(rxdp, 0, sizeof(struct RxD3));
2476 pci_unmap_single(sp->pdev, (dma_addr_t)
2477 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2478 PCI_DMA_FROMDEVICE);
2479 pci_unmap_single(sp->pdev, (dma_addr_t)
2480 ((struct RxD3*)rxdp)->Buffer1_ptr,
2482 PCI_DMA_FROMDEVICE);
2483 pci_unmap_single(sp->pdev, (dma_addr_t)
2484 ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
2485 PCI_DMA_FROMDEVICE);
2486 memset(rxdp, 0, sizeof(struct RxD3));
2489 atomic_dec(&sp->rx_bufs_left[ring_no]);
2494 * free_rx_buffers - Frees all Rx buffers
2495 * @sp: device private variable.
2497 * This function will free all Rx buffers allocated by host.
2502 static void free_rx_buffers(struct s2io_nic *sp)
2504 struct net_device *dev = sp->dev;
2505 int i, blk = 0, buf_cnt = 0;
2506 struct mac_info *mac_control;
2507 struct config_param *config;
2509 mac_control = &sp->mac_control;
2510 config = &sp->config;
2512 for (i = 0; i < config->rx_ring_num; i++) {
2513 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2514 free_rxd_blk(sp,i,blk);
2516 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2517 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2518 mac_control->rings[i].rx_curr_put_info.offset = 0;
2519 mac_control->rings[i].rx_curr_get_info.offset = 0;
2520 atomic_set(&sp->rx_bufs_left[i], 0);
2521 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2522 dev->name, buf_cnt, i);
2527 * s2io_poll - Rx interrupt handler for NAPI support
2528 * @dev : pointer to the device structure.
2529 * @budget : The number of packets that were budgeted to be processed
2530 * during one pass through the 'Poll" function.
2532 * Comes into picture only if NAPI support has been incorporated. It does
2533 * the same thing that rx_intr_handler does, but not in a interrupt context
2534 * also It will process only a given number of packets.
2536 * 0 on success and 1 if there are No Rx packets to be processed.
2539 static int s2io_poll(struct net_device *dev, int *budget)
2541 struct s2io_nic *nic = dev->priv;
2542 int pkt_cnt = 0, org_pkts_to_process;
2543 struct mac_info *mac_control;
2544 struct config_param *config;
2545 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2548 atomic_inc(&nic->isr_cnt);
2549 mac_control = &nic->mac_control;
2550 config = &nic->config;
2552 nic->pkts_to_process = *budget;
2553 if (nic->pkts_to_process > dev->quota)
2554 nic->pkts_to_process = dev->quota;
2555 org_pkts_to_process = nic->pkts_to_process;
2557 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2558 readl(&bar0->rx_traffic_int);
2560 for (i = 0; i < config->rx_ring_num; i++) {
2561 rx_intr_handler(&mac_control->rings[i]);
2562 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2563 if (!nic->pkts_to_process) {
2564 /* Quota for the current iteration has been met */
2571 dev->quota -= pkt_cnt;
2573 netif_rx_complete(dev);
2575 for (i = 0; i < config->rx_ring_num; i++) {
2576 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2577 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2578 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2582 /* Re enable the Rx interrupts. */
2583 writeq(0x0, &bar0->rx_traffic_mask);
2584 readl(&bar0->rx_traffic_mask);
2585 atomic_dec(&nic->isr_cnt);
2589 dev->quota -= pkt_cnt;
2592 for (i = 0; i < config->rx_ring_num; i++) {
2593 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2594 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2595 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2599 atomic_dec(&nic->isr_cnt);
2603 #ifdef CONFIG_NET_POLL_CONTROLLER
2605 * s2io_netpoll - netpoll event handler entry point
2606 * @dev : pointer to the device structure.
2608 * This function will be called by upper layer to check for events on the
2609 * interface in situations where interrupts are disabled. It is used for
2610 * specific in-kernel networking tasks, such as remote consoles and kernel
2611 * debugging over the network (example netdump in RedHat).
2613 static void s2io_netpoll(struct net_device *dev)
2615 struct s2io_nic *nic = dev->priv;
2616 struct mac_info *mac_control;
2617 struct config_param *config;
2618 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2619 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2622 disable_irq(dev->irq);
2624 atomic_inc(&nic->isr_cnt);
2625 mac_control = &nic->mac_control;
2626 config = &nic->config;
2628 writeq(val64, &bar0->rx_traffic_int);
2629 writeq(val64, &bar0->tx_traffic_int);
2631 /* we need to free up the transmitted skbufs or else netpoll will
2632 * run out of skbs and will fail and eventually netpoll application such
2633 * as netdump will fail.
2635 for (i = 0; i < config->tx_fifo_num; i++)
2636 tx_intr_handler(&mac_control->fifos[i]);
2638 /* check for received packet and indicate up to network */
2639 for (i = 0; i < config->rx_ring_num; i++)
2640 rx_intr_handler(&mac_control->rings[i]);
2642 for (i = 0; i < config->rx_ring_num; i++) {
2643 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2644 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2645 DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
2649 atomic_dec(&nic->isr_cnt);
2650 enable_irq(dev->irq);
2656 * rx_intr_handler - Rx interrupt handler
2657 * @nic: device private variable.
2659 * If the interrupt is because of a received frame or if the
2660 * receive ring contains fresh as yet un-processed frames,this function is
2661 * called. It picks out the RxD at which place the last Rx processing had
2662 * stopped and sends the skb to the OSM's Rx handler and then increments
2667 static void rx_intr_handler(struct ring_info *ring_data)
2669 struct s2io_nic *nic = ring_data->nic;
2670 struct net_device *dev = (struct net_device *) nic->dev;
2671 int get_block, put_block, put_offset;
2672 struct rx_curr_get_info get_info, put_info;
2674 struct sk_buff *skb;
2678 spin_lock(&nic->rx_lock);
2679 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2680 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2681 __FUNCTION__, dev->name);
2682 spin_unlock(&nic->rx_lock);
2686 get_info = ring_data->rx_curr_get_info;
2687 get_block = get_info.block_index;
2688 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2689 put_block = put_info.block_index;
2690 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2692 spin_lock(&nic->put_lock);
2693 put_offset = ring_data->put_pos;
2694 spin_unlock(&nic->put_lock);
2696 put_offset = ring_data->put_pos;
2698 while (RXD_IS_UP2DT(rxdp)) {
2700 * If your are next to put index then it's
2701 * FIFO full condition
2703 if ((get_block == put_block) &&
2704 (get_info.offset + 1) == put_info.offset) {
2705 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2708 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2710 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2712 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2713 spin_unlock(&nic->rx_lock);
2716 if (nic->rxd_mode == RXD_MODE_1) {
2717 pci_unmap_single(nic->pdev, (dma_addr_t)
2718 ((struct RxD1*)rxdp)->Buffer0_ptr,
2720 HEADER_ETHERNET_II_802_3_SIZE +
2723 PCI_DMA_FROMDEVICE);
2724 } else if (nic->rxd_mode == RXD_MODE_3B) {
2725 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2726 ((struct RxD3*)rxdp)->Buffer0_ptr,
2727 BUF0_LEN, PCI_DMA_FROMDEVICE);
2728 pci_unmap_single(nic->pdev, (dma_addr_t)
2729 ((struct RxD3*)rxdp)->Buffer2_ptr,
2731 PCI_DMA_FROMDEVICE);
2733 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2734 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2735 PCI_DMA_FROMDEVICE);
2736 pci_unmap_single(nic->pdev, (dma_addr_t)
2737 ((struct RxD3*)rxdp)->Buffer1_ptr,
2739 PCI_DMA_FROMDEVICE);
2740 pci_unmap_single(nic->pdev, (dma_addr_t)
2741 ((struct RxD3*)rxdp)->Buffer2_ptr,
2742 dev->mtu, PCI_DMA_FROMDEVICE);
2744 prefetch(skb->data);
2745 rx_osm_handler(ring_data, rxdp);
2747 ring_data->rx_curr_get_info.offset = get_info.offset;
2748 rxdp = ring_data->rx_blocks[get_block].
2749 rxds[get_info.offset].virt_addr;
2750 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2751 get_info.offset = 0;
2752 ring_data->rx_curr_get_info.offset = get_info.offset;
2754 if (get_block == ring_data->block_count)
2756 ring_data->rx_curr_get_info.block_index = get_block;
2757 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2760 nic->pkts_to_process -= 1;
2761 if ((napi) && (!nic->pkts_to_process))
2764 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2768 /* Clear all LRO sessions before exiting */
2769 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2770 struct lro *lro = &nic->lro0_n[i];
2772 update_L3L4_header(nic, lro);
2773 queue_rx_frame(lro->parent);
2774 clear_lro_session(lro);
2779 spin_unlock(&nic->rx_lock);
2783 * tx_intr_handler - Transmit interrupt handler
2784 * @nic : device private variable
2786 * If an interrupt was raised to indicate DMA complete of the
2787 * Tx packet, this function is called. It identifies the last TxD
2788 * whose buffer was freed and frees all skbs whose data have already
2789 * DMA'ed into the NICs internal memory.
2794 static void tx_intr_handler(struct fifo_info *fifo_data)
2796 struct s2io_nic *nic = fifo_data->nic;
2797 struct net_device *dev = (struct net_device *) nic->dev;
2798 struct tx_curr_get_info get_info, put_info;
2799 struct sk_buff *skb;
2802 get_info = fifo_data->tx_curr_get_info;
2803 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2804 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
2806 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2807 (get_info.offset != put_info.offset) &&
2808 (txdlp->Host_Control)) {
2809 /* Check for TxD errors */
2810 if (txdlp->Control_1 & TXD_T_CODE) {
2811 unsigned long long err;
2812 err = txdlp->Control_1 & TXD_T_CODE;
2814 nic->mac_control.stats_info->sw_stat.
2817 if ((err >> 48) == 0xA) {
2818 DBG_PRINT(TX_DBG, "TxD returned due \
2819 to loss of link\n");
2822 DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
2826 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2828 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2830 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2834 /* Updating the statistics block */
2835 nic->stats.tx_bytes += skb->len;
2836 dev_kfree_skb_irq(skb);
2839 if (get_info.offset == get_info.fifo_len + 1)
2840 get_info.offset = 0;
2841 txdlp = (struct TxD *) fifo_data->list_info
2842 [get_info.offset].list_virt_addr;
2843 fifo_data->tx_curr_get_info.offset =
2847 spin_lock(&nic->tx_lock);
2848 if (netif_queue_stopped(dev))
2849 netif_wake_queue(dev);
2850 spin_unlock(&nic->tx_lock);
2854 * s2io_mdio_write - Function to write in to MDIO registers
2855 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2856 * @addr : address value
2857 * @value : data value
2858 * @dev : pointer to net_device structure
2860 * This function is used to write values to the MDIO registers
2863 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2866 struct s2io_nic *sp = dev->priv;
2867 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2869 //address transaction
2870 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2871 | MDIO_MMD_DEV_ADDR(mmd_type)
2872 | MDIO_MMS_PRT_ADDR(0x0);
2873 writeq(val64, &bar0->mdio_control);
2874 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2875 writeq(val64, &bar0->mdio_control);
2880 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2881 | MDIO_MMD_DEV_ADDR(mmd_type)
2882 | MDIO_MMS_PRT_ADDR(0x0)
2883 | MDIO_MDIO_DATA(value)
2884 | MDIO_OP(MDIO_OP_WRITE_TRANS);
2885 writeq(val64, &bar0->mdio_control);
2886 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2887 writeq(val64, &bar0->mdio_control);
2891 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2892 | MDIO_MMD_DEV_ADDR(mmd_type)
2893 | MDIO_MMS_PRT_ADDR(0x0)
2894 | MDIO_OP(MDIO_OP_READ_TRANS);
2895 writeq(val64, &bar0->mdio_control);
2896 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2897 writeq(val64, &bar0->mdio_control);
2903 * s2io_mdio_read - Function to write in to MDIO registers
2904 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2905 * @addr : address value
2906 * @dev : pointer to net_device structure
2908 * This function is used to read values to the MDIO registers
2911 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2915 struct s2io_nic *sp = dev->priv;
2916 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2918 /* address transaction */
2919 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2920 | MDIO_MMD_DEV_ADDR(mmd_type)
2921 | MDIO_MMS_PRT_ADDR(0x0);
2922 writeq(val64, &bar0->mdio_control);
2923 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2924 writeq(val64, &bar0->mdio_control);
2927 /* Data transaction */
2929 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2930 | MDIO_MMD_DEV_ADDR(mmd_type)
2931 | MDIO_MMS_PRT_ADDR(0x0)
2932 | MDIO_OP(MDIO_OP_READ_TRANS);
2933 writeq(val64, &bar0->mdio_control);
2934 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2935 writeq(val64, &bar0->mdio_control);
2938 /* Read the value from regs */
2939 rval64 = readq(&bar0->mdio_control);
2940 rval64 = rval64 & 0xFFFF0000;
2941 rval64 = rval64 >> 16;
2945 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
2946 * @counter : couter value to be updated
2947 * @flag : flag to indicate the status
2948 * @type : counter type
2950 * This function is to check the status of the xpak counters value
2954 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
2959 for(i = 0; i <index; i++)
2964 *counter = *counter + 1;
2965 val64 = *regs_stat & mask;
2966 val64 = val64 >> (index * 0x2);
2973 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2974 "service. Excessive temperatures may "
2975 "result in premature transceiver "
2979 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2980 "service Excessive bias currents may "
2981 "indicate imminent laser diode "
2985 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2986 "service Excessive laser output "
2987 "power may saturate far-end "
2991 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
2996 val64 = val64 << (index * 0x2);
2997 *regs_stat = (*regs_stat & (~mask)) | (val64);
3000 *regs_stat = *regs_stat & (~mask);
3005 * s2io_updt_xpak_counter - Function to update the xpak counters
3006 * @dev : pointer to net_device struct
3008 * This function is to upate the status of the xpak counters value
3011 static void s2io_updt_xpak_counter(struct net_device *dev)
3019 struct s2io_nic *sp = dev->priv;
3020 struct stat_block *stat_info = sp->mac_control.stats_info;
3022 /* Check the communication with the MDIO slave */
3025 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3026 if((val64 == 0xFFFF) || (val64 == 0x0000))
3028 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3029 "Returned %llx\n", (unsigned long long)val64);
3033 /* Check for the expecte value of 2040 at PMA address 0x0000 */
3036 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3037 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3038 (unsigned long long)val64);
3042 /* Loading the DOM register to MDIO register */
3044 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3045 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3047 /* Reading the Alarm flags */
3050 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3052 flag = CHECKBIT(val64, 0x7);
3054 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3055 &stat_info->xpak_stat.xpak_regs_stat,
3058 if(CHECKBIT(val64, 0x6))
3059 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3061 flag = CHECKBIT(val64, 0x3);
3063 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3064 &stat_info->xpak_stat.xpak_regs_stat,
3067 if(CHECKBIT(val64, 0x2))
3068 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3070 flag = CHECKBIT(val64, 0x1);
3072 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3073 &stat_info->xpak_stat.xpak_regs_stat,
3076 if(CHECKBIT(val64, 0x0))
3077 stat_info->xpak_stat.alarm_laser_output_power_low++;
3079 /* Reading the Warning flags */
3082 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3084 if(CHECKBIT(val64, 0x7))
3085 stat_info->xpak_stat.warn_transceiver_temp_high++;
3087 if(CHECKBIT(val64, 0x6))
3088 stat_info->xpak_stat.warn_transceiver_temp_low++;
3090 if(CHECKBIT(val64, 0x3))
3091 stat_info->xpak_stat.warn_laser_bias_current_high++;
3093 if(CHECKBIT(val64, 0x2))
3094 stat_info->xpak_stat.warn_laser_bias_current_low++;
3096 if(CHECKBIT(val64, 0x1))
3097 stat_info->xpak_stat.warn_laser_output_power_high++;
3099 if(CHECKBIT(val64, 0x0))
3100 stat_info->xpak_stat.warn_laser_output_power_low++;
3104 * alarm_intr_handler - Alarm Interrrupt handler
3105 * @nic: device private variable
3106 * Description: If the interrupt was neither because of Rx packet or Tx
3107 * complete, this function is called. If the interrupt was to indicate
3108 * a loss of link, the OSM link status handler is invoked for any other
3109 * alarm interrupt the block that raised the interrupt is displayed
3110 * and a H/W reset is issued.
3115 static void alarm_intr_handler(struct s2io_nic *nic)
3117 struct net_device *dev = (struct net_device *) nic->dev;
3118 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3119 register u64 val64 = 0, err_reg = 0;
3122 if (atomic_read(&nic->card_state) == CARD_DOWN)
3124 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3125 /* Handling the XPAK counters update */
3126 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
3127 /* waiting for an hour */
3128 nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
3130 s2io_updt_xpak_counter(dev);
3131 /* reset the count to zero */
3132 nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
3135 /* Handling link status change error Intr */
3136 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
3137 err_reg = readq(&bar0->mac_rmac_err_reg);
3138 writeq(err_reg, &bar0->mac_rmac_err_reg);
3139 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
3140 schedule_work(&nic->set_link_task);
3144 /* Handling Ecc errors */
3145 val64 = readq(&bar0->mc_err_reg);
3146 writeq(val64, &bar0->mc_err_reg);
3147 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
3148 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
3149 nic->mac_control.stats_info->sw_stat.
3151 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
3153 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
3154 if (nic->device_type != XFRAME_II_DEVICE) {
3155 /* Reset XframeI only if critical error */
3156 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
3157 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
3158 netif_stop_queue(dev);
3159 schedule_work(&nic->rst_timer_task);
3160 nic->mac_control.stats_info->sw_stat.
3165 nic->mac_control.stats_info->sw_stat.
3170 /* In case of a serious error, the device will be Reset. */
3171 val64 = readq(&bar0->serr_source);
3172 if (val64 & SERR_SOURCE_ANY) {
3173 nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
3174 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
3175 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
3176 (unsigned long long)val64);
3177 netif_stop_queue(dev);
3178 schedule_work(&nic->rst_timer_task);
3179 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3183 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
3184 * Error occurs, the adapter will be recycled by disabling the
3185 * adapter enable bit and enabling it again after the device
3186 * becomes Quiescent.
3188 val64 = readq(&bar0->pcc_err_reg);
3189 writeq(val64, &bar0->pcc_err_reg);
3190 if (val64 & PCC_FB_ECC_DB_ERR) {
3191 u64 ac = readq(&bar0->adapter_control);
3192 ac &= ~(ADAPTER_CNTL_EN);
3193 writeq(ac, &bar0->adapter_control);
3194 ac = readq(&bar0->adapter_control);
3195 schedule_work(&nic->set_link_task);
3197 /* Check for data parity error */
3198 val64 = readq(&bar0->pic_int_status);
3199 if (val64 & PIC_INT_GPIO) {
3200 val64 = readq(&bar0->gpio_int_reg);
3201 if (val64 & GPIO_INT_REG_DP_ERR_INT) {
3202 nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
3203 schedule_work(&nic->rst_timer_task);
3204 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3208 /* Check for ring full counter */
3209 if (nic->device_type & XFRAME_II_DEVICE) {
3210 val64 = readq(&bar0->ring_bump_counter1);
3211 for (i=0; i<4; i++) {
3212 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3213 cnt >>= 64 - ((i+1)*16);
3214 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3218 val64 = readq(&bar0->ring_bump_counter2);
3219 for (i=0; i<4; i++) {
3220 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3221 cnt >>= 64 - ((i+1)*16);
3222 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3227 /* Other type of interrupts are not being handled now, TODO */
3231 * wait_for_cmd_complete - waits for a command to complete.
3232 * @sp : private member of the device structure, which is a pointer to the
3233 * s2io_nic structure.
3234 * Description: Function that waits for a command to Write into RMAC
3235 * ADDR DATA registers to be completed and returns either success or
3236 * error depending on whether the command was complete or not.
3238 * SUCCESS on success and FAILURE on failure.
3241 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3244 int ret = FAILURE, cnt = 0, delay = 1;
3247 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3251 val64 = readq(addr);
3252 if (bit_state == S2IO_BIT_RESET) {
3253 if (!(val64 & busy_bit)) {
3258 if (!(val64 & busy_bit)) {
3275 * check_pci_device_id - Checks if the device id is supported
3277 * Description: Function to check if the pci device id is supported by driver.
3278 * Return value: Actual device id if supported else PCI_ANY_ID
3280 static u16 check_pci_device_id(u16 id)
3283 case PCI_DEVICE_ID_HERC_WIN:
3284 case PCI_DEVICE_ID_HERC_UNI:
3285 return XFRAME_II_DEVICE;
3286 case PCI_DEVICE_ID_S2IO_UNI:
3287 case PCI_DEVICE_ID_S2IO_WIN:
3288 return XFRAME_I_DEVICE;
3295 * s2io_reset - Resets the card.
3296 * @sp : private member of the device structure.
3297 * Description: Function to Reset the card. This function then also
3298 * restores the previously saved PCI configuration space registers as
3299 * the card reset also resets the configuration space.
3304 static void s2io_reset(struct s2io_nic * sp)
3306 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3311 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3312 __FUNCTION__, sp->dev->name);
3314 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3315 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3317 if (sp->device_type == XFRAME_II_DEVICE) {
3319 ret = pci_set_power_state(sp->pdev, 3);
3321 ret = pci_set_power_state(sp->pdev, 0);
3323 DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
3331 val64 = SW_RESET_ALL;
3332 writeq(val64, &bar0->sw_reset);
3334 if (strstr(sp->product_name, "CX4")) {
3338 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3340 /* Restore the PCI state saved during initialization. */
3341 pci_restore_state(sp->pdev);
3342 pci_read_config_word(sp->pdev, 0x2, &val16);
3343 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3348 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3349 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3352 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3356 /* Set swapper to enable I/O register access */
3357 s2io_set_swapper(sp);
3359 /* Restore the MSIX table entries from local variables */
3360 restore_xmsi_data(sp);
3362 /* Clear certain PCI/PCI-X fields after reset */
3363 if (sp->device_type == XFRAME_II_DEVICE) {
3364 /* Clear "detected parity error" bit */
3365 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3367 /* Clearing PCIX Ecc status register */
3368 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3370 /* Clearing PCI_STATUS error reflected here */
3371 writeq(BIT(62), &bar0->txpic_int_reg);
3374 /* Reset device statistics maintained by OS */
3375 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3377 /* SXE-002: Configure link and activity LED to turn it off */
3378 subid = sp->pdev->subsystem_device;
3379 if (((subid & 0xFF) >= 0x07) &&
3380 (sp->device_type == XFRAME_I_DEVICE)) {
3381 val64 = readq(&bar0->gpio_control);
3382 val64 |= 0x0000800000000000ULL;
3383 writeq(val64, &bar0->gpio_control);
3384 val64 = 0x0411040400000000ULL;
3385 writeq(val64, (void __iomem *)bar0 + 0x2700);
3389 * Clear spurious ECC interrupts that would have occured on
3390 * XFRAME II cards after reset.
3392 if (sp->device_type == XFRAME_II_DEVICE) {
3393 val64 = readq(&bar0->pcc_err_reg);
3394 writeq(val64, &bar0->pcc_err_reg);
3397 sp->device_enabled_once = FALSE;
3401 * s2io_set_swapper - to set the swapper controle on the card
3402 * @sp : private member of the device structure,
3403 * pointer to the s2io_nic structure.
3404 * Description: Function to set the swapper control on the card
3405 * correctly depending on the 'endianness' of the system.
3407 * SUCCESS on success and FAILURE on failure.
3410 static int s2io_set_swapper(struct s2io_nic * sp)
3412 struct net_device *dev = sp->dev;
3413 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3414 u64 val64, valt, valr;
3417 * Set proper endian settings and verify the same by reading
3418 * the PIF Feed-back register.
3421 val64 = readq(&bar0->pif_rd_swapper_fb);
3422 if (val64 != 0x0123456789ABCDEFULL) {
3424 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3425 0x8100008181000081ULL, /* FE=1, SE=0 */
3426 0x4200004242000042ULL, /* FE=0, SE=1 */
3427 0}; /* FE=0, SE=0 */
3430 writeq(value[i], &bar0->swapper_ctrl);
3431 val64 = readq(&bar0->pif_rd_swapper_fb);
3432 if (val64 == 0x0123456789ABCDEFULL)
3437 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3439 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3440 (unsigned long long) val64);
3445 valr = readq(&bar0->swapper_ctrl);
3448 valt = 0x0123456789ABCDEFULL;
3449 writeq(valt, &bar0->xmsi_address);
3450 val64 = readq(&bar0->xmsi_address);
3454 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3455 0x0081810000818100ULL, /* FE=1, SE=0 */
3456 0x0042420000424200ULL, /* FE=0, SE=1 */
3457 0}; /* FE=0, SE=0 */
3460 writeq((value[i] | valr), &bar0->swapper_ctrl);
3461 writeq(valt, &bar0->xmsi_address);
3462 val64 = readq(&bar0->xmsi_address);
3468 unsigned long long x = val64;
3469 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3470 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3474 val64 = readq(&bar0->swapper_ctrl);
3475 val64 &= 0xFFFF000000000000ULL;
3479 * The device by default set to a big endian format, so a
3480 * big endian driver need not set anything.
3482 val64 |= (SWAPPER_CTRL_TXP_FE |
3483 SWAPPER_CTRL_TXP_SE |
3484 SWAPPER_CTRL_TXD_R_FE |
3485 SWAPPER_CTRL_TXD_W_FE |
3486 SWAPPER_CTRL_TXF_R_FE |
3487 SWAPPER_CTRL_RXD_R_FE |
3488 SWAPPER_CTRL_RXD_W_FE |
3489 SWAPPER_CTRL_RXF_W_FE |
3490 SWAPPER_CTRL_XMSI_FE |
3491 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3492 if (sp->intr_type == INTA)
3493 val64 |= SWAPPER_CTRL_XMSI_SE;
3494 writeq(val64, &bar0->swapper_ctrl);
3497 * Initially we enable all bits to make it accessible by the
3498 * driver, then we selectively enable only those bits that
3501 val64 |= (SWAPPER_CTRL_TXP_FE |
3502 SWAPPER_CTRL_TXP_SE |
3503 SWAPPER_CTRL_TXD_R_FE |
3504 SWAPPER_CTRL_TXD_R_SE |
3505 SWAPPER_CTRL_TXD_W_FE |
3506 SWAPPER_CTRL_TXD_W_SE |
3507 SWAPPER_CTRL_TXF_R_FE |
3508 SWAPPER_CTRL_RXD_R_FE |
3509 SWAPPER_CTRL_RXD_R_SE |
3510 SWAPPER_CTRL_RXD_W_FE |
3511 SWAPPER_CTRL_RXD_W_SE |
3512 SWAPPER_CTRL_RXF_W_FE |
3513 SWAPPER_CTRL_XMSI_FE |
3514 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3515 if (sp->intr_type == INTA)
3516 val64 |= SWAPPER_CTRL_XMSI_SE;
3517 writeq(val64, &bar0->swapper_ctrl);
3519 val64 = readq(&bar0->swapper_ctrl);
3522 * Verifying if endian settings are accurate by reading a
3523 * feedback register.
3525 val64 = readq(&bar0->pif_rd_swapper_fb);
3526 if (val64 != 0x0123456789ABCDEFULL) {
3527 /* Endian settings are incorrect, calls for another dekko. */
3528 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3530 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3531 (unsigned long long) val64);
3538 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3540 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3542 int ret = 0, cnt = 0;
3545 val64 = readq(&bar0->xmsi_access);
3546 if (!(val64 & BIT(15)))
3552 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3559 static void restore_xmsi_data(struct s2io_nic *nic)
3561 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3565 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3566 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3567 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3568 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3569 writeq(val64, &bar0->xmsi_access);
3570 if (wait_for_msix_trans(nic, i)) {
3571 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3577 static void store_xmsi_data(struct s2io_nic *nic)
3579 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3580 u64 val64, addr, data;
3583 /* Store and display */
3584 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3585 val64 = (BIT(15) | vBIT(i, 26, 6));
3586 writeq(val64, &bar0->xmsi_access);
3587 if (wait_for_msix_trans(nic, i)) {
3588 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3591 addr = readq(&bar0->xmsi_address);
3592 data = readq(&bar0->xmsi_data);
3594 nic->msix_info[i].addr = addr;
3595 nic->msix_info[i].data = data;
3600 int s2io_enable_msi(struct s2io_nic *nic)
3602 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3603 u16 msi_ctrl, msg_val;
3604 struct config_param *config = &nic->config;
3605 struct net_device *dev = nic->dev;
3606 u64 val64, tx_mat, rx_mat;
3609 val64 = readq(&bar0->pic_control);
3611 writeq(val64, &bar0->pic_control);
3613 err = pci_enable_msi(nic->pdev);
3615 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3621 * Enable MSI and use MSI-1 in stead of the standard MSI-0
3622 * for interrupt handling.
3624 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3626 pci_write_config_word(nic->pdev, 0x4c, msg_val);
3627 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3629 pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3631 pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3633 /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3634 tx_mat = readq(&bar0->tx_mat0_n[0]);
3635 for (i=0; i<config->tx_fifo_num; i++) {
3636 tx_mat |= TX_MAT_SET(i, 1);
3638 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3640 rx_mat = readq(&bar0->rx_mat);
3641 for (i=0; i<config->rx_ring_num; i++) {
3642 rx_mat |= RX_MAT_SET(i, 1);
3644 writeq(rx_mat, &bar0->rx_mat);
3646 dev->irq = nic->pdev->irq;
3650 static int s2io_enable_msi_x(struct s2io_nic *nic)
3652 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3654 u16 msi_control; /* Temp variable */
3655 int ret, i, j, msix_indx = 1;
3657 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3659 if (nic->entries == NULL) {
3660 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3663 memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3666 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3668 if (nic->s2io_entries == NULL) {
3669 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3670 kfree(nic->entries);
3673 memset(nic->s2io_entries, 0,
3674 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3676 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3677 nic->entries[i].entry = i;
3678 nic->s2io_entries[i].entry = i;
3679 nic->s2io_entries[i].arg = NULL;
3680 nic->s2io_entries[i].in_use = 0;
3683 tx_mat = readq(&bar0->tx_mat0_n[0]);
3684 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3685 tx_mat |= TX_MAT_SET(i, msix_indx);
3686 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3687 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3688 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3690 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3692 if (!nic->config.bimodal) {
3693 rx_mat = readq(&bar0->rx_mat);
3694 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3695 rx_mat |= RX_MAT_SET(j, msix_indx);
3696 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3697 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3698 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3700 writeq(rx_mat, &bar0->rx_mat);
3702 tx_mat = readq(&bar0->tx_mat0_n[7]);
3703 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3704 tx_mat |= TX_MAT_SET(i, msix_indx);
3705 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3706 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3707 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3709 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3712 nic->avail_msix_vectors = 0;
3713 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3714 /* We fail init if error or we get less vectors than min required */
3715 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3716 nic->avail_msix_vectors = ret;
3717 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3720 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3721 kfree(nic->entries);
3722 kfree(nic->s2io_entries);
3723 nic->entries = NULL;
3724 nic->s2io_entries = NULL;
3725 nic->avail_msix_vectors = 0;
3728 if (!nic->avail_msix_vectors)
3729 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3732 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3733 * in the herc NIC. (Temp change, needs to be removed later)
3735 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3736 msi_control |= 0x1; /* Enable MSI */
3737 pci_write_config_word(nic->pdev, 0x42, msi_control);
3742 /* ********************************************************* *
3743 * Functions defined below concern the OS part of the driver *
3744 * ********************************************************* */
3747 * s2io_open - open entry point of the driver
3748 * @dev : pointer to the device structure.
3750 * This function is the open entry point of the driver. It mainly calls a
3751 * function to allocate Rx buffers and inserts them into the buffer
3752 * descriptors and then enables the Rx part of the NIC.
3754 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3758 static int s2io_open(struct net_device *dev)
3760 struct s2io_nic *sp = dev->priv;
3764 * Make sure you have link off by default every time
3765 * Nic is initialized
3767 netif_carrier_off(dev);
3768 sp->last_link_state = 0;
3770 /* Initialize H/W and enable interrupts */
3771 err = s2io_card_up(sp);
3773 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3775 goto hw_init_failed;
3778 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3779 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3782 goto hw_init_failed;
3785 netif_start_queue(dev);
3789 if (sp->intr_type == MSI_X) {
3792 if (sp->s2io_entries)
3793 kfree(sp->s2io_entries);
3799 * s2io_close -close entry point of the driver
3800 * @dev : device pointer.
3802 * This is the stop entry point of the driver. It needs to undo exactly
3803 * whatever was done by the open entry point,thus it's usually referred to
3804 * as the close function.Among other things this function mainly stops the
3805 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3807 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3811 static int s2io_close(struct net_device *dev)
3813 struct s2io_nic *sp = dev->priv;
3815 netif_stop_queue(dev);
3816 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3819 sp->device_close_flag = TRUE; /* Device is shut down. */
3824 * s2io_xmit - Tx entry point of te driver
3825 * @skb : the socket buffer containing the Tx data.
3826 * @dev : device pointer.
3828 * This function is the Tx entry point of the driver. S2IO NIC supports
3829 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3830 * NOTE: when device cant queue the pkt,just the trans_start variable will
3833 * 0 on success & 1 on failure.
3836 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3838 struct s2io_nic *sp = dev->priv;
3839 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3842 struct TxFIFO_element __iomem *tx_fifo;
3843 unsigned long flags;
3845 int vlan_priority = 0;
3846 struct mac_info *mac_control;
3847 struct config_param *config;
3850 mac_control = &sp->mac_control;
3851 config = &sp->config;
3853 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3854 spin_lock_irqsave(&sp->tx_lock, flags);
3855 if (atomic_read(&sp->card_state) == CARD_DOWN) {
3856 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3858 spin_unlock_irqrestore(&sp->tx_lock, flags);
3865 /* Get Fifo number to Transmit based on vlan priority */
3866 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3867 vlan_tag = vlan_tx_tag_get(skb);
3868 vlan_priority = vlan_tag >> 13;
3869 queue = config->fifo_mapping[vlan_priority];
3872 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3873 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3874 txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
3877 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3878 /* Avoid "put" pointer going beyond "get" pointer */
3879 if (txdp->Host_Control ||
3880 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
3881 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
3882 netif_stop_queue(dev);
3884 spin_unlock_irqrestore(&sp->tx_lock, flags);
3888 /* A buffer with no data will be dropped */
3890 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3892 spin_unlock_irqrestore(&sp->tx_lock, flags);
3896 offload_type = s2io_offload_type(skb);
3897 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3898 txdp->Control_1 |= TXD_TCP_LSO_EN;
3899 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
3901 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3903 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3906 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
3907 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3908 txdp->Control_2 |= config->tx_intr_type;
3910 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3911 txdp->Control_2 |= TXD_VLAN_ENABLE;
3912 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3915 frg_len = skb->len - skb->data_len;
3916 if (offload_type == SKB_GSO_UDP) {
3919 ufo_size = s2io_udp_mss(skb);
3921 txdp->Control_1 |= TXD_UFO_EN;
3922 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
3923 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
3925 sp->ufo_in_band_v[put_off] =
3926 (u64)skb_shinfo(skb)->ip6_frag_id;
3928 sp->ufo_in_band_v[put_off] =
3929 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
3931 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
3932 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
3934 sizeof(u64), PCI_DMA_TODEVICE);
3938 txdp->Buffer_Pointer = pci_map_single
3939 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3940 txdp->Host_Control = (unsigned long) skb;
3941 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
3942 if (offload_type == SKB_GSO_UDP)
3943 txdp->Control_1 |= TXD_UFO_EN;
3945 frg_cnt = skb_shinfo(skb)->nr_frags;
3946 /* For fragmented SKB. */
3947 for (i = 0; i < frg_cnt; i++) {
3948 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3949 /* A '0' length fragment will be ignored */
3953 txdp->Buffer_Pointer = (u64) pci_map_page
3954 (sp->pdev, frag->page, frag->page_offset,
3955 frag->size, PCI_DMA_TODEVICE);
3956 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
3957 if (offload_type == SKB_GSO_UDP)
3958 txdp->Control_1 |= TXD_UFO_EN;
3960 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3962 if (offload_type == SKB_GSO_UDP)
3963 frg_cnt++; /* as Txd0 was used for inband header */
3965 tx_fifo = mac_control->tx_FIFO_start[queue];
3966 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3967 writeq(val64, &tx_fifo->TxDL_Pointer);
3969 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3972 val64 |= TX_FIFO_SPECIAL_FUNC;
3974 writeq(val64, &tx_fifo->List_Control);
3979 if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
3981 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
3983 /* Avoid "put" pointer going beyond "get" pointer */
3984 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
3985 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
3987 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3989 netif_stop_queue(dev);
3992 dev->trans_start = jiffies;
3993 spin_unlock_irqrestore(&sp->tx_lock, flags);
3999 s2io_alarm_handle(unsigned long data)
4001 struct s2io_nic *sp = (struct s2io_nic *)data;
4003 alarm_intr_handler(sp);
4004 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4007 static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4009 int rxb_size, level;
4012 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4013 level = rx_buffer_level(sp, rxb_size, rng_n);
4015 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4017 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4018 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4019 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4020 DBG_PRINT(ERR_DBG, "Out of memory in %s",
4022 clear_bit(0, (&sp->tasklet_status));
4025 clear_bit(0, (&sp->tasklet_status));
4026 } else if (level == LOW)
4027 tasklet_schedule(&sp->task);
4029 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4030 DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name);
4031 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
4036 static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
4038 struct net_device *dev = (struct net_device *) dev_id;
4039 struct s2io_nic *sp = dev->priv;
4041 struct mac_info *mac_control;
4042 struct config_param *config;
4044 atomic_inc(&sp->isr_cnt);
4045 mac_control = &sp->mac_control;
4046 config = &sp->config;
4047 DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
4049 /* If Intr is because of Rx Traffic */
4050 for (i = 0; i < config->rx_ring_num; i++)
4051 rx_intr_handler(&mac_control->rings[i]);
4053 /* If Intr is because of Tx Traffic */
4054 for (i = 0; i < config->tx_fifo_num; i++)
4055 tx_intr_handler(&mac_control->fifos[i]);
4058 * If the Rx buffer count is below the panic threshold then
4059 * reallocate the buffers from the interrupt handler itself,
4060 * else schedule a tasklet to reallocate the buffers.
4062 for (i = 0; i < config->rx_ring_num; i++)
4063 s2io_chk_rx_buffers(sp, i);
4065 atomic_dec(&sp->isr_cnt);
4069 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4071 struct ring_info *ring = (struct ring_info *)dev_id;
4072 struct s2io_nic *sp = ring->nic;
4074 atomic_inc(&sp->isr_cnt);
4076 rx_intr_handler(ring);
4077 s2io_chk_rx_buffers(sp, ring->ring_no);
4079 atomic_dec(&sp->isr_cnt);
4083 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4085 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4086 struct s2io_nic *sp = fifo->nic;
4088 atomic_inc(&sp->isr_cnt);
4089 tx_intr_handler(fifo);
4090 atomic_dec(&sp->isr_cnt);
4093 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4095 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4098 val64 = readq(&bar0->pic_int_status);
4099 if (val64 & PIC_INT_GPIO) {
4100 val64 = readq(&bar0->gpio_int_reg);
4101 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4102 (val64 & GPIO_INT_REG_LINK_UP)) {
4104 * This is unstable state so clear both up/down
4105 * interrupt and adapter to re-evaluate the link state.
4107 val64 |= GPIO_INT_REG_LINK_DOWN;
4108 val64 |= GPIO_INT_REG_LINK_UP;
4109 writeq(val64, &bar0->gpio_int_reg);
4110 val64 = readq(&bar0->gpio_int_mask);
4111 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4112 GPIO_INT_MASK_LINK_DOWN);
4113 writeq(val64, &bar0->gpio_int_mask);
4115 else if (val64 & GPIO_INT_REG_LINK_UP) {
4116 val64 = readq(&bar0->adapter_status);
4117 /* Enable Adapter */
4118 val64 = readq(&bar0->adapter_control);
4119 val64 |= ADAPTER_CNTL_EN;
4120 writeq(val64, &bar0->adapter_control);
4121 val64 |= ADAPTER_LED_ON;
4122 writeq(val64, &bar0->adapter_control);
4123 if (!sp->device_enabled_once)
4124 sp->device_enabled_once = 1;
4126 s2io_link(sp, LINK_UP);
4128 * unmask link down interrupt and mask link-up
4131 val64 = readq(&bar0->gpio_int_mask);
4132 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4133 val64 |= GPIO_INT_MASK_LINK_UP;
4134 writeq(val64, &bar0->gpio_int_mask);
4136 }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4137 val64 = readq(&bar0->adapter_status);
4138 s2io_link(sp, LINK_DOWN);
4139 /* Link is down so unmaks link up interrupt */
4140 val64 = readq(&bar0->gpio_int_mask);
4141 val64 &= ~GPIO_INT_MASK_LINK_UP;
4142 val64 |= GPIO_INT_MASK_LINK_DOWN;
4143 writeq(val64, &bar0->gpio_int_mask);
4146 val64 = readq(&bar0->adapter_control);
4147 val64 = val64 &(~ADAPTER_LED_ON);
4148 writeq(val64, &bar0->adapter_control);
4151 val64 = readq(&bar0->gpio_int_mask);
4155 * s2io_isr - ISR handler of the device .
4156 * @irq: the irq of the device.
4157 * @dev_id: a void pointer to the dev structure of the NIC.
4158 * Description: This function is the ISR handler of the device. It
4159 * identifies the reason for the interrupt and calls the relevant
4160 * service routines. As a contongency measure, this ISR allocates the
4161 * recv buffers, if their numbers are below the panic value which is
4162 * presently set to 25% of the original number of rcv buffers allocated.
4164 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4165 * IRQ_NONE: will be returned if interrupt is not from our device
4167 static irqreturn_t s2io_isr(int irq, void *dev_id)
4169 struct net_device *dev = (struct net_device *) dev_id;
4170 struct s2io_nic *sp = dev->priv;
4171 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4174 struct mac_info *mac_control;
4175 struct config_param *config;
4177 atomic_inc(&sp->isr_cnt);
4178 mac_control = &sp->mac_control;
4179 config = &sp->config;
4182 * Identify the cause for interrupt and call the appropriate
4183 * interrupt handler. Causes for the interrupt could be;
4187 * 4. Error in any functional blocks of the NIC.
4189 reason = readq(&bar0->general_int_status);
4192 /* The interrupt was not raised by us. */
4193 atomic_dec(&sp->isr_cnt);
4196 else if (unlikely(reason == S2IO_MINUS_ONE) ) {
4197 /* Disable device and get out */
4198 atomic_dec(&sp->isr_cnt);
4203 if (reason & GEN_INTR_RXTRAFFIC) {
4204 if ( likely ( netif_rx_schedule_prep(dev)) ) {
4205 __netif_rx_schedule(dev);
4206 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4209 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4213 * Rx handler is called by default, without checking for the
4214 * cause of interrupt.
4215 * rx_traffic_int reg is an R1 register, writing all 1's
4216 * will ensure that the actual interrupt causing bit get's
4217 * cleared and hence a read can be avoided.
4219 if (reason & GEN_INTR_RXTRAFFIC)
4220 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4222 for (i = 0; i < config->rx_ring_num; i++) {
4223 rx_intr_handler(&mac_control->rings[i]);
4228 * tx_traffic_int reg is an R1 register, writing all 1's
4229 * will ensure that the actual interrupt causing bit get's
4230 * cleared and hence a read can be avoided.
4232 if (reason & GEN_INTR_TXTRAFFIC)
4233 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4235 for (i = 0; i < config->tx_fifo_num; i++)
4236 tx_intr_handler(&mac_control->fifos[i]);
4238 if (reason & GEN_INTR_TXPIC)
4239 s2io_txpic_intr_handle(sp);
4241 * If the Rx buffer count is below the panic threshold then
4242 * reallocate the buffers from the interrupt handler itself,
4243 * else schedule a tasklet to reallocate the buffers.
4246 for (i = 0; i < config->rx_ring_num; i++)
4247 s2io_chk_rx_buffers(sp, i);
4250 writeq(0, &bar0->general_int_mask);
4251 readl(&bar0->general_int_status);
4253 atomic_dec(&sp->isr_cnt);
4260 static void s2io_updt_stats(struct s2io_nic *sp)
4262 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4266 if (atomic_read(&sp->card_state) == CARD_UP) {
4267 /* Apprx 30us on a 133 MHz bus */
4268 val64 = SET_UPDT_CLICKS(10) |
4269 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4270 writeq(val64, &bar0->stat_cfg);
4273 val64 = readq(&bar0->stat_cfg);
4274 if (!(val64 & BIT(0)))
4278 break; /* Updt failed */
4281 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
4286 * s2io_get_stats - Updates the device statistics structure.
4287 * @dev : pointer to the device structure.
4289 * This function updates the device statistics structure in the s2io_nic
4290 * structure and returns a pointer to the same.
4292 * pointer to the updated net_device_stats structure.
4295 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4297 struct s2io_nic *sp = dev->priv;
4298 struct mac_info *mac_control;
4299 struct config_param *config;
4302 mac_control = &sp->mac_control;
4303 config = &sp->config;
4305 /* Configure Stats for immediate updt */
4306 s2io_updt_stats(sp);
4308 sp->stats.tx_packets =
4309 le32_to_cpu(mac_control->stats_info->tmac_frms);
4310 sp->stats.tx_errors =
4311 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4312 sp->stats.rx_errors =
4313 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4314 sp->stats.multicast =
4315 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4316 sp->stats.rx_length_errors =
4317 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4319 return (&sp->stats);
4323 * s2io_set_multicast - entry point for multicast address enable/disable.
4324 * @dev : pointer to the device structure
4326 * This function is a driver entry point which gets called by the kernel
4327 * whenever multicast addresses must be enabled/disabled. This also gets
4328 * called to set/reset promiscuous mode. Depending on the deivce flag, we
4329 * determine, if multicast address must be enabled or if promiscuous mode
4330 * is to be disabled etc.
4335 static void s2io_set_multicast(struct net_device *dev)
4338 struct dev_mc_list *mclist;
4339 struct s2io_nic *sp = dev->priv;
4340 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4341 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4343 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4346 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4347 /* Enable all Multicast addresses */
4348 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4349 &bar0->rmac_addr_data0_mem);
4350 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4351 &bar0->rmac_addr_data1_mem);
4352 val64 = RMAC_ADDR_CMD_MEM_WE |
4353 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4354 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4355 writeq(val64, &bar0->rmac_addr_cmd_mem);
4356 /* Wait till command completes */
4357 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4358 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4362 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4363 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4364 /* Disable all Multicast addresses */
4365 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4366 &bar0->rmac_addr_data0_mem);
4367 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4368 &bar0->rmac_addr_data1_mem);
4369 val64 = RMAC_ADDR_CMD_MEM_WE |
4370 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4371 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4372 writeq(val64, &bar0->rmac_addr_cmd_mem);
4373 /* Wait till command completes */
4374 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4375 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4379 sp->all_multi_pos = 0;
4382 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4383 /* Put the NIC into promiscuous mode */
4384 add = &bar0->mac_cfg;
4385 val64 = readq(&bar0->mac_cfg);
4386 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4388 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4389 writel((u32) val64, add);
4390 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4391 writel((u32) (val64 >> 32), (add + 4));
4393 if (vlan_tag_strip != 1) {
4394 val64 = readq(&bar0->rx_pa_cfg);
4395 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4396 writeq(val64, &bar0->rx_pa_cfg);
4397 vlan_strip_flag = 0;
4400 val64 = readq(&bar0->mac_cfg);
4401 sp->promisc_flg = 1;
4402 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4404 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4405 /* Remove the NIC from promiscuous mode */
4406 add = &bar0->mac_cfg;
4407 val64 = readq(&bar0->mac_cfg);
4408 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4410 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4411 writel((u32) val64, add);
4412 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4413 writel((u32) (val64 >> 32), (add + 4));
4415 if (vlan_tag_strip != 0) {
4416 val64 = readq(&bar0->rx_pa_cfg);
4417 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4418 writeq(val64, &bar0->rx_pa_cfg);
4419 vlan_strip_flag = 1;
4422 val64 = readq(&bar0->mac_cfg);
4423 sp->promisc_flg = 0;
4424 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4428 /* Update individual M_CAST address list */
4429 if ((!sp->m_cast_flg) && dev->mc_count) {
4431 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4432 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4434 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4435 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4439 prev_cnt = sp->mc_addr_count;
4440 sp->mc_addr_count = dev->mc_count;
4442 /* Clear out the previous list of Mc in the H/W. */
4443 for (i = 0; i < prev_cnt; i++) {
4444 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4445 &bar0->rmac_addr_data0_mem);
4446 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4447 &bar0->rmac_addr_data1_mem);
4448 val64 = RMAC_ADDR_CMD_MEM_WE |
4449 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4450 RMAC_ADDR_CMD_MEM_OFFSET
4451 (MAC_MC_ADDR_START_OFFSET + i);
4452 writeq(val64, &bar0->rmac_addr_cmd_mem);
4454 /* Wait for command completes */
4455 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4456 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4458 DBG_PRINT(ERR_DBG, "%s: Adding ",
4460 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4465 /* Create the new Rx filter list and update the same in H/W. */
4466 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4467 i++, mclist = mclist->next) {
4468 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4471 for (j = 0; j < ETH_ALEN; j++) {
4472 mac_addr |= mclist->dmi_addr[j];
4476 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4477 &bar0->rmac_addr_data0_mem);
4478 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4479 &bar0->rmac_addr_data1_mem);
4480 val64 = RMAC_ADDR_CMD_MEM_WE |
4481 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4482 RMAC_ADDR_CMD_MEM_OFFSET
4483 (i + MAC_MC_ADDR_START_OFFSET);
4484 writeq(val64, &bar0->rmac_addr_cmd_mem);
4486 /* Wait for command completes */
4487 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4488 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4490 DBG_PRINT(ERR_DBG, "%s: Adding ",
4492 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4500 * s2io_set_mac_addr - Programs the Xframe mac address
4501 * @dev : pointer to the device structure.
4502 * @addr: a uchar pointer to the new mac address which is to be set.
4503 * Description : This procedure will program the Xframe to receive
4504 * frames with new Mac Address
4505 * Return value: SUCCESS on success and an appropriate (-)ve integer
4506 * as defined in errno.h file on failure.
4509 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4511 struct s2io_nic *sp = dev->priv;
4512 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4513 register u64 val64, mac_addr = 0;
4517 * Set the new MAC address as the new unicast filter and reflect this
4518 * change on the device address registered with the OS. It will be
4521 for (i = 0; i < ETH_ALEN; i++) {
4523 mac_addr |= addr[i];
4526 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4527 &bar0->rmac_addr_data0_mem);
4530 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4531 RMAC_ADDR_CMD_MEM_OFFSET(0);
4532 writeq(val64, &bar0->rmac_addr_cmd_mem);
4533 /* Wait till command completes */
4534 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4535 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4536 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4544 * s2io_ethtool_sset - Sets different link parameters.
4545 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
4546 * @info: pointer to the structure with parameters given by ethtool to set
4549 * The function sets different link parameters provided by the user onto
4555 static int s2io_ethtool_sset(struct net_device *dev,
4556 struct ethtool_cmd *info)
4558 struct s2io_nic *sp = dev->priv;
4559 if ((info->autoneg == AUTONEG_ENABLE) ||
4560 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4563 s2io_close(sp->dev);
4571 * s2io_ethtol_gset - Return link specific information.
4572 * @sp : private member of the device structure, pointer to the
4573 * s2io_nic structure.
4574 * @info : pointer to the structure with parameters given by ethtool
4575 * to return link information.
4577 * Returns link specific information like speed, duplex etc.. to ethtool.
4579 * return 0 on success.
4582 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4584 struct s2io_nic *sp = dev->priv;
4585 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4586 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4587 info->port = PORT_FIBRE;
4588 /* info->transceiver?? TODO */
4590 if (netif_carrier_ok(sp->dev)) {
4591 info->speed = 10000;
4592 info->duplex = DUPLEX_FULL;
4598 info->autoneg = AUTONEG_DISABLE;
4603 * s2io_ethtool_gdrvinfo - Returns driver specific information.
4604 * @sp : private member of the device structure, which is a pointer to the
4605 * s2io_nic structure.
4606 * @info : pointer to the structure with parameters given by ethtool to
4607 * return driver information.
4609 * Returns driver specefic information like name, version etc.. to ethtool.
4614 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4615 struct ethtool_drvinfo *info)
4617 struct s2io_nic *sp = dev->priv;
4619 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4620 strncpy(info->version, s2io_driver_version, sizeof(info->version));
4621 strncpy(info->fw_version, "", sizeof(info->fw_version));
4622 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4623 info->regdump_len = XENA_REG_SPACE;
4624 info->eedump_len = XENA_EEPROM_SPACE;
4625 info->testinfo_len = S2IO_TEST_LEN;
4627 if (sp->device_type == XFRAME_I_DEVICE)
4628 info->n_stats = XFRAME_I_STAT_LEN;
4630 info->n_stats = XFRAME_II_STAT_LEN;
4634 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
4635 * @sp: private member of the device structure, which is a pointer to the
4636 * s2io_nic structure.
4637 * @regs : pointer to the structure with parameters given by ethtool for
4638 * dumping the registers.
4639 * @reg_space: The input argumnet into which all the registers are dumped.
4641 * Dumps the entire register space of xFrame NIC into the user given
4647 static void s2io_ethtool_gregs(struct net_device *dev,
4648 struct ethtool_regs *regs, void *space)
4652 u8 *reg_space = (u8 *) space;
4653 struct s2io_nic *sp = dev->priv;
4655 regs->len = XENA_REG_SPACE;
4656 regs->version = sp->pdev->subsystem_device;
4658 for (i = 0; i < regs->len; i += 8) {
4659 reg = readq(sp->bar0 + i);
4660 memcpy((reg_space + i), ®, 8);
4665 * s2io_phy_id - timer function that alternates adapter LED.
4666 * @data : address of the private member of the device structure, which
4667 * is a pointer to the s2io_nic structure, provided as an u32.
4668 * Description: This is actually the timer function that alternates the
4669 * adapter LED bit of the adapter control bit to set/reset every time on
4670 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
4671 * once every second.
4673 static void s2io_phy_id(unsigned long data)
4675 struct s2io_nic *sp = (struct s2io_nic *) data;
4676 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4680 subid = sp->pdev->subsystem_device;
4681 if ((sp->device_type == XFRAME_II_DEVICE) ||
4682 ((subid & 0xFF) >= 0x07)) {
4683 val64 = readq(&bar0->gpio_control);
4684 val64 ^= GPIO_CTRL_GPIO_0;
4685 writeq(val64, &bar0->gpio_control);
4687 val64 = readq(&bar0->adapter_control);
4688 val64 ^= ADAPTER_LED_ON;
4689 writeq(val64, &bar0->adapter_control);
4692 mod_timer(&sp->id_timer, jiffies + HZ / 2);
4696 * s2io_ethtool_idnic - To physically identify the nic on the system.
4697 * @sp : private member of the device structure, which is a pointer to the
4698 * s2io_nic structure.
4699 * @id : pointer to the structure with identification parameters given by
4701 * Description: Used to physically identify the NIC on the system.
4702 * The Link LED will blink for a time specified by the user for
4704 * NOTE: The Link has to be Up to be able to blink the LED. Hence
4705 * identification is possible only if it's link is up.
4707 * int , returns 0 on success
4710 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4712 u64 val64 = 0, last_gpio_ctrl_val;
4713 struct s2io_nic *sp = dev->priv;
4714 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4717 subid = sp->pdev->subsystem_device;
4718 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4719 if ((sp->device_type == XFRAME_I_DEVICE) &&
4720 ((subid & 0xFF) < 0x07)) {
4721 val64 = readq(&bar0->adapter_control);
4722 if (!(val64 & ADAPTER_CNTL_EN)) {
4724 "Adapter Link down, cannot blink LED\n");
4728 if (sp->id_timer.function == NULL) {
4729 init_timer(&sp->id_timer);
4730 sp->id_timer.function = s2io_phy_id;
4731 sp->id_timer.data = (unsigned long) sp;
4733 mod_timer(&sp->id_timer, jiffies);
4735 msleep_interruptible(data * HZ);
4737 msleep_interruptible(MAX_FLICKER_TIME);
4738 del_timer_sync(&sp->id_timer);
4740 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
4741 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4742 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4749 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
4750 * @sp : private member of the device structure, which is a pointer to the
4751 * s2io_nic structure.
4752 * @ep : pointer to the structure with pause parameters given by ethtool.
4754 * Returns the Pause frame generation and reception capability of the NIC.
4758 static void s2io_ethtool_getpause_data(struct net_device *dev,
4759 struct ethtool_pauseparam *ep)
4762 struct s2io_nic *sp = dev->priv;
4763 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4765 val64 = readq(&bar0->rmac_pause_cfg);
4766 if (val64 & RMAC_PAUSE_GEN_ENABLE)
4767 ep->tx_pause = TRUE;
4768 if (val64 & RMAC_PAUSE_RX_ENABLE)
4769 ep->rx_pause = TRUE;
4770 ep->autoneg = FALSE;
4774 * s2io_ethtool_setpause_data - set/reset pause frame generation.
4775 * @sp : private member of the device structure, which is a pointer to the
4776 * s2io_nic structure.
4777 * @ep : pointer to the structure with pause parameters given by ethtool.
4779 * It can be used to set or reset Pause frame generation or reception
4780 * support of the NIC.
4782 * int, returns 0 on Success
4785 static int s2io_ethtool_setpause_data(struct net_device *dev,
4786 struct ethtool_pauseparam *ep)
4789 struct s2io_nic *sp = dev->priv;
4790 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4792 val64 = readq(&bar0->rmac_pause_cfg);
4794 val64 |= RMAC_PAUSE_GEN_ENABLE;
4796 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4798 val64 |= RMAC_PAUSE_RX_ENABLE;
4800 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4801 writeq(val64, &bar0->rmac_pause_cfg);
4806 * read_eeprom - reads 4 bytes of data from user given offset.
4807 * @sp : private member of the device structure, which is a pointer to the
4808 * s2io_nic structure.
4809 * @off : offset at which the data must be written
4810 * @data : Its an output parameter where the data read at the given
4813 * Will read 4 bytes of data from the user given offset and return the
4815 * NOTE: Will allow to read only part of the EEPROM visible through the
4818 * -1 on failure and 0 on success.
4821 #define S2IO_DEV_ID 5
4822 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
4827 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4829 if (sp->device_type == XFRAME_I_DEVICE) {
4830 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4831 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
4832 I2C_CONTROL_CNTL_START;
4833 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4835 while (exit_cnt < 5) {
4836 val64 = readq(&bar0->i2c_control);
4837 if (I2C_CONTROL_CNTL_END(val64)) {
4838 *data = I2C_CONTROL_GET_DATA(val64);
4847 if (sp->device_type == XFRAME_II_DEVICE) {
4848 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4849 SPI_CONTROL_BYTECNT(0x3) |
4850 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
4851 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4852 val64 |= SPI_CONTROL_REQ;
4853 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4854 while (exit_cnt < 5) {
4855 val64 = readq(&bar0->spi_control);
4856 if (val64 & SPI_CONTROL_NACK) {
4859 } else if (val64 & SPI_CONTROL_DONE) {
4860 *data = readq(&bar0->spi_data);
4873 * write_eeprom - actually writes the relevant part of the data value.
4874 * @sp : private member of the device structure, which is a pointer to the
4875 * s2io_nic structure.
4876 * @off : offset at which the data must be written
4877 * @data : The data that is to be written
4878 * @cnt : Number of bytes of the data that are actually to be written into
4879 * the Eeprom. (max of 3)
4881 * Actually writes the relevant part of the data value into the Eeprom
4882 * through the I2C bus.
4884 * 0 on success, -1 on failure.
4887 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
4889 int exit_cnt = 0, ret = -1;
4891 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4893 if (sp->device_type == XFRAME_I_DEVICE) {
4894 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4895 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
4896 I2C_CONTROL_CNTL_START;
4897 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4899 while (exit_cnt < 5) {
4900 val64 = readq(&bar0->i2c_control);
4901 if (I2C_CONTROL_CNTL_END(val64)) {
4902 if (!(val64 & I2C_CONTROL_NACK))
4911 if (sp->device_type == XFRAME_II_DEVICE) {
4912 int write_cnt = (cnt == 8) ? 0 : cnt;
4913 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
4915 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4916 SPI_CONTROL_BYTECNT(write_cnt) |
4917 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
4918 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4919 val64 |= SPI_CONTROL_REQ;
4920 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4921 while (exit_cnt < 5) {
4922 val64 = readq(&bar0->spi_control);
4923 if (val64 & SPI_CONTROL_NACK) {
4926 } else if (val64 & SPI_CONTROL_DONE) {
4936 static void s2io_vpd_read(struct s2io_nic *nic)
4940 int i=0, cnt, fail = 0;
4941 int vpd_addr = 0x80;
4943 if (nic->device_type == XFRAME_II_DEVICE) {
4944 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
4948 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
4951 strcpy(nic->serial_num, "NOT AVAILABLE");
4953 vpd_data = kmalloc(256, GFP_KERNEL);
4957 for (i = 0; i < 256; i +=4 ) {
4958 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
4959 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
4960 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
4961 for (cnt = 0; cnt <5; cnt++) {
4963 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
4968 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
4972 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
4973 (u32 *)&vpd_data[i]);
4977 /* read serial number of adapter */
4978 for (cnt = 0; cnt < 256; cnt++) {
4979 if ((vpd_data[cnt] == 'S') &&
4980 (vpd_data[cnt+1] == 'N') &&
4981 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
4982 memset(nic->serial_num, 0, VPD_STRING_LEN);
4983 memcpy(nic->serial_num, &vpd_data[cnt + 3],
4990 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
4991 memset(nic->product_name, 0, vpd_data[1]);
4992 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
4998 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
4999 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
5000 * @eeprom : pointer to the user level structure provided by ethtool,
5001 * containing all relevant information.
5002 * @data_buf : user defined value to be written into Eeprom.
5003 * Description: Reads the values stored in the Eeprom at given offset
5004 * for a given length. Stores these values int the input argument data
5005 * buffer 'data_buf' and returns these to the caller (ethtool.)
5010 static int s2io_ethtool_geeprom(struct net_device *dev,
5011 struct ethtool_eeprom *eeprom, u8 * data_buf)
5015 struct s2io_nic *sp = dev->priv;
5017 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5019 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5020 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5022 for (i = 0; i < eeprom->len; i += 4) {
5023 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5024 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5028 memcpy((data_buf + i), &valid, 4);
5034 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5035 * @sp : private member of the device structure, which is a pointer to the
5036 * s2io_nic structure.
5037 * @eeprom : pointer to the user level structure provided by ethtool,
5038 * containing all relevant information.
5039 * @data_buf ; user defined value to be written into Eeprom.
5041 * Tries to write the user provided value in the Eeprom, at the offset
5042 * given by the user.
5044 * 0 on success, -EFAULT on failure.
5047 static int s2io_ethtool_seeprom(struct net_device *dev,
5048 struct ethtool_eeprom *eeprom,
5051 int len = eeprom->len, cnt = 0;
5052 u64 valid = 0, data;
5053 struct s2io_nic *sp = dev->priv;
5055 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5057 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5058 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5064 data = (u32) data_buf[cnt] & 0x000000FF;
5066 valid = (u32) (data << 24);
5070 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5072 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5074 "write into the specified offset\n");
5085 * s2io_register_test - reads and writes into all clock domains.
5086 * @sp : private member of the device structure, which is a pointer to the
5087 * s2io_nic structure.
5088 * @data : variable that returns the result of each of the test conducted b
5091 * Read and write into all clock domains. The NIC has 3 clock domains,
5092 * see that registers in all the three regions are accessible.
5097 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5099 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5100 u64 val64 = 0, exp_val;
5103 val64 = readq(&bar0->pif_rd_swapper_fb);
5104 if (val64 != 0x123456789abcdefULL) {
5106 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5109 val64 = readq(&bar0->rmac_pause_cfg);
5110 if (val64 != 0xc000ffff00000000ULL) {
5112 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5115 val64 = readq(&bar0->rx_queue_cfg);
5116 if (sp->device_type == XFRAME_II_DEVICE)
5117 exp_val = 0x0404040404040404ULL;
5119 exp_val = 0x0808080808080808ULL;
5120 if (val64 != exp_val) {
5122 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5125 val64 = readq(&bar0->xgxs_efifo_cfg);
5126 if (val64 != 0x000000001923141EULL) {
5128 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5131 val64 = 0x5A5A5A5A5A5A5A5AULL;
5132 writeq(val64, &bar0->xmsi_data);
5133 val64 = readq(&bar0->xmsi_data);
5134 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5136 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5139 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5140 writeq(val64, &bar0->xmsi_data);
5141 val64 = readq(&bar0->xmsi_data);
5142 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5144 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5152 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5153 * @sp : private member of the device structure, which is a pointer to the
5154 * s2io_nic structure.
5155 * @data:variable that returns the result of each of the test conducted by
5158 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5164 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5167 u64 ret_data, org_4F0, org_7F0;
5168 u8 saved_4F0 = 0, saved_7F0 = 0;
5169 struct net_device *dev = sp->dev;
5171 /* Test Write Error at offset 0 */
5172 /* Note that SPI interface allows write access to all areas
5173 * of EEPROM. Hence doing all negative testing only for Xframe I.
5175 if (sp->device_type == XFRAME_I_DEVICE)
5176 if (!write_eeprom(sp, 0, 0, 3))
5179 /* Save current values at offsets 0x4F0 and 0x7F0 */
5180 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5182 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5185 /* Test Write at offset 4f0 */
5186 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5188 if (read_eeprom(sp, 0x4F0, &ret_data))
5191 if (ret_data != 0x012345) {
5192 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5193 "Data written %llx Data read %llx\n",
5194 dev->name, (unsigned long long)0x12345,
5195 (unsigned long long)ret_data);
5199 /* Reset the EEPROM data go FFFF */
5200 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5202 /* Test Write Request Error at offset 0x7c */
5203 if (sp->device_type == XFRAME_I_DEVICE)
5204 if (!write_eeprom(sp, 0x07C, 0, 3))
5207 /* Test Write Request at offset 0x7f0 */
5208 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5210 if (read_eeprom(sp, 0x7F0, &ret_data))
5213 if (ret_data != 0x012345) {
5214 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5215 "Data written %llx Data read %llx\n",
5216 dev->name, (unsigned long long)0x12345,
5217 (unsigned long long)ret_data);
5221 /* Reset the EEPROM data go FFFF */
5222 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5224 if (sp->device_type == XFRAME_I_DEVICE) {
5225 /* Test Write Error at offset 0x80 */
5226 if (!write_eeprom(sp, 0x080, 0, 3))
5229 /* Test Write Error at offset 0xfc */
5230 if (!write_eeprom(sp, 0x0FC, 0, 3))
5233 /* Test Write Error at offset 0x100 */
5234 if (!write_eeprom(sp, 0x100, 0, 3))
5237 /* Test Write Error at offset 4ec */
5238 if (!write_eeprom(sp, 0x4EC, 0, 3))
5242 /* Restore values at offsets 0x4F0 and 0x7F0 */
5244 write_eeprom(sp, 0x4F0, org_4F0, 3);
5246 write_eeprom(sp, 0x7F0, org_7F0, 3);
5253 * s2io_bist_test - invokes the MemBist test of the card .
5254 * @sp : private member of the device structure, which is a pointer to the
5255 * s2io_nic structure.
5256 * @data:variable that returns the result of each of the test conducted by
5259 * This invokes the MemBist test of the card. We give around
5260 * 2 secs time for the Test to complete. If it's still not complete
5261 * within this peiod, we consider that the test failed.
5263 * 0 on success and -1 on failure.
5266 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5269 int cnt = 0, ret = -1;
5271 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5272 bist |= PCI_BIST_START;
5273 pci_write_config_word(sp->pdev, PCI_BIST, bist);
5276 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5277 if (!(bist & PCI_BIST_START)) {
5278 *data = (bist & PCI_BIST_CODE_MASK);
5290 * s2io-link_test - verifies the link state of the nic
5291 * @sp ; private member of the device structure, which is a pointer to the
5292 * s2io_nic structure.
5293 * @data: variable that returns the result of each of the test conducted by
5296 * The function verifies the link state of the NIC and updates the input
5297 * argument 'data' appropriately.
5302 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5304 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5307 val64 = readq(&bar0->adapter_status);
5308 if(!(LINK_IS_UP(val64)))
5317 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5318 * @sp - private member of the device structure, which is a pointer to the
5319 * s2io_nic structure.
5320 * @data - variable that returns the result of each of the test
5321 * conducted by the driver.
5323 * This is one of the offline test that tests the read and write
5324 * access to the RldRam chip on the NIC.
5329 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
5331 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5333 int cnt, iteration = 0, test_fail = 0;
5335 val64 = readq(&bar0->adapter_control);
5336 val64 &= ~ADAPTER_ECC_EN;
5337 writeq(val64, &bar0->adapter_control);
5339 val64 = readq(&bar0->mc_rldram_test_ctrl);
5340 val64 |= MC_RLDRAM_TEST_MODE;
5341 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5343 val64 = readq(&bar0->mc_rldram_mrs);
5344 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5345 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5347 val64 |= MC_RLDRAM_MRS_ENABLE;
5348 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5350 while (iteration < 2) {
5351 val64 = 0x55555555aaaa0000ULL;
5352 if (iteration == 1) {
5353 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5355 writeq(val64, &bar0->mc_rldram_test_d0);
5357 val64 = 0xaaaa5a5555550000ULL;
5358 if (iteration == 1) {
5359 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5361 writeq(val64, &bar0->mc_rldram_test_d1);
5363 val64 = 0x55aaaaaaaa5a0000ULL;
5364 if (iteration == 1) {
5365 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5367 writeq(val64, &bar0->mc_rldram_test_d2);
5369 val64 = (u64) (0x0000003ffffe0100ULL);
5370 writeq(val64, &bar0->mc_rldram_test_add);
5372 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5374 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5376 for (cnt = 0; cnt < 5; cnt++) {
5377 val64 = readq(&bar0->mc_rldram_test_ctrl);
5378 if (val64 & MC_RLDRAM_TEST_DONE)
5386 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5387 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5389 for (cnt = 0; cnt < 5; cnt++) {
5390 val64 = readq(&bar0->mc_rldram_test_ctrl);
5391 if (val64 & MC_RLDRAM_TEST_DONE)
5399 val64 = readq(&bar0->mc_rldram_test_ctrl);
5400 if (!(val64 & MC_RLDRAM_TEST_PASS))
5408 /* Bring the adapter out of test mode */
5409 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5415 * s2io_ethtool_test - conducts 6 tsets to determine the health of card.
5416 * @sp : private member of the device structure, which is a pointer to the
5417 * s2io_nic structure.
5418 * @ethtest : pointer to a ethtool command specific structure that will be
5419 * returned to the user.
5420 * @data : variable that returns the result of each of the test
5421 * conducted by the driver.
5423 * This function conducts 6 tests ( 4 offline and 2 online) to determine
5424 * the health of the card.
5429 static void s2io_ethtool_test(struct net_device *dev,
5430 struct ethtool_test *ethtest,
5433 struct s2io_nic *sp = dev->priv;
5434 int orig_state = netif_running(sp->dev);
5436 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5437 /* Offline Tests. */
5439 s2io_close(sp->dev);
5441 if (s2io_register_test(sp, &data[0]))
5442 ethtest->flags |= ETH_TEST_FL_FAILED;
5446 if (s2io_rldram_test(sp, &data[3]))
5447 ethtest->flags |= ETH_TEST_FL_FAILED;
5451 if (s2io_eeprom_test(sp, &data[1]))
5452 ethtest->flags |= ETH_TEST_FL_FAILED;
5454 if (s2io_bist_test(sp, &data[4]))
5455 ethtest->flags |= ETH_TEST_FL_FAILED;
5465 "%s: is not up, cannot run test\n",
5474 if (s2io_link_test(sp, &data[2]))
5475 ethtest->flags |= ETH_TEST_FL_FAILED;
5484 static void s2io_get_ethtool_stats(struct net_device *dev,
5485 struct ethtool_stats *estats,
5489 struct s2io_nic *sp = dev->priv;
5490 struct stat_block *stat_info = sp->mac_control.stats_info;
5492 s2io_updt_stats(sp);
5494 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5495 le32_to_cpu(stat_info->tmac_frms);
5497 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5498 le32_to_cpu(stat_info->tmac_data_octets);
5499 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5501 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5502 le32_to_cpu(stat_info->tmac_mcst_frms);
5504 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5505 le32_to_cpu(stat_info->tmac_bcst_frms);
5506 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5508 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5509 le32_to_cpu(stat_info->tmac_ttl_octets);
5511 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5512 le32_to_cpu(stat_info->tmac_ucst_frms);
5514 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5515 le32_to_cpu(stat_info->tmac_nucst_frms);
5517 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5518 le32_to_cpu(stat_info->tmac_any_err_frms);
5519 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
5520 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5522 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5523 le32_to_cpu(stat_info->tmac_vld_ip);
5525 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5526 le32_to_cpu(stat_info->tmac_drop_ip);
5528 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5529 le32_to_cpu(stat_info->tmac_icmp);
5531 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5532 le32_to_cpu(stat_info->tmac_rst_tcp);
5533 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5534 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5535 le32_to_cpu(stat_info->tmac_udp);
5537 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5538 le32_to_cpu(stat_info->rmac_vld_frms);
5540 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5541 le32_to_cpu(stat_info->rmac_data_octets);
5542 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5543 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5545 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5546 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5548 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5549 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5550 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5551 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
5552 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5553 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5554 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5556 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5557 le32_to_cpu(stat_info->rmac_ttl_octets);
5559 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5560 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5562 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5563 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
5565 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5566 le32_to_cpu(stat_info->rmac_discarded_frms);
5568 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5569 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5570 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5571 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
5573 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5574 le32_to_cpu(stat_info->rmac_usized_frms);
5576 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5577 le32_to_cpu(stat_info->rmac_osized_frms);
5579 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5580 le32_to_cpu(stat_info->rmac_frag_frms);
5582 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5583 le32_to_cpu(stat_info->rmac_jabber_frms);
5584 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5585 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5586 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5587 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5588 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5589 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5591 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
5592 le32_to_cpu(stat_info->rmac_ip);
5593 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5594 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5596 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
5597 le32_to_cpu(stat_info->rmac_drop_ip);
5599 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
5600 le32_to_cpu(stat_info->rmac_icmp);
5601 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5603 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
5604 le32_to_cpu(stat_info->rmac_udp);
5606 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5607 le32_to_cpu(stat_info->rmac_err_drp_udp);
5608 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
5609 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
5610 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
5611 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
5612 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
5613 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
5614 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
5615 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
5616 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
5617 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
5618 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
5619 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
5620 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
5621 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
5622 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
5623 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
5624 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
5626 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5627 le32_to_cpu(stat_info->rmac_pause_cnt);
5628 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
5629 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
5631 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5632 le32_to_cpu(stat_info->rmac_accepted_ip);
5633 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
5634 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
5635 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
5636 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
5637 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
5638 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
5639 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
5640 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
5641 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
5642 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
5643 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
5644 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
5645 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
5646 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
5647 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
5648 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
5649 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5650 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5651 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5653 /* Enhanced statistics exist only for Hercules */
5654 if(sp->device_type == XFRAME_II_DEVICE) {
5656 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5658 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5660 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5661 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5662 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5663 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5664 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5665 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5666 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5667 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5668 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5669 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5670 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5671 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5672 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5673 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
5677 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5678 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5679 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
5680 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
5681 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
5682 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
5683 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
5684 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
5685 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
5686 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
5687 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
5688 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
5689 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
5690 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
5691 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
5692 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
5693 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
5694 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
5695 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
5696 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5697 tmp_stats[i++] = stat_info->sw_stat.sending_both;
5698 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5699 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
5700 if (stat_info->sw_stat.num_aggregations) {
5701 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5704 * Since 64-bit divide does not work on all platforms,
5705 * do repeated subtraction.
5707 while (tmp >= stat_info->sw_stat.num_aggregations) {
5708 tmp -= stat_info->sw_stat.num_aggregations;
5711 tmp_stats[i++] = count;
5717 static int s2io_ethtool_get_regs_len(struct net_device *dev)
5719 return (XENA_REG_SPACE);
5723 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5725 struct s2io_nic *sp = dev->priv;
5727 return (sp->rx_csum);
5730 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5732 struct s2io_nic *sp = dev->priv;
5742 static int s2io_get_eeprom_len(struct net_device *dev)
5744 return (XENA_EEPROM_SPACE);
5747 static int s2io_ethtool_self_test_count(struct net_device *dev)
5749 return (S2IO_TEST_LEN);
5752 static void s2io_ethtool_get_strings(struct net_device *dev,
5753 u32 stringset, u8 * data)
5756 struct s2io_nic *sp = dev->priv;
5758 switch (stringset) {
5760 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5763 stat_size = sizeof(ethtool_xena_stats_keys);
5764 memcpy(data, ðtool_xena_stats_keys,stat_size);
5765 if(sp->device_type == XFRAME_II_DEVICE) {
5766 memcpy(data + stat_size,
5767 ðtool_enhanced_stats_keys,
5768 sizeof(ethtool_enhanced_stats_keys));
5769 stat_size += sizeof(ethtool_enhanced_stats_keys);
5772 memcpy(data + stat_size, ðtool_driver_stats_keys,
5773 sizeof(ethtool_driver_stats_keys));
5776 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5778 struct s2io_nic *sp = dev->priv;
5780 switch(sp->device_type) {
5781 case XFRAME_I_DEVICE:
5782 stat_count = XFRAME_I_STAT_LEN;
5785 case XFRAME_II_DEVICE:
5786 stat_count = XFRAME_II_STAT_LEN;
5793 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5796 dev->features |= NETIF_F_IP_CSUM;
5798 dev->features &= ~NETIF_F_IP_CSUM;
5803 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
5805 return (dev->features & NETIF_F_TSO) != 0;
5807 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
5810 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
5812 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
5817 static const struct ethtool_ops netdev_ethtool_ops = {
5818 .get_settings = s2io_ethtool_gset,
5819 .set_settings = s2io_ethtool_sset,
5820 .get_drvinfo = s2io_ethtool_gdrvinfo,
5821 .get_regs_len = s2io_ethtool_get_regs_len,
5822 .get_regs = s2io_ethtool_gregs,
5823 .get_link = ethtool_op_get_link,
5824 .get_eeprom_len = s2io_get_eeprom_len,
5825 .get_eeprom = s2io_ethtool_geeprom,
5826 .set_eeprom = s2io_ethtool_seeprom,
5827 .get_pauseparam = s2io_ethtool_getpause_data,
5828 .set_pauseparam = s2io_ethtool_setpause_data,
5829 .get_rx_csum = s2io_ethtool_get_rx_csum,
5830 .set_rx_csum = s2io_ethtool_set_rx_csum,
5831 .get_tx_csum = ethtool_op_get_tx_csum,
5832 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
5833 .get_sg = ethtool_op_get_sg,
5834 .set_sg = ethtool_op_set_sg,
5835 .get_tso = s2io_ethtool_op_get_tso,
5836 .set_tso = s2io_ethtool_op_set_tso,
5837 .get_ufo = ethtool_op_get_ufo,
5838 .set_ufo = ethtool_op_set_ufo,
5839 .self_test_count = s2io_ethtool_self_test_count,
5840 .self_test = s2io_ethtool_test,
5841 .get_strings = s2io_ethtool_get_strings,
5842 .phys_id = s2io_ethtool_idnic,
5843 .get_stats_count = s2io_ethtool_get_stats_count,
5844 .get_ethtool_stats = s2io_get_ethtool_stats
5848 * s2io_ioctl - Entry point for the Ioctl
5849 * @dev : Device pointer.
5850 * @ifr : An IOCTL specefic structure, that can contain a pointer to
5851 * a proprietary structure used to pass information to the driver.
5852 * @cmd : This is used to distinguish between the different commands that
5853 * can be passed to the IOCTL functions.
5855 * Currently there are no special functionality supported in IOCTL, hence
5856 * function always return EOPNOTSUPPORTED
5859 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5865 * s2io_change_mtu - entry point to change MTU size for the device.
5866 * @dev : device pointer.
5867 * @new_mtu : the new MTU size for the device.
5868 * Description: A driver entry point to change MTU size for the device.
5869 * Before changing the MTU the device must be stopped.
5871 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5875 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5877 struct s2io_nic *sp = dev->priv;
5879 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5880 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
5886 if (netif_running(dev)) {
5888 netif_stop_queue(dev);
5889 if (s2io_card_up(sp)) {
5890 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5893 if (netif_queue_stopped(dev))
5894 netif_wake_queue(dev);
5895 } else { /* Device is down */
5896 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5897 u64 val64 = new_mtu;
5899 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
5906 * s2io_tasklet - Bottom half of the ISR.
5907 * @dev_adr : address of the device structure in dma_addr_t format.
5909 * This is the tasklet or the bottom half of the ISR. This is
5910 * an extension of the ISR which is scheduled by the scheduler to be run
5911 * when the load on the CPU is low. All low priority tasks of the ISR can
5912 * be pushed into the tasklet. For now the tasklet is used only to
5913 * replenish the Rx buffers in the Rx buffer descriptors.
5918 static void s2io_tasklet(unsigned long dev_addr)
5920 struct net_device *dev = (struct net_device *) dev_addr;
5921 struct s2io_nic *sp = dev->priv;
5923 struct mac_info *mac_control;
5924 struct config_param *config;
5926 mac_control = &sp->mac_control;
5927 config = &sp->config;
5929 if (!TASKLET_IN_USE) {
5930 for (i = 0; i < config->rx_ring_num; i++) {
5931 ret = fill_rx_buffers(sp, i);
5932 if (ret == -ENOMEM) {
5933 DBG_PRINT(ERR_DBG, "%s: Out of ",
5935 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
5937 } else if (ret == -EFILL) {
5939 "%s: Rx Ring %d is full\n",
5944 clear_bit(0, (&sp->tasklet_status));
5949 * s2io_set_link - Set the LInk status
5950 * @data: long pointer to device private structue
5951 * Description: Sets the link status for the adapter
5954 static void s2io_set_link(struct work_struct *work)
5956 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
5957 struct net_device *dev = nic->dev;
5958 struct XENA_dev_config __iomem *bar0 = nic->bar0;
5964 if (!netif_running(dev))
5967 if (test_and_set_bit(0, &(nic->link_state))) {
5968 /* The card is being reset, no point doing anything */
5972 subid = nic->pdev->subsystem_device;
5973 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
5975 * Allow a small delay for the NICs self initiated
5976 * cleanup to complete.
5981 val64 = readq(&bar0->adapter_status);
5982 if (LINK_IS_UP(val64)) {
5983 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
5984 if (verify_xena_quiescence(nic)) {
5985 val64 = readq(&bar0->adapter_control);
5986 val64 |= ADAPTER_CNTL_EN;
5987 writeq(val64, &bar0->adapter_control);
5988 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
5989 nic->device_type, subid)) {
5990 val64 = readq(&bar0->gpio_control);
5991 val64 |= GPIO_CTRL_GPIO_0;
5992 writeq(val64, &bar0->gpio_control);
5993 val64 = readq(&bar0->gpio_control);
5995 val64 |= ADAPTER_LED_ON;
5996 writeq(val64, &bar0->adapter_control);
5998 nic->device_enabled_once = TRUE;
6000 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6001 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6002 netif_stop_queue(dev);
6005 val64 = readq(&bar0->adapter_status);
6006 if (!LINK_IS_UP(val64)) {
6007 DBG_PRINT(ERR_DBG, "%s:", dev->name);
6008 DBG_PRINT(ERR_DBG, " Link down after enabling ");
6009 DBG_PRINT(ERR_DBG, "device \n");
6011 s2io_link(nic, LINK_UP);
6013 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6015 val64 = readq(&bar0->gpio_control);
6016 val64 &= ~GPIO_CTRL_GPIO_0;
6017 writeq(val64, &bar0->gpio_control);
6018 val64 = readq(&bar0->gpio_control);
6020 s2io_link(nic, LINK_DOWN);
6022 clear_bit(0, &(nic->link_state));
6028 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6030 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6031 u64 *temp2, int size)
6033 struct net_device *dev = sp->dev;
6034 struct sk_buff *frag_list;
6036 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6039 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6041 * As Rx frame are not going to be processed,
6042 * using same mapped address for the Rxd
6045 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0;
6047 *skb = dev_alloc_skb(size);
6049 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
6050 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
6053 /* storing the mapped addr in a temp variable
6054 * such it will be used for next rxd whose
6055 * Host Control is NULL
6057 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 =
6058 pci_map_single( sp->pdev, (*skb)->data,
6059 size - NET_IP_ALIGN,
6060 PCI_DMA_FROMDEVICE);
6061 rxdp->Host_Control = (unsigned long) (*skb);
6063 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6064 /* Two buffer Mode */
6066 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
6067 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6068 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6070 *skb = dev_alloc_skb(size);
6072 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
6076 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
6077 pci_map_single(sp->pdev, (*skb)->data,
6079 PCI_DMA_FROMDEVICE);
6080 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
6081 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6082 PCI_DMA_FROMDEVICE);
6083 rxdp->Host_Control = (unsigned long) (*skb);
6085 /* Buffer-1 will be dummy buffer not used */
6086 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6087 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6088 PCI_DMA_FROMDEVICE);
6090 } else if ((rxdp->Host_Control == 0)) {
6091 /* Three buffer mode */
6093 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6094 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6095 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
6097 *skb = dev_alloc_skb(size);
6099 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
6103 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
6104 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6105 PCI_DMA_FROMDEVICE);
6106 /* Buffer-1 receives L3/L4 headers */
6107 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6108 pci_map_single( sp->pdev, (*skb)->data,
6110 PCI_DMA_FROMDEVICE);
6112 * skb_shinfo(skb)->frag_list will have L4
6115 skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
6117 if (skb_shinfo(*skb)->frag_list == NULL) {
6118 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb \
6119 failed\n ", dev->name);
6122 frag_list = skb_shinfo(*skb)->frag_list;
6123 frag_list->next = NULL;
6125 * Buffer-2 receives L4 data payload
6127 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
6128 pci_map_single( sp->pdev, frag_list->data,
6129 dev->mtu, PCI_DMA_FROMDEVICE);
6134 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6137 struct net_device *dev = sp->dev;
6138 if (sp->rxd_mode == RXD_MODE_1) {
6139 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6140 } else if (sp->rxd_mode == RXD_MODE_3B) {
6141 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6142 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6143 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6145 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6146 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
6147 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
6151 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6153 int i, j, k, blk_cnt = 0, size;
6154 struct mac_info * mac_control = &sp->mac_control;
6155 struct config_param *config = &sp->config;
6156 struct net_device *dev = sp->dev;
6157 struct RxD_t *rxdp = NULL;
6158 struct sk_buff *skb = NULL;
6159 struct buffAdd *ba = NULL;
6160 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6162 /* Calculate the size based on ring mode */
6163 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6164 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6165 if (sp->rxd_mode == RXD_MODE_1)
6166 size += NET_IP_ALIGN;
6167 else if (sp->rxd_mode == RXD_MODE_3B)
6168 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6170 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
6172 for (i = 0; i < config->rx_ring_num; i++) {
6173 blk_cnt = config->rx_cfg[i].num_rxd /
6174 (rxd_count[sp->rxd_mode] +1);
6176 for (j = 0; j < blk_cnt; j++) {
6177 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6178 rxdp = mac_control->rings[i].
6179 rx_blocks[j].rxds[k].virt_addr;
6180 if(sp->rxd_mode >= RXD_MODE_3A)
6181 ba = &mac_control->rings[i].ba[j][k];
6182 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6183 &skb,(u64 *)&temp0_64,
6190 set_rxd_buffer_size(sp, rxdp, size);
6192 /* flip the Ownership bit to Hardware */
6193 rxdp->Control_1 |= RXD_OWN_XENA;
6201 static int s2io_add_isr(struct s2io_nic * sp)
6204 struct net_device *dev = sp->dev;
6207 if (sp->intr_type == MSI)
6208 ret = s2io_enable_msi(sp);
6209 else if (sp->intr_type == MSI_X)
6210 ret = s2io_enable_msi_x(sp);
6212 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6213 sp->intr_type = INTA;
6216 /* Store the values of the MSIX table in the struct s2io_nic structure */
6217 store_xmsi_data(sp);
6219 /* After proper initialization of H/W, register ISR */
6220 if (sp->intr_type == MSI) {
6221 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
6222 IRQF_SHARED, sp->name, dev);
6224 pci_disable_msi(sp->pdev);
6225 DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
6230 if (sp->intr_type == MSI_X) {
6231 int i, msix_tx_cnt=0,msix_rx_cnt=0;
6233 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6234 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
6235 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6237 err = request_irq(sp->entries[i].vector,
6238 s2io_msix_fifo_handle, 0, sp->desc[i],
6239 sp->s2io_entries[i].arg);
6240 /* If either data or addr is zero print it */
6241 if(!(sp->msix_info[i].addr &&
6242 sp->msix_info[i].data)) {
6243 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6244 "Data:0x%lx\n",sp->desc[i],
6245 (unsigned long long)
6246 sp->msix_info[i].addr,
6248 ntohl(sp->msix_info[i].data));
6253 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6255 err = request_irq(sp->entries[i].vector,
6256 s2io_msix_ring_handle, 0, sp->desc[i],
6257 sp->s2io_entries[i].arg);
6258 /* If either data or addr is zero print it */
6259 if(!(sp->msix_info[i].addr &&
6260 sp->msix_info[i].data)) {
6261 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6262 "Data:0x%lx\n",sp->desc[i],
6263 (unsigned long long)
6264 sp->msix_info[i].addr,
6266 ntohl(sp->msix_info[i].data));
6272 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
6273 "failed\n", dev->name, i);
6274 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
6277 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6279 printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
6280 printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
6282 if (sp->intr_type == INTA) {
6283 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
6286 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6293 static void s2io_rem_isr(struct s2io_nic * sp)
6296 struct net_device *dev = sp->dev;
6298 if (sp->intr_type == MSI_X) {
6302 for (i=1; (sp->s2io_entries[i].in_use ==
6303 MSIX_REGISTERED_SUCCESS); i++) {
6304 int vector = sp->entries[i].vector;
6305 void *arg = sp->s2io_entries[i].arg;
6307 free_irq(vector, arg);
6309 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6310 msi_control &= 0xFFFE; /* Disable MSI */
6311 pci_write_config_word(sp->pdev, 0x42, msi_control);
6313 pci_disable_msix(sp->pdev);
6315 free_irq(sp->pdev->irq, dev);
6316 if (sp->intr_type == MSI) {
6319 pci_disable_msi(sp->pdev);
6320 pci_read_config_word(sp->pdev, 0x4c, &val);
6322 pci_write_config_word(sp->pdev, 0x4c, val);
6325 /* Waiting till all Interrupt handlers are complete */
6329 if (!atomic_read(&sp->isr_cnt))
6335 static void s2io_card_down(struct s2io_nic * sp)
6338 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6339 unsigned long flags;
6340 register u64 val64 = 0;
6342 del_timer_sync(&sp->alarm_timer);
6343 /* If s2io_set_link task is executing, wait till it completes. */
6344 while (test_and_set_bit(0, &(sp->link_state))) {
6347 atomic_set(&sp->card_state, CARD_DOWN);
6349 /* disable Tx and Rx traffic on the NIC */
6355 tasklet_kill(&sp->task);
6357 /* Check if the device is Quiescent and then Reset the NIC */
6359 /* As per the HW requirement we need to replenish the
6360 * receive buffer to avoid the ring bump. Since there is
6361 * no intention of processing the Rx frame at this pointwe are
6362 * just settting the ownership bit of rxd in Each Rx
6363 * ring to HW and set the appropriate buffer size
6364 * based on the ring mode
6366 rxd_owner_bit_reset(sp);
6368 val64 = readq(&bar0->adapter_status);
6369 if (verify_xena_quiescence(sp)) {
6370 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
6378 "s2io_close:Device not Quiescent ");
6379 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
6380 (unsigned long long) val64);
6386 spin_lock_irqsave(&sp->tx_lock, flags);
6387 /* Free all Tx buffers */
6388 free_tx_buffers(sp);
6389 spin_unlock_irqrestore(&sp->tx_lock, flags);
6391 /* Free all Rx buffers */
6392 spin_lock_irqsave(&sp->rx_lock, flags);
6393 free_rx_buffers(sp);
6394 spin_unlock_irqrestore(&sp->rx_lock, flags);
6396 clear_bit(0, &(sp->link_state));
6399 static int s2io_card_up(struct s2io_nic * sp)
6402 struct mac_info *mac_control;
6403 struct config_param *config;
6404 struct net_device *dev = (struct net_device *) sp->dev;
6407 /* Initialize the H/W I/O registers */
6408 if (init_nic(sp) != 0) {
6409 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6416 * Initializing the Rx buffers. For now we are considering only 1
6417 * Rx ring and initializing buffers into 30 Rx blocks
6419 mac_control = &sp->mac_control;
6420 config = &sp->config;
6422 for (i = 0; i < config->rx_ring_num; i++) {
6423 if ((ret = fill_rx_buffers(sp, i))) {
6424 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
6427 free_rx_buffers(sp);
6430 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6431 atomic_read(&sp->rx_bufs_left[i]));
6433 /* Maintain the state prior to the open */
6434 if (sp->promisc_flg)
6435 sp->promisc_flg = 0;
6436 if (sp->m_cast_flg) {
6438 sp->all_multi_pos= 0;
6441 /* Setting its receive mode */
6442 s2io_set_multicast(dev);
6445 /* Initialize max aggregatable pkts per session based on MTU */
6446 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6447 /* Check if we can use(if specified) user provided value */
6448 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6449 sp->lro_max_aggr_per_sess = lro_max_pkts;
6452 /* Enable Rx Traffic and interrupts on the NIC */
6453 if (start_nic(sp)) {
6454 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
6456 free_rx_buffers(sp);
6460 /* Add interrupt service routine */
6461 if (s2io_add_isr(sp) != 0) {
6462 if (sp->intr_type == MSI_X)
6465 free_rx_buffers(sp);
6469 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6471 /* Enable tasklet for the device */
6472 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6474 /* Enable select interrupts */
6475 if (sp->intr_type != INTA)
6476 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
6478 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
6479 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
6480 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
6481 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
6485 atomic_set(&sp->card_state, CARD_UP);
6490 * s2io_restart_nic - Resets the NIC.
6491 * @data : long pointer to the device private structure
6493 * This function is scheduled to be run by the s2io_tx_watchdog
6494 * function after 0.5 secs to reset the NIC. The idea is to reduce
6495 * the run time of the watch dog routine which is run holding a
6499 static void s2io_restart_nic(struct work_struct *work)
6501 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6502 struct net_device *dev = sp->dev;
6506 if (!netif_running(dev))
6510 if (s2io_card_up(sp)) {
6511 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6514 netif_wake_queue(dev);
6515 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6522 * s2io_tx_watchdog - Watchdog for transmit side.
6523 * @dev : Pointer to net device structure
6525 * This function is triggered if the Tx Queue is stopped
6526 * for a pre-defined amount of time when the Interface is still up.
6527 * If the Interface is jammed in such a situation, the hardware is
6528 * reset (by s2io_close) and restarted again (by s2io_open) to
6529 * overcome any problem that might have been caused in the hardware.
6534 static void s2io_tx_watchdog(struct net_device *dev)
6536 struct s2io_nic *sp = dev->priv;
6538 if (netif_carrier_ok(dev)) {
6539 schedule_work(&sp->rst_timer_task);
6540 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
6545 * rx_osm_handler - To perform some OS related operations on SKB.
6546 * @sp: private member of the device structure,pointer to s2io_nic structure.
6547 * @skb : the socket buffer pointer.
6548 * @len : length of the packet
6549 * @cksum : FCS checksum of the frame.
6550 * @ring_no : the ring from which this RxD was extracted.
6552 * This function is called by the Rx interrupt serivce routine to perform
6553 * some OS related operations on the SKB before passing it to the upper
6554 * layers. It mainly checks if the checksum is OK, if so adds it to the
6555 * SKBs cksum variable, increments the Rx packet count and passes the SKB
6556 * to the upper layer. If the checksum is wrong, it increments the Rx
6557 * packet error count, frees the SKB and returns error.
6559 * SUCCESS on success and -1 on failure.
6561 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
6563 struct s2io_nic *sp = ring_data->nic;
6564 struct net_device *dev = (struct net_device *) sp->dev;
6565 struct sk_buff *skb = (struct sk_buff *)
6566 ((unsigned long) rxdp->Host_Control);
6567 int ring_no = ring_data->ring_no;
6568 u16 l3_csum, l4_csum;
6569 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
6575 /* Check for parity error */
6577 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
6581 * Drop the packet if bad transfer code. Exception being
6582 * 0x5, which could be due to unsupported IPv6 extension header.
6583 * In this case, we let stack handle the packet.
6584 * Note that in this case, since checksum will be incorrect,
6585 * stack will validate the same.
6587 if (err && ((err >> 48) != 0x5)) {
6588 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
6590 sp->stats.rx_crc_errors++;
6592 atomic_dec(&sp->rx_bufs_left[ring_no]);
6593 rxdp->Host_Control = 0;
6598 /* Updating statistics */
6599 rxdp->Host_Control = 0;
6601 sp->stats.rx_packets++;
6602 if (sp->rxd_mode == RXD_MODE_1) {
6603 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
6605 sp->stats.rx_bytes += len;
6608 } else if (sp->rxd_mode >= RXD_MODE_3A) {
6609 int get_block = ring_data->rx_curr_get_info.block_index;
6610 int get_off = ring_data->rx_curr_get_info.offset;
6611 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
6612 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6613 unsigned char *buff = skb_push(skb, buf0_len);
6615 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
6616 sp->stats.rx_bytes += buf0_len + buf2_len;
6617 memcpy(buff, ba->ba_0, buf0_len);
6619 if (sp->rxd_mode == RXD_MODE_3A) {
6620 int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
6622 skb_put(skb, buf1_len);
6623 skb->len += buf2_len;
6624 skb->data_len += buf2_len;
6625 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6626 sp->stats.rx_bytes += buf1_len;
6629 skb_put(skb, buf2_len);
6632 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
6633 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
6635 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
6636 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
6637 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
6639 * NIC verifies if the Checksum of the received
6640 * frame is Ok or not and accordingly returns
6641 * a flag in the RxD.
6643 skb->ip_summed = CHECKSUM_UNNECESSARY;
6649 ret = s2io_club_tcp_session(skb->data, &tcp,
6650 &tcp_len, &lro, rxdp, sp);
6652 case 3: /* Begin anew */
6655 case 1: /* Aggregate */
6657 lro_append_pkt(sp, lro,
6661 case 4: /* Flush session */
6663 lro_append_pkt(sp, lro,
6665 queue_rx_frame(lro->parent);
6666 clear_lro_session(lro);
6667 sp->mac_control.stats_info->
6668 sw_stat.flush_max_pkts++;
6671 case 2: /* Flush both */
6672 lro->parent->data_len =
6674 sp->mac_control.stats_info->
6675 sw_stat.sending_both++;
6676 queue_rx_frame(lro->parent);
6677 clear_lro_session(lro);
6679 case 0: /* sessions exceeded */
6680 case -1: /* non-TCP or not
6684 * First pkt in session not
6685 * L3/L4 aggregatable
6690 "%s: Samadhana!!\n",
6697 * Packet with erroneous checksum, let the
6698 * upper layers deal with it.
6700 skb->ip_summed = CHECKSUM_NONE;
6703 skb->ip_summed = CHECKSUM_NONE;
6707 skb->protocol = eth_type_trans(skb, dev);
6708 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
6710 /* Queueing the vlan frame to the upper layer */
6712 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
6713 RXD_GET_VLAN_TAG(rxdp->Control_2));
6715 vlan_hwaccel_rx(skb, sp->vlgrp,
6716 RXD_GET_VLAN_TAG(rxdp->Control_2));
6719 netif_receive_skb(skb);
6725 queue_rx_frame(skb);
6727 dev->last_rx = jiffies;
6729 atomic_dec(&sp->rx_bufs_left[ring_no]);
6734 * s2io_link - stops/starts the Tx queue.
6735 * @sp : private member of the device structure, which is a pointer to the
6736 * s2io_nic structure.
6737 * @link : inidicates whether link is UP/DOWN.
6739 * This function stops/starts the Tx queue depending on whether the link
6740 * status of the NIC is is down or up. This is called by the Alarm
6741 * interrupt handler whenever a link change interrupt comes up.
6746 static void s2io_link(struct s2io_nic * sp, int link)
6748 struct net_device *dev = (struct net_device *) sp->dev;
6750 if (link != sp->last_link_state) {
6751 if (link == LINK_DOWN) {
6752 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
6753 netif_carrier_off(dev);
6755 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
6756 netif_carrier_on(dev);
6759 sp->last_link_state = link;
6763 * get_xena_rev_id - to identify revision ID of xena.
6764 * @pdev : PCI Dev structure
6766 * Function to identify the Revision ID of xena.
6768 * returns the revision ID of the device.
6771 static int get_xena_rev_id(struct pci_dev *pdev)
6775 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
6780 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
6781 * @sp : private member of the device structure, which is a pointer to the
6782 * s2io_nic structure.
6784 * This function initializes a few of the PCI and PCI-X configuration registers
6785 * with recommended values.
6790 static void s2io_init_pci(struct s2io_nic * sp)
6792 u16 pci_cmd = 0, pcix_cmd = 0;
6794 /* Enable Data Parity Error Recovery in PCI-X command register. */
6795 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6797 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6799 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6802 /* Set the PErr Response bit in PCI command register. */
6803 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6804 pci_write_config_word(sp->pdev, PCI_COMMAND,
6805 (pci_cmd | PCI_COMMAND_PARITY));
6806 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6809 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6811 if ( tx_fifo_num > 8) {
6812 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
6814 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
6817 if ( rx_ring_num > 8) {
6818 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
6820 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
6823 if (*dev_intr_type != INTA)
6826 #ifndef CONFIG_PCI_MSI
6827 if (*dev_intr_type != INTA) {
6828 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
6829 "MSI/MSI-X. Defaulting to INTA\n");
6830 *dev_intr_type = INTA;
6833 if (*dev_intr_type > MSI_X) {
6834 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
6835 "Defaulting to INTA\n");
6836 *dev_intr_type = INTA;
6839 if ((*dev_intr_type == MSI_X) &&
6840 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
6841 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
6842 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
6843 "Defaulting to INTA\n");
6844 *dev_intr_type = INTA;
6847 if (rx_ring_mode > 3) {
6848 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6849 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
6856 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
6857 * or Traffic class respectively.
6858 * @nic: device peivate variable
6859 * Description: The function configures the receive steering to
6860 * desired receive ring.
6861 * Return Value: SUCCESS on success and
6862 * '-1' on failure (endian settings incorrect).
6864 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
6866 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6867 register u64 val64 = 0;
6869 if (ds_codepoint > 63)
6872 val64 = RTS_DS_MEM_DATA(ring);
6873 writeq(val64, &bar0->rts_ds_mem_data);
6875 val64 = RTS_DS_MEM_CTRL_WE |
6876 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
6877 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
6879 writeq(val64, &bar0->rts_ds_mem_ctrl);
6881 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
6882 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
6887 * s2io_init_nic - Initialization of the adapter .
6888 * @pdev : structure containing the PCI related information of the device.
6889 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
6891 * The function initializes an adapter identified by the pci_dec structure.
6892 * All OS related initialization including memory and device structure and
6893 * initlaization of the device private variable is done. Also the swapper
6894 * control register is initialized to enable read and write into the I/O
6895 * registers of the device.
6897 * returns 0 on success and negative on failure.
6900 static int __devinit
6901 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6903 struct s2io_nic *sp;
6904 struct net_device *dev;
6906 int dma_flag = FALSE;
6907 u32 mac_up, mac_down;
6908 u64 val64 = 0, tmp64 = 0;
6909 struct XENA_dev_config __iomem *bar0 = NULL;
6911 struct mac_info *mac_control;
6912 struct config_param *config;
6914 u8 dev_intr_type = intr_type;
6916 if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
6919 if ((ret = pci_enable_device(pdev))) {
6921 "s2io_init_nic: pci_enable_device failed\n");
6925 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
6926 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
6928 if (pci_set_consistent_dma_mask
6929 (pdev, DMA_64BIT_MASK)) {
6931 "Unable to obtain 64bit DMA for \
6932 consistent allocations\n");
6933 pci_disable_device(pdev);
6936 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
6937 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
6939 pci_disable_device(pdev);
6942 if (dev_intr_type != MSI_X) {
6943 if (pci_request_regions(pdev, s2io_driver_name)) {
6944 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
6945 pci_disable_device(pdev);
6950 if (!(request_mem_region(pci_resource_start(pdev, 0),
6951 pci_resource_len(pdev, 0), s2io_driver_name))) {
6952 DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
6953 pci_disable_device(pdev);
6956 if (!(request_mem_region(pci_resource_start(pdev, 2),
6957 pci_resource_len(pdev, 2), s2io_driver_name))) {
6958 DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
6959 release_mem_region(pci_resource_start(pdev, 0),
6960 pci_resource_len(pdev, 0));
6961 pci_disable_device(pdev);
6966 dev = alloc_etherdev(sizeof(struct s2io_nic));
6968 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
6969 pci_disable_device(pdev);
6970 pci_release_regions(pdev);
6974 pci_set_master(pdev);
6975 pci_set_drvdata(pdev, dev);
6976 SET_MODULE_OWNER(dev);
6977 SET_NETDEV_DEV(dev, &pdev->dev);
6979 /* Private member variable initialized to s2io NIC structure */
6981 memset(sp, 0, sizeof(struct s2io_nic));
6984 sp->high_dma_flag = dma_flag;
6985 sp->device_enabled_once = FALSE;
6986 if (rx_ring_mode == 1)
6987 sp->rxd_mode = RXD_MODE_1;
6988 if (rx_ring_mode == 2)
6989 sp->rxd_mode = RXD_MODE_3B;
6990 if (rx_ring_mode == 3)
6991 sp->rxd_mode = RXD_MODE_3A;
6993 sp->intr_type = dev_intr_type;
6995 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
6996 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
6997 sp->device_type = XFRAME_II_DEVICE;
6999 sp->device_type = XFRAME_I_DEVICE;
7003 /* Initialize some PCI/PCI-X fields of the NIC. */
7007 * Setting the device configuration parameters.
7008 * Most of these parameters can be specified by the user during
7009 * module insertion as they are module loadable parameters. If
7010 * these parameters are not not specified during load time, they
7011 * are initialized with default values.
7013 mac_control = &sp->mac_control;
7014 config = &sp->config;
7016 /* Tx side parameters. */
7017 config->tx_fifo_num = tx_fifo_num;
7018 for (i = 0; i < MAX_TX_FIFOS; i++) {
7019 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7020 config->tx_cfg[i].fifo_priority = i;
7023 /* mapping the QoS priority to the configured fifos */
7024 for (i = 0; i < MAX_TX_FIFOS; i++)
7025 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
7027 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7028 for (i = 0; i < config->tx_fifo_num; i++) {
7029 config->tx_cfg[i].f_no_snoop =
7030 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7031 if (config->tx_cfg[i].fifo_len < 65) {
7032 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7036 /* + 2 because one Txd for skb->data and one Txd for UFO */
7037 config->max_txds = MAX_SKB_FRAGS + 2;
7039 /* Rx side parameters. */
7040 config->rx_ring_num = rx_ring_num;
7041 for (i = 0; i < MAX_RX_RINGS; i++) {
7042 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7043 (rxd_count[sp->rxd_mode] + 1);
7044 config->rx_cfg[i].ring_priority = i;
7047 for (i = 0; i < rx_ring_num; i++) {
7048 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7049 config->rx_cfg[i].f_no_snoop =
7050 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7053 /* Setting Mac Control parameters */
7054 mac_control->rmac_pause_time = rmac_pause_time;
7055 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7056 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7059 /* Initialize Ring buffer parameters. */
7060 for (i = 0; i < config->rx_ring_num; i++)
7061 atomic_set(&sp->rx_bufs_left[i], 0);
7063 /* Initialize the number of ISRs currently running */
7064 atomic_set(&sp->isr_cnt, 0);
7066 /* initialize the shared memory used by the NIC and the host */
7067 if (init_shared_mem(sp)) {
7068 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7071 goto mem_alloc_failed;
7074 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7075 pci_resource_len(pdev, 0));
7077 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7080 goto bar0_remap_failed;
7083 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7084 pci_resource_len(pdev, 2));
7086 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7089 goto bar1_remap_failed;
7092 dev->irq = pdev->irq;
7093 dev->base_addr = (unsigned long) sp->bar0;
7095 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7096 for (j = 0; j < MAX_TX_FIFOS; j++) {
7097 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7098 (sp->bar1 + (j * 0x00020000));
7101 /* Driver entry points */
7102 dev->open = &s2io_open;
7103 dev->stop = &s2io_close;
7104 dev->hard_start_xmit = &s2io_xmit;
7105 dev->get_stats = &s2io_get_stats;
7106 dev->set_multicast_list = &s2io_set_multicast;
7107 dev->do_ioctl = &s2io_ioctl;
7108 dev->change_mtu = &s2io_change_mtu;
7109 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7110 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7111 dev->vlan_rx_register = s2io_vlan_rx_register;
7112 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
7115 * will use eth_mac_addr() for dev->set_mac_address
7116 * mac address will be set every time dev->open() is called
7118 dev->poll = s2io_poll;
7121 #ifdef CONFIG_NET_POLL_CONTROLLER
7122 dev->poll_controller = s2io_netpoll;
7125 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7126 if (sp->high_dma_flag == TRUE)
7127 dev->features |= NETIF_F_HIGHDMA;
7128 dev->features |= NETIF_F_TSO;
7129 dev->features |= NETIF_F_TSO6;
7130 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
7131 dev->features |= NETIF_F_UFO;
7132 dev->features |= NETIF_F_HW_CSUM;
7135 dev->tx_timeout = &s2io_tx_watchdog;
7136 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7137 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7138 INIT_WORK(&sp->set_link_task, s2io_set_link);
7140 pci_save_state(sp->pdev);
7142 /* Setting swapper control on the NIC, for proper reset operation */
7143 if (s2io_set_swapper(sp)) {
7144 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7147 goto set_swap_failed;
7150 /* Verify if the Herc works on the slot its placed into */
7151 if (sp->device_type & XFRAME_II_DEVICE) {
7152 mode = s2io_verify_pci_mode(sp);
7154 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7155 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7157 goto set_swap_failed;
7161 /* Not needed for Herc */
7162 if (sp->device_type & XFRAME_I_DEVICE) {
7164 * Fix for all "FFs" MAC address problems observed on
7167 fix_mac_address(sp);
7172 * MAC address initialization.
7173 * For now only one mac address will be read and used.
7176 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7177 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7178 writeq(val64, &bar0->rmac_addr_cmd_mem);
7179 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7180 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
7181 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7182 mac_down = (u32) tmp64;
7183 mac_up = (u32) (tmp64 >> 32);
7185 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
7187 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7188 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7189 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7190 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7191 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7192 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7194 /* Set the factory defined MAC address initially */
7195 dev->addr_len = ETH_ALEN;
7196 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7198 /* reset Nic and bring it to known state */
7202 * Initialize the tasklet status and link state flags
7203 * and the card state parameter
7205 atomic_set(&(sp->card_state), 0);
7206 sp->tasklet_status = 0;
7209 /* Initialize spinlocks */
7210 spin_lock_init(&sp->tx_lock);
7213 spin_lock_init(&sp->put_lock);
7214 spin_lock_init(&sp->rx_lock);
7217 * SXE-002: Configure link and activity LED to init state
7220 subid = sp->pdev->subsystem_device;
7221 if ((subid & 0xFF) >= 0x07) {
7222 val64 = readq(&bar0->gpio_control);
7223 val64 |= 0x0000800000000000ULL;
7224 writeq(val64, &bar0->gpio_control);
7225 val64 = 0x0411040400000000ULL;
7226 writeq(val64, (void __iomem *) bar0 + 0x2700);
7227 val64 = readq(&bar0->gpio_control);
7230 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
7232 if (register_netdev(dev)) {
7233 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7235 goto register_failed;
7238 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
7239 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7240 sp->product_name, get_xena_rev_id(sp->pdev));
7241 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7242 s2io_driver_version);
7243 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7244 "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
7245 sp->def_mac_addr[0].mac_addr[0],
7246 sp->def_mac_addr[0].mac_addr[1],
7247 sp->def_mac_addr[0].mac_addr[2],
7248 sp->def_mac_addr[0].mac_addr[3],
7249 sp->def_mac_addr[0].mac_addr[4],
7250 sp->def_mac_addr[0].mac_addr[5]);
7251 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7252 if (sp->device_type & XFRAME_II_DEVICE) {
7253 mode = s2io_print_pci_mode(sp);
7255 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7257 unregister_netdev(dev);
7258 goto set_swap_failed;
7261 switch(sp->rxd_mode) {
7263 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7267 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7271 DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
7277 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7278 switch(sp->intr_type) {
7280 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7283 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
7286 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7290 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7293 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7294 " enabled\n", dev->name);
7295 /* Initialize device name */
7296 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7298 /* Initialize bimodal Interrupts */
7299 sp->config.bimodal = bimodal;
7300 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7301 sp->config.bimodal = 0;
7302 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
7307 * Make Link state as off at this point, when the Link change
7308 * interrupt comes the state will be automatically changed to
7311 netif_carrier_off(dev);
7322 free_shared_mem(sp);
7323 pci_disable_device(pdev);
7324 if (dev_intr_type != MSI_X)
7325 pci_release_regions(pdev);
7327 release_mem_region(pci_resource_start(pdev, 0),
7328 pci_resource_len(pdev, 0));
7329 release_mem_region(pci_resource_start(pdev, 2),
7330 pci_resource_len(pdev, 2));
7332 pci_set_drvdata(pdev, NULL);
7339 * s2io_rem_nic - Free the PCI device
7340 * @pdev: structure containing the PCI related information of the device.
7341 * Description: This function is called by the Pci subsystem to release a
7342 * PCI device and free up all resource held up by the device. This could
7343 * be in response to a Hot plug event or when the driver is to be removed
7347 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7349 struct net_device *dev =
7350 (struct net_device *) pci_get_drvdata(pdev);
7351 struct s2io_nic *sp;
7354 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7358 flush_scheduled_work();
7361 unregister_netdev(dev);
7363 free_shared_mem(sp);
7366 if (sp->intr_type != MSI_X)
7367 pci_release_regions(pdev);
7369 release_mem_region(pci_resource_start(pdev, 0),
7370 pci_resource_len(pdev, 0));
7371 release_mem_region(pci_resource_start(pdev, 2),
7372 pci_resource_len(pdev, 2));
7374 pci_set_drvdata(pdev, NULL);
7376 pci_disable_device(pdev);
7380 * s2io_starter - Entry point for the driver
7381 * Description: This function is the entry point for the driver. It verifies
7382 * the module loadable parameters and initializes PCI configuration space.
7385 int __init s2io_starter(void)
7387 return pci_register_driver(&s2io_driver);
7391 * s2io_closer - Cleanup routine for the driver
7392 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
7395 static __exit void s2io_closer(void)
7397 pci_unregister_driver(&s2io_driver);
7398 DBG_PRINT(INIT_DBG, "cleanup done\n");
7401 module_init(s2io_starter);
7402 module_exit(s2io_closer);
7404 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7405 struct tcphdr **tcp, struct RxD_t *rxdp)
7408 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7410 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7411 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7417 * By default the VLAN field in the MAC is stripped by the card, if this
7418 * feature is turned off in rx_pa_cfg register, then the ip_off field
7419 * has to be shifted by a further 2 bytes
7422 case 0: /* DIX type */
7423 case 4: /* DIX type with VLAN */
7424 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7426 /* LLC, SNAP etc are considered non-mergeable */
7431 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7432 ip_len = (u8)((*ip)->ihl);
7434 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7439 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7442 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7443 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7444 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7449 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7451 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7454 static void initiate_new_session(struct lro *lro, u8 *l2h,
7455 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7457 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7461 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7462 lro->tcp_ack = ntohl(tcp->ack_seq);
7464 lro->total_len = ntohs(ip->tot_len);
7467 * check if we saw TCP timestamp. Other consistency checks have
7468 * already been done.
7470 if (tcp->doff == 8) {
7472 ptr = (u32 *)(tcp+1);
7474 lro->cur_tsval = *(ptr+1);
7475 lro->cur_tsecr = *(ptr+2);
7480 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7482 struct iphdr *ip = lro->iph;
7483 struct tcphdr *tcp = lro->tcph;
7485 struct stat_block *statinfo = sp->mac_control.stats_info;
7486 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7488 /* Update L3 header */
7489 ip->tot_len = htons(lro->total_len);
7491 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7494 /* Update L4 header */
7495 tcp->ack_seq = lro->tcp_ack;
7496 tcp->window = lro->window;
7498 /* Update tsecr field if this session has timestamps enabled */
7500 u32 *ptr = (u32 *)(tcp + 1);
7501 *(ptr+2) = lro->cur_tsecr;
7504 /* Update counters required for calculation of
7505 * average no. of packets aggregated.
7507 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7508 statinfo->sw_stat.num_aggregations++;
7511 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7512 struct tcphdr *tcp, u32 l4_pyld)
7514 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7515 lro->total_len += l4_pyld;
7516 lro->frags_len += l4_pyld;
7517 lro->tcp_next_seq += l4_pyld;
7520 /* Update ack seq no. and window ad(from this pkt) in LRO object */
7521 lro->tcp_ack = tcp->ack_seq;
7522 lro->window = tcp->window;
7526 /* Update tsecr and tsval from this packet */
7527 ptr = (u32 *) (tcp + 1);
7528 lro->cur_tsval = *(ptr + 1);
7529 lro->cur_tsecr = *(ptr + 2);
7533 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7534 struct tcphdr *tcp, u32 tcp_pyld_len)
7538 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7540 if (!tcp_pyld_len) {
7541 /* Runt frame or a pure ack */
7545 if (ip->ihl != 5) /* IP has options */
7548 /* If we see CE codepoint in IP header, packet is not mergeable */
7549 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7552 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7553 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7554 tcp->ece || tcp->cwr || !tcp->ack) {
7556 * Currently recognize only the ack control word and
7557 * any other control field being set would result in
7558 * flushing the LRO session
7564 * Allow only one TCP timestamp option. Don't aggregate if
7565 * any other options are detected.
7567 if (tcp->doff != 5 && tcp->doff != 8)
7570 if (tcp->doff == 8) {
7571 ptr = (u8 *)(tcp + 1);
7572 while (*ptr == TCPOPT_NOP)
7574 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
7577 /* Ensure timestamp value increases monotonically */
7579 if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
7582 /* timestamp echo reply should be non-zero */
7583 if (*((u32 *)(ptr+6)) == 0)
7591 s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
7592 struct RxD_t *rxdp, struct s2io_nic *sp)
7595 struct tcphdr *tcph;
7598 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
7600 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
7601 ip->saddr, ip->daddr);
7606 tcph = (struct tcphdr *)*tcp;
7607 *tcp_len = get_l4_pyld_length(ip, tcph);
7608 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7609 struct lro *l_lro = &sp->lro0_n[i];
7610 if (l_lro->in_use) {
7611 if (check_for_socket_match(l_lro, ip, tcph))
7613 /* Sock pair matched */
7616 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
7617 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
7618 "0x%x, actual 0x%x\n", __FUNCTION__,
7619 (*lro)->tcp_next_seq,
7622 sp->mac_control.stats_info->
7623 sw_stat.outof_sequence_pkts++;
7628 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
7629 ret = 1; /* Aggregate */
7631 ret = 2; /* Flush both */
7637 /* Before searching for available LRO objects,
7638 * check if the pkt is L3/L4 aggregatable. If not
7639 * don't create new LRO session. Just send this
7642 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
7646 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7647 struct lro *l_lro = &sp->lro0_n[i];
7648 if (!(l_lro->in_use)) {
7650 ret = 3; /* Begin anew */
7656 if (ret == 0) { /* sessions exceeded */
7657 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
7665 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
7668 update_L3L4_header(sp, *lro);
7671 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
7672 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
7673 update_L3L4_header(sp, *lro);
7674 ret = 4; /* Flush the LRO */
7678 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
7686 static void clear_lro_session(struct lro *lro)
7688 static u16 lro_struct_size = sizeof(struct lro);
7690 memset(lro, 0, lro_struct_size);
7693 static void queue_rx_frame(struct sk_buff *skb)
7695 struct net_device *dev = skb->dev;
7697 skb->protocol = eth_type_trans(skb, dev);
7699 netif_receive_skb(skb);
7704 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
7705 struct sk_buff *skb,
7708 struct sk_buff *first = lro->parent;
7710 first->len += tcp_len;
7711 first->data_len = lro->frags_len;
7712 skb_pull(skb, (skb->len - tcp_len));
7713 if (skb_shinfo(first)->frag_list)
7714 lro->last_frag->next = skb;
7716 skb_shinfo(first)->frag_list = skb;
7717 first->truesize += skb->truesize;
7718 lro->last_frag = skb;
7719 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;