1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
26 #include <net/ip6_checksum.h>
27 #include <net/busy_poll.h>
28 #include <linux/prefetch.h>
29 #include "bnx2x_cmn.h"
30 #include "bnx2x_init.h"
34 * bnx2x_move_fp - move content of the fastpath structure.
37 * @from: source FP index
38 * @to: destination FP index
40 * Makes sure the contents of the bp->fp[to].napi are kept
41 * intact. This is done by first copying the napi struct from
42 * the target to the source, and then copying the entire
43 * source onto the target. Update txdata pointers and related
46 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
50 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
57 /* Copy the NAPI object as it has been already initialized */
58 from_fp->napi = to_fp->napi;
60 /* Move bnx2x_fastpath contents */
61 memcpy(to_fp, from_fp, sizeof(*to_fp));
64 /* move sp_objs contents as well, as their indices match fp ones */
65 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
67 /* move fp_stats contents as well, as their indices match fp ones */
68 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
70 /* Update txdata pointers in fp and move txdata content accordingly:
71 * Each fp consumes 'max_cos' txdata structures, so the index should be
72 * decremented by max_cos x delta.
75 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
76 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
78 if (from == FCOE_IDX(bp)) {
79 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
83 memcpy(&bp->bnx2x_txq[new_txdata_index],
84 &bp->bnx2x_txq[old_txdata_index],
85 sizeof(struct bnx2x_fp_txdata));
86 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
90 * bnx2x_fill_fw_str - Fill buffer with FW version string.
93 * @buf: character buffer to fill with the FW version string
94 * @buf_len: length of the above buffer
97 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
100 u8 phy_fw_ver[PHY_FW_VER_LEN];
102 phy_fw_ver[0] = '\0';
103 bnx2x_get_ext_phy_fw_version(&bp->link_params,
104 phy_fw_ver, PHY_FW_VER_LEN);
105 strlcpy(buf, bp->fw_ver, buf_len);
106 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
108 (bp->common.bc_ver & 0xff0000) >> 16,
109 (bp->common.bc_ver & 0xff00) >> 8,
110 (bp->common.bc_ver & 0xff),
111 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
113 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
118 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
121 * @delta: number of eth queues which were not allocated
123 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
125 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
127 /* Queue pointer cannot be re-set on an fp-basis, as moving a pointer
128 * backward along the array could cause memory to be overwritten
130 for (cos = 1; cos < bp->max_cos; cos++) {
131 for (i = 0; i < old_eth_num - delta; i++) {
132 struct bnx2x_fastpath *fp = &bp->fp[i];
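/* bnx2x_txq is laid out cos-major (cos * num_eth_queues + queue),
 * so recompute the flat index for the reduced queue count
 */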
133 int new_idx = cos * (old_eth_num - delta) + i;
135 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
136 sizeof(struct bnx2x_fp_txdata));
137 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
142 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
144 /* free skb in the packet ring at pos idx
145 * return idx of last bd freed
147 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
148 u16 idx, unsigned int *pkts_compl,
149 unsigned int *bytes_compl)
151 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
152 struct eth_tx_start_bd *tx_start_bd;
153 struct eth_tx_bd *tx_data_bd;
154 struct sk_buff *skb = tx_buf->skb;
155 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
158 /* prefetch skb end pointer to speedup dev_kfree_skb() */
161 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
162 txdata->txq_index, idx, tx_buf, skb);
165 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
166 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
167 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
169 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
170 #ifdef BNX2X_STOP_ON_ERROR
171 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
172 BNX2X_ERR("BAD nbd!\n");
176 new_cons = nbd + tx_buf->first_bd;
178 /* Get the next bd */
179 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
181 /* Skip a parse bd... */
183 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
185 /* ...and the TSO split header bd since they have no mapping */
186 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
188 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
194 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
195 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
196 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
198 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
205 (*bytes_compl) += skb->len;
208 dev_kfree_skb_any(skb);
209 tx_buf->first_bd = 0;
215 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
217 struct netdev_queue *txq;
218 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
219 unsigned int pkts_compl = 0, bytes_compl = 0;
221 #ifdef BNX2X_STOP_ON_ERROR
222 if (unlikely(bp->panic))
226 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
227 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
228 sw_cons = txdata->tx_pkt_cons;
230 while (sw_cons != hw_cons) {
233 pkt_cons = TX_BD(sw_cons);
235 DP(NETIF_MSG_TX_DONE,
236 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
237 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
239 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
240 &pkts_compl, &bytes_compl);
245 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
247 txdata->tx_pkt_cons = sw_cons;
248 txdata->tx_bd_cons = bd_cons;
250 /* Need to make the tx_bd_cons update visible to start_xmit()
251 * before checking for netif_tx_queue_stopped(). Without the
252 * memory barrier, there is a small possibility that
253 * start_xmit() will miss it and cause the queue to be stopped
255 * On the other hand we need an rmb() here to ensure the proper
256 * ordering of bit testing in the following
257 * netif_tx_queue_stopped(txq) call.
261 if (unlikely(netif_tx_queue_stopped(txq))) {
262 /* Taking tx_lock() is needed to prevent re-enabling the queue
263 * while it's empty. This could happen if rx_action() gets
264 * suspended in bnx2x_tx_int() after the condition before
265 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
267 * stops the queue->sees fresh tx_bd_cons->releases the queue->
268 * sends some packets consuming the whole queue again->
272 __netif_tx_lock(txq, smp_processor_id());
274 if ((netif_tx_queue_stopped(txq)) &&
275 (bp->state == BNX2X_STATE_OPEN) &&
276 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
277 netif_tx_wake_queue(txq);
279 __netif_tx_unlock(txq);
284 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
287 u16 last_max = fp->last_max_sge;
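/* use a signed 16-bit difference so a wrapped ring index still
 * compares as more recent than the stored maximum
 */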
289 if (SUB_S16(idx, last_max) > 0)
290 fp->last_max_sge = idx;
293 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
295 struct eth_end_agg_rx_cqe *cqe)
297 struct bnx2x *bp = fp->bp;
298 u16 last_max, last_elem, first_elem;
305 /* First mark all used pages */
306 for (i = 0; i < sge_len; i++)
307 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
308 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
310 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
311 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
313 /* Here we assume that the last SGE index is the biggest */
314 prefetch((void *)(fp->sge_mask));
315 bnx2x_update_last_max_sge(fp,
316 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
318 last_max = RX_SGE(fp->last_max_sge);
319 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
320 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
322 /* If ring is not full */
323 if (last_elem + 1 != first_elem)
326 /* Now update the prod */
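/* the producer may only advance past mask elements whose SGEs have
 * all been consumed (element == 0); re-arm those and add them to delta
 */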
327 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
328 if (likely(fp->sge_mask[i]))
331 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
332 delta += BIT_VEC64_ELEM_SZ;
336 fp->rx_sge_prod += delta;
337 /* clear page-end entries */
338 bnx2x_clear_sge_mask_next_elems(fp);
341 DP(NETIF_MSG_RX_STATUS,
342 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
343 fp->last_max_sge, fp->rx_sge_prod);
346 /* Get Toeplitz hash value in the skb using the value from the
347 * CQE (calculated by HW).
349 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
350 const struct eth_fast_path_rx_cqe *cqe,
353 /* Get Toeplitz hash from CQE */
354 if ((bp->dev->features & NETIF_F_RXHASH) &&
355 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
356 enum eth_rss_hash_type htype;
358 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
359 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
360 (htype == TCP_IPV6_HASH_TYPE);
361 return le32_to_cpu(cqe->rss_hash_result);
367 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
369 struct eth_fast_path_rx_cqe *cqe)
371 struct bnx2x *bp = fp->bp;
372 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
373 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
374 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
376 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
377 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
379 /* print error if current state != stop */
380 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
381 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
383 /* Try to map an empty data buffer from the aggregation info */
384 mapping = dma_map_single(&bp->pdev->dev,
385 first_buf->data + NET_SKB_PAD,
386 fp->rx_buf_size, DMA_FROM_DEVICE);
388 * ...if it fails - move the skb from the consumer to the producer
389 * and set the current aggregation state as ERROR to drop it
390 * when TPA_STOP arrives.
393 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
394 /* Move the BD from the consumer to the producer */
395 bnx2x_reuse_rx_data(fp, cons, prod);
396 tpa_info->tpa_state = BNX2X_TPA_ERROR;
400 /* move empty data from pool to prod */
401 prod_rx_buf->data = first_buf->data;
402 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
403 /* point prod_bd to new data */
404 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
405 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
407 /* move partial skb from cons to pool (don't unmap yet) */
408 *first_buf = *cons_rx_buf;
410 /* mark bin state as START */
411 tpa_info->parsing_flags =
412 le16_to_cpu(cqe->pars_flags.flags);
413 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
414 tpa_info->tpa_state = BNX2X_TPA_START;
415 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
416 tpa_info->placement_offset = cqe->placement_offset;
417 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
418 if (fp->mode == TPA_MODE_GRO) {
419 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
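/* full_page = SGE_PAGES rounded down to a whole number of MSS-sized segments */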
420 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
421 tpa_info->gro_size = gro_size;
424 #ifdef BNX2X_STOP_ON_ERROR
425 fp->tpa_queue_used |= (1 << queue);
426 #ifdef _ASM_GENERIC_INT_L64_H
427 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
429 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
435 /* Timestamp option length allowed for TPA aggregation:
437 * nop nop kind length echo val
439 #define TPA_TSTAMP_OPT_LEN 12
441 * bnx2x_set_gro_params - compute GRO values
444 * @parsing_flags: parsing flags from the START CQE
445 * @len_on_bd: total length of the first packet for the
447 * @pkt_len: length of all segments
449 * The MSS for this aggregation is approximated using its
450 * first packet.
451 * Compute number of aggregated segments, and gso_type.
453 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
454 u16 len_on_bd, unsigned int pkt_len,
455 u16 num_of_coalesced_segs)
457 /* TPA aggregation won't have either IP options or TCP options
458 * other than timestamp or IPv6 extension headers.
460 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
462 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
463 PRS_FLAG_OVERETH_IPV6) {
464 hdrs_len += sizeof(struct ipv6hdr);
465 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
467 hdrs_len += sizeof(struct iphdr);
468 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
471 /* Check if there was a TCP timestamp; if there was, it will
472 * always be 12 bytes long: nop nop kind length echo val.
474 * Otherwise FW would close the aggregation.
476 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
477 hdrs_len += TPA_TSTAMP_OPT_LEN;
479 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
481 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
482 * to skb_shinfo(skb)->gso_segs
484 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
487 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
488 struct bnx2x_fastpath *fp, u16 index)
490 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
491 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
492 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
495 if (unlikely(page == NULL)) {
496 BNX2X_ERR("Can't alloc sge\n");
500 mapping = dma_map_page(&bp->pdev->dev, page, 0,
501 SGE_PAGES, DMA_FROM_DEVICE);
502 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
503 __free_pages(page, PAGES_PER_SGE_SHIFT);
504 BNX2X_ERR("Can't map sge\n");
509 dma_unmap_addr_set(sw_buf, mapping, mapping);
511 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
512 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
517 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
518 struct bnx2x_agg_info *tpa_info,
521 struct eth_end_agg_rx_cqe *cqe,
524 struct sw_rx_page *rx_pg, old_rx_pg;
525 u32 i, frag_len, frag_size;
526 int err, j, frag_id = 0;
527 u16 len_on_bd = tpa_info->len_on_bd;
528 u16 full_page = 0, gro_size = 0;
530 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
532 if (fp->mode == TPA_MODE_GRO) {
533 gro_size = tpa_info->gro_size;
534 full_page = tpa_info->full_page;
537 /* This is needed in order to enable forwarding support */
539 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
540 le16_to_cpu(cqe->pkt_len),
541 le16_to_cpu(cqe->num_of_coalesced_segs));
543 #ifdef BNX2X_STOP_ON_ERROR
544 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
545 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
547 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
553 /* Run through the SGL and compose the fragmented skb */
554 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
555 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
557 /* FW gives the indices of the SGE as if the ring is an array
558 (meaning that "next" element will consume 2 indices) */
559 if (fp->mode == TPA_MODE_GRO)
560 frag_len = min_t(u32, frag_size, (u32)full_page);
562 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
564 rx_pg = &fp->rx_page_ring[sge_idx];
567 /* If we fail to allocate a substitute page, we simply stop
568 where we are and drop the whole packet */
569 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
571 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
575 /* Unmap the page as we're going to pass it to the stack */
576 dma_unmap_page(&bp->pdev->dev,
577 dma_unmap_addr(&old_rx_pg, mapping),
578 SGE_PAGES, DMA_FROM_DEVICE);
579 /* Add one frag and update the appropriate fields in the skb */
580 if (fp->mode == TPA_MODE_LRO)
581 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
585 for (rem = frag_len; rem > 0; rem -= gro_size) {
586 int len = rem > gro_size ? gro_size : rem;
587 skb_fill_page_desc(skb, frag_id++,
588 old_rx_pg.page, offset, len);
590 get_page(old_rx_pg.page);
595 skb->data_len += frag_len;
596 skb->truesize += SGE_PAGES;
597 skb->len += frag_len;
599 frag_size -= frag_len;
605 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
607 if (fp->rx_frag_size)
608 put_page(virt_to_head_page(data));
613 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
615 if (fp->rx_frag_size)
616 return netdev_alloc_frag(fp->rx_frag_size);
618 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
622 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
624 const struct iphdr *iph = ip_hdr(skb);
627 skb_set_transport_header(skb, sizeof(struct iphdr));
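/* store the TCP pseudo-header checksum in th->check; tcp_gro_complete()
 * relies on it when finishing the aggregated skb
 */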
630 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
631 iph->saddr, iph->daddr, 0);
634 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
636 struct ipv6hdr *iph = ipv6_hdr(skb);
639 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
642 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
643 &iph->saddr, &iph->daddr, 0);
646 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
647 void (*gro_func)(struct bnx2x*, struct sk_buff*))
649 skb_set_network_header(skb, 0);
651 tcp_gro_complete(skb);
655 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
659 if (skb_shinfo(skb)->gso_size) {
660 switch (be16_to_cpu(skb->protocol)) {
662 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
665 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
668 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
669 be16_to_cpu(skb->protocol));
673 napi_gro_receive(&fp->napi, skb);
676 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
677 struct bnx2x_agg_info *tpa_info,
679 struct eth_end_agg_rx_cqe *cqe,
682 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
683 u8 pad = tpa_info->placement_offset;
684 u16 len = tpa_info->len_on_bd;
685 struct sk_buff *skb = NULL;
686 u8 *new_data, *data = rx_buf->data;
687 u8 old_tpa_state = tpa_info->tpa_state;
689 tpa_info->tpa_state = BNX2X_TPA_STOP;
691 /* If there was an error during the handling of the TPA_START -
692 * drop this aggregation.
694 if (old_tpa_state == BNX2X_TPA_ERROR)
697 /* Try to allocate the new data */
698 new_data = bnx2x_frag_alloc(fp);
699 /* Unmap the skb in the pool anyway, as we are going to change the
700 pool entry status to BNX2X_TPA_STOP even if the new skb allocation
702 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
703 fp->rx_buf_size, DMA_FROM_DEVICE);
704 if (likely(new_data))
705 skb = build_skb(data, fp->rx_frag_size);
708 #ifdef BNX2X_STOP_ON_ERROR
709 if (pad + len > fp->rx_buf_size) {
710 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
711 pad, len, fp->rx_buf_size);
717 skb_reserve(skb, pad + NET_SKB_PAD);
719 skb->rxhash = tpa_info->rxhash;
720 skb->l4_rxhash = tpa_info->l4_rxhash;
722 skb->protocol = eth_type_trans(skb, bp->dev);
723 skb->ip_summed = CHECKSUM_UNNECESSARY;
725 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
726 skb, cqe, cqe_idx)) {
727 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
728 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
729 bnx2x_gro_receive(bp, fp, skb);
731 DP(NETIF_MSG_RX_STATUS,
732 "Failed to allocate new pages - dropping packet!\n");
733 dev_kfree_skb_any(skb);
736 /* put new data in bin */
737 rx_buf->data = new_data;
741 bnx2x_frag_free(fp, new_data);
743 /* drop the packet and keep the buffer in the bin */
744 DP(NETIF_MSG_RX_STATUS,
745 "Failed to allocate or map a new skb - dropping packet!\n");
746 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
749 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
750 struct bnx2x_fastpath *fp, u16 index)
753 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
754 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
757 data = bnx2x_frag_alloc(fp);
758 if (unlikely(data == NULL))
761 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
764 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
765 bnx2x_frag_free(fp, data);
766 BNX2X_ERR("Can't map rx data\n");
771 dma_unmap_addr_set(rx_buf, mapping, mapping);
773 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
774 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
780 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
781 struct bnx2x_fastpath *fp,
782 struct bnx2x_eth_q_stats *qstats)
784 /* Do nothing if no L4 csum validation was done.
785 * We do not check whether IP csum was validated. For IPv4 we assume
786 * that if the card got as far as validating the L4 csum, it also
787 * validated the IP csum. IPv6 has no IP csum.
789 if (cqe->fast_path_cqe.status_flags &
790 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
793 /* If L4 validation was done, check if an error was found. */
795 if (cqe->fast_path_cqe.type_error_flags &
796 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
797 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
798 qstats->hw_csum_err++;
800 skb->ip_summed = CHECKSUM_UNNECESSARY;
803 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
805 struct bnx2x *bp = fp->bp;
806 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
807 u16 sw_comp_cons, sw_comp_prod;
809 union eth_rx_cqe *cqe;
810 struct eth_fast_path_rx_cqe *cqe_fp;
812 #ifdef BNX2X_STOP_ON_ERROR
813 if (unlikely(bp->panic))
817 bd_cons = fp->rx_bd_cons;
818 bd_prod = fp->rx_bd_prod;
819 bd_prod_fw = bd_prod;
820 sw_comp_cons = fp->rx_comp_cons;
821 sw_comp_prod = fp->rx_comp_prod;
823 comp_ring_cons = RCQ_BD(sw_comp_cons);
824 cqe = &fp->rx_comp_ring[comp_ring_cons];
825 cqe_fp = &cqe->fast_path_cqe;
827 DP(NETIF_MSG_RX_STATUS,
828 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
830 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
831 struct sw_rx_bd *rx_buf = NULL;
834 enum eth_rx_cqe_type cqe_fp_type;
839 #ifdef BNX2X_STOP_ON_ERROR
840 if (unlikely(bp->panic))
844 bd_prod = RX_BD(bd_prod);
845 bd_cons = RX_BD(bd_cons);
847 cqe_fp_flags = cqe_fp->type_error_flags;
848 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
850 DP(NETIF_MSG_RX_STATUS,
851 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
852 CQE_TYPE(cqe_fp_flags),
853 cqe_fp_flags, cqe_fp->status_flags,
854 le32_to_cpu(cqe_fp->rss_hash_result),
855 le16_to_cpu(cqe_fp->vlan_tag),
856 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
858 /* is this a slowpath msg? */
859 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
860 bnx2x_sp_event(fp, cqe);
864 rx_buf = &fp->rx_buf_ring[bd_cons];
867 if (!CQE_TYPE_FAST(cqe_fp_type)) {
868 struct bnx2x_agg_info *tpa_info;
869 u16 frag_size, pages;
870 #ifdef BNX2X_STOP_ON_ERROR
872 if (fp->disable_tpa &&
873 (CQE_TYPE_START(cqe_fp_type) ||
874 CQE_TYPE_STOP(cqe_fp_type)))
875 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
876 CQE_TYPE(cqe_fp_type));
879 if (CQE_TYPE_START(cqe_fp_type)) {
880 u16 queue = cqe_fp->queue_index;
881 DP(NETIF_MSG_RX_STATUS,
882 "calling tpa_start on queue %d\n",
885 bnx2x_tpa_start(fp, queue,
891 queue = cqe->end_agg_cqe.queue_index;
892 tpa_info = &fp->tpa_info[queue];
893 DP(NETIF_MSG_RX_STATUS,
894 "calling tpa_stop on queue %d\n",
897 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
900 if (fp->mode == TPA_MODE_GRO)
901 pages = (frag_size + tpa_info->full_page - 1) /
904 pages = SGE_PAGE_ALIGN(frag_size) >>
907 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
908 &cqe->end_agg_cqe, comp_ring_cons);
909 #ifdef BNX2X_STOP_ON_ERROR
914 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
918 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
919 pad = cqe_fp->placement_offset;
920 dma_sync_single_for_cpu(&bp->pdev->dev,
921 dma_unmap_addr(rx_buf, mapping),
922 pad + RX_COPY_THRESH,
925 prefetch(data + pad); /* speedup eth_type_trans() */
926 /* is this an error packet? */
927 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
928 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
929 "ERROR flags %x rx packet %u\n",
930 cqe_fp_flags, sw_comp_cons);
931 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
935 /* Since we don't have a jumbo ring
936 * copy small packets if mtu > 1500
938 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
939 (len <= RX_COPY_THRESH)) {
940 skb = netdev_alloc_skb_ip_align(bp->dev, len);
942 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
943 "ERROR packet dropped because of alloc failure\n");
944 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
947 memcpy(skb->data, data + pad, len);
948 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
950 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
951 dma_unmap_single(&bp->pdev->dev,
952 dma_unmap_addr(rx_buf, mapping),
955 skb = build_skb(data, fp->rx_frag_size);
956 if (unlikely(!skb)) {
957 bnx2x_frag_free(fp, data);
958 bnx2x_fp_qstats(bp, fp)->
959 rx_skb_alloc_failed++;
962 skb_reserve(skb, pad);
964 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
965 "ERROR packet dropped because of alloc failure\n");
966 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
968 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
974 skb->protocol = eth_type_trans(skb, bp->dev);
976 /* Set Toeplitz hash for a non-LRO skb */
977 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
978 skb->l4_rxhash = l4_rxhash;
980 skb_checksum_none_assert(skb);
982 if (bp->dev->features & NETIF_F_RXCSUM)
983 bnx2x_csum_validate(skb, cqe, fp,
984 bnx2x_fp_qstats(bp, fp));
986 skb_record_rx_queue(skb, fp->rx_queue);
988 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
990 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
991 le16_to_cpu(cqe_fp->vlan_tag));
993 skb_mark_napi_id(skb, &fp->napi);
995 if (bnx2x_fp_ll_polling(fp))
996 netif_receive_skb(skb);
998 napi_gro_receive(&fp->napi, skb);
1000 rx_buf->data = NULL;
1002 bd_cons = NEXT_RX_IDX(bd_cons);
1003 bd_prod = NEXT_RX_IDX(bd_prod);
1004 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1007 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1008 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1010 /* mark CQE as free */
1011 BNX2X_SEED_CQE(cqe_fp);
1013 if (rx_pkt == budget)
1016 comp_ring_cons = RCQ_BD(sw_comp_cons);
1017 cqe = &fp->rx_comp_ring[comp_ring_cons];
1018 cqe_fp = &cqe->fast_path_cqe;
1021 fp->rx_bd_cons = bd_cons;
1022 fp->rx_bd_prod = bd_prod_fw;
1023 fp->rx_comp_cons = sw_comp_cons;
1024 fp->rx_comp_prod = sw_comp_prod;
1026 /* Update producers */
1027 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1030 fp->rx_pkt += rx_pkt;
1036 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1038 struct bnx2x_fastpath *fp = fp_cookie;
1039 struct bnx2x *bp = fp->bp;
1043 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1044 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1046 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1048 #ifdef BNX2X_STOP_ON_ERROR
1049 if (unlikely(bp->panic))
1053 /* Handle Rx and Tx according to MSI-X vector */
1054 for_each_cos_in_tx_queue(fp, cos)
1055 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1057 prefetch(&fp->sb_running_index[SM_RX_ID]);
1058 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1063 /* HW Lock for shared dual port PHYs */
1064 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1066 mutex_lock(&bp->port.phy_mutex);
1068 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1071 void bnx2x_release_phy_lock(struct bnx2x *bp)
1073 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1075 mutex_unlock(&bp->port.phy_mutex);
1078 /* calculates MF speed according to current linespeed and MF configuration */
1079 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1081 u16 line_speed = bp->link_vars.line_speed;
1083 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1084 bp->mf_config[BP_VN(bp)]);
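/* maxCfg is a percentage of the line speed in SI mode and is in
 * 100 Mbps units in SD mode
 */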
1086 /* Calculate the current MAX line speed limit for the MF
1090 line_speed = (line_speed * maxCfg) / 100;
1091 else { /* SD mode */
1092 u16 vn_max_rate = maxCfg * 100;
1094 if (vn_max_rate < line_speed)
1095 line_speed = vn_max_rate;
1103 * bnx2x_fill_report_data - fill link report data to report
1105 * @bp: driver handle
1106 * @data: link state to update
1108 * It uses non-atomic bit operations because it is called under the mutex.
1110 static void bnx2x_fill_report_data(struct bnx2x *bp,
1111 struct bnx2x_link_report_data *data)
1113 u16 line_speed = bnx2x_get_mf_speed(bp);
1115 memset(data, 0, sizeof(*data));
1117 /* Fill the report data: effective line speed */
1118 data->line_speed = line_speed;
1121 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1122 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1123 &data->link_report_flags);
1126 if (bp->link_vars.duplex == DUPLEX_FULL)
1127 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1129 /* Rx Flow Control is ON */
1130 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1131 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1133 /* Tx Flow Control is ON */
1134 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1135 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1139 * bnx2x_link_report - report link status to OS.
1141 * @bp: driver handle
1143 * Calls __bnx2x_link_report() under the same locking scheme
1144 * as the link/PHY state managing code to ensure a consistent link
1148 void bnx2x_link_report(struct bnx2x *bp)
1150 bnx2x_acquire_phy_lock(bp);
1151 __bnx2x_link_report(bp);
1152 bnx2x_release_phy_lock(bp);
1156 * __bnx2x_link_report - report link status to OS.
1158 * @bp: driver handle
1160 * Non-atomic implementation.
1161 * Should be called under the phy_lock.
1163 void __bnx2x_link_report(struct bnx2x *bp)
1165 struct bnx2x_link_report_data cur_data;
1168 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1169 bnx2x_read_mf_cfg(bp);
1171 /* Read the current link report info */
1172 bnx2x_fill_report_data(bp, &cur_data);
1174 /* Don't report link down or exactly the same link status twice */
1175 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1176 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1177 &bp->last_reported_link.link_report_flags) &&
1178 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1179 &cur_data.link_report_flags)))
1184 /* We are going to report new link parameters now -
1185 * remember the current data for next time.
1187 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1189 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1190 &cur_data.link_report_flags)) {
1191 netif_carrier_off(bp->dev);
1192 netdev_err(bp->dev, "NIC Link is Down\n");
1198 netif_carrier_on(bp->dev);
1200 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1201 &cur_data.link_report_flags))
1206 /* Handle the FC at the end so that only these flags could
1207 * possibly be set. This way we can easily check if there is no FC
1210 if (cur_data.link_report_flags) {
1211 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1212 &cur_data.link_report_flags)) {
1213 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1214 &cur_data.link_report_flags))
1215 flow = "ON - receive & transmit";
1217 flow = "ON - receive";
1219 flow = "ON - transmit";
1224 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1225 cur_data.line_speed, duplex, flow);
1229 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1233 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1234 struct eth_rx_sge *sge;
1236 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
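/* the last two SGE entries of each ring page hold the address of the next page */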
1238 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1239 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1242 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1243 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1247 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1248 struct bnx2x_fastpath *fp, int last)
1252 for (i = 0; i < last; i++) {
1253 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1254 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1255 u8 *data = first_buf->data;
1258 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1261 if (tpa_info->tpa_state == BNX2X_TPA_START)
1262 dma_unmap_single(&bp->pdev->dev,
1263 dma_unmap_addr(first_buf, mapping),
1264 fp->rx_buf_size, DMA_FROM_DEVICE);
1265 bnx2x_frag_free(fp, data);
1266 first_buf->data = NULL;
1270 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1274 for_each_rx_queue_cnic(bp, j) {
1275 struct bnx2x_fastpath *fp = &bp->fp[j];
1279 /* Activate BD ring */
1281 * this will generate an interrupt (to the TSTORM);
1282 * it must only be done after the chip is initialized
1284 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1289 void bnx2x_init_rx_rings(struct bnx2x *bp)
1291 int func = BP_FUNC(bp);
1295 /* Allocate TPA resources */
1296 for_each_eth_queue(bp, j) {
1297 struct bnx2x_fastpath *fp = &bp->fp[j];
1300 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1302 if (!fp->disable_tpa) {
1303 /* Fill the per-aggregation pool */
1304 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1305 struct bnx2x_agg_info *tpa_info =
1307 struct sw_rx_bd *first_buf =
1308 &tpa_info->first_buf;
1310 first_buf->data = bnx2x_frag_alloc(fp);
1311 if (!first_buf->data) {
1312 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1314 bnx2x_free_tpa_pool(bp, fp, i);
1315 fp->disable_tpa = 1;
1318 dma_unmap_addr_set(first_buf, mapping, 0);
1319 tpa_info->tpa_state = BNX2X_TPA_STOP;
1322 /* "next page" elements initialization */
1323 bnx2x_set_next_page_sgl(fp);
1325 /* set SGEs bit mask */
1326 bnx2x_init_sge_ring_bit_mask(fp);
1328 /* Allocate SGEs and initialize the ring elements */
1329 for (i = 0, ring_prod = 0;
1330 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1332 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1333 BNX2X_ERR("was only able to allocate %d rx sges\n",
1335 BNX2X_ERR("disabling TPA for queue[%d]\n",
1337 /* Cleanup already allocated elements */
1338 bnx2x_free_rx_sge_range(bp, fp,
1340 bnx2x_free_tpa_pool(bp, fp,
1342 fp->disable_tpa = 1;
1346 ring_prod = NEXT_SGE_IDX(ring_prod);
1349 fp->rx_sge_prod = ring_prod;
1353 for_each_eth_queue(bp, j) {
1354 struct bnx2x_fastpath *fp = &bp->fp[j];
1358 /* Activate BD ring */
1360 * this will generate an interrupt (to the TSTORM);
1361 * it must only be done after the chip is initialized
1363 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1369 if (CHIP_IS_E1(bp)) {
1370 REG_WR(bp, BAR_USTRORM_INTMEM +
1371 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1372 U64_LO(fp->rx_comp_mapping));
1373 REG_WR(bp, BAR_USTRORM_INTMEM +
1374 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1375 U64_HI(fp->rx_comp_mapping));
1380 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1383 struct bnx2x *bp = fp->bp;
1385 for_each_cos_in_tx_queue(fp, cos) {
1386 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1387 unsigned pkts_compl = 0, bytes_compl = 0;
1389 u16 sw_prod = txdata->tx_pkt_prod;
1390 u16 sw_cons = txdata->tx_pkt_cons;
1392 while (sw_cons != sw_prod) {
1393 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1394 &pkts_compl, &bytes_compl);
1398 netdev_tx_reset_queue(
1399 netdev_get_tx_queue(bp->dev,
1400 txdata->txq_index));
1404 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1408 for_each_tx_queue_cnic(bp, i) {
1409 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1413 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1417 for_each_eth_queue(bp, i) {
1418 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1422 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1424 struct bnx2x *bp = fp->bp;
1427 /* ring wasn't allocated */
1428 if (fp->rx_buf_ring == NULL)
1431 for (i = 0; i < NUM_RX_BD; i++) {
1432 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1433 u8 *data = rx_buf->data;
1437 dma_unmap_single(&bp->pdev->dev,
1438 dma_unmap_addr(rx_buf, mapping),
1439 fp->rx_buf_size, DMA_FROM_DEVICE);
1441 rx_buf->data = NULL;
1442 bnx2x_frag_free(fp, data);
1446 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1450 for_each_rx_queue_cnic(bp, j) {
1451 bnx2x_free_rx_bds(&bp->fp[j]);
1455 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1459 for_each_eth_queue(bp, j) {
1460 struct bnx2x_fastpath *fp = &bp->fp[j];
1462 bnx2x_free_rx_bds(fp);
1464 if (!fp->disable_tpa)
1465 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1469 void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1471 bnx2x_free_tx_skbs_cnic(bp);
1472 bnx2x_free_rx_skbs_cnic(bp);
1475 void bnx2x_free_skbs(struct bnx2x *bp)
1477 bnx2x_free_tx_skbs(bp);
1478 bnx2x_free_rx_skbs(bp);
1481 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1483 /* load old values */
1484 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1486 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1487 /* leave all but MAX value */
1488 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1490 /* set new MAX value */
1491 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1492 & FUNC_MF_CFG_MAX_BW_MASK;
1494 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1499 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1501 * @bp: driver handle
1502 * @nvecs: number of vectors to be released
1504 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1508 if (nvecs == offset)
1511 /* VFs don't have a default SB */
1513 free_irq(bp->msix_table[offset].vector, bp->dev);
1514 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1515 bp->msix_table[offset].vector);
1519 if (CNIC_SUPPORT(bp)) {
1520 if (nvecs == offset)
1525 for_each_eth_queue(bp, i) {
1526 if (nvecs == offset)
1528 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1529 i, bp->msix_table[offset].vector);
1531 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1535 void bnx2x_free_irq(struct bnx2x *bp)
1537 if (bp->flags & USING_MSIX_FLAG &&
1538 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1539 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1541 /* vfs don't have a default status block */
1545 bnx2x_free_msix_irqs(bp, nvecs);
1547 free_irq(bp->dev->irq, bp->dev);
1551 int bnx2x_enable_msix(struct bnx2x *bp)
1553 int msix_vec = 0, i, rc;
1555 /* VFs don't have a default status block */
1557 bp->msix_table[msix_vec].entry = msix_vec;
1558 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1559 bp->msix_table[0].entry);
1563 /* Cnic requires an msix vector for itself */
1564 if (CNIC_SUPPORT(bp)) {
1565 bp->msix_table[msix_vec].entry = msix_vec;
1566 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1567 msix_vec, bp->msix_table[msix_vec].entry);
1571 /* We need separate vectors for ETH queues only (not FCoE) */
1572 for_each_eth_queue(bp, i) {
1573 bp->msix_table[msix_vec].entry = msix_vec;
1574 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1575 msix_vec, msix_vec, i);
1579 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1582 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
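/* pci_enable_msix() returns 0 on success, the number of vectors that
 * could have been allocated if fewer than requested, or a negative errno
 */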
1585 * reconfigure number of tx/rx queues according to available
1588 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1590 /* how many fewer vectors will we have? */
1590 int diff = msix_vec - rc;
1592 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1594 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1597 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1601 * decrease number of queues by number of unallocated entries
1603 bp->num_ethernet_queues -= diff;
1604 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1606 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1608 } else if (rc > 0) {
1609 /* Get by with single vector */
1610 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1612 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1617 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1618 bp->flags |= USING_SINGLE_MSIX_FLAG;
1620 BNX2X_DEV_INFO("set number of queues to 1\n");
1621 bp->num_ethernet_queues = 1;
1622 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1623 } else if (rc < 0) {
1624 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1628 bp->flags |= USING_MSIX_FLAG;
1633 /* fall back to INTx if not enough memory */
1635 bp->flags |= DISABLE_MSI_FLAG;
1640 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1642 int i, rc, offset = 0;
1644 /* no default status block for vf */
1646 rc = request_irq(bp->msix_table[offset++].vector,
1647 bnx2x_msix_sp_int, 0,
1648 bp->dev->name, bp->dev);
1650 BNX2X_ERR("request sp irq failed\n");
1655 if (CNIC_SUPPORT(bp))
1658 for_each_eth_queue(bp, i) {
1659 struct bnx2x_fastpath *fp = &bp->fp[i];
1660 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1663 rc = request_irq(bp->msix_table[offset].vector,
1664 bnx2x_msix_fp_int, 0, fp->name, fp);
1666 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1667 bp->msix_table[offset].vector, rc);
1668 bnx2x_free_msix_irqs(bp, offset);
1675 i = BNX2X_NUM_ETH_QUEUES(bp);
1677 offset = 1 + CNIC_SUPPORT(bp);
1678 netdev_info(bp->dev,
1679 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1680 bp->msix_table[0].vector,
1681 0, bp->msix_table[offset].vector,
1682 i - 1, bp->msix_table[offset + i - 1].vector);
1684 offset = CNIC_SUPPORT(bp);
1685 netdev_info(bp->dev,
1686 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1687 0, bp->msix_table[offset].vector,
1688 i - 1, bp->msix_table[offset + i - 1].vector);
1693 int bnx2x_enable_msi(struct bnx2x *bp)
1697 rc = pci_enable_msi(bp->pdev);
1699 BNX2X_DEV_INFO("MSI is not attainable\n");
1702 bp->flags |= USING_MSI_FLAG;
1707 static int bnx2x_req_irq(struct bnx2x *bp)
1709 unsigned long flags;
1712 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1715 flags = IRQF_SHARED;
1717 if (bp->flags & USING_MSIX_FLAG)
1718 irq = bp->msix_table[0].vector;
1720 irq = bp->pdev->irq;
1722 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1725 static int bnx2x_setup_irqs(struct bnx2x *bp)
1728 if (bp->flags & USING_MSIX_FLAG &&
1729 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1730 rc = bnx2x_req_msix_irqs(bp);
1734 rc = bnx2x_req_irq(bp);
1736 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1739 if (bp->flags & USING_MSI_FLAG) {
1740 bp->dev->irq = bp->pdev->irq;
1741 netdev_info(bp->dev, "using MSI IRQ %d\n",
1744 if (bp->flags & USING_MSIX_FLAG) {
1745 bp->dev->irq = bp->msix_table[0].vector;
1746 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1754 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1758 for_each_rx_queue_cnic(bp, i) {
1759 bnx2x_fp_init_lock(&bp->fp[i]);
1760 napi_enable(&bnx2x_fp(bp, i, napi));
1764 static void bnx2x_napi_enable(struct bnx2x *bp)
1768 for_each_eth_queue(bp, i) {
1769 bnx2x_fp_init_lock(&bp->fp[i]);
1770 napi_enable(&bnx2x_fp(bp, i, napi));
1774 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1779 for_each_rx_queue_cnic(bp, i) {
1780 napi_disable(&bnx2x_fp(bp, i, napi));
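/* wait until any busy-poll user releases the fastpath lock */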
1781 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1787 static void bnx2x_napi_disable(struct bnx2x *bp)
1792 for_each_eth_queue(bp, i) {
1793 napi_disable(&bnx2x_fp(bp, i, napi));
1794 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1800 void bnx2x_netif_start(struct bnx2x *bp)
1802 if (netif_running(bp->dev)) {
1803 bnx2x_napi_enable(bp);
1804 if (CNIC_LOADED(bp))
1805 bnx2x_napi_enable_cnic(bp);
1806 bnx2x_int_enable(bp);
1807 if (bp->state == BNX2X_STATE_OPEN)
1808 netif_tx_wake_all_queues(bp->dev);
1812 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1814 bnx2x_int_disable_sync(bp, disable_hw);
1815 bnx2x_napi_disable(bp);
1816 if (CNIC_LOADED(bp))
1817 bnx2x_napi_disable_cnic(bp);
1820 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1822 struct bnx2x *bp = netdev_priv(dev);
1824 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1825 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1826 u16 ether_type = ntohs(hdr->h_proto);
1828 /* Skip VLAN tag if present */
1829 if (ether_type == ETH_P_8021Q) {
1830 struct vlan_ethhdr *vhdr =
1831 (struct vlan_ethhdr *)skb->data;
1833 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1836 /* If ethertype is FCoE or FIP - use FCoE ring */
1837 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1838 return bnx2x_fcoe_tx(bp, txq_index);
1841 /* select a non-FCoE queue */
1842 return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1845 void bnx2x_set_num_queues(struct bnx2x *bp)
1848 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1850 /* override in STORAGE SD modes */
1851 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1852 bp->num_ethernet_queues = 1;
1854 /* Add special queues */
1855 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1856 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1858 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1862 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1864 * @bp: Driver handle
1866 * We currently support at most 16 Tx queues for each CoS, thus we will
1867 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1870 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1871 * index after all ETH L2 indices.
1873 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1874 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1875 * 16..31,...) with indices that are not coupled with any real Tx queue.
1877 * The proper configuration of skb->queue_mapping is handled by
1878 * bnx2x_select_queue() and __skb_tx_hash().
1880 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1881 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1883 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1887 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1888 rx = BNX2X_NUM_ETH_QUEUES(bp);
1890 /* account for fcoe queue */
1891 if (include_cnic && !NO_FCOE(bp)) {
1896 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1898 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1901 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1903 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1907 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1913 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1917 for_each_queue(bp, i) {
1918 struct bnx2x_fastpath *fp = &bp->fp[i];
1921 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1924 * Although no IP frames are expected to arrive on
1925 * this ring, we still want to add an
1926 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1929 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1932 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1933 IP_HEADER_ALIGNMENT_PADDING +
1936 BNX2X_FW_RX_ALIGN_END;
1937 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
1938 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1939 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1941 fp->rx_frag_size = 0;
1945 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1948 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1950 /* Prepare the initial contents for the indirection table if RSS is
1953 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1954 bp->rss_conf_obj.ind_table[i] =
1956 ethtool_rxfh_indir_default(i, num_eth_queues);
1959 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1960 * per-port, so if explicit configuration is needed, do it only
1963 * For 57712 and newer on the other hand it's a per-function
1966 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1969 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1972 struct bnx2x_config_rss_params params = {NULL};
1974 /* Although RSS is meaningless when there is a single HW queue we
1975 * still need it enabled in order to have HW Rx hash generated.
1977 * if (!is_eth_multi(bp))
1978 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1981 params.rss_obj = rss_obj;
1983 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
1985 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
1987 /* RSS configuration */
1988 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
1989 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
1990 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
1991 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
1992 if (rss_obj->udp_rss_v4)
1993 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
1994 if (rss_obj->udp_rss_v6)
1995 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
1998 params.rss_result_mask = MULTI_MASK;
2000 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2004 prandom_bytes(params.rss_key, sizeof(params.rss_key));
2005 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
2008 return bnx2x_config_rss(bp, ¶ms);
2011 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2013 struct bnx2x_func_state_params func_params = {NULL};
2015 /* Prepare parameters for function state transitions */
2016 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2018 func_params.f_obj = &bp->func_obj;
2019 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2021 func_params.params.hw_init.load_phase = load_code;
2023 return bnx2x_func_state_change(bp, &func_params);
2027 * Cleans the objects that have internal lists without sending
2028 * ramrods. Should be run when interrupts are disabled.
2030 void bnx2x_squeeze_objects(struct bnx2x *bp)
2033 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2034 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2035 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2037 /***************** Cleanup MACs' object first *************************/
2039 /* Wait for completion of the requested operations */
2040 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2041 /* Perform a dry cleanup */
2042 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2044 /* Clean ETH primary MAC */
2045 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2046 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2049 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2051 /* Cleanup UC list */
2053 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2054 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2057 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2059 /***************** Now clean mcast object *****************************/
2060 rparam.mcast_obj = &bp->mcast_obj;
2061 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2063 /* Add a DEL command... - Since we're doing a driver cleanup only,
2064 * we take a lock surrounding both the initial send and the CONTs,
2065 * as we don't want a true completion to disrupt us in the middle.
2067 netif_addr_lock_bh(bp->dev);
2068 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2070 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2073 /* ...and wait until all pending commands are cleared */
2074 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2077 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2079 netif_addr_unlock_bh(bp->dev);
2083 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2085 netif_addr_unlock_bh(bp->dev);
2088 #ifndef BNX2X_STOP_ON_ERROR
2089 #define LOAD_ERROR_EXIT(bp, label) \
2091 (bp)->state = BNX2X_STATE_ERROR; \
2095 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2097 bp->cnic_loaded = false; \
2100 #else /*BNX2X_STOP_ON_ERROR*/
2101 #define LOAD_ERROR_EXIT(bp, label) \
2103 (bp)->state = BNX2X_STATE_ERROR; \
2107 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2109 bp->cnic_loaded = false; \
2113 #endif /*BNX2X_STOP_ON_ERROR*/
2115 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2117 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2118 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2122 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2124 int num_groups, vf_headroom = 0;
2125 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2127 /* number of queues for statistics is number of eth queues + FCoE */
2128 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2130 /* Total number of FW statistics requests =
2131 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2132 * and fcoe l2 queue) stats + num of queues (which includes another 1
2133 * for fcoe l2 queue if applicable)
2135 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2137 /* vf stats appear in the request list, but their data is allocated by
2138 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2139 * it is used to determine where to place the vf stats queries in the
2143 vf_headroom = bnx2x_vf_headroom(bp);
2145 /* Request is built from stats_query_header and an array of
2146 * stats_query_cmd_group each of which contains
2147 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2148 * configured in the stats_query_header.
2151 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2152 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2155 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2156 bp->fw_stats_num, vf_headroom, num_groups);
2157 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2158 num_groups * sizeof(struct stats_query_cmd_group);
2160 /* Data for statistics requests + stats_counter
2161 * stats_counter holds per-STORM counters that are incremented
2162 * when STORM has finished with the current request.
2163 * Memory for FCoE offloaded statistics is counted anyway,
2164 * even if the statistics will not be sent.
2165 * VF stats are not accounted for here as the data of VF stats is stored
2166 * in memory allocated by the VF, not here.
2168 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2169 sizeof(struct per_pf_stats) +
2170 sizeof(struct fcoe_statistics_params) +
2171 sizeof(struct per_queue_stats) * num_queue_stats +
2172 sizeof(struct stats_counter);
2174 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2175 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2178 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2179 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2180 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2181 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2182 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2183 bp->fw_stats_req_sz;
2185 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2186 U64_HI(bp->fw_stats_req_mapping),
2187 U64_LO(bp->fw_stats_req_mapping));
2188 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2189 U64_HI(bp->fw_stats_data_mapping),
2190 U64_LO(bp->fw_stats_data_mapping));
2194 bnx2x_free_fw_stats_mem(bp);
2195 BNX2X_ERR("Can't allocate FW stats memory\n");
2199 /* send load request to mcp and analyze response */
2200 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2206 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2207 DRV_MSG_SEQ_NUMBER_MASK);
2208 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2210 /* Get current FW pulse sequence */
2211 bp->fw_drv_pulse_wr_seq =
2212 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2213 DRV_PULSE_SEQ_MASK);
2214 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2216 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2218 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2219 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2222 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2224 /* if mcp fails to respond we must abort */
2225 if (!(*load_code)) {
2226 BNX2X_ERR("MCP response failure, aborting\n");
2230 /* If mcp refused (e.g. other port is in diagnostic mode) we
2233 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2234 BNX2X_ERR("MCP refused load request, aborting\n");
2240 /* check whether another PF has already loaded FW to chip. In
2241 * virtualized environments a pf from another VM may have already
2242 * initialized the device including loading FW
2244 int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2246 /* is another pf loaded on this engine? */
2247 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2248 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2249 /* build my FW version dword */
2250 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2251 (BCM_5710_FW_MINOR_VERSION << 8) +
2252 (BCM_5710_FW_REVISION_VERSION << 16) +
2253 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2255 /* read loaded FW from chip */
2256 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2258 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2261 /* abort nic load if version mismatch */
2262 if (my_fw != loaded_fw) {
2263 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2271 /* returns the "mcp load_code" according to global load_count array */
2272 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2274 int path = BP_PATH(bp);
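/* load_count[path][0] counts all loads on this path and [1 + port]
 * counts per-port loads: the first loader on the path does COMMON
 * init, the first on a port does PORT init
 */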
2276 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2277 path, load_count[path][0], load_count[path][1],
2278 load_count[path][2]);
2279 load_count[path][0]++;
2280 load_count[path][1 + port]++;
2281 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2282 path, load_count[path][0], load_count[path][1],
2283 load_count[path][2]);
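/* Without an MCP the load_code is derived from the local counters: the
 * first function on the path loads as COMMON, the first on each port as
 * PORT, and any later function as a FUNCTION-only load.
 */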
2284 if (load_count[path][0] == 1)
2285 return FW_MSG_CODE_DRV_LOAD_COMMON;
2286 else if (load_count[path][1 + port] == 1)
2287 return FW_MSG_CODE_DRV_LOAD_PORT;
2289 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2292 /* mark PMF if applicable */
2293 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2295 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2296 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2297 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2299 /* We need the barrier to ensure the ordering between the
2300 * writing to bp->port.pmf here and reading it from the
2301 * bnx2x_periodic_task().
2308 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2311 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2313 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2314 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2315 (bp->common.shmem2_base)) {
2316 if (SHMEM2_HAS(bp, dcc_support))
2317 SHMEM2_WR(bp, dcc_support,
2318 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2319 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2320 if (SHMEM2_HAS(bp, afex_driver_support))
2321 SHMEM2_WR(bp, afex_driver_support,
2322 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2325 /* Set AFEX default VLAN tag to an invalid value */
2326 bp->afex_def_vlan_tag = -1;
2330 * bnx2x_bz_fp - zero content of the fastpath structure.
2332 * @bp: driver handle
2333 * @index: fastpath index to be zeroed
2335 * Makes sure the contents of bp->fp[index].napi are kept
2338 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2340 struct bnx2x_fastpath *fp = &bp->fp[index];
2342 struct napi_struct orig_napi = fp->napi;
2343 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2345 /* bzero bnx2x_fastpath contents */
2347 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2348 sizeof(struct bnx2x_agg_info));
2349 memset(fp, 0, sizeof(*fp));
2351 /* Restore the NAPI object as it has been already initialized */
2352 fp->napi = orig_napi;
2353 fp->tpa_info = orig_tpa_info;
2357 fp->max_cos = bp->max_cos;
2359 /* Special queues support only one CoS */
2362 /* Init txdata pointers */
2364 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2366 for_each_cos_in_tx_queue(fp, cos)
2367 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2368 BNX2X_NUM_ETH_QUEUES(bp) + index];
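/* Illustrative layout: with, say, 4 ETH queues and max_cos == 3,
 * fastpath index 1 would use bnx2x_txq[1], bnx2x_txq[5] and
 * bnx2x_txq[9] (i.e. cos * num-of-ETH-queues + index).
 */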
2370 /* set the tpa flag for each queue. The tpa flag determines the queue
2371 * minimum size, so it must be set prior to queue memory allocation
2373 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2374 (bp->flags & GRO_ENABLE_FLAG &&
2375 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2376 if (bp->flags & TPA_ENABLE_FLAG)
2377 fp->mode = TPA_MODE_LRO;
2378 else if (bp->flags & GRO_ENABLE_FLAG)
2379 fp->mode = TPA_MODE_GRO;
2381 /* We don't want TPA on an FCoE L2 ring */
2383 fp->disable_tpa = 1;
2386 int bnx2x_load_cnic(struct bnx2x *bp)
2388 int i, rc, port = BP_PORT(bp);
2390 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2392 mutex_init(&bp->cnic_mutex);
2395 rc = bnx2x_alloc_mem_cnic(bp);
2397 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2398 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2402 rc = bnx2x_alloc_fp_mem_cnic(bp);
2404 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2405 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2408 /* Update the number of queues with the cnic queues */
2409 rc = bnx2x_set_real_num_queues(bp, 1);
2411 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2412 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2415 /* Add all CNIC NAPI objects */
2416 bnx2x_add_all_napi_cnic(bp);
2417 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2418 bnx2x_napi_enable_cnic(bp);
2420 rc = bnx2x_init_hw_func_cnic(bp);
2422 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2424 bnx2x_nic_init_cnic(bp);
2427 /* Enable Timer scan */
2428 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2430 /* setup cnic queues */
2431 for_each_cnic_queue(bp, i) {
2432 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2434 BNX2X_ERR("Queue setup failed\n");
2435 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2440 /* Initialize Rx filter. */
2441 bnx2x_set_rx_mode_inner(bp);
2443 /* re-read iscsi info */
2444 bnx2x_get_iscsi_info(bp);
2445 bnx2x_setup_cnic_irq_info(bp);
2446 bnx2x_setup_cnic_info(bp);
2447 bp->cnic_loaded = true;
2448 if (bp->state == BNX2X_STATE_OPEN)
2449 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2451 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2455 #ifndef BNX2X_STOP_ON_ERROR
2457 /* Disable Timer scan */
2458 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2461 bnx2x_napi_disable_cnic(bp);
2462 /* Update the number of queues without the cnic queues */
2463 rc = bnx2x_set_real_num_queues(bp, 0);
2465 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2467 BNX2X_ERR("CNIC-related load failed\n");
2468 bnx2x_free_fp_mem_cnic(bp);
2469 bnx2x_free_mem_cnic(bp);
2471 #endif /* ! BNX2X_STOP_ON_ERROR */
2474 /* must be called with rtnl_lock */
2475 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2477 int port = BP_PORT(bp);
2478 int i, rc = 0, load_code = 0;
2480 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2482 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2484 #ifdef BNX2X_STOP_ON_ERROR
2485 if (unlikely(bp->panic)) {
2486 BNX2X_ERR("Can't load NIC when there is panic\n");
2491 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2493 /* zero the structure w/o any lock, before SP handler is initialized */
2494 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2495 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2496 &bp->last_reported_link.link_report_flags);
2499 /* must be called before memory allocation and HW init */
2500 bnx2x_ilt_set_info(bp);
2503 * Zero the fastpath structures while preserving invariants that are
2504 * allocated only once: the napi struct, fp index, max_cos and bp pointer.
2505 * Also set fp->disable_tpa and txdata_ptr.
2507 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2508 for_each_queue(bp, i)
2510 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2511 bp->num_cnic_queues) *
2512 sizeof(struct bnx2x_fp_txdata));
2514 bp->fcoe_init = false;
2516 /* Set the receive queues buffer size */
2517 bnx2x_set_rx_buf_size(bp);
2520 rc = bnx2x_alloc_mem(bp);
2522 BNX2X_ERR("Unable to allocate bp memory\n");
2527 /* Allocate memory for FW statistics */
2528 if (bnx2x_alloc_fw_stats_mem(bp))
2529 LOAD_ERROR_EXIT(bp, load_error0);
2531 /* must be done after alloc mem, since it self-adjusts to the amount
2532 * of memory available for the RSS queues
2534 rc = bnx2x_alloc_fp_mem(bp);
2536 BNX2X_ERR("Unable to allocate memory for fps\n");
2537 LOAD_ERROR_EXIT(bp, load_error0);
2540 /* request pf to initialize status blocks */
2542 rc = bnx2x_vfpf_init(bp);
2544 LOAD_ERROR_EXIT(bp, load_error0);
2547 /* Since bnx2x_alloc_mem() may update bp->num_queues,
2548 * bnx2x_set_real_num_queues() must always come after it.
2549 * At this stage the cnic queues are not counted.
2551 rc = bnx2x_set_real_num_queues(bp, 0);
2553 BNX2X_ERR("Unable to set real_num_queues\n");
2554 LOAD_ERROR_EXIT(bp, load_error0);
2557 /* configure multi cos mappings in kernel.
2558 * this configuration may be overridden by a multi class queue
2559 * discipline or by a dcbx negotiation result.
2561 bnx2x_setup_tc(bp->dev, bp->max_cos);
2563 /* Add all NAPI objects */
2564 bnx2x_add_all_napi(bp);
2565 DP(NETIF_MSG_IFUP, "napi added\n");
2566 bnx2x_napi_enable(bp);
2569 /* set pf load just before approaching the MCP */
2570 bnx2x_set_pf_load(bp);
2572 /* if mcp exists send load request and analyze response */
2573 if (!BP_NOMCP(bp)) {
2574 /* attempt to load pf */
2575 rc = bnx2x_nic_load_request(bp, &load_code);
2577 LOAD_ERROR_EXIT(bp, load_error1);
2579 /* what did mcp say? */
2580 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2582 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2583 LOAD_ERROR_EXIT(bp, load_error2);
2586 load_code = bnx2x_nic_load_no_mcp(bp, port);
2589 /* mark pmf if applicable */
2590 bnx2x_nic_load_pmf(bp, load_code);
2592 /* Init Function state controlling object */
2593 bnx2x__init_func_obj(bp);
2596 rc = bnx2x_init_hw(bp, load_code);
2598 BNX2X_ERR("HW init failed, aborting\n");
2599 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2600 LOAD_ERROR_EXIT(bp, load_error2);
2604 bnx2x_pre_irq_nic_init(bp);
2606 /* Connect to IRQs */
2607 rc = bnx2x_setup_irqs(bp);
2609 BNX2X_ERR("setup irqs failed\n");
2611 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2612 LOAD_ERROR_EXIT(bp, load_error2);
2615 /* Init per-function objects */
2617 /* Setup NIC internals and enable interrupts */
2618 bnx2x_post_irq_nic_init(bp, load_code);
2620 bnx2x_init_bp_objs(bp);
2621 bnx2x_iov_nic_init(bp);
2623 /* Set AFEX default VLAN tag to an invalid value */
2624 bp->afex_def_vlan_tag = -1;
2625 bnx2x_nic_load_afex_dcc(bp, load_code);
2626 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2627 rc = bnx2x_func_start(bp);
2629 BNX2X_ERR("Function start failed!\n");
2630 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2632 LOAD_ERROR_EXIT(bp, load_error3);
2635 /* Send LOAD_DONE command to MCP */
2636 if (!BP_NOMCP(bp)) {
2637 load_code = bnx2x_fw_command(bp,
2638 DRV_MSG_CODE_LOAD_DONE, 0);
2640 BNX2X_ERR("MCP response failure, aborting\n");
2642 LOAD_ERROR_EXIT(bp, load_error3);
2646 /* initialize FW coalescing state machines in RAM */
2647 bnx2x_update_coalesce(bp);
2649 /* setup the leading queue */
2650 rc = bnx2x_setup_leading(bp);
2652 BNX2X_ERR("Setup leading failed!\n");
2653 LOAD_ERROR_EXIT(bp, load_error3);
2656 /* set up the rest of the queues */
2657 for_each_nondefault_eth_queue(bp, i) {
2658 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2660 BNX2X_ERR("Queue setup failed\n");
2661 LOAD_ERROR_EXIT(bp, load_error3);
2666 rc = bnx2x_init_rss_pf(bp);
2668 BNX2X_ERR("PF RSS init failed\n");
2669 LOAD_ERROR_EXIT(bp, load_error3);
2673 for_each_eth_queue(bp, i) {
2674 rc = bnx2x_vfpf_setup_q(bp, i);
2676 BNX2X_ERR("Queue setup failed\n");
2677 LOAD_ERROR_EXIT(bp, load_error3);
2682 /* Now that the clients are configured we are ready to work */
2683 bp->state = BNX2X_STATE_OPEN;
2685 /* Configure a ucast MAC */
2687 rc = bnx2x_set_eth_mac(bp, true);
2689 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2692 BNX2X_ERR("Setting Ethernet MAC failed\n");
2693 LOAD_ERROR_EXIT(bp, load_error3);
2696 if (IS_PF(bp) && bp->pending_max) {
2697 bnx2x_update_max_mf_config(bp, bp->pending_max);
2698 bp->pending_max = 0;
2702 rc = bnx2x_initial_phy_init(bp, load_mode);
2704 LOAD_ERROR_EXIT(bp, load_error3);
2706 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2708 /* Start fast path */
2710 /* Initialize Rx filter. */
2711 bnx2x_set_rx_mode_inner(bp);
2714 switch (load_mode) {
2716 /* Tx queues should only be re-enabled */
2717 netif_tx_wake_all_queues(bp->dev);
2721 netif_tx_start_all_queues(bp->dev);
2722 smp_mb__after_clear_bit();
2726 case LOAD_LOOPBACK_EXT:
2727 bp->state = BNX2X_STATE_DIAG;
2735 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2737 bnx2x__link_status_update(bp);
2739 /* start the timer */
2740 mod_timer(&bp->timer, jiffies + bp->current_interval);
2742 if (CNIC_ENABLED(bp))
2743 bnx2x_load_cnic(bp);
2745 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2746 /* mark driver is loaded in shmem2 */
2748 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2749 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2750 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2751 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2754 /* Wait for all pending SP commands to complete */
2755 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2756 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2757 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2761 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2762 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2763 bnx2x_dcbx_init(bp, false);
2765 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2769 #ifndef BNX2X_STOP_ON_ERROR
2772 bnx2x_int_disable_sync(bp, 1);
2774 /* Clean queueable objects */
2775 bnx2x_squeeze_objects(bp);
2778 /* Free SKBs, SGEs, TPA pool and driver internals */
2779 bnx2x_free_skbs(bp);
2780 for_each_rx_queue(bp, i)
2781 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2786 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2787 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2788 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2793 bnx2x_napi_disable(bp);
2794 bnx2x_del_all_napi(bp);
2796 /* clear pf_load status, as it was already set */
2798 bnx2x_clear_pf_load(bp);
2800 bnx2x_free_fp_mem(bp);
2801 bnx2x_free_fw_stats_mem(bp);
2805 #endif /* ! BNX2X_STOP_ON_ERROR */
2808 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2812 /* Wait until tx fastpath tasks complete */
2813 for_each_tx_queue(bp, i) {
2814 struct bnx2x_fastpath *fp = &bp->fp[i];
2816 for_each_cos_in_tx_queue(fp, cos)
2817 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2824 /* must be called with rtnl_lock */
2825 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2828 bool global = false;
2830 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2832 /* mark driver is unloaded in shmem2 */
2833 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2835 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2836 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2837 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2840 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2841 (bp->state == BNX2X_STATE_CLOSED ||
2842 bp->state == BNX2X_STATE_ERROR)) {
2843 /* We can get here if the driver has been unloaded
2844 * during parity error recovery and is either waiting for a
2845 * leader to complete or for other functions to unload and
2846 * then ifdown has been issued. In this case we want to
2847 * unload and let other functions complete the recovery
2850 bp->recovery_state = BNX2X_RECOVERY_DONE;
2852 bnx2x_release_leader_lock(bp);
2855 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2856 BNX2X_ERR("Can't unload in closed or error state\n");
2860 /* Nothing to do during unload if the previous bnx2x_nic_load()
2861 * did not complete successfully - all resources are released.
2863 * We can get here only after an unsuccessful ndo_* callback, during which
2864 * the dev->IFF_UP flag is still on.
2866 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2869 /* It's important to set bp->state to a value different from
2870 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2871 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2873 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2876 /* indicate to VFs that the PF is going down */
2877 bnx2x_iov_channel_down(bp);
2879 if (CNIC_LOADED(bp))
2880 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2883 bnx2x_tx_disable(bp);
2884 netdev_reset_tc(bp->dev);
2886 bp->rx_mode = BNX2X_RX_MODE_NONE;
2888 del_timer_sync(&bp->timer);
2891 /* Set ALWAYS_ALIVE bit in shmem */
2892 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2893 bnx2x_drv_pulse(bp);
2894 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2895 bnx2x_save_statistics(bp);
2898 /* wait till consumers catch up with producers in all queues */
2899 bnx2x_drain_tx_queues(bp);
2901 /* if VF, indicate to the PF that this function is going down (the PF will
2902 * delete the sp elements and clear the initializations)
2905 bnx2x_vfpf_close_vf(bp);
2906 else if (unload_mode != UNLOAD_RECOVERY)
2907 /* if this is a normal/close unload, we need to clean up the chip */
2908 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2910 /* Send the UNLOAD_REQUEST to the MCP */
2911 bnx2x_send_unload_req(bp, unload_mode);
2913 /* Prevent transactions to the host from the functions on the
2914 * engine that doesn't reset global blocks in case of a global
2915 * attention, once the global blocks are reset and the gates are opened
2916 * (the engine whose leader will perform the recovery last)
2919 if (!CHIP_IS_E1x(bp))
2920 bnx2x_pf_disable(bp);
2922 /* Disable HW interrupts, NAPI */
2923 bnx2x_netif_stop(bp, 1);
2924 /* Delete all NAPI objects */
2925 bnx2x_del_all_napi(bp);
2926 if (CNIC_LOADED(bp))
2927 bnx2x_del_all_napi_cnic(bp);
2931 /* Report UNLOAD_DONE to MCP */
2932 bnx2x_send_unload_done(bp, false);
2936 * At this stage no more interrupts will arrive so we may safely clean
2937 * the queueable objects here in case they failed to get cleaned so far.
2940 bnx2x_squeeze_objects(bp);
2942 /* There should be no more pending SP commands at this stage */
2947 /* Free SKBs, SGEs, TPA pool and driver internals */
2948 bnx2x_free_skbs(bp);
2949 if (CNIC_LOADED(bp))
2950 bnx2x_free_skbs_cnic(bp);
2951 for_each_rx_queue(bp, i)
2952 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2954 bnx2x_free_fp_mem(bp);
2955 if (CNIC_LOADED(bp))
2956 bnx2x_free_fp_mem_cnic(bp);
2959 if (CNIC_LOADED(bp))
2960 bnx2x_free_mem_cnic(bp);
2963 bp->state = BNX2X_STATE_CLOSED;
2964 bp->cnic_loaded = false;
2966 /* Check if there are pending parity attentions. If there are - set
2967 * RECOVERY_IN_PROGRESS.
2969 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
2970 bnx2x_set_reset_in_progress(bp);
2972 /* Set RESET_IS_GLOBAL if needed */
2974 bnx2x_set_reset_global(bp);
2977 /* The last driver must disable the "close the gate" functionality if there
2978 * is no parity attention or "process kill" pending.
2981 !bnx2x_clear_pf_load(bp) &&
2982 bnx2x_reset_is_done(bp, BP_PATH(bp)))
2983 bnx2x_disable_close_the_gate(bp);
2985 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2990 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2994 /* If there is no power capability, silently succeed */
2996 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3000 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3004 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3005 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3006 PCI_PM_CTRL_PME_STATUS));
3008 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3009 /* delay required during transition out of D3hot */
3014 /* If there are other clients above, don't
3015 * shut down the power */
3016 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3018 /* Don't shut down the power for emulation and FPGA */
3019 if (CHIP_REV_IS_SLOW(bp))
3022 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3026 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3028 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3031 /* No more memory access after this point until
3032 * device is brought back to D0.
3037 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3044 * net_device service functions
3046 int bnx2x_poll(struct napi_struct *napi, int budget)
3050 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3052 struct bnx2x *bp = fp->bp;
3055 #ifdef BNX2X_STOP_ON_ERROR
3056 if (unlikely(bp->panic)) {
3057 napi_complete(napi);
3061 if (!bnx2x_fp_lock_napi(fp))
3064 for_each_cos_in_tx_queue(fp, cos)
3065 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3066 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3068 if (bnx2x_has_rx_work(fp)) {
3069 work_done += bnx2x_rx_int(fp, budget - work_done);
3071 /* must not complete if we consumed full budget */
3072 if (work_done >= budget) {
3073 bnx2x_fp_unlock_napi(fp);
3078 /* Fall out from the NAPI loop if needed */
3079 if (!bnx2x_fp_unlock_napi(fp) &&
3080 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3082 /* No need to update SB for FCoE L2 ring as long as
3083 * it's connected to the default SB and the SB
3084 * has been updated when NAPI was scheduled.
3086 if (IS_FCOE_FP(fp)) {
3087 napi_complete(napi);
3090 bnx2x_update_fpsb_idx(fp);
3091 /* bnx2x_has_rx_work() reads the status block,
3092 * thus we need to ensure that status block indices
3093 * have been actually read (bnx2x_update_fpsb_idx)
3094 * prior to this check (bnx2x_has_rx_work) so that
3095 * we won't write the "newer" value of the status block
3096 * to IGU (if there was a DMA right after
3097 * bnx2x_has_rx_work and if there is no rmb, the memory
3098 * reading (bnx2x_update_fpsb_idx) may be postponed
3099 * to right before bnx2x_ack_sb). In this case there
3100 * will never be another interrupt until there is
3101 * another update of the status block, while there
3102 * is still unhandled work.
3106 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3107 napi_complete(napi);
3108 /* Re-enable interrupts */
3109 DP(NETIF_MSG_RX_STATUS,
3110 "Update index to %d\n", fp->fp_hc_idx);
3111 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3112 le16_to_cpu(fp->fp_hc_idx),
3122 #ifdef CONFIG_NET_RX_BUSY_POLL
3123 /* must be called with local_bh_disable()d */
3124 int bnx2x_low_latency_recv(struct napi_struct *napi)
3126 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3128 struct bnx2x *bp = fp->bp;
3131 if ((bp->state == BNX2X_STATE_CLOSED) ||
3132 (bp->state == BNX2X_STATE_ERROR) ||
3133 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3134 return LL_FLUSH_FAILED;
3136 if (!bnx2x_fp_lock_poll(fp))
3137 return LL_FLUSH_BUSY;
3139 if (bnx2x_has_rx_work(fp))
3140 found = bnx2x_rx_int(fp, 4);
3142 bnx2x_fp_unlock_poll(fp);
3148 /* we split the first BD into headers and data BDs
3149 * to ease the pain of our fellow microcode engineers
3150 * we use one mapping for both BDs
3152 static u16 bnx2x_tx_split(struct bnx2x *bp,
3153 struct bnx2x_fp_txdata *txdata,
3154 struct sw_tx_bd *tx_buf,
3155 struct eth_tx_start_bd **tx_bd, u16 hlen,
3158 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3159 struct eth_tx_bd *d_tx_bd;
3161 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3163 /* first fix first BD */
3164 h_tx_bd->nbytes = cpu_to_le16(hlen);
3166 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3167 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3169 /* now get a new data BD
3170 * (after the pbd) and fill it */
3171 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3172 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3174 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3175 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
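/* Both BDs deliberately share the original DMA mapping: the header BD
 * now covers bytes [0, hlen) and the new data BD covers [hlen, old_len)
 * of the same buffer, so nothing needs to be remapped.
 */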
3177 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3178 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3179 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3181 /* this marks the BD as one that has no individual mapping */
3182 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3184 DP(NETIF_MSG_TX_QUEUED,
3185 "TSO split data size is %d (%x:%x)\n",
3186 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3189 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3194 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3195 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
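/* bnx2x_csum_fix - sketch of the idea: adjust the stack's partial checksum
 * when the HW checksum start offset differs from where the stack began
 * summing. For a positive fix, the sum of the 'fix' bytes preceding the
 * transport header is subtracted out; for a negative fix, the first -fix
 * bytes at the transport header are summed back in. The result is folded
 * and byte-swapped for the parsing BD.
 */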
3196 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3198 __sum16 tsum = (__force __sum16) csum;
3201 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3202 csum_partial(t_header - fix, fix, 0)));
3205 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3206 csum_partial(t_header, -fix, 0)));
3208 return bswab16(tsum);
3211 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3217 if (skb->ip_summed != CHECKSUM_PARTIAL)
3220 protocol = vlan_get_protocol(skb);
3221 if (protocol == htons(ETH_P_IPV6)) {
3223 prot = ipv6_hdr(skb)->nexthdr;
3226 prot = ip_hdr(skb)->protocol;
3229 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3230 if (inner_ip_hdr(skb)->version == 6) {
3231 rc |= XMIT_CSUM_ENC_V6;
3232 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3233 rc |= XMIT_CSUM_TCP;
3235 rc |= XMIT_CSUM_ENC_V4;
3236 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3237 rc |= XMIT_CSUM_TCP;
3240 if (prot == IPPROTO_TCP)
3241 rc |= XMIT_CSUM_TCP;
3243 if (skb_is_gso_v6(skb)) {
3244 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3245 if (rc & XMIT_CSUM_ENC)
3246 rc |= XMIT_GSO_ENC_V6;
3247 } else if (skb_is_gso(skb)) {
3248 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3249 if (rc & XMIT_CSUM_ENC)
3250 rc |= XMIT_GSO_ENC_V4;
3256 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3257 /* Check if the packet requires linearization (packet is too fragmented).
3258 * No need to check fragmentation if page size > 8K (there will be no
3259 * violation of the FW restrictions) */
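/* Worked example of the window check below (purely illustrative): if
 * MAX_FETCH_BD were 13, wnd_size would be 10, so every 10 consecutive
 * BDs (linear part plus frags) must together hold at least gso_size
 * bytes; otherwise the skb is linearized before transmission.
 */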
3260 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3265 int first_bd_sz = 0;
3267 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3268 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3270 if (xmit_type & XMIT_GSO) {
3271 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3272 /* Check if LSO packet needs to be copied:
3273 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3274 int wnd_size = MAX_FETCH_BD - 3;
3275 /* Number of windows to check */
3276 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3281 /* Headers length */
3282 hlen = (int)(skb_transport_header(skb) - skb->data) +
3285 /* Amount of data (w/o headers) on linear part of SKB*/
3286 first_bd_sz = skb_headlen(skb) - hlen;
3288 wnd_sum = first_bd_sz;
3290 /* Calculate the first sum - it's special */
3291 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3293 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3295 /* If there was data on linear skb data - check it */
3296 if (first_bd_sz > 0) {
3297 if (unlikely(wnd_sum < lso_mss)) {
3302 wnd_sum -= first_bd_sz;
3305 /* Others are easier: run through the frag list and
3306 check all windows */
3307 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3309 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3311 if (unlikely(wnd_sum < lso_mss)) {
3316 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3319 /* in the non-LSO case, a packet that is too fragmented should always
3326 if (unlikely(to_copy))
3327 DP(NETIF_MSG_TX_QUEUED,
3328 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3329 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3330 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3336 static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3339 struct ipv6hdr *ipv6;
3341 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3342 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3343 ETH_TX_PARSE_BD_E2_LSO_MSS;
3345 if (xmit_type & XMIT_GSO_ENC_V6)
3346 ipv6 = inner_ipv6_hdr(skb);
3347 else if (xmit_type & XMIT_GSO_V6)
3348 ipv6 = ipv6_hdr(skb);
3352 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3353 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3357 * bnx2x_set_pbd_gso - update PBD in GSO case.
3361 * @xmit_type: xmit flags
3363 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3364 struct eth_tx_parse_bd_e1x *pbd,
3365 struct eth_tx_start_bd *tx_start_bd,
3368 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3369 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3370 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3372 if (xmit_type & XMIT_GSO_V4) {
3373 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3374 pbd->tcp_pseudo_csum =
3375 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3377 0, IPPROTO_TCP, 0));
3379 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3380 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3382 pbd->tcp_pseudo_csum =
3383 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3384 &ipv6_hdr(skb)->daddr,
3385 0, IPPROTO_TCP, 0));
3389 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3393 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3395 * @bp: driver handle
3397 * @parsing_data: data to be updated
3398 * @xmit_type: xmit flags
3400 * 57712/578xx related, when skb has encapsulation
3402 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3403 u32 *parsing_data, u32 xmit_type)
3406 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3407 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3408 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3410 if (xmit_type & XMIT_CSUM_TCP) {
3411 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3412 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3413 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3415 return skb_inner_transport_header(skb) +
3416 inner_tcp_hdrlen(skb) - skb->data;
3419 /* We support checksum offload for TCP and UDP only.
3420 * No need to pass the UDP header length - it's a constant.
3422 return skb_inner_transport_header(skb) +
3423 sizeof(struct udphdr) - skb->data;
3427 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3429 * @bp: driver handle
3431 * @parsing_data: data to be updated
3432 * @xmit_type: xmit flags
3434 * 57712/578xx related
3436 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3437 u32 *parsing_data, u32 xmit_type)
3440 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3441 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3442 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3444 if (xmit_type & XMIT_CSUM_TCP) {
3445 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3446 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3447 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3449 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3451 /* We support checksum offload for TCP and UDP only.
3452 * No need to pass the UDP header length - it's a constant.
3454 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3457 /* set FW indication according to inner or outer protocols if tunneled */
3458 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3459 struct eth_tx_start_bd *tx_start_bd,
3462 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3464 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3465 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3467 if (!(xmit_type & XMIT_CSUM_TCP))
3468 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3472 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3474 * @bp: driver handle
3476 * @pbd: parse BD to be updated
3477 * @xmit_type: xmit flags
3479 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3480 struct eth_tx_parse_bd_e1x *pbd,
3483 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
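/* Offsets and lengths in the E1x parsing BD are expressed in 16-bit
 * words, hence the >> 1; e.g. a plain 14-byte Ethernet header gives
 * hlen == 7 words here (illustrative).
 */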
3485 /* for now NS flag is not used in Linux */
3488 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3489 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3491 pbd->ip_hlen_w = (skb_transport_header(skb) -
3492 skb_network_header(skb)) >> 1;
3494 hlen += pbd->ip_hlen_w;
3496 /* We support checksum offload for TCP and UDP only */
3497 if (xmit_type & XMIT_CSUM_TCP)
3498 hlen += tcp_hdrlen(skb) / 2;
3500 hlen += sizeof(struct udphdr) / 2;
3502 pbd->total_hlen_w = cpu_to_le16(hlen);
3505 if (xmit_type & XMIT_CSUM_TCP) {
3506 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3509 s8 fix = SKB_CS_OFF(skb); /* signed! */
3511 DP(NETIF_MSG_TX_QUEUED,
3512 "hlen %d fix %d csum before fix %x\n",
3513 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3515 /* HW bug: fixup the CSUM */
3516 pbd->tcp_pseudo_csum =
3517 bnx2x_csum_fix(skb_transport_header(skb),
3520 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3521 pbd->tcp_pseudo_csum);
3527 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3528 struct eth_tx_parse_bd_e2 *pbd_e2,
3529 struct eth_tx_parse_2nd_bd *pbd2,
3534 u8 outerip_off, outerip_len = 0;
3536 /* from outer IP to transport */
3537 hlen_w = (skb_inner_transport_header(skb) -
3538 skb_network_header(skb)) >> 1;
3541 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3543 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3545 /* outer IP header info */
3546 if (xmit_type & XMIT_CSUM_V4) {
3547 struct iphdr *iph = ip_hdr(skb);
3548 u32 csum = (__force u32)(~iph->check) -
3549 (__force u32)iph->tot_len -
3550 (__force u32)iph->frag_off;
3552 pbd2->fw_ip_csum_wo_len_flags_frag =
3553 bswab16(csum_fold((__force __wsum)csum));
3555 pbd2->fw_ip_hdr_to_payload_w =
3556 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3559 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3561 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3563 if (xmit_type & XMIT_GSO_V4) {
3564 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3566 pbd_e2->data.tunnel_data.pseudo_csum =
3567 bswab16(~csum_tcpudp_magic(
3568 inner_ip_hdr(skb)->saddr,
3569 inner_ip_hdr(skb)->daddr,
3570 0, IPPROTO_TCP, 0));
3572 outerip_len = ip_hdr(skb)->ihl << 1;
3574 pbd_e2->data.tunnel_data.pseudo_csum =
3575 bswab16(~csum_ipv6_magic(
3576 &inner_ipv6_hdr(skb)->saddr,
3577 &inner_ipv6_hdr(skb)->daddr,
3578 0, IPPROTO_TCP, 0));
3581 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3585 (!!(xmit_type & XMIT_CSUM_V6) <<
3586 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3588 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3589 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3590 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3592 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3593 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3594 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3598 /* called with netif_tx_lock
3599 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3600 * netif_wake_queue()
3602 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3604 struct bnx2x *bp = netdev_priv(dev);
3606 struct netdev_queue *txq;
3607 struct bnx2x_fp_txdata *txdata;
3608 struct sw_tx_bd *tx_buf;
3609 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3610 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3611 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3612 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3613 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3614 u32 pbd_e2_parsing_data = 0;
3615 u16 pkt_prod, bd_prod;
3618 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3621 __le16 pkt_size = 0;
3623 u8 mac_type = UNICAST_ADDRESS;
3625 #ifdef BNX2X_STOP_ON_ERROR
3626 if (unlikely(bp->panic))
3627 return NETDEV_TX_BUSY;
3630 txq_index = skb_get_queue_mapping(skb);
3631 txq = netdev_get_tx_queue(dev, txq_index);
3633 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3635 txdata = &bp->bnx2x_txq[txq_index];
3637 /* enable this debug print to view the transmission queue being used
3638 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3639 txq_index, fp_index, txdata_index); */
3641 /* enable this debug print to view the transmission details
3642 DP(NETIF_MSG_TX_QUEUED,
3643 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3644 txdata->cid, fp_index, txdata_index, txdata, fp); */
3646 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3647 skb_shinfo(skb)->nr_frags +
3649 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3650 /* Handle special storage cases separately */
3651 if (txdata->tx_ring_size == 0) {
3652 struct bnx2x_eth_q_stats *q_stats =
3653 bnx2x_fp_qstats(bp, txdata->parent_fp);
3654 q_stats->driver_filtered_tx_pkt++;
3656 return NETDEV_TX_OK;
3658 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3659 netif_tx_stop_queue(txq);
3660 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3662 return NETDEV_TX_BUSY;
3665 DP(NETIF_MSG_TX_QUEUED,
3666 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3667 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3668 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3671 eth = (struct ethhdr *)skb->data;
3673 /* set flag according to packet type (UNICAST_ADDRESS is default) */
3674 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3675 if (is_broadcast_ether_addr(eth->h_dest))
3676 mac_type = BROADCAST_ADDRESS;
3678 mac_type = MULTICAST_ADDRESS;
3681 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3682 /* First, check if we need to linearize the skb (due to FW
3683 * restrictions). No need to check fragmentation if page size > 8K
3684 * (there will be no violation of the FW restrictions) */
3685 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3686 /* Statistics of linearization */
3688 if (skb_linearize(skb) != 0) {
3689 DP(NETIF_MSG_TX_QUEUED,
3690 "SKB linearization failed - silently dropping this SKB\n");
3691 dev_kfree_skb_any(skb);
3692 return NETDEV_TX_OK;
3696 /* Map skb linear data for DMA */
3697 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3698 skb_headlen(skb), DMA_TO_DEVICE);
3699 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3700 DP(NETIF_MSG_TX_QUEUED,
3701 "SKB mapping failed - silently dropping this SKB\n");
3702 dev_kfree_skb_any(skb);
3703 return NETDEV_TX_OK;
3706 Please read carefully. First we use one BD which we mark as start,
3707 then we have a parsing info BD (used for TSO or xsum),
3708 and only then we have the rest of the TSO BDs.
3709 (don't forget to mark the last one as last,
3710 and to unmap only AFTER you write to the BD ...)
3711 And above all, all pbd sizes are in words - NOT DWORDS!
3714 /* get current pkt produced now - advance it just before sending packet
3715 * since mapping of pages may fail and cause packet to be dropped
3717 pkt_prod = txdata->tx_pkt_prod;
3718 bd_prod = TX_BD(txdata->tx_bd_prod);
3720 /* get a tx_buf and first BD
3721 * tx_start_bd may be changed during SPLIT,
3722 * but first_bd will always stay first
3724 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3725 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3726 first_bd = tx_start_bd;
3728 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3730 /* header nbd: indirectly zero other flags! */
3731 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3733 /* remember the first BD of the packet */
3734 tx_buf->first_bd = txdata->tx_bd_prod;
3738 DP(NETIF_MSG_TX_QUEUED,
3739 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3740 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3742 if (vlan_tx_tag_present(skb)) {
3743 tx_start_bd->vlan_or_ethertype =
3744 cpu_to_le16(vlan_tx_tag_get(skb));
3745 tx_start_bd->bd_flags.as_bitfield |=
3746 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3748 /* when transmitting in a vf, start bd must hold the ethertype
3749 * for fw to enforce it
3752 tx_start_bd->vlan_or_ethertype =
3753 cpu_to_le16(ntohs(eth->h_proto));
3755 /* used by FW for packet accounting */
3756 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3759 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3761 /* turn on parsing and get a BD */
3762 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3764 if (xmit_type & XMIT_CSUM)
3765 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3767 if (!CHIP_IS_E1x(bp)) {
3768 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3769 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3771 if (xmit_type & XMIT_CSUM_ENC) {
3772 u16 global_data = 0;
3774 /* Set PBD in enc checksum offload case */
3775 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3776 &pbd_e2_parsing_data,
3779 /* turn on 2nd parsing and get a BD */
3780 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3782 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3784 memset(pbd2, 0, sizeof(*pbd2));
3786 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3787 (skb_inner_network_header(skb) -
3790 if (xmit_type & XMIT_GSO_ENC)
3791 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3795 pbd2->global_data = cpu_to_le16(global_data);
3797 /* add an additional parsing BD indication to the start BD */
3798 SET_FLAG(tx_start_bd->general_data,
3799 ETH_TX_START_BD_PARSE_NBDS, 1);
3800 /* set encapsulation flag in start BD */
3801 SET_FLAG(tx_start_bd->general_data,
3802 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3804 } else if (xmit_type & XMIT_CSUM) {
3805 /* Set PBD in checksum offload case w/o encapsulation */
3806 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3807 &pbd_e2_parsing_data,
3811 /* Add the MACs to the parsing BD - this is a VF */
3813 /* override GRE parameters in BD */
3814 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3815 &pbd_e2->data.mac_addr.src_mid,
3816 &pbd_e2->data.mac_addr.src_lo,
3819 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3820 &pbd_e2->data.mac_addr.dst_mid,
3821 &pbd_e2->data.mac_addr.dst_lo,
3825 SET_FLAG(pbd_e2_parsing_data,
3826 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3828 u16 global_data = 0;
3829 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3830 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3831 /* Set PBD in checksum offload case */
3832 if (xmit_type & XMIT_CSUM)
3833 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3835 SET_FLAG(global_data,
3836 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3837 pbd_e1x->global_data |= cpu_to_le16(global_data);
3840 /* Setup the data pointer of the first BD of the packet */
3841 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3842 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3843 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3844 pkt_size = tx_start_bd->nbytes;
3846 DP(NETIF_MSG_TX_QUEUED,
3847 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
3848 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3849 le16_to_cpu(tx_start_bd->nbytes),
3850 tx_start_bd->bd_flags.as_bitfield,
3851 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3853 if (xmit_type & XMIT_GSO) {
3855 DP(NETIF_MSG_TX_QUEUED,
3856 "TSO packet len %d hlen %d total len %d tso size %d\n",
3857 skb->len, hlen, skb_headlen(skb),
3858 skb_shinfo(skb)->gso_size);
3860 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3862 if (unlikely(skb_headlen(skb) > hlen)) {
3864 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3868 if (!CHIP_IS_E1x(bp))
3869 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3872 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
3875 /* Set the PBD's parsing_data field if not zero
3876 * (for the chips newer than 57711).
3878 if (pbd_e2_parsing_data)
3879 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3881 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3883 /* Handle fragmented skb */
3884 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3885 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3887 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3888 skb_frag_size(frag), DMA_TO_DEVICE);
3889 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3890 unsigned int pkts_compl = 0, bytes_compl = 0;
3892 DP(NETIF_MSG_TX_QUEUED,
3893 "Unable to map page - dropping packet...\n");
3895 /* we need to unmap all the buffers already mapped
3897 * first_bd->nbd needs to be properly updated
3898 * before the call to bnx2x_free_tx_pkt
3900 first_bd->nbd = cpu_to_le16(nbd);
3901 bnx2x_free_tx_pkt(bp, txdata,
3902 TX_BD(txdata->tx_pkt_prod),
3903 &pkts_compl, &bytes_compl);
3904 return NETDEV_TX_OK;
3907 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3908 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3909 if (total_pkt_bd == NULL)
3910 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3912 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3913 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3914 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3915 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3918 DP(NETIF_MSG_TX_QUEUED,
3919 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3920 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3921 le16_to_cpu(tx_data_bd->nbytes));
3924 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3926 /* update with actual num BDs */
3927 first_bd->nbd = cpu_to_le16(nbd);
3929 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3931 /* now send a tx doorbell, counting the next BD
3932 * if the packet contains or ends with it
3934 if (TX_BD_POFF(bd_prod) < nbd)
3937 /* total_pkt_bytes should be set on the first data BD if
3938 * it's not an LSO packet and there is more than one
3939 * data BD. In this case pkt_size is limited by an MTU value.
3940 * However we prefer to set it for an LSO packet (while we don't
3941 * have to) in order to save some CPU cycles in the non-LSO
3942 * case, which we care about much more.
3944 if (total_pkt_bd != NULL)
3945 total_pkt_bd->total_pkt_bytes = pkt_size;
3948 DP(NETIF_MSG_TX_QUEUED,
3949 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3950 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3951 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3952 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3953 le16_to_cpu(pbd_e1x->total_hlen_w));
3955 DP(NETIF_MSG_TX_QUEUED,
3956 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3958 pbd_e2->data.mac_addr.dst_hi,
3959 pbd_e2->data.mac_addr.dst_mid,
3960 pbd_e2->data.mac_addr.dst_lo,
3961 pbd_e2->data.mac_addr.src_hi,
3962 pbd_e2->data.mac_addr.src_mid,
3963 pbd_e2->data.mac_addr.src_lo,
3964 pbd_e2->parsing_data);
3965 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3967 netdev_tx_sent_queue(txq, skb->len);
3969 skb_tx_timestamp(skb);
3971 txdata->tx_pkt_prod++;
3973 * Make sure that the BD data is updated before updating the producer
3974 * since FW might read the BD right after the producer is updated.
3975 * This is only applicable for weak-ordered memory model archs such
3976 * as IA-64. The following barrier is also mandatory since the FW
3977 * assumes packets must have BDs.
3981 txdata->tx_db.data.prod += nbd;
3984 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3988 txdata->tx_bd_prod += nbd;
3990 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3991 netif_tx_stop_queue(txq);
3993 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3994 * ordering of set_bit() in netif_tx_stop_queue() and read of
3998 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3999 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4000 netif_tx_wake_queue(txq);
4004 return NETDEV_TX_OK;
4008 * bnx2x_setup_tc - routine to configure net_device for multi tc
4010 * @dev: net device to configure
4011 * @num_tc: number of traffic classes to enable
4013 * callback connected to the ndo_setup_tc function pointer
4015 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4017 int cos, prio, count, offset;
4018 struct bnx2x *bp = netdev_priv(dev);
4020 /* setup tc must be called under rtnl lock */
4023 /* no traffic classes requested. Aborting */
4025 netdev_reset_tc(dev);
4029 /* requested to support too many traffic classes */
4030 if (num_tc > bp->max_cos) {
4031 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4032 num_tc, bp->max_cos);
4036 /* declare the number of supported traffic classes */
4037 if (netdev_set_num_tc(dev, num_tc)) {
4038 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4042 /* configure priority to traffic class mapping */
4043 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4044 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4045 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4046 "mapping priority %d to tc %d\n",
4047 prio, bp->prio_to_cos[prio]);
4050 /* Use this configuration to differentiate tc0 from other COSes
4051 This can be used for ets or pfc, and save the effort of setting
4052 up a multi class queue disc or negotiating DCBX with a switch
4053 netdev_set_prio_tc_map(dev, 0, 0);
4054 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4055 for (prio = 1; prio < 16; prio++) {
4056 netdev_set_prio_tc_map(dev, prio, 1);
4057 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4060 /* configure traffic class to transmission queue mapping */
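/* Illustrative mapping: with, say, 4 non-CNIC queues and max_cos == 3,
 * tc0 maps to queues 0-3, tc1 to queues 4-7 and tc2 to queues 8-11
 * (offset = cos * number of non-CNIC queues, count = number of ETH queues).
 */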
4061 for (cos = 0; cos < bp->max_cos; cos++) {
4062 count = BNX2X_NUM_ETH_QUEUES(bp);
4063 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4064 netdev_set_tc_queue(dev, cos, count, offset);
4065 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4066 "mapping tc %d to offset %d count %d\n",
4067 cos, offset, count);
4073 /* called with rtnl_lock */
4074 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4076 struct sockaddr *addr = p;
4077 struct bnx2x *bp = netdev_priv(dev);
4080 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4081 BNX2X_ERR("Requested MAC address is not valid\n");
4085 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4086 !is_zero_ether_addr(addr->sa_data)) {
4087 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4091 if (netif_running(dev)) {
4092 rc = bnx2x_set_eth_mac(bp, false);
4097 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4099 if (netif_running(dev))
4100 rc = bnx2x_set_eth_mac(bp, true);
4105 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4107 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4108 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4113 if (IS_FCOE_IDX(fp_index)) {
4114 memset(sb, 0, sizeof(union host_hc_status_block));
4115 fp->status_blk_mapping = 0;
4118 if (!CHIP_IS_E1x(bp))
4119 BNX2X_PCI_FREE(sb->e2_sb,
4120 bnx2x_fp(bp, fp_index,
4121 status_blk_mapping),
4122 sizeof(struct host_hc_status_block_e2));
4124 BNX2X_PCI_FREE(sb->e1x_sb,
4125 bnx2x_fp(bp, fp_index,
4126 status_blk_mapping),
4127 sizeof(struct host_hc_status_block_e1x));
4131 if (!skip_rx_queue(bp, fp_index)) {
4132 bnx2x_free_rx_bds(fp);
4134 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4135 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4136 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4137 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4138 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4140 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4141 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4142 sizeof(struct eth_fast_path_rx_cqe) *
4146 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4147 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4148 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4149 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4153 if (!skip_tx_queue(bp, fp_index)) {
4154 /* fastpath tx rings: tx_buf tx_desc */
4155 for_each_cos_in_tx_queue(fp, cos) {
4156 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4158 DP(NETIF_MSG_IFDOWN,
4159 "freeing tx memory of fp %d cos %d cid %d\n",
4160 fp_index, cos, txdata->cid);
4162 BNX2X_FREE(txdata->tx_buf_ring);
4163 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4164 txdata->tx_desc_mapping,
4165 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4168 /* end of fastpath */
4171 void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4174 for_each_cnic_queue(bp, i)
4175 bnx2x_free_fp_mem_at(bp, i);
4178 void bnx2x_free_fp_mem(struct bnx2x *bp)
4181 for_each_eth_queue(bp, i)
4182 bnx2x_free_fp_mem_at(bp, i);
4185 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4187 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4188 if (!CHIP_IS_E1x(bp)) {
4189 bnx2x_fp(bp, index, sb_index_values) =
4190 (__le16 *)status_blk.e2_sb->sb.index_values;
4191 bnx2x_fp(bp, index, sb_running_index) =
4192 (__le16 *)status_blk.e2_sb->sb.running_index;
4194 bnx2x_fp(bp, index, sb_index_values) =
4195 (__le16 *)status_blk.e1x_sb->sb.index_values;
4196 bnx2x_fp(bp, index, sb_running_index) =
4197 (__le16 *)status_blk.e1x_sb->sb.running_index;
4201 /* Returns the number of actually allocated BDs */
4202 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4205 struct bnx2x *bp = fp->bp;
4206 u16 ring_prod, cqe_ring_prod;
4207 int i, failure_cnt = 0;
4209 fp->rx_comp_cons = 0;
4210 cqe_ring_prod = ring_prod = 0;
4212 /* This routine is called only during init, so
4213 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4215 for (i = 0; i < rx_ring_size; i++) {
4216 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
4220 ring_prod = NEXT_RX_IDX(ring_prod);
4221 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4222 WARN_ON(ring_prod <= (i - failure_cnt));
4226 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4227 i - failure_cnt, fp->index);
4229 fp->rx_bd_prod = ring_prod;
4230 /* Limit the CQE producer by the CQE ring size */
4231 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4233 fp->rx_pkt = fp->rx_calls = 0;
4235 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4237 return i - failure_cnt;
4240 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4244 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4245 struct eth_rx_cqe_next_page *nextpg;
4247 nextpg = (struct eth_rx_cqe_next_page *)
4248 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4250 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4251 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4253 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4254 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4258 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4260 union host_hc_status_block *sb;
4261 struct bnx2x_fastpath *fp = &bp->fp[index];
4264 int rx_ring_size = 0;
4266 if (!bp->rx_ring_size &&
4267 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4268 rx_ring_size = MIN_RX_SIZE_NONTPA;
4269 bp->rx_ring_size = rx_ring_size;
4270 } else if (!bp->rx_ring_size) {
4271 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4273 if (CHIP_IS_E3(bp)) {
4274 u32 cfg = SHMEM_RD(bp,
4275 dev_info.port_hw_config[BP_PORT(bp)].
4278 /* Decrease ring size for 1G functions */
4279 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4280 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4284 /* allocate at least the number of buffers required by the FW */
4285 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4286 MIN_RX_SIZE_TPA, rx_ring_size);
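/* Net effect (sketch): the default budget of MAX_RX_AVAIL is split
 * evenly across the RX queues, optionally reduced for 1G SerDes ports,
 * and never allowed below the FW minimum (MIN_RX_SIZE_TPA or
 * MIN_RX_SIZE_NONTPA).
 */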
4288 bp->rx_ring_size = rx_ring_size;
4289 } else /* if rx_ring_size specified - use it */
4290 rx_ring_size = bp->rx_ring_size;
4292 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4295 sb = &bnx2x_fp(bp, index, status_blk);
4297 if (!IS_FCOE_IDX(index)) {
4299 if (!CHIP_IS_E1x(bp))
4300 BNX2X_PCI_ALLOC(sb->e2_sb,
4301 &bnx2x_fp(bp, index, status_blk_mapping),
4302 sizeof(struct host_hc_status_block_e2));
4304 BNX2X_PCI_ALLOC(sb->e1x_sb,
4305 &bnx2x_fp(bp, index, status_blk_mapping),
4306 sizeof(struct host_hc_status_block_e1x));
4309 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4310 * set shortcuts for it.
4312 if (!IS_FCOE_IDX(index))
4313 set_sb_shortcuts(bp, index);
4316 if (!skip_tx_queue(bp, index)) {
4317 /* fastpath tx rings: tx_buf tx_desc */
4318 for_each_cos_in_tx_queue(fp, cos) {
4319 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4322 "allocating tx memory of fp %d cos %d\n",
4325 BNX2X_ALLOC(txdata->tx_buf_ring,
4326 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4327 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4328 &txdata->tx_desc_mapping,
4329 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4334 if (!skip_rx_queue(bp, index)) {
4335 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4336 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4337 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4338 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4339 &bnx2x_fp(bp, index, rx_desc_mapping),
4340 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4342 /* Seed all CQEs by 1s */
4343 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4344 &bnx2x_fp(bp, index, rx_comp_mapping),
4345 sizeof(struct eth_fast_path_rx_cqe) *
4349 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4350 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4351 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4352 &bnx2x_fp(bp, index, rx_sge_mapping),
4353 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4355 bnx2x_set_next_page_rx_bd(fp);
4358 bnx2x_set_next_page_rx_cq(fp);
4361 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4362 if (ring_size < rx_ring_size)
4368 /* handles low memory cases */
4370 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4372 /* FW will drop all packets if the queue is not big enough.
4373 * In these cases we disable the queue.
4374 * The minimum size is different for OOO, TPA and non-TPA queues.
4376 if (ring_size < (fp->disable_tpa ?
4377 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4378 /* release memory allocated for this queue */
4379 bnx2x_free_fp_mem_at(bp, index);
4385 int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4389 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4390 /* we will fail the load process instead of marking
4398 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4402 /* 1. Allocate FP for leading - fatal if error
4403 * 2. Allocate RSS - fix number of queues if error
4407 if (bnx2x_alloc_fp_mem_at(bp, 0))
4411 for_each_nondefault_eth_queue(bp, i)
4412 if (bnx2x_alloc_fp_mem_at(bp, i))
4415 /* handle memory failures */
4416 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4417 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4420 bnx2x_shrink_eth_fp(bp, delta);
4421 if (CNIC_SUPPORT(bp))
4422 /* move non eth FPs next to last eth FP
4423 * must be done in that order
4424 * FCOE_IDX < FWD_IDX < OOO_IDX
4427 /* move the FCoE fp even if NO_FCOE_FLAG is on */
4428 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4429 bp->num_ethernet_queues -= delta;
4430 bp->num_queues = bp->num_ethernet_queues +
4431 bp->num_cnic_queues;
4432 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4433 bp->num_queues + delta, bp->num_queues);
4439 void bnx2x_free_mem_bp(struct bnx2x *bp)
4443 for (i = 0; i < bp->fp_array_size; i++)
4444 kfree(bp->fp[i].tpa_info);
4447 kfree(bp->fp_stats);
4448 kfree(bp->bnx2x_txq);
4449 kfree(bp->msix_table);
4453 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4455 struct bnx2x_fastpath *fp;
4456 struct msix_entry *tbl;
4457 struct bnx2x_ilt *ilt;
4458 int msix_table_size = 0;
4459 int fp_array_size, txq_array_size;
4463 * The biggest MSI-X table we might need is the maximum number of fast
4464 * path IGU SBs plus the default SB (for the PF only).
4466 msix_table_size = bp->igu_sb_cnt;
4469 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;
	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;
	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
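	/* Example sizing (assumed values for illustration): with
	 * BNX2X_MAX_RSS_COUNT(bp) == 16, BNX2X_MULTI_TX_COS == 3 and one CNIC
	 * L2 queue, txq_array_size is 16 * 3 + 1 == 49 txdata entries - one
	 * per traffic class per RSS queue plus a single entry for the CNIC
	 * queue.
	 */
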
	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}

int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}

int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;

	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);

	/* The selected active PHY index is always the post-swap one (when PHY
	 * swapping is enabled), so when swapping is enabled we must reverse it
	 * to get the matching configuration index.
	 */
	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

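/* Illustration of the swap handling above: with
 * PORT_HW_CFG_PHY_SWAPPED_ENABLED set, an active EXT_PHY1 is described by the
 * EXT_PHY2 configuration slot and vice versa, so the returned
 * LINK_CONFIG_IDX() refers to the NVRAM entry that actually configures the
 * active PHY.
 */
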
#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif

/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}

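/* Example for the bounds check above (assuming the driver's usual
 * definitions, ETH_MAX_JUMBO_PACKET_SIZE == 9600 and
 * ETH_MIN_PACKET_SIZE == 60): new_mtu == 9000 is accepted, while
 * new_mtu == 40 is rejected because 40 + ETH_HLEN (14) == 54 falls below the
 * 60-byte minimum frame size.
 */
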
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}

int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	u32 changes;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_GRO)
		flags |= GRO_ENABLE_FLAG;
	else
		flags &= ~GRO_ENABLE_FLAG;
	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}
	changes = flags ^ bp->flags;

	/* if GRO is changed while LRO is enabled, don't force a reload */
	if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
		changes &= ~GRO_ENABLE_FLAG;
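	/* Example: toggling only NETIF_F_GRO while LRO (and thus TPA) stays
	 * enabled leaves just GRO_ENABLE_FLAG set in 'changes'; it is cleared
	 * here so the toggle takes effect without a full NIC reload.  Any
	 * other flipped flag keeps 'changes' non-zero and sets bnx2x_reload
	 * below.
	 */
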
	if (changes)
		bnx2x_reload = true;

	bp->flags = flags;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}

void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();
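	/* Note: the barrier pair around set_bit() orders the sp_rtnl_state
	 * update against surrounding memory accesses, so the deferred sp_rtnl
	 * handler scheduled below observes the TX_TIMEOUT bit when it runs.
	 */
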
	/* This allows the netif to be shut down gracefully before resetting */
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}

int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);

	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);

	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
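
/* Example for the coalescing math above (assuming BNX2X_BTR == 4, the
 * driver's host-coalescing tick resolution in usec): usec == 100 programs
 * ticks == 25; usec == 0 programs ticks == 0 and, even if 'disable' was not
 * requested, forces disable == 1 so HC_INDEX_DATA_HC_ENABLED is cleared for
 * that status block index.
 */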