1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
26 #include <net/ip6_checksum.h>
27 #include <linux/prefetch.h>
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_init.h"
33 * bnx2x_move_fp - move content of the fastpath structure.
36 * @from: source FP index
37 * @to: destination FP index
39 * Makes sure the contents of the bp->fp[to].napi is kept
40 * intact. This is done by first copying the napi struct from
41 * the target to the source, and then mem copying the entire
42 * source onto the target. Update txdata pointers and related content.
45 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
47 struct bnx2x_fastpath *from_fp = &bp->fp[from];
48 struct bnx2x_fastpath *to_fp = &bp->fp[to];
49 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
50 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
51 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
52 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
53 int old_max_eth_txqs, new_max_eth_txqs;
54 int old_txdata_index = 0, new_txdata_index = 0;
56 /* Copy the NAPI object as it has been already initialized */
57 from_fp->napi = to_fp->napi;
59 /* Move bnx2x_fastpath contents */
60 memcpy(to_fp, from_fp, sizeof(*to_fp));
63 /* move sp_objs contents as well, as their indices match fp ones */
64 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
66 /* move fp_stats contents as well, as their indices match fp ones */
67 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
69 /* Update txdata pointers in fp and move txdata content accordingly:
70 * Each fp consumes 'max_cos' txdata structures, so the index should be
71 * decremented by max_cos x delta.
74 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
75 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
77 if (from == FCOE_IDX(bp)) {
78 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
79 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
82 memcpy(&bp->bnx2x_txq[new_txdata_index],
83 &bp->bnx2x_txq[old_txdata_index],
84 sizeof(struct bnx2x_fp_txdata));
85 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
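/* Illustrative note (numbers assumed, not from this driver's config):
 * with 8 ETH queues and max_cos = 3, the FCoE txdata entry lives at
 * index 8 * 3 + FCOE_TXQ_IDX_OFFSET = 24 + FCOE_TXQ_IDX_OFFSET; once
 * the fastpath is moved and the ETH queue count drops by (from - to),
 * the entry is copied to the matching slot of the smaller layout.
 */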
89 * bnx2x_fill_fw_str - Fill buffer with FW version string.
92 * @buf: character buffer to fill with the fw name
93 * @buf_len: length of the above buffer
96 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
99 u8 phy_fw_ver[PHY_FW_VER_LEN];
101 phy_fw_ver[0] = '\0';
102 bnx2x_get_ext_phy_fw_version(&bp->link_params,
103 phy_fw_ver, PHY_FW_VER_LEN);
104 strlcpy(buf, bp->fw_ver, buf_len);
105 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
107 (bp->common.bc_ver & 0xff0000) >> 16,
108 (bp->common.bc_ver & 0xff00) >> 8,
109 (bp->common.bc_ver & 0xff),
110 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
112 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
117 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
120 * @delta: number of eth queues which were not allocated
122 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
124 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
126 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
127 * backward along the array could cause memory to be overridden
129 for (cos = 1; cos < bp->max_cos; cos++) {
130 for (i = 0; i < old_eth_num - delta; i++) {
131 struct bnx2x_fastpath *fp = &bp->fp[i];
132 int new_idx = cos * (old_eth_num - delta) + i;
134 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
135 sizeof(struct bnx2x_fp_txdata));
136 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
141 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
143 /* free skb in the packet ring at pos idx
144 * return idx of last bd freed
146 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
147 u16 idx, unsigned int *pkts_compl,
148 unsigned int *bytes_compl)
150 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
151 struct eth_tx_start_bd *tx_start_bd;
152 struct eth_tx_bd *tx_data_bd;
153 struct sk_buff *skb = tx_buf->skb;
154 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
157 /* prefetch skb end pointer to speed up dev_kfree_skb() */
160 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
161 txdata->txq_index, idx, tx_buf, skb);
164 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
165 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
166 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
169 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
170 #ifdef BNX2X_STOP_ON_ERROR
171 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
172 BNX2X_ERR("BAD nbd!\n");
176 new_cons = nbd + tx_buf->first_bd;
178 /* Get the next bd */
179 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
181 /* Skip a parse bd... */
183 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
185 /* ...and the TSO split header bd since they have no mapping */
186 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
188 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
194 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
195 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
196 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
198 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
205 (*bytes_compl) += skb->len;
208 dev_kfree_skb_any(skb);
209 tx_buf->first_bd = 0;
215 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
217 struct netdev_queue *txq;
218 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
219 unsigned int pkts_compl = 0, bytes_compl = 0;
221 #ifdef BNX2X_STOP_ON_ERROR
222 if (unlikely(bp->panic))
226 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
227 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
228 sw_cons = txdata->tx_pkt_cons;
230 while (sw_cons != hw_cons) {
233 pkt_cons = TX_BD(sw_cons);
235 DP(NETIF_MSG_TX_DONE,
236 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
237 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
239 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
240 &pkts_compl, &bytes_compl);
245 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
247 txdata->tx_pkt_cons = sw_cons;
248 txdata->tx_bd_cons = bd_cons;
250 /* Need to make the tx_bd_cons update visible to start_xmit()
251 * before checking for netif_tx_queue_stopped(). Without the
252 * memory barrier, there is a small possibility that
253 * start_xmit() will miss it and cause the queue to be stopped
255 * On the other hand we need an rmb() here to ensure the proper
256 * ordering of bit testing in the following
257 * netif_tx_queue_stopped(txq) call.
261 if (unlikely(netif_tx_queue_stopped(txq))) {
262 /* Taking tx_lock() is needed to prevent reenabling the queue
263 * while it's empty. This could have happened if rx_action() gets
264 * suspended in bnx2x_tx_int() after the condition before
265 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
267 * stops the queue->sees fresh tx_bd_cons->releases the queue->
268 * sends some packets consuming the whole queue again->
272 __netif_tx_lock(txq, smp_processor_id());
274 if ((netif_tx_queue_stopped(txq)) &&
275 (bp->state == BNX2X_STATE_OPEN) &&
276 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
277 netif_tx_wake_queue(txq);
279 __netif_tx_unlock(txq);
284 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
287 u16 last_max = fp->last_max_sge;
289 if (SUB_S16(idx, last_max) > 0)
290 fp->last_max_sge = idx;
293 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
295 struct eth_end_agg_rx_cqe *cqe)
297 struct bnx2x *bp = fp->bp;
298 u16 last_max, last_elem, first_elem;
305 /* First mark all used pages */
306 for (i = 0; i < sge_len; i++)
307 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
308 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
310 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
311 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
313 /* Here we assume that the last SGE index is the biggest */
314 prefetch((void *)(fp->sge_mask));
315 bnx2x_update_last_max_sge(fp,
316 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
318 last_max = RX_SGE(fp->last_max_sge);
319 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
320 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
322 /* If ring is not full */
323 if (last_elem + 1 != first_elem)
326 /* Now update the prod */
327 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
328 if (likely(fp->sge_mask[i]))
331 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
332 delta += BIT_VEC64_ELEM_SZ;
336 fp->rx_sge_prod += delta;
337 /* clear page-end entries */
338 bnx2x_clear_sge_mask_next_elems(fp);
341 DP(NETIF_MSG_RX_STATUS,
342 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
343 fp->last_max_sge, fp->rx_sge_prod);
346 /* Set Toeplitz hash value in the skb using the value from the
347 * CQE (calculated by HW).
349 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
350 const struct eth_fast_path_rx_cqe *cqe,
353 /* Set Toeplitz hash from CQE */
354 if ((bp->dev->features & NETIF_F_RXHASH) &&
355 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
356 enum eth_rss_hash_type htype;
358 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
359 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
360 (htype == TCP_IPV6_HASH_TYPE);
361 return le32_to_cpu(cqe->rss_hash_result);
367 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
369 struct eth_fast_path_rx_cqe *cqe)
371 struct bnx2x *bp = fp->bp;
372 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
373 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
374 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
376 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
377 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
379 /* print error if current state != stop */
380 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
381 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
383 /* Try to map an empty data buffer from the aggregation info */
384 mapping = dma_map_single(&bp->pdev->dev,
385 first_buf->data + NET_SKB_PAD,
386 fp->rx_buf_size, DMA_FROM_DEVICE);
388 * ...if it fails - move the skb from the consumer to the producer
389 * and set the current aggregation state as ERROR to drop it
390 * when TPA_STOP arrives.
393 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
394 /* Move the BD from the consumer to the producer */
395 bnx2x_reuse_rx_data(fp, cons, prod);
396 tpa_info->tpa_state = BNX2X_TPA_ERROR;
400 /* move empty data from pool to prod */
401 prod_rx_buf->data = first_buf->data;
402 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
403 /* point prod_bd to new data */
404 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
405 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
407 /* move partial skb from cons to pool (don't unmap yet) */
408 *first_buf = *cons_rx_buf;
410 /* mark bin state as START */
411 tpa_info->parsing_flags =
412 le16_to_cpu(cqe->pars_flags.flags);
413 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
414 tpa_info->tpa_state = BNX2X_TPA_START;
415 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
416 tpa_info->placement_offset = cqe->placement_offset;
417 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
418 if (fp->mode == TPA_MODE_GRO) {
419 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
420 tpa_info->full_page =
421 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
422 tpa_info->gro_size = gro_size;
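/* full_page is the SGE buffer size (SGE_PAGE_SIZE * PAGES_PER_SGE)
 * rounded down to a whole multiple of gro_size, so that each SGE can
 * later be split into equally sized GRO segments in
 * bnx2x_fill_frag_skb().
 */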
425 #ifdef BNX2X_STOP_ON_ERROR
426 fp->tpa_queue_used |= (1 << queue);
427 #ifdef _ASM_GENERIC_INT_L64_H
428 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
430 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
436 /* Timestamp option length allowed for TPA aggregation:
438 * nop nop kind length echo val
440 #define TPA_TSTAMP_OPT_LEN 12
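/* The 12 bytes break down as: 2 x 1-byte NOP padding + 1-byte kind +
 * 1-byte length + 4-byte timestamp value + 4-byte echo reply, i.e. the
 * standard 10-byte TCP timestamp option aligned to a 4-byte boundary.
 */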
442 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
445 * @parsing_flags: parsing flags from the START CQE
446 * @len_on_bd: total length of the first packet for the
449 * Approximate value of the MSS for this aggregation calculated using
450 * the first packet of it.
452 static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
456 * TPA aggregation won't have either IP options or TCP options
457 * other than timestamp or IPv6 extension headers.
459 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
461 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
462 PRS_FLAG_OVERETH_IPV6)
463 hdrs_len += sizeof(struct ipv6hdr);
465 hdrs_len += sizeof(struct iphdr);
468 /* Check if there was a TCP timestamp; if there was, it will
469 * always be 12 bytes long: nop nop kind length echo val.
471 * Otherwise FW would close the aggregation.
473 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
474 hdrs_len += TPA_TSTAMP_OPT_LEN;
476 return len_on_bd - hdrs_len;
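/* Worked example (typical layout assumed): for an IPv4 aggregation
 * with TCP timestamps, hdrs_len = ETH_HLEN (14) + sizeof(struct iphdr)
 * (20) + sizeof(struct tcphdr) (20) + TPA_TSTAMP_OPT_LEN (12) = 66,
 * so the estimated MSS is len_on_bd - 66.
 */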
479 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
480 struct bnx2x_fastpath *fp, u16 index)
482 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
483 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
484 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
487 if (unlikely(page == NULL)) {
488 BNX2X_ERR("Can't alloc sge\n");
492 mapping = dma_map_page(&bp->pdev->dev, page, 0,
493 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
494 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
495 __free_pages(page, PAGES_PER_SGE_SHIFT);
496 BNX2X_ERR("Can't map sge\n");
501 dma_unmap_addr_set(sw_buf, mapping, mapping);
503 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
504 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
509 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
510 struct bnx2x_agg_info *tpa_info,
513 struct eth_end_agg_rx_cqe *cqe,
516 struct sw_rx_page *rx_pg, old_rx_pg;
517 u32 i, frag_len, frag_size;
518 int err, j, frag_id = 0;
519 u16 len_on_bd = tpa_info->len_on_bd;
520 u16 full_page = 0, gro_size = 0;
522 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
524 if (fp->mode == TPA_MODE_GRO) {
525 gro_size = tpa_info->gro_size;
526 full_page = tpa_info->full_page;
529 /* This is needed in order to enable forwarding support */
531 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
532 tpa_info->parsing_flags, len_on_bd);
535 if (fp->mode == TPA_MODE_GRO && skb_shinfo(skb)->gso_size)
536 skb_shinfo(skb)->gso_type =
537 (GET_FLAG(tpa_info->parsing_flags,
538 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
539 PRS_FLAG_OVERETH_IPV6) ?
540 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
544 #ifdef BNX2X_STOP_ON_ERROR
545 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
546 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
548 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
554 /* Run through the SGL and compose the fragmented skb */
555 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
556 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
558 /* FW gives the indices of the SGE as if the ring is an array
559 (meaning that "next" element will consume 2 indices) */
560 if (fp->mode == TPA_MODE_GRO)
561 frag_len = min_t(u32, frag_size, (u32)full_page);
563 frag_len = min_t(u32, frag_size,
564 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
566 rx_pg = &fp->rx_page_ring[sge_idx];
569 /* If we fail to allocate a substitute page, we simply stop
570 where we are and drop the whole packet */
571 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
573 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
577 /* Unmap the page as we're going to pass it to the stack */
578 dma_unmap_page(&bp->pdev->dev,
579 dma_unmap_addr(&old_rx_pg, mapping),
580 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
581 /* Add one frag and update the appropriate fields in the skb */
582 if (fp->mode == TPA_MODE_LRO)
583 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
587 for (rem = frag_len; rem > 0; rem -= gro_size) {
588 int len = rem > gro_size ? gro_size : rem;
589 skb_fill_page_desc(skb, frag_id++,
590 old_rx_pg.page, offset, len);
592 get_page(old_rx_pg.page);
597 skb->data_len += frag_len;
598 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
599 skb->len += frag_len;
601 frag_size -= frag_len;
607 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
609 if (fp->rx_frag_size)
610 put_page(virt_to_head_page(data));
615 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
617 if (fp->rx_frag_size)
618 return netdev_alloc_frag(fp->rx_frag_size);
620 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
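/* The kmalloc() fallback above handles rx_frag_size == 0, i.e. the
 * case where the receive buffer does not fit into a page-sized frag
 * (see bnx2x_set_rx_buf_size()); NET_SKB_PAD is added so the buffer
 * still carries the usual headroom in front of the packet data.
 */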
625 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
627 const struct iphdr *iph = ip_hdr(skb);
630 skb_set_transport_header(skb, sizeof(struct iphdr));
633 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
634 iph->saddr, iph->daddr, 0);
637 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
639 struct ipv6hdr *iph = ipv6_hdr(skb);
642 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
645 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
646 &iph->saddr, &iph->daddr, 0);
650 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
654 if (fp->mode == TPA_MODE_GRO && skb_shinfo(skb)->gso_size) {
655 skb_set_network_header(skb, 0);
656 switch (be16_to_cpu(skb->protocol)) {
658 bnx2x_gro_ip_csum(bp, skb);
661 bnx2x_gro_ipv6_csum(bp, skb);
664 BNX2X_ERR("FW GRO supports only IPv4/IPv6, not 0x%04x\n",
665 be16_to_cpu(skb->protocol));
667 tcp_gro_complete(skb);
670 napi_gro_receive(&fp->napi, skb);
673 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
674 struct bnx2x_agg_info *tpa_info,
676 struct eth_end_agg_rx_cqe *cqe,
679 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
680 u8 pad = tpa_info->placement_offset;
681 u16 len = tpa_info->len_on_bd;
682 struct sk_buff *skb = NULL;
683 u8 *new_data, *data = rx_buf->data;
684 u8 old_tpa_state = tpa_info->tpa_state;
686 tpa_info->tpa_state = BNX2X_TPA_STOP;
688 /* If there was an error during the handling of the TPA_START -
689 * drop this aggregation.
691 if (old_tpa_state == BNX2X_TPA_ERROR)
694 /* Try to allocate the new data */
695 new_data = bnx2x_frag_alloc(fp);
696 /* Unmap skb in the pool anyway, as we are going to change
697 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails.
699 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
700 fp->rx_buf_size, DMA_FROM_DEVICE);
701 if (likely(new_data))
702 skb = build_skb(data, fp->rx_frag_size);
705 #ifdef BNX2X_STOP_ON_ERROR
706 if (pad + len > fp->rx_buf_size) {
707 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
708 pad, len, fp->rx_buf_size);
714 skb_reserve(skb, pad + NET_SKB_PAD);
716 skb->rxhash = tpa_info->rxhash;
717 skb->l4_rxhash = tpa_info->l4_rxhash;
719 skb->protocol = eth_type_trans(skb, bp->dev);
720 skb->ip_summed = CHECKSUM_UNNECESSARY;
722 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
723 skb, cqe, cqe_idx)) {
724 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
725 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
726 bnx2x_gro_receive(bp, fp, skb);
728 DP(NETIF_MSG_RX_STATUS,
729 "Failed to allocate new pages - dropping packet!\n");
730 dev_kfree_skb_any(skb);
734 /* put new data in bin */
735 rx_buf->data = new_data;
739 bnx2x_frag_free(fp, new_data);
741 /* drop the packet and keep the buffer in the bin */
742 DP(NETIF_MSG_RX_STATUS,
743 "Failed to allocate or map a new skb - dropping packet!\n");
744 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
747 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
748 struct bnx2x_fastpath *fp, u16 index)
751 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
752 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
755 data = bnx2x_frag_alloc(fp);
756 if (unlikely(data == NULL))
759 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
762 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
763 bnx2x_frag_free(fp, data);
764 BNX2X_ERR("Can't map rx data\n");
769 dma_unmap_addr_set(rx_buf, mapping, mapping);
771 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
772 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
778 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
779 struct bnx2x_fastpath *fp,
780 struct bnx2x_eth_q_stats *qstats)
782 /* Do nothing if no L4 csum validation was done.
783 * We do not check whether IP csum was validated. For IPv4 we assume
784 * that if the card got as far as validating the L4 csum, it also
785 * validated the IP csum. IPv6 has no IP csum.
787 if (cqe->fast_path_cqe.status_flags &
788 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
791 /* If L4 validation was done, check if an error was found. */
793 if (cqe->fast_path_cqe.type_error_flags &
794 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
795 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
796 qstats->hw_csum_err++;
798 skb->ip_summed = CHECKSUM_UNNECESSARY;
801 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
803 struct bnx2x *bp = fp->bp;
804 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
805 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
808 #ifdef BNX2X_STOP_ON_ERROR
809 if (unlikely(bp->panic))
813 /* CQ "next element" is of the size of the regular element,
814 that's why it's ok here */
815 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
816 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
819 bd_cons = fp->rx_bd_cons;
820 bd_prod = fp->rx_bd_prod;
821 bd_prod_fw = bd_prod;
822 sw_comp_cons = fp->rx_comp_cons;
823 sw_comp_prod = fp->rx_comp_prod;
825 /* Memory barrier necessary as speculative reads of the rx
826 * buffer can be ahead of the index in the status block
830 DP(NETIF_MSG_RX_STATUS,
831 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
832 fp->index, hw_comp_cons, sw_comp_cons);
834 while (sw_comp_cons != hw_comp_cons) {
835 struct sw_rx_bd *rx_buf = NULL;
837 union eth_rx_cqe *cqe;
838 struct eth_fast_path_rx_cqe *cqe_fp;
840 enum eth_rx_cqe_type cqe_fp_type;
845 #ifdef BNX2X_STOP_ON_ERROR
846 if (unlikely(bp->panic))
850 comp_ring_cons = RCQ_BD(sw_comp_cons);
851 bd_prod = RX_BD(bd_prod);
852 bd_cons = RX_BD(bd_cons);
854 cqe = &fp->rx_comp_ring[comp_ring_cons];
855 cqe_fp = &cqe->fast_path_cqe;
856 cqe_fp_flags = cqe_fp->type_error_flags;
857 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
859 DP(NETIF_MSG_RX_STATUS,
860 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
861 CQE_TYPE(cqe_fp_flags),
862 cqe_fp_flags, cqe_fp->status_flags,
863 le32_to_cpu(cqe_fp->rss_hash_result),
864 le16_to_cpu(cqe_fp->vlan_tag),
865 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
867 /* is this a slowpath msg? */
868 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
869 bnx2x_sp_event(fp, cqe);
873 rx_buf = &fp->rx_buf_ring[bd_cons];
876 if (!CQE_TYPE_FAST(cqe_fp_type)) {
877 struct bnx2x_agg_info *tpa_info;
878 u16 frag_size, pages;
879 #ifdef BNX2X_STOP_ON_ERROR
881 if (fp->disable_tpa &&
882 (CQE_TYPE_START(cqe_fp_type) ||
883 CQE_TYPE_STOP(cqe_fp_type)))
884 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
885 CQE_TYPE(cqe_fp_type));
888 if (CQE_TYPE_START(cqe_fp_type)) {
889 u16 queue = cqe_fp->queue_index;
890 DP(NETIF_MSG_RX_STATUS,
891 "calling tpa_start on queue %d\n",
894 bnx2x_tpa_start(fp, queue,
901 queue = cqe->end_agg_cqe.queue_index;
902 tpa_info = &fp->tpa_info[queue];
903 DP(NETIF_MSG_RX_STATUS,
904 "calling tpa_stop on queue %d\n",
907 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
910 if (fp->mode == TPA_MODE_GRO)
911 pages = (frag_size + tpa_info->full_page - 1) /
914 pages = SGE_PAGE_ALIGN(frag_size) >>
917 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
918 &cqe->end_agg_cqe, comp_ring_cons);
919 #ifdef BNX2X_STOP_ON_ERROR
924 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
928 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
929 pad = cqe_fp->placement_offset;
930 dma_sync_single_for_cpu(&bp->pdev->dev,
931 dma_unmap_addr(rx_buf, mapping),
932 pad + RX_COPY_THRESH,
935 prefetch(data + pad); /* speedup eth_type_trans() */
936 /* is this an error packet? */
937 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
938 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
939 "ERROR flags %x rx packet %u\n",
940 cqe_fp_flags, sw_comp_cons);
941 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
945 /* Since we don't have a jumbo ring
946 * copy small packets if mtu > 1500
948 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
949 (len <= RX_COPY_THRESH)) {
950 skb = netdev_alloc_skb_ip_align(bp->dev, len);
952 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
953 "ERROR packet dropped because of alloc failure\n");
954 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
957 memcpy(skb->data, data + pad, len);
958 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
960 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
961 dma_unmap_single(&bp->pdev->dev,
962 dma_unmap_addr(rx_buf, mapping),
965 skb = build_skb(data, fp->rx_frag_size);
966 if (unlikely(!skb)) {
967 bnx2x_frag_free(fp, data);
968 bnx2x_fp_qstats(bp, fp)->
969 rx_skb_alloc_failed++;
972 skb_reserve(skb, pad);
974 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
975 "ERROR packet dropped because of alloc failure\n");
976 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
978 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
984 skb->protocol = eth_type_trans(skb, bp->dev);
986 /* Set Toeplitz hash for a non-LRO skb */
987 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
988 skb->l4_rxhash = l4_rxhash;
990 skb_checksum_none_assert(skb);
992 if (bp->dev->features & NETIF_F_RXCSUM)
993 bnx2x_csum_validate(skb, cqe, fp,
994 bnx2x_fp_qstats(bp, fp));
996 skb_record_rx_queue(skb, fp->rx_queue);
998 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1000 __vlan_hwaccel_put_tag(skb,
1001 le16_to_cpu(cqe_fp->vlan_tag));
1002 napi_gro_receive(&fp->napi, skb);
1006 rx_buf->data = NULL;
1008 bd_cons = NEXT_RX_IDX(bd_cons);
1009 bd_prod = NEXT_RX_IDX(bd_prod);
1010 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1013 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1014 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1016 if (rx_pkt == budget)
1020 fp->rx_bd_cons = bd_cons;
1021 fp->rx_bd_prod = bd_prod_fw;
1022 fp->rx_comp_cons = sw_comp_cons;
1023 fp->rx_comp_prod = sw_comp_prod;
1025 /* Update producers */
1026 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1029 fp->rx_pkt += rx_pkt;
1035 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1037 struct bnx2x_fastpath *fp = fp_cookie;
1038 struct bnx2x *bp = fp->bp;
1042 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1043 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1044 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1046 #ifdef BNX2X_STOP_ON_ERROR
1047 if (unlikely(bp->panic))
1051 /* Handle Rx and Tx according to MSI-X vector */
1052 prefetch(fp->rx_cons_sb);
1054 for_each_cos_in_tx_queue(fp, cos)
1055 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1057 prefetch(&fp->sb_running_index[SM_RX_ID]);
1058 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1063 /* HW Lock for shared dual port PHYs */
1064 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1066 mutex_lock(&bp->port.phy_mutex);
1068 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1071 void bnx2x_release_phy_lock(struct bnx2x *bp)
1073 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1075 mutex_unlock(&bp->port.phy_mutex);
1078 /* calculates MF speed according to current linespeed and MF configuration */
1079 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1081 u16 line_speed = bp->link_vars.line_speed;
1083 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1084 bp->mf_config[BP_VN(bp)]);
1086 /* Calculate the current MAX line speed limit for the MF
1090 line_speed = (line_speed * maxCfg) / 100;
1091 else { /* SD mode */
1092 u16 vn_max_rate = maxCfg * 100;
1094 if (vn_max_rate < line_speed)
1095 line_speed = vn_max_rate;
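/* Illustrative numbers (assumed, not from the original source): on a
 * 10000 Mbps link with maxCfg = 50, the percentage-based branch above
 * yields 10000 * 50 / 100 = 5000 Mbps, while the SD branch treats
 * maxCfg as units of 100 Mbps, giving vn_max_rate = 50 * 100 = 5000.
 */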
1103 * bnx2x_fill_report_data - fill link report data to report
1105 * @bp: driver handle
1106 * @data: link state to update
1108 * It uses non-atomic bit operations because it is called under the mutex.
1110 static void bnx2x_fill_report_data(struct bnx2x *bp,
1111 struct bnx2x_link_report_data *data)
1113 u16 line_speed = bnx2x_get_mf_speed(bp);
1115 memset(data, 0, sizeof(*data));
1117 /* Fill the report data: effective line speed */
1118 data->line_speed = line_speed;
1121 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1122 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1123 &data->link_report_flags);
1126 if (bp->link_vars.duplex == DUPLEX_FULL)
1127 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1129 /* Rx Flow Control is ON */
1130 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1131 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1133 /* Tx Flow Control is ON */
1134 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1135 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1139 * bnx2x_link_report - report link status to OS.
1141 * @bp: driver handle
1143 * Calls the __bnx2x_link_report() under the same locking scheme
1144 * as a link/PHY state managing code to ensure a consistent link
1148 void bnx2x_link_report(struct bnx2x *bp)
1150 bnx2x_acquire_phy_lock(bp);
1151 __bnx2x_link_report(bp);
1152 bnx2x_release_phy_lock(bp);
1156 * __bnx2x_link_report - report link status to OS.
1158 * @bp: driver handle
1160 * Non-atomic implementation.
1161 * Should be called under the phy_lock.
1163 void __bnx2x_link_report(struct bnx2x *bp)
1165 struct bnx2x_link_report_data cur_data;
1168 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1169 bnx2x_read_mf_cfg(bp);
1171 /* Read the current link report info */
1172 bnx2x_fill_report_data(bp, &cur_data);
1174 /* Don't report link down or exactly the same link status twice */
1175 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1176 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1177 &bp->last_reported_link.link_report_flags) &&
1178 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1179 &cur_data.link_report_flags)))
1184 /* We are going to report new link parameters now -
1185 * remember the current data for next time.
1187 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1189 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1190 &cur_data.link_report_flags)) {
1191 netif_carrier_off(bp->dev);
1192 netdev_err(bp->dev, "NIC Link is Down\n");
1198 netif_carrier_on(bp->dev);
1200 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1201 &cur_data.link_report_flags))
1206 /* Handle the FC at the end so that only these flags would be
1207 * possibly set. This way we may easily check if there is no FC
1210 if (cur_data.link_report_flags) {
1211 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1212 &cur_data.link_report_flags)) {
1213 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1214 &cur_data.link_report_flags))
1215 flow = "ON - receive & transmit";
1217 flow = "ON - receive";
1219 flow = "ON - transmit";
1224 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1225 cur_data.line_speed, duplex, flow);
1229 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1233 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1234 struct eth_rx_sge *sge;
1236 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1238 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1239 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1242 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1243 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1247 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1248 struct bnx2x_fastpath *fp, int last)
1252 for (i = 0; i < last; i++) {
1253 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1254 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1255 u8 *data = first_buf->data;
1258 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1261 if (tpa_info->tpa_state == BNX2X_TPA_START)
1262 dma_unmap_single(&bp->pdev->dev,
1263 dma_unmap_addr(first_buf, mapping),
1264 fp->rx_buf_size, DMA_FROM_DEVICE);
1265 bnx2x_frag_free(fp, data);
1266 first_buf->data = NULL;
1270 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1274 for_each_rx_queue_cnic(bp, j) {
1275 struct bnx2x_fastpath *fp = &bp->fp[j];
1279 /* Activate BD ring */
1281 * this will generate an interrupt (to the TSTORM)
1282 * must only be done after chip is initialized
1284 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1289 void bnx2x_init_rx_rings(struct bnx2x *bp)
1291 int func = BP_FUNC(bp);
1295 /* Allocate TPA resources */
1296 for_each_eth_queue(bp, j) {
1297 struct bnx2x_fastpath *fp = &bp->fp[j];
1300 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1302 if (!fp->disable_tpa) {
1303 /* Fill the per-aggregation pool */
1304 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1305 struct bnx2x_agg_info *tpa_info =
1307 struct sw_rx_bd *first_buf =
1308 &tpa_info->first_buf;
1310 first_buf->data = bnx2x_frag_alloc(fp);
1311 if (!first_buf->data) {
1312 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1314 bnx2x_free_tpa_pool(bp, fp, i);
1315 fp->disable_tpa = 1;
1318 dma_unmap_addr_set(first_buf, mapping, 0);
1319 tpa_info->tpa_state = BNX2X_TPA_STOP;
1322 /* "next page" elements initialization */
1323 bnx2x_set_next_page_sgl(fp);
1325 /* set SGEs bit mask */
1326 bnx2x_init_sge_ring_bit_mask(fp);
1328 /* Allocate SGEs and initialize the ring elements */
1329 for (i = 0, ring_prod = 0;
1330 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1332 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1333 BNX2X_ERR("was only able to allocate %d rx sges\n",
1335 BNX2X_ERR("disabling TPA for queue[%d]\n",
1337 /* Cleanup already allocated elements */
1338 bnx2x_free_rx_sge_range(bp, fp,
1340 bnx2x_free_tpa_pool(bp, fp,
1342 fp->disable_tpa = 1;
1346 ring_prod = NEXT_SGE_IDX(ring_prod);
1349 fp->rx_sge_prod = ring_prod;
1353 for_each_eth_queue(bp, j) {
1354 struct bnx2x_fastpath *fp = &bp->fp[j];
1358 /* Activate BD ring */
1360 * this will generate an interrupt (to the TSTORM)
1361 * must only be done after chip is initialized
1363 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1369 if (CHIP_IS_E1(bp)) {
1370 REG_WR(bp, BAR_USTRORM_INTMEM +
1371 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1372 U64_LO(fp->rx_comp_mapping));
1373 REG_WR(bp, BAR_USTRORM_INTMEM +
1374 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1375 U64_HI(fp->rx_comp_mapping));
1380 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1383 struct bnx2x *bp = fp->bp;
1385 for_each_cos_in_tx_queue(fp, cos) {
1386 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1387 unsigned pkts_compl = 0, bytes_compl = 0;
1389 u16 sw_prod = txdata->tx_pkt_prod;
1390 u16 sw_cons = txdata->tx_pkt_cons;
1392 while (sw_cons != sw_prod) {
1393 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1394 &pkts_compl, &bytes_compl);
1398 netdev_tx_reset_queue(
1399 netdev_get_tx_queue(bp->dev,
1400 txdata->txq_index));
1404 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1408 for_each_tx_queue_cnic(bp, i) {
1409 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1413 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1417 for_each_eth_queue(bp, i) {
1418 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1422 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1424 struct bnx2x *bp = fp->bp;
1427 /* ring wasn't allocated */
1428 if (fp->rx_buf_ring == NULL)
1431 for (i = 0; i < NUM_RX_BD; i++) {
1432 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1433 u8 *data = rx_buf->data;
1437 dma_unmap_single(&bp->pdev->dev,
1438 dma_unmap_addr(rx_buf, mapping),
1439 fp->rx_buf_size, DMA_FROM_DEVICE);
1441 rx_buf->data = NULL;
1442 bnx2x_frag_free(fp, data);
1446 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1450 for_each_rx_queue_cnic(bp, j) {
1451 bnx2x_free_rx_bds(&bp->fp[j]);
1455 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1459 for_each_eth_queue(bp, j) {
1460 struct bnx2x_fastpath *fp = &bp->fp[j];
1462 bnx2x_free_rx_bds(fp);
1464 if (!fp->disable_tpa)
1465 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1469 void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1471 bnx2x_free_tx_skbs_cnic(bp);
1472 bnx2x_free_rx_skbs_cnic(bp);
1475 void bnx2x_free_skbs(struct bnx2x *bp)
1477 bnx2x_free_tx_skbs(bp);
1478 bnx2x_free_rx_skbs(bp);
1481 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1483 /* load old values */
1484 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1486 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1487 /* leave all but MAX value */
1488 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1490 /* set new MAX value */
1491 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1492 & FUNC_MF_CFG_MAX_BW_MASK;
1494 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1499 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1501 * @bp: driver handle
1502 * @nvecs: number of vectors to be released
1504 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1508 if (nvecs == offset)
1511 /* VFs don't have a default SB */
1513 free_irq(bp->msix_table[offset].vector, bp->dev);
1514 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1515 bp->msix_table[offset].vector);
1519 if (CNIC_SUPPORT(bp)) {
1520 if (nvecs == offset)
1525 for_each_eth_queue(bp, i) {
1526 if (nvecs == offset)
1528 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1529 i, bp->msix_table[offset].vector);
1531 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1535 void bnx2x_free_irq(struct bnx2x *bp)
1537 if (bp->flags & USING_MSIX_FLAG &&
1538 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1539 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1541 /* vfs don't have a default status block */
1545 bnx2x_free_msix_irqs(bp, nvecs);
1547 free_irq(bp->dev->irq, bp->dev);
1551 int bnx2x_enable_msix(struct bnx2x *bp)
1553 int msix_vec = 0, i, rc;
1555 /* VFs don't have a default status block */
1557 bp->msix_table[msix_vec].entry = msix_vec;
1558 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1559 bp->msix_table[0].entry);
1563 /* Cnic requires an msix vector for itself */
1564 if (CNIC_SUPPORT(bp)) {
1565 bp->msix_table[msix_vec].entry = msix_vec;
1566 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1567 msix_vec, bp->msix_table[msix_vec].entry);
1571 /* We need separate vectors for ETH queues only (not FCoE) */
1572 for_each_eth_queue(bp, i) {
1573 bp->msix_table[msix_vec].entry = msix_vec;
1574 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1575 msix_vec, msix_vec, i);
1579 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1582 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
1585 * reconfigure number of tx/rx queues according to available
1588 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1589 /* how many fewer vectors will we have? */
1590 int diff = msix_vec - rc;
1592 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1594 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1597 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1601 * decrease number of queues by number of unallocated entries
1603 bp->num_ethernet_queues -= diff;
1604 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1606 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1608 } else if (rc > 0) {
1609 /* Get by with single vector */
1610 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1612 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1617 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1618 bp->flags |= USING_SINGLE_MSIX_FLAG;
1620 BNX2X_DEV_INFO("set number of queues to 1\n");
1621 bp->num_ethernet_queues = 1;
1622 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1623 } else if (rc < 0) {
1624 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1628 bp->flags |= USING_MSIX_FLAG;
1633 /* fall back to INTx if not enough memory */
1635 bp->flags |= DISABLE_MSI_FLAG;
1640 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1642 int i, rc, offset = 0;
1644 /* no default status block for vf */
1646 rc = request_irq(bp->msix_table[offset++].vector,
1647 bnx2x_msix_sp_int, 0,
1648 bp->dev->name, bp->dev);
1650 BNX2X_ERR("request sp irq failed\n");
1655 if (CNIC_SUPPORT(bp))
1658 for_each_eth_queue(bp, i) {
1659 struct bnx2x_fastpath *fp = &bp->fp[i];
1660 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1663 rc = request_irq(bp->msix_table[offset].vector,
1664 bnx2x_msix_fp_int, 0, fp->name, fp);
1666 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1667 bp->msix_table[offset].vector, rc);
1668 bnx2x_free_msix_irqs(bp, offset);
1675 i = BNX2X_NUM_ETH_QUEUES(bp);
1677 offset = 1 + CNIC_SUPPORT(bp);
1678 netdev_info(bp->dev,
1679 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1680 bp->msix_table[0].vector,
1681 0, bp->msix_table[offset].vector,
1682 i - 1, bp->msix_table[offset + i - 1].vector);
1684 offset = CNIC_SUPPORT(bp);
1685 netdev_info(bp->dev,
1686 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1687 0, bp->msix_table[offset].vector,
1688 i - 1, bp->msix_table[offset + i - 1].vector);
1693 int bnx2x_enable_msi(struct bnx2x *bp)
1697 rc = pci_enable_msi(bp->pdev);
1699 BNX2X_DEV_INFO("MSI is not attainable\n");
1702 bp->flags |= USING_MSI_FLAG;
1707 static int bnx2x_req_irq(struct bnx2x *bp)
1709 unsigned long flags;
1712 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1715 flags = IRQF_SHARED;
1717 if (bp->flags & USING_MSIX_FLAG)
1718 irq = bp->msix_table[0].vector;
1720 irq = bp->pdev->irq;
1722 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1725 static int bnx2x_setup_irqs(struct bnx2x *bp)
1728 if (bp->flags & USING_MSIX_FLAG &&
1729 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1730 rc = bnx2x_req_msix_irqs(bp);
1734 rc = bnx2x_req_irq(bp);
1736 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1739 if (bp->flags & USING_MSI_FLAG) {
1740 bp->dev->irq = bp->pdev->irq;
1741 netdev_info(bp->dev, "using MSI IRQ %d\n",
1744 if (bp->flags & USING_MSIX_FLAG) {
1745 bp->dev->irq = bp->msix_table[0].vector;
1746 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1754 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1758 for_each_rx_queue_cnic(bp, i)
1759 napi_enable(&bnx2x_fp(bp, i, napi));
1762 static void bnx2x_napi_enable(struct bnx2x *bp)
1766 for_each_eth_queue(bp, i)
1767 napi_enable(&bnx2x_fp(bp, i, napi));
1770 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1774 for_each_rx_queue_cnic(bp, i)
1775 napi_disable(&bnx2x_fp(bp, i, napi));
1778 static void bnx2x_napi_disable(struct bnx2x *bp)
1782 for_each_eth_queue(bp, i)
1783 napi_disable(&bnx2x_fp(bp, i, napi));
1786 void bnx2x_netif_start(struct bnx2x *bp)
1788 if (netif_running(bp->dev)) {
1789 bnx2x_napi_enable(bp);
1790 if (CNIC_LOADED(bp))
1791 bnx2x_napi_enable_cnic(bp);
1792 bnx2x_int_enable(bp);
1793 if (bp->state == BNX2X_STATE_OPEN)
1794 netif_tx_wake_all_queues(bp->dev);
1798 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1800 bnx2x_int_disable_sync(bp, disable_hw);
1801 bnx2x_napi_disable(bp);
1802 if (CNIC_LOADED(bp))
1803 bnx2x_napi_disable_cnic(bp);
1806 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1808 struct bnx2x *bp = netdev_priv(dev);
1810 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1811 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1812 u16 ether_type = ntohs(hdr->h_proto);
1814 /* Skip VLAN tag if present */
1815 if (ether_type == ETH_P_8021Q) {
1816 struct vlan_ethhdr *vhdr =
1817 (struct vlan_ethhdr *)skb->data;
1819 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1822 /* If ethertype is FCoE or FIP - use FCoE ring */
1823 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1824 return bnx2x_fcoe_tx(bp, txq_index);
1827 /* select a non-FCoE queue */
1828 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1832 void bnx2x_set_num_queues(struct bnx2x *bp)
1835 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1837 /* override in STORAGE SD modes */
1838 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1839 bp->num_ethernet_queues = 1;
1841 /* Add special queues */
1842 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1843 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1845 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1849 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1851 * @bp: Driver handle
1853 * We currently support at most 16 Tx queues for each CoS, thus we will
1854 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1857 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1858 * index after all ETH L2 indices.
1860 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1861 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1862 * 16..31, ...) with indices that are not coupled with any real Tx queue.
1864 * The proper configuration of skb->queue_mapping is handled by
1865 * bnx2x_select_queue() and __skb_tx_hash().
1867 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1868 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
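 * For example (assumed numbers): with 4 ETH queues and 3 CoS levels,
 * Tx indices 0..3, 16..19 and 32..35 map to real queues, while the
 * remaining indices of each group of 16 stay as holes.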
1870 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1874 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1875 rx = BNX2X_NUM_ETH_QUEUES(bp);
1877 /* account for fcoe queue */
1878 if (include_cnic && !NO_FCOE(bp)) {
1883 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1885 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1888 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1890 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1894 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1900 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1904 for_each_queue(bp, i) {
1905 struct bnx2x_fastpath *fp = &bp->fp[i];
1908 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1911 * Although there are no IP frames expected to arrive to
1912 * this ring we still want to add an
1913 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1916 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1919 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1920 IP_HEADER_ALIGNMENT_PADDING +
1923 BNX2X_FW_RX_ALIGN_END;
1924 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
1925 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1926 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1928 fp->rx_frag_size = 0;
1932 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1935 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1937 /* Prepare the initial contents of the indirection table if RSS is
1940 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1941 bp->rss_conf_obj.ind_table[i] =
1943 ethtool_rxfh_indir_default(i, num_eth_queues);
1946 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1947 * per-port, so if explicit configuration is needed, do it only
1950 * For 57712 and newer on the other hand it's a per-function
1953 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1956 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1959 struct bnx2x_config_rss_params params = {NULL};
1961 /* Although RSS is meaningless when there is a single HW queue we
1962 * still need it enabled in order to have HW Rx hash generated.
1964 * if (!is_eth_multi(bp))
1965 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1968 params.rss_obj = rss_obj;
1970 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
1972 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
1974 /* RSS configuration */
1975 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
1976 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
1977 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
1978 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
1979 if (rss_obj->udp_rss_v4)
1980 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
1981 if (rss_obj->udp_rss_v6)
1982 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
1985 params.rss_result_mask = MULTI_MASK;
1987 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
1991 prandom_bytes(params.rss_key, sizeof(params.rss_key));
1992 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
1995 return bnx2x_config_rss(bp, ¶ms);
1998 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2000 struct bnx2x_func_state_params func_params = {NULL};
2002 /* Prepare parameters for function state transitions */
2003 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2005 func_params.f_obj = &bp->func_obj;
2006 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2008 func_params.params.hw_init.load_phase = load_code;
2010 return bnx2x_func_state_change(bp, &func_params);
2014 * Cleans the objects that have internal lists without sending
2015 * ramrods. Should be run when interrupts are disabled.
2017 static void bnx2x_squeeze_objects(struct bnx2x *bp)
2020 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2021 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2022 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2024 /***************** Cleanup MACs' object first *************************/
2026 /* Wait for completion of requested commands */
2027 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2028 /* Perform a dry cleanup */
2029 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2031 /* Clean ETH primary MAC */
2032 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2033 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2036 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2038 /* Cleanup UC list */
2040 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2041 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2044 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2046 /***************** Now clean mcast object *****************************/
2047 rparam.mcast_obj = &bp->mcast_obj;
2048 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2050 /* Add a DEL command... */
2051 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2053 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2056 /* ...and wait until all pending commands are cleared */
2057 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2060 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2065 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2069 #ifndef BNX2X_STOP_ON_ERROR
2070 #define LOAD_ERROR_EXIT(bp, label) \
2072 (bp)->state = BNX2X_STATE_ERROR; \
2076 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2078 bp->cnic_loaded = false; \
2081 #else /*BNX2X_STOP_ON_ERROR*/
2082 #define LOAD_ERROR_EXIT(bp, label) \
2084 (bp)->state = BNX2X_STATE_ERROR; \
2088 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2090 bp->cnic_loaded = false; \
2094 #endif /*BNX2X_STOP_ON_ERROR*/
2096 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2098 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2099 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2103 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2105 int num_groups, vf_headroom = 0;
2106 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2108 /* number of queues for statistics is number of eth queues + FCoE */
2109 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2111 /* Total number of FW statistics requests =
2112 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2113 * and fcoe l2 queue) stats + num of queues (which includes another 1
2114 * for fcoe l2 queue if applicable)
2116 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2118 /* vf stats appear in the request list, but their data is allocated by
2119 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2120 * it is used to determine where to place the vf stats queries in the
2124 vf_headroom = bnx2x_vf_headroom(bp);
2126 /* Request is built from stats_query_header and an array of
2127 * stats_query_cmd_group each of which contains
2128 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2129 * configured in the stats_query_header.
2132 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2133 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2136 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2137 bp->fw_stats_num, vf_headroom, num_groups);
2138 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2139 num_groups * sizeof(struct stats_query_cmd_group);
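/* The num_groups computation above is a ceiling division, i.e.
 * num_groups = DIV_ROUND_UP(fw_stats_num + vf_headroom,
 * STATS_QUERY_CMD_COUNT), so a partially filled last group still gets
 * its own stats_query_cmd_group in the request.
 */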
2141 /* Data for statistics requests + stats_counter
2142 * stats_counter holds per-STORM counters that are incremented
2143 * when STORM has finished with the current request.
2144 * memory for FCoE offloaded statistics are counted anyway,
2145 * even if they will not be sent.
2146 * VF stats are not accounted for here as the data of VF stats is stored
2147 * in memory allocated by the VF, not here.
2149 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2150 sizeof(struct per_pf_stats) +
2151 sizeof(struct fcoe_statistics_params) +
2152 sizeof(struct per_queue_stats) * num_queue_stats +
2153 sizeof(struct stats_counter);
2155 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2156 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2159 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2160 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2161 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2162 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2163 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2164 bp->fw_stats_req_sz;
2166 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x",
2167 U64_HI(bp->fw_stats_req_mapping),
2168 U64_LO(bp->fw_stats_req_mapping));
2169 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x",
2170 U64_HI(bp->fw_stats_data_mapping),
2171 U64_LO(bp->fw_stats_data_mapping));
2175 bnx2x_free_fw_stats_mem(bp);
2176 BNX2X_ERR("Can't allocate FW stats memory\n");
2180 /* send load request to mcp and analyze response */
2181 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2185 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2186 DRV_MSG_SEQ_NUMBER_MASK);
2187 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2189 /* Get current FW pulse sequence */
2190 bp->fw_drv_pulse_wr_seq =
2191 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2192 DRV_PULSE_SEQ_MASK);
2193 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2196 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2197 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2199 /* if mcp fails to respond we must abort */
2200 if (!(*load_code)) {
2201 BNX2X_ERR("MCP response failure, aborting\n");
2205 /* If mcp refused (e.g. other port is in diagnostic mode) we
2208 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2209 BNX2X_ERR("MCP refused load request, aborting\n");
2215 /* check whether another PF has already loaded FW to chip. In
2216 * virtualized environments a pf from another VM may have already
2217 * initialized the device including loading FW
2219 int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2221 /* is another pf loaded on this engine? */
2222 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2223 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2224 /* build my FW version dword */
2225 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2226 (BCM_5710_FW_MINOR_VERSION << 8) +
2227 (BCM_5710_FW_REVISION_VERSION << 16) +
2228 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2230 /* read loaded FW from chip */
2231 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2233 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2236 /* abort nic load if version mismatch */
2237 if (my_fw != loaded_fw) {
2238 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
2246 /* returns the "mcp load_code" according to global load_count array */
2247 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2249 int path = BP_PATH(bp);
2251 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2252 path, load_count[path][0], load_count[path][1],
2253 load_count[path][2]);
2254 load_count[path][0]++;
2255 load_count[path][1 + port]++;
2256 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2257 path, load_count[path][0], load_count[path][1],
2258 load_count[path][2]);
2259 if (load_count[path][0] == 1)
2260 return FW_MSG_CODE_DRV_LOAD_COMMON;
2261 else if (load_count[path][1 + port] == 1)
2262 return FW_MSG_CODE_DRV_LOAD_PORT;
2264 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2267 /* mark PMF if applicable */
2268 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2270 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2271 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2272 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2274 /* We need the barrier to ensure the ordering between the
2275 * writing to bp->port.pmf here and reading it from the
2276 * bnx2x_periodic_task().
2283 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2286 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2288 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2289 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2290 (bp->common.shmem2_base)) {
2291 if (SHMEM2_HAS(bp, dcc_support))
2292 SHMEM2_WR(bp, dcc_support,
2293 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2294 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2295 if (SHMEM2_HAS(bp, afex_driver_support))
2296 SHMEM2_WR(bp, afex_driver_support,
2297 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2300 /* Set AFEX default VLAN tag to an invalid value */
2301 bp->afex_def_vlan_tag = -1;
2305 * bnx2x_bz_fp - zero content of the fastpath structure.
2307 * @bp: driver handle
2308 * @index: fastpath index to be zeroed
2310 * Makes sure the contents of the bp->fp[index].napi are kept
2313 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2315 struct bnx2x_fastpath *fp = &bp->fp[index];
2316 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
2319 struct napi_struct orig_napi = fp->napi;
2320 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2321 /* bzero bnx2x_fastpath contents */
2322 if (bp->stats_init) {
2323 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
2324 memset(fp, 0, sizeof(*fp));
2326 /* Keep Queue statistics */
2327 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
2328 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
2330 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
2332 if (tmp_eth_q_stats)
2333 memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
2334 sizeof(struct bnx2x_eth_q_stats));
2336 tmp_eth_q_stats_old =
2337 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
2339 if (tmp_eth_q_stats_old)
2340 memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
2341 sizeof(struct bnx2x_eth_q_stats_old));
2343 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
2344 memset(fp, 0, sizeof(*fp));
2346 if (tmp_eth_q_stats) {
2347 memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
2348 sizeof(struct bnx2x_eth_q_stats));
2349 kfree(tmp_eth_q_stats);
2352 if (tmp_eth_q_stats_old) {
2353 memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
2354 sizeof(struct bnx2x_eth_q_stats_old));
2355 kfree(tmp_eth_q_stats_old);
2360 /* Restore the NAPI object as it has been already initialized */
2361 fp->napi = orig_napi;
2362 fp->tpa_info = orig_tpa_info;
2366 fp->max_cos = bp->max_cos;
2368 /* Special queues support only one CoS */
2371 /* Init txdata pointers */
2373 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2375 for_each_cos_in_tx_queue(fp, cos)
2376 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2377 BNX2X_NUM_ETH_QUEUES(bp) + index];
2380 * set the tpa flag for each queue. The tpa flag determines the queue
2381 * minimal size so it must be set prior to queue memory allocation
2383 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2384 (bp->flags & GRO_ENABLE_FLAG &&
2385 bnx2x_mtu_allows_gro(bp->dev->mtu)));
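/* Put differently: TPA stays enabled when LRO is requested, or when GRO is
 * requested and the current MTU still allows GRO aggregation; otherwise the
 * queue runs with TPA disabled.
 */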
2386 if (bp->flags & TPA_ENABLE_FLAG)
2387 fp->mode = TPA_MODE_LRO;
2388 else if (bp->flags & GRO_ENABLE_FLAG)
2389 fp->mode = TPA_MODE_GRO;
2391 /* We don't want TPA on an FCoE L2 ring */
2393 fp->disable_tpa = 1;
2396 int bnx2x_load_cnic(struct bnx2x *bp)
2398 int i, rc, port = BP_PORT(bp);
2400 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2402 mutex_init(&bp->cnic_mutex);
2405 rc = bnx2x_alloc_mem_cnic(bp);
2407 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2408 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2412 rc = bnx2x_alloc_fp_mem_cnic(bp);
2414 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2415 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2418 /* Update the number of queues with the cnic queues */
2419 rc = bnx2x_set_real_num_queues(bp, 1);
2421 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2422 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2425 /* Add all CNIC NAPI objects */
2426 bnx2x_add_all_napi_cnic(bp);
2427 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2428 bnx2x_napi_enable_cnic(bp);
2430 rc = bnx2x_init_hw_func_cnic(bp);
2432 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2434 bnx2x_nic_init_cnic(bp);
2437 /* Enable Timer scan */
2438 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2440 /* setup cnic queues */
2441 for_each_cnic_queue(bp, i) {
2442 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2444 BNX2X_ERR("Queue setup failed\n");
2445 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2450 /* Initialize Rx filter. */
2451 netif_addr_lock_bh(bp->dev);
2452 bnx2x_set_rx_mode(bp->dev);
2453 netif_addr_unlock_bh(bp->dev);
2455 /* re-read iscsi info */
2456 bnx2x_get_iscsi_info(bp);
2457 bnx2x_setup_cnic_irq_info(bp);
2458 bnx2x_setup_cnic_info(bp);
2459 bp->cnic_loaded = true;
2460 if (bp->state == BNX2X_STATE_OPEN)
2461 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2464 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2468 #ifndef BNX2X_STOP_ON_ERROR
2470 /* Disable Timer scan */
2471 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2474 bnx2x_napi_disable_cnic(bp);
2475 /* Update the number of queues without the cnic queues */
2476 rc = bnx2x_set_real_num_queues(bp, 0);
2478 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2480 BNX2X_ERR("CNIC-related load failed\n");
2481 bnx2x_free_fp_mem_cnic(bp);
2482 bnx2x_free_mem_cnic(bp);
2484 #endif /* ! BNX2X_STOP_ON_ERROR */
2488 /* must be called with rtnl_lock */
2489 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2491 int port = BP_PORT(bp);
2492 int i, rc = 0, load_code = 0;
2494 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2496 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2498 #ifdef BNX2X_STOP_ON_ERROR
2499 if (unlikely(bp->panic)) {
2500 BNX2X_ERR("Can't load NIC when there is panic\n");
2505 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2507 /* Set the initial link reported state to link down */
2508 bnx2x_acquire_phy_lock(bp);
2509 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2510 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2511 &bp->last_reported_link.link_report_flags);
2512 bnx2x_release_phy_lock(bp);
2515 /* must be called before memory allocation and HW init */
2516 bnx2x_ilt_set_info(bp);
2519 * Zero fastpath structures preserving invariants like napi, which are
2520 * allocated only once, fp index, max_cos, bp pointer.
2521 * Also set fp->disable_tpa and txdata_ptr.
2523 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2524 for_each_queue(bp, i)
2526 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2527 bp->num_cnic_queues) *
2528 sizeof(struct bnx2x_fp_txdata));
2530 bp->fcoe_init = false;
2532 /* Set the receive queues buffer size */
2533 bnx2x_set_rx_buf_size(bp);
2536 rc = bnx2x_alloc_mem(bp);
2538 BNX2X_ERR("Unable to allocate bp memory\n");
2543 /* Allocate memory for FW statistics */
2544 if (bnx2x_alloc_fw_stats_mem(bp))
2545 LOAD_ERROR_EXIT(bp, load_error0);
2547 /* needs to be done after alloc mem, since it's self adjusting to the amount
2548 * of memory available for RSS queues
2550 rc = bnx2x_alloc_fp_mem(bp);
2552 BNX2X_ERR("Unable to allocate memory for fps\n");
2553 LOAD_ERROR_EXIT(bp, load_error0);
2556 /* request pf to initialize status blocks */
2558 rc = bnx2x_vfpf_init(bp);
2560 LOAD_ERROR_EXIT(bp, load_error0);
2563 /* As long as bnx2x_alloc_mem() may possibly update
2564 * bp->num_queues, bnx2x_set_real_num_queues() should always
2565 * come after it. At this stage cnic queues are not counted.
2567 rc = bnx2x_set_real_num_queues(bp, 0);
2569 BNX2X_ERR("Unable to set real_num_queues\n");
2570 LOAD_ERROR_EXIT(bp, load_error0);
2573 /* configure multi cos mappings in kernel.
2574 * this configuration may be overridden by a multi class queue discipline
2575 * or by a dcbx negotiation result.
2577 bnx2x_setup_tc(bp->dev, bp->max_cos);
2579 /* Add all NAPI objects */
2580 bnx2x_add_all_napi(bp);
2581 DP(NETIF_MSG_IFUP, "napi added\n");
2582 bnx2x_napi_enable(bp);
2585 /* set pf load just before approaching the MCP */
2586 bnx2x_set_pf_load(bp);
2588 /* if mcp exists send load request and analyze response */
2589 if (!BP_NOMCP(bp)) {
2590 /* attempt to load pf */
2591 rc = bnx2x_nic_load_request(bp, &load_code);
2593 LOAD_ERROR_EXIT(bp, load_error1);
2595 /* what did mcp say? */
2596 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2598 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2599 LOAD_ERROR_EXIT(bp, load_error2);
2602 load_code = bnx2x_nic_load_no_mcp(bp, port);
2605 /* mark pmf if applicable */
2606 bnx2x_nic_load_pmf(bp, load_code);
2608 /* Init Function state controlling object */
2609 bnx2x__init_func_obj(bp);
2612 rc = bnx2x_init_hw(bp, load_code);
2614 BNX2X_ERR("HW init failed, aborting\n");
2615 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2616 LOAD_ERROR_EXIT(bp, load_error2);
2620 /* Connect to IRQs */
2621 rc = bnx2x_setup_irqs(bp);
2623 BNX2X_ERR("setup irqs failed\n");
2625 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2626 LOAD_ERROR_EXIT(bp, load_error2);
2629 /* Setup NIC internals and enable interrupts */
2630 bnx2x_nic_init(bp, load_code);
2632 /* Init per-function objects */
2634 bnx2x_init_bp_objs(bp);
2635 bnx2x_iov_nic_init(bp);
2637 /* Set AFEX default VLAN tag to an invalid value */
2638 bp->afex_def_vlan_tag = -1;
2639 bnx2x_nic_load_afex_dcc(bp, load_code);
2640 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2641 rc = bnx2x_func_start(bp);
2643 BNX2X_ERR("Function start failed!\n");
2644 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2646 LOAD_ERROR_EXIT(bp, load_error3);
2649 /* Send LOAD_DONE command to MCP */
2650 if (!BP_NOMCP(bp)) {
2651 load_code = bnx2x_fw_command(bp,
2652 DRV_MSG_CODE_LOAD_DONE, 0);
2654 BNX2X_ERR("MCP response failure, aborting\n");
2656 LOAD_ERROR_EXIT(bp, load_error3);
2660 /* setup the leading queue */
2661 rc = bnx2x_setup_leading(bp);
2663 BNX2X_ERR("Setup leading failed!\n");
2664 LOAD_ERROR_EXIT(bp, load_error3);
2667 /* set up the rest of the queues */
2668 for_each_nondefault_eth_queue(bp, i) {
2669 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2671 BNX2X_ERR("Queue setup failed\n");
2672 LOAD_ERROR_EXIT(bp, load_error3);
2677 rc = bnx2x_init_rss_pf(bp);
2679 BNX2X_ERR("PF RSS init failed\n");
2680 LOAD_ERROR_EXIT(bp, load_error3);
2684 for_each_eth_queue(bp, i) {
2685 rc = bnx2x_vfpf_setup_q(bp, i);
2687 BNX2X_ERR("Queue setup failed\n");
2688 LOAD_ERROR_EXIT(bp, load_error3);
2693 /* Now that clients are configured we are ready to work */
2694 bp->state = BNX2X_STATE_OPEN;
2696 /* Configure a ucast MAC */
2698 rc = bnx2x_set_eth_mac(bp, true);
2700 rc = bnx2x_vfpf_set_mac(bp);
2702 BNX2X_ERR("Setting Ethernet MAC failed\n");
2703 LOAD_ERROR_EXIT(bp, load_error3);
2706 if (IS_PF(bp) && bp->pending_max) {
2707 bnx2x_update_max_mf_config(bp, bp->pending_max);
2708 bp->pending_max = 0;
2712 rc = bnx2x_initial_phy_init(bp, load_mode);
2714 LOAD_ERROR_EXIT(bp, load_error3);
2716 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2718 /* Start fast path */
2720 /* Initialize Rx filter. */
2721 netif_addr_lock_bh(bp->dev);
2722 bnx2x_set_rx_mode(bp->dev);
2723 netif_addr_unlock_bh(bp->dev);
2726 switch (load_mode) {
2728 /* Tx queues should only be re-enabled */
2729 netif_tx_wake_all_queues(bp->dev);
2733 netif_tx_start_all_queues(bp->dev);
2734 smp_mb__after_clear_bit();
2738 case LOAD_LOOPBACK_EXT:
2739 bp->state = BNX2X_STATE_DIAG;
2747 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2749 bnx2x__link_status_update(bp);
2751 /* start the timer */
2752 mod_timer(&bp->timer, jiffies + bp->current_interval);
2754 if (CNIC_ENABLED(bp))
2755 bnx2x_load_cnic(bp);
2757 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2758 /* mark driver is loaded in shmem2 */
2760 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2761 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2762 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2763 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2766 /* Wait for all pending SP commands to complete */
2767 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2768 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2769 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2773 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2774 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2775 bnx2x_dcbx_init(bp, false);
2777 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2781 #ifndef BNX2X_STOP_ON_ERROR
2784 bnx2x_int_disable_sync(bp, 1);
2786 /* Clean queueable objects */
2787 bnx2x_squeeze_objects(bp);
2790 /* Free SKBs, SGEs, TPA pool and driver internals */
2791 bnx2x_free_skbs(bp);
2792 for_each_rx_queue(bp, i)
2793 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2798 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2799 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2800 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2805 bnx2x_napi_disable(bp);
2807 /* clear pf_load status, as it was already set */
2809 bnx2x_clear_pf_load(bp);
2811 bnx2x_free_fp_mem(bp);
2812 bnx2x_free_fw_stats_mem(bp);
2816 #endif /* ! BNX2X_STOP_ON_ERROR */
2819 static int bnx2x_drain_tx_queues(struct bnx2x *bp)
2823 /* Wait until tx fastpath tasks complete */
2824 for_each_tx_queue(bp, i) {
2825 struct bnx2x_fastpath *fp = &bp->fp[i];
2827 for_each_cos_in_tx_queue(fp, cos)
2828 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2835 /* must be called with rtnl_lock */
2836 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2839 bool global = false;
2841 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2843 /* mark driver is unloaded in shmem2 */
2844 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2846 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2847 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2848 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2852 (bp->state == BNX2X_STATE_CLOSED ||
2853 bp->state == BNX2X_STATE_ERROR)) {
2854 /* We can get here if the driver has been unloaded
2855 * during parity error recovery and is either waiting for a
2856 * leader to complete or for other functions to unload and
2857 * then ifdown has been issued. In this case we want to
2858 * unload and let other functions complete a recovery
2861 bp->recovery_state = BNX2X_RECOVERY_DONE;
2863 bnx2x_release_leader_lock(bp);
2866 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2867 BNX2X_ERR("Can't unload in closed or error state\n");
2872 * It's important to set the bp->state to a value different from
2873 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2874 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2876 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2879 if (CNIC_LOADED(bp))
2880 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2883 bnx2x_tx_disable(bp);
2884 netdev_reset_tc(bp->dev);
2886 bp->rx_mode = BNX2X_RX_MODE_NONE;
2888 del_timer_sync(&bp->timer);
2891 /* Set ALWAYS_ALIVE bit in shmem */
2892 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2893 bnx2x_drv_pulse(bp);
2894 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2895 bnx2x_save_statistics(bp);
2898 /* wait till consumers catch up with producers in all queues */
2899 bnx2x_drain_tx_queues(bp);
2901 /* if VF indicate to PF this function is going down (PF will delete sp
2902 * elements and clear initializations
2905 bnx2x_vfpf_close_vf(bp);
2906 else if (unload_mode != UNLOAD_RECOVERY)
2907 /* if this is a normal/close unload need to clean up chip*/
2908 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2910 /* Send the UNLOAD_REQUEST to the MCP */
2911 bnx2x_send_unload_req(bp, unload_mode);
2914 * Prevent transactions to host from the functions on the
2915 * engine that doesn't reset global blocks in case of global
2916 * attention once global blocks are reset and gates are opened
2917 * (the engine which leader will perform the recovery
2920 if (!CHIP_IS_E1x(bp))
2921 bnx2x_pf_disable(bp);
2923 /* Disable HW interrupts, NAPI */
2924 bnx2x_netif_stop(bp, 1);
2925 /* Delete all NAPI objects */
2926 bnx2x_del_all_napi(bp);
2927 if (CNIC_LOADED(bp))
2928 bnx2x_del_all_napi_cnic(bp);
2932 /* Report UNLOAD_DONE to MCP */
2933 bnx2x_send_unload_done(bp, false);
2937 * At this stage no more interrupts will arrive so we may safely clean
2938 * the queueable objects here in case they failed to get cleaned so far.
2941 bnx2x_squeeze_objects(bp);
2943 /* There should be no more pending SP commands at this stage */
2948 /* Free SKBs, SGEs, TPA pool and driver internals */
2949 bnx2x_free_skbs(bp);
2950 if (CNIC_LOADED(bp))
2951 bnx2x_free_skbs_cnic(bp);
2952 for_each_rx_queue(bp, i)
2953 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2955 bnx2x_free_fp_mem(bp);
2956 if (CNIC_LOADED(bp))
2957 bnx2x_free_fp_mem_cnic(bp);
2961 if (CNIC_LOADED(bp))
2962 bnx2x_free_mem_cnic(bp);
2964 bp->state = BNX2X_STATE_CLOSED;
2965 bp->cnic_loaded = false;
2967 /* Check if there are pending parity attentions. If there are - set
2968 * RECOVERY_IN_PROGRESS.
2970 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
2971 bnx2x_set_reset_in_progress(bp);
2973 /* Set RESET_IS_GLOBAL if needed */
2975 bnx2x_set_reset_global(bp);
2979 /* The last driver must disable a "close the gate" if there is no
2980 * parity attention or "process kill" pending.
2983 !bnx2x_clear_pf_load(bp) &&
2984 bnx2x_reset_is_done(bp, BP_PATH(bp)))
2985 bnx2x_disable_close_the_gate(bp);
2987 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2992 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2996 /* If there is no power capability, silently succeed */
2998 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3002 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3006 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3007 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3008 PCI_PM_CTRL_PME_STATUS));
3010 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3011 /* delay required during transition out of D3hot */
3016 /* If there are other clients above, don't
3017 shut down the power */
3018 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3020 /* Don't shut down the power for emulation and FPGA */
3021 if (CHIP_REV_IS_SLOW(bp))
3024 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3028 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3030 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3033 /* No more memory access after this point until
3034 * device is brought back to D0.
3039 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3046 * net_device service functions
3048 int bnx2x_poll(struct napi_struct *napi, int budget)
3052 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3054 struct bnx2x *bp = fp->bp;
3057 #ifdef BNX2X_STOP_ON_ERROR
3058 if (unlikely(bp->panic)) {
3059 napi_complete(napi);
3064 for_each_cos_in_tx_queue(fp, cos)
3065 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3066 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3069 if (bnx2x_has_rx_work(fp)) {
3070 work_done += bnx2x_rx_int(fp, budget - work_done);
3072 /* must not complete if we consumed full budget */
3073 if (work_done >= budget)
3077 /* Fall out from the NAPI loop if needed */
3078 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3080 /* No need to update SB for FCoE L2 ring as long as
3081 * it's connected to the default SB and the SB
3082 * has been updated when NAPI was scheduled.
3084 if (IS_FCOE_FP(fp)) {
3085 napi_complete(napi);
3088 bnx2x_update_fpsb_idx(fp);
3089 /* bnx2x_has_rx_work() reads the status block,
3090 * thus we need to ensure that status block indices
3091 * have been actually read (bnx2x_update_fpsb_idx)
3092 * prior to this check (bnx2x_has_rx_work) so that
3093 * we won't write the "newer" value of the status block
3094 * to IGU (if there was a DMA right after
3095 * bnx2x_has_rx_work and if there is no rmb, the memory
3096 * reading (bnx2x_update_fpsb_idx) may be postponed
3097 * to right before bnx2x_ack_sb). In this case there
3098 * will never be another interrupt until there is
3099 * another update of the status block, while there
3100 * is still unhandled work.
3104 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3105 napi_complete(napi);
3106 /* Re-enable interrupts */
3107 DP(NETIF_MSG_RX_STATUS,
3108 "Update index to %d\n", fp->fp_hc_idx);
3109 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3110 le16_to_cpu(fp->fp_hc_idx),
3120 /* we split the first BD into headers and data BDs
3121 * to ease the pain of our fellow microcode engineers;
3122 * we use one mapping for both BDs
3124 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
3125 struct bnx2x_fp_txdata *txdata,
3126 struct sw_tx_bd *tx_buf,
3127 struct eth_tx_start_bd **tx_bd, u16 hlen,
3128 u16 bd_prod, int nbd)
3130 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3131 struct eth_tx_bd *d_tx_bd;
3133 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3135 /* first fix first BD */
3136 h_tx_bd->nbd = cpu_to_le16(nbd);
3137 h_tx_bd->nbytes = cpu_to_le16(hlen);
3139 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
3140 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
3142 /* now get a new data BD
3143 * (after the pbd) and fill it */
3144 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3145 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3147 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3148 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3150 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3151 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3152 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3154 /* this marks the BD as one that has no individual mapping */
3155 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3157 DP(NETIF_MSG_TX_QUEUED,
3158 "TSO split data size is %d (%x:%x)\n",
3159 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3162 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3167 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3170 csum = (u16) ~csum_fold(csum_sub(csum,
3171 csum_partial(t_header - fix, fix, 0)));
3174 csum = (u16) ~csum_fold(csum_add(csum,
3175 csum_partial(t_header, -fix, 0)));
3177 return swab16(csum);
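/* A rough reading of bnx2x_csum_fix() above: a positive fix means the HW
 * started summing "fix" bytes before the transport header, so the partial
 * sum of those extra bytes is subtracted; a negative fix means the missing
 * -fix bytes at the transport header are added back. The result is then
 * folded, inverted and byte-swapped for the parsing BD.
 */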
3180 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3184 if (skb->ip_summed != CHECKSUM_PARTIAL)
3188 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
3190 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3191 rc |= XMIT_CSUM_TCP;
3195 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3196 rc |= XMIT_CSUM_TCP;
3200 if (skb_is_gso_v6(skb))
3201 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
3202 else if (skb_is_gso(skb))
3203 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
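/* Illustrative result of the classification above: a TSO IPv4/TCP skb with
 * CHECKSUM_PARTIAL would end up with XMIT_CSUM_V4 | XMIT_CSUM_TCP |
 * XMIT_GSO_V4 set in the returned flags.
 */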
3208 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3209 /* check if packet requires linearization (packet is too fragmented).
3210 No need to check fragmentation if page size > 8K (there will be no
3211 violation of FW restrictions) */
3212 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3217 int first_bd_sz = 0;
3219 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3220 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3222 if (xmit_type & XMIT_GSO) {
3223 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3224 /* Check if LSO packet needs to be copied:
3225 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3226 int wnd_size = MAX_FETCH_BD - 3;
3227 /* Number of windows to check */
3228 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3233 /* Headers length */
3234 hlen = (int)(skb_transport_header(skb) - skb->data) +
3237 /* Amount of data (w/o headers) on linear part of SKB*/
3238 first_bd_sz = skb_headlen(skb) - hlen;
3240 wnd_sum = first_bd_sz;
3242 /* Calculate the first sum - it's special */
3243 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3245 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3247 /* If there was data on linear skb data - check it */
3248 if (first_bd_sz > 0) {
3249 if (unlikely(wnd_sum < lso_mss)) {
3254 wnd_sum -= first_bd_sz;
3257 /* Others are easier: run through the frag list and
3258 check all windows */
3259 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3261 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3263 if (unlikely(wnd_sum < lso_mss)) {
3268 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3271 /* in the non-LSO case a too fragmented packet should always
3278 if (unlikely(to_copy))
3279 DP(NETIF_MSG_TX_QUEUED,
3280 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3281 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3282 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
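/* The check above is essentially a sliding window over the frag list: every
 * run of (MAX_FETCH_BD - 3) consecutive BDs must carry at least gso_size
 * (lso_mss) bytes of payload, otherwise the FW restriction would be hit and
 * the skb has to be linearized before transmission.
 */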
3288 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3291 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3292 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3293 ETH_TX_PARSE_BD_E2_LSO_MSS;
3294 if ((xmit_type & XMIT_GSO_V6) &&
3295 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
3296 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3300 * bnx2x_set_pbd_gso - update PBD in GSO case.
3304 * @xmit_type: xmit flags
3306 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
3307 struct eth_tx_parse_bd_e1x *pbd,
3310 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3311 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
3312 pbd->tcp_flags = pbd_tcp_flags(skb);
3314 if (xmit_type & XMIT_GSO_V4) {
3315 pbd->ip_id = swab16(ip_hdr(skb)->id);
3316 pbd->tcp_pseudo_csum =
3317 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3319 0, IPPROTO_TCP, 0));
3322 pbd->tcp_pseudo_csum =
3323 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3324 &ipv6_hdr(skb)->daddr,
3325 0, IPPROTO_TCP, 0));
3327 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
3331 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3333 * @bp: driver handle
3335 * @parsing_data: data to be updated
3336 * @xmit_type: xmit flags
3340 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3341 u32 *parsing_data, u32 xmit_type)
3344 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3345 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
3346 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
3348 if (xmit_type & XMIT_CSUM_TCP) {
3349 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3350 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3351 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3353 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3355 /* We support checksum offload for TCP and UDP only.
3356 * No need to pass the UDP header length - it's a constant.
3358 return skb_transport_header(skb) +
3359 sizeof(struct udphdr) - skb->data;
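/* For scale, the value returned above is the offset from skb->data to the
 * end of the L4 header: a plain Ethernet + IPv4 + TCP frame without options
 * would give 14 + 20 + 20 = 54 bytes, and the UDP case 14 + 20 + 8 = 42.
 */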
3362 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3363 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
3365 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3367 if (xmit_type & XMIT_CSUM_V4)
3368 tx_start_bd->bd_flags.as_bitfield |=
3369 ETH_TX_BD_FLAGS_IP_CSUM;
3371 tx_start_bd->bd_flags.as_bitfield |=
3372 ETH_TX_BD_FLAGS_IPV6;
3374 if (!(xmit_type & XMIT_CSUM_TCP))
3375 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3379 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3381 * @bp: driver handle
3383 * @pbd: parse BD to be updated
3384 * @xmit_type: xmit flags
3386 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3387 struct eth_tx_parse_bd_e1x *pbd,
3390 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3392 /* for now NS flag is not used in Linux */
3394 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3395 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3397 pbd->ip_hlen_w = (skb_transport_header(skb) -
3398 skb_network_header(skb)) >> 1;
3400 hlen += pbd->ip_hlen_w;
3402 /* We support checksum offload for TCP and UDP only */
3403 if (xmit_type & XMIT_CSUM_TCP)
3404 hlen += tcp_hdrlen(skb) / 2;
3406 hlen += sizeof(struct udphdr) / 2;
3408 pbd->total_hlen_w = cpu_to_le16(hlen);
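/* Worked example of the word arithmetic above, assuming a plain Ethernet +
 * IPv4 + TCP header with no options: hlen starts at 14 / 2 = 7 words,
 * ip_hlen_w adds 20 / 2 = 10 and the TCP header another 10, so
 * total_hlen_w would be 27 words.
 */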
3411 if (xmit_type & XMIT_CSUM_TCP) {
3412 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
3415 s8 fix = SKB_CS_OFF(skb); /* signed! */
3417 DP(NETIF_MSG_TX_QUEUED,
3418 "hlen %d fix %d csum before fix %x\n",
3419 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3421 /* HW bug: fixup the CSUM */
3422 pbd->tcp_pseudo_csum =
3423 bnx2x_csum_fix(skb_transport_header(skb),
3426 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3427 pbd->tcp_pseudo_csum);
3433 /* called with netif_tx_lock
3434 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3435 * netif_wake_queue()
3437 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3439 struct bnx2x *bp = netdev_priv(dev);
3441 struct netdev_queue *txq;
3442 struct bnx2x_fp_txdata *txdata;
3443 struct sw_tx_bd *tx_buf;
3444 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3445 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3446 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3447 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3448 u32 pbd_e2_parsing_data = 0;
3449 u16 pkt_prod, bd_prod;
3452 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3455 __le16 pkt_size = 0;
3457 u8 mac_type = UNICAST_ADDRESS;
3459 #ifdef BNX2X_STOP_ON_ERROR
3460 if (unlikely(bp->panic))
3461 return NETDEV_TX_BUSY;
3464 txq_index = skb_get_queue_mapping(skb);
3465 txq = netdev_get_tx_queue(dev, txq_index);
3467 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3469 txdata = &bp->bnx2x_txq[txq_index];
3471 /* enable this debug print to view the transmission queue being used
3472 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3473 txq_index, fp_index, txdata_index); */
3475 /* enable this debug print to view the transmission details
3476 DP(NETIF_MSG_TX_QUEUED,
3477 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3478 txdata->cid, fp_index, txdata_index, txdata, fp); */
3480 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3481 skb_shinfo(skb)->nr_frags +
3483 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3484 /* Handle special storage cases separately */
3485 if (txdata->tx_ring_size == 0) {
3486 struct bnx2x_eth_q_stats *q_stats =
3487 bnx2x_fp_qstats(bp, txdata->parent_fp);
3488 q_stats->driver_filtered_tx_pkt++;
3490 return NETDEV_TX_OK;
3492 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3493 netif_tx_stop_queue(txq);
3494 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3496 return NETDEV_TX_BUSY;
3499 DP(NETIF_MSG_TX_QUEUED,
3500 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
3501 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3502 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
3504 eth = (struct ethhdr *)skb->data;
3506 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3507 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3508 if (is_broadcast_ether_addr(eth->h_dest))
3509 mac_type = BROADCAST_ADDRESS;
3511 mac_type = MULTICAST_ADDRESS;
3514 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3515 /* First, check if we need to linearize the skb (due to FW
3516 restrictions). No need to check fragmentation if page size > 8K
3517 (there will be no violation of FW restrictions) */
3518 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3519 /* Statistics of linearization */
3521 if (skb_linearize(skb) != 0) {
3522 DP(NETIF_MSG_TX_QUEUED,
3523 "SKB linearization failed - silently dropping this SKB\n");
3524 dev_kfree_skb_any(skb);
3525 return NETDEV_TX_OK;
3529 /* Map skb linear data for DMA */
3530 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3531 skb_headlen(skb), DMA_TO_DEVICE);
3532 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3533 DP(NETIF_MSG_TX_QUEUED,
3534 "SKB mapping failed - silently dropping this SKB\n");
3535 dev_kfree_skb_any(skb);
3536 return NETDEV_TX_OK;
3539 Please read carefully. First we use one BD which we mark as start,
3540 then we have a parsing info BD (used for TSO or xsum),
3541 and only then we have the rest of the TSO BDs.
3542 (don't forget to mark the last one as last,
3543 and to unmap only AFTER you write to the BD ...)
3544 And above all, all pbd sizes are in words - NOT DWORDS!
3547 /* get current pkt produced now - advance it just before sending packet
3548 * since mapping of pages may fail and cause packet to be dropped
3550 pkt_prod = txdata->tx_pkt_prod;
3551 bd_prod = TX_BD(txdata->tx_bd_prod);
3553 /* get a tx_buf and first BD
3554 * tx_start_bd may be changed during SPLIT,
3555 * but first_bd will always stay first
3557 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3558 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3559 first_bd = tx_start_bd;
3561 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3562 SET_FLAG(tx_start_bd->general_data,
3563 ETH_TX_START_BD_PARSE_NBDS,
3567 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
3569 /* remember the first BD of the packet */
3570 tx_buf->first_bd = txdata->tx_bd_prod;
3574 DP(NETIF_MSG_TX_QUEUED,
3575 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3576 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3578 if (vlan_tx_tag_present(skb)) {
3579 tx_start_bd->vlan_or_ethertype =
3580 cpu_to_le16(vlan_tx_tag_get(skb));
3581 tx_start_bd->bd_flags.as_bitfield |=
3582 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3584 /* when transmitting in a vf, start bd must hold the ethertype
3585 * for fw to enforce it
3587 #ifndef BNX2X_STOP_ON_ERROR
3590 tx_start_bd->vlan_or_ethertype =
3591 cpu_to_le16(ntohs(eth->h_proto));
3592 #ifndef BNX2X_STOP_ON_ERROR
3594 /* used by FW for packet accounting */
3595 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3600 /* turn on parsing and get a BD */
3601 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3603 if (xmit_type & XMIT_CSUM)
3604 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3606 if (!CHIP_IS_E1x(bp)) {
3607 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3608 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3609 /* Set PBD in checksum offload case */
3610 if (xmit_type & XMIT_CSUM)
3611 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3612 &pbd_e2_parsing_data,
3615 if (IS_MF_SI(bp) || IS_VF(bp)) {
3616 /* fill in the MAC addresses in the PBD - for local
3619 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3620 &pbd_e2->src_mac_addr_mid,
3621 &pbd_e2->src_mac_addr_lo,
3623 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3624 &pbd_e2->dst_mac_addr_mid,
3625 &pbd_e2->dst_mac_addr_lo,
3629 SET_FLAG(pbd_e2_parsing_data,
3630 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3632 u16 global_data = 0;
3633 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3634 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3635 /* Set PBD in checksum offload case */
3636 if (xmit_type & XMIT_CSUM)
3637 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3639 SET_FLAG(global_data,
3640 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3641 pbd_e1x->global_data |= cpu_to_le16(global_data);
3644 /* Setup the data pointer of the first BD of the packet */
3645 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3646 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3647 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3648 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3649 pkt_size = tx_start_bd->nbytes;
3651 DP(NETIF_MSG_TX_QUEUED,
3652 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
3653 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3654 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
3655 tx_start_bd->bd_flags.as_bitfield,
3656 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3658 if (xmit_type & XMIT_GSO) {
3660 DP(NETIF_MSG_TX_QUEUED,
3661 "TSO packet len %d hlen %d total len %d tso size %d\n",
3662 skb->len, hlen, skb_headlen(skb),
3663 skb_shinfo(skb)->gso_size);
3665 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3667 if (unlikely(skb_headlen(skb) > hlen))
3668 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3671 if (!CHIP_IS_E1x(bp))
3672 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3675 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
3678 /* Set the PBD's parsing_data field if not zero
3679 * (for the chips newer than 57711).
3681 if (pbd_e2_parsing_data)
3682 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3684 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3686 /* Handle fragmented skb */
3687 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3688 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3690 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3691 skb_frag_size(frag), DMA_TO_DEVICE);
3692 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3693 unsigned int pkts_compl = 0, bytes_compl = 0;
3695 DP(NETIF_MSG_TX_QUEUED,
3696 "Unable to map page - dropping packet...\n");
3698 /* we need to unmap all buffers already mapped
3700 * first_bd->nbd needs to be properly updated
3701 * before the call to bnx2x_free_tx_pkt
3703 first_bd->nbd = cpu_to_le16(nbd);
3704 bnx2x_free_tx_pkt(bp, txdata,
3705 TX_BD(txdata->tx_pkt_prod),
3706 &pkts_compl, &bytes_compl);
3707 return NETDEV_TX_OK;
3710 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3711 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3712 if (total_pkt_bd == NULL)
3713 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3715 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3716 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3717 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3718 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3721 DP(NETIF_MSG_TX_QUEUED,
3722 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3723 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3724 le16_to_cpu(tx_data_bd->nbytes));
3727 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3729 /* update with actual num BDs */
3730 first_bd->nbd = cpu_to_le16(nbd);
3732 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3734 /* now send a tx doorbell, counting the next BD
3735 * if the packet contains or ends with it
3737 if (TX_BD_POFF(bd_prod) < nbd)
3740 /* total_pkt_bytes should be set on the first data BD if
3741 * it's not an LSO packet and there is more than one
3742 * data BD. In this case pkt_size is limited by an MTU value.
3743 * However we prefer to set it for an LSO packet (while we don't
3744 * have to) in order to save some CPU cycles in a non-LSO
3745 * case, which we care about much more.
3747 if (total_pkt_bd != NULL)
3748 total_pkt_bd->total_pkt_bytes = pkt_size;
3751 DP(NETIF_MSG_TX_QUEUED,
3752 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3753 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3754 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3755 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3756 le16_to_cpu(pbd_e1x->total_hlen_w));
3758 DP(NETIF_MSG_TX_QUEUED,
3759 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3760 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3761 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3762 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3763 pbd_e2->parsing_data);
3764 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3766 netdev_tx_sent_queue(txq, skb->len);
3768 skb_tx_timestamp(skb);
3770 txdata->tx_pkt_prod++;
3772 * Make sure that the BD data is updated before updating the producer
3773 * since FW might read the BD right after the producer is updated.
3774 * This is only applicable for weak-ordered memory model archs such
3775 * as IA-64. The following barrier is also mandatory since FW will
3776 * assume packets must have BDs.
3780 txdata->tx_db.data.prod += nbd;
3783 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3787 txdata->tx_bd_prod += nbd;
3789 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3790 netif_tx_stop_queue(txq);
3792 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3793 * ordering of set_bit() in netif_tx_stop_queue() and read of
3797 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3798 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
3799 netif_tx_wake_queue(txq);
3803 return NETDEV_TX_OK;
3807 * bnx2x_setup_tc - routine to configure net_device for multi tc
3809 * @netdev: net device to configure
3810 * @tc: number of traffic classes to enable
3812 * callback connected to the ndo_setup_tc function pointer
3814 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3816 int cos, prio, count, offset;
3817 struct bnx2x *bp = netdev_priv(dev);
3819 /* setup tc must be called under rtnl lock */
3822 /* no traffic classes requested. aborting */
3824 netdev_reset_tc(dev);
3828 /* requested to support too many traffic classes */
3829 if (num_tc > bp->max_cos) {
3830 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3831 num_tc, bp->max_cos);
3835 /* declare amount of supported traffic classes */
3836 if (netdev_set_num_tc(dev, num_tc)) {
3837 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3841 /* configure priority to traffic class mapping */
3842 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3843 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3844 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3845 "mapping priority %d to tc %d\n",
3846 prio, bp->prio_to_cos[prio]);
3850 /* Use this configuration to differentiate tc0 from other COSes
3851 This can be used for ets or pfc, and save the effort of setting
3852 up a multi class queue disc or negotiating DCBX with a switch
3853 netdev_set_prio_tc_map(dev, 0, 0);
3854 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3855 for (prio = 1; prio < 16; prio++) {
3856 netdev_set_prio_tc_map(dev, prio, 1);
3857 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3860 /* configure traffic class to transmission queue mapping */
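/* Illustrative mapping produced by the loop below, assuming for example 4
 * ethernet queues and max_cos = 3: tc0 would cover tx queues 0-3 (offset 0),
 * tc1 queues 4-7 (offset 4) and tc2 queues 8-11 (offset 8), each with a
 * count of 4.
 */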
3861 for (cos = 0; cos < bp->max_cos; cos++) {
3862 count = BNX2X_NUM_ETH_QUEUES(bp);
3863 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
3864 netdev_set_tc_queue(dev, cos, count, offset);
3865 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3866 "mapping tc %d to offset %d count %d\n",
3867 cos, offset, count);
3873 /* called with rtnl_lock */
3874 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3876 struct sockaddr *addr = p;
3877 struct bnx2x *bp = netdev_priv(dev);
3880 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3881 BNX2X_ERR("Requested MAC address is not valid\n");
3885 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3886 !is_zero_ether_addr(addr->sa_data)) {
3887 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3891 if (netif_running(dev)) {
3892 rc = bnx2x_set_eth_mac(bp, false);
3897 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3899 if (netif_running(dev))
3900 rc = bnx2x_set_eth_mac(bp, true);
3905 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3907 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3908 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3913 if (IS_FCOE_IDX(fp_index)) {
3914 memset(sb, 0, sizeof(union host_hc_status_block));
3915 fp->status_blk_mapping = 0;
3918 if (!CHIP_IS_E1x(bp))
3919 BNX2X_PCI_FREE(sb->e2_sb,
3920 bnx2x_fp(bp, fp_index,
3921 status_blk_mapping),
3922 sizeof(struct host_hc_status_block_e2));
3924 BNX2X_PCI_FREE(sb->e1x_sb,
3925 bnx2x_fp(bp, fp_index,
3926 status_blk_mapping),
3927 sizeof(struct host_hc_status_block_e1x));
3931 if (!skip_rx_queue(bp, fp_index)) {
3932 bnx2x_free_rx_bds(fp);
3934 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3935 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3936 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3937 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3938 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3940 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3941 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3942 sizeof(struct eth_fast_path_rx_cqe) *
3946 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3947 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3948 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3949 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3953 if (!skip_tx_queue(bp, fp_index)) {
3954 /* fastpath tx rings: tx_buf tx_desc */
3955 for_each_cos_in_tx_queue(fp, cos) {
3956 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3958 DP(NETIF_MSG_IFDOWN,
3959 "freeing tx memory of fp %d cos %d cid %d\n",
3960 fp_index, cos, txdata->cid);
3962 BNX2X_FREE(txdata->tx_buf_ring);
3963 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3964 txdata->tx_desc_mapping,
3965 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3968 /* end of fastpath */
3971 void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
3974 for_each_cnic_queue(bp, i)
3975 bnx2x_free_fp_mem_at(bp, i);
3978 void bnx2x_free_fp_mem(struct bnx2x *bp)
3981 for_each_eth_queue(bp, i)
3982 bnx2x_free_fp_mem_at(bp, i);
3985 static void set_sb_shortcuts(struct bnx2x *bp, int index)
3987 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3988 if (!CHIP_IS_E1x(bp)) {
3989 bnx2x_fp(bp, index, sb_index_values) =
3990 (__le16 *)status_blk.e2_sb->sb.index_values;
3991 bnx2x_fp(bp, index, sb_running_index) =
3992 (__le16 *)status_blk.e2_sb->sb.running_index;
3994 bnx2x_fp(bp, index, sb_index_values) =
3995 (__le16 *)status_blk.e1x_sb->sb.index_values;
3996 bnx2x_fp(bp, index, sb_running_index) =
3997 (__le16 *)status_blk.e1x_sb->sb.running_index;
4001 /* Returns the number of actually allocated BDs */
4002 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4005 struct bnx2x *bp = fp->bp;
4006 u16 ring_prod, cqe_ring_prod;
4007 int i, failure_cnt = 0;
4009 fp->rx_comp_cons = 0;
4010 cqe_ring_prod = ring_prod = 0;
4012 /* This routine is called only during fp init so
4013 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4015 for (i = 0; i < rx_ring_size; i++) {
4016 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
4020 ring_prod = NEXT_RX_IDX(ring_prod);
4021 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4022 WARN_ON(ring_prod <= (i - failure_cnt));
4026 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4027 i - failure_cnt, fp->index);
4029 fp->rx_bd_prod = ring_prod;
4030 /* Limit the CQE producer by the CQE ring size */
4031 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4033 fp->rx_pkt = fp->rx_calls = 0;
4035 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4037 return i - failure_cnt;
4040 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4044 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4045 struct eth_rx_cqe_next_page *nextpg;
4047 nextpg = (struct eth_rx_cqe_next_page *)
4048 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4050 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4051 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4053 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4054 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
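/* In other words, the loop above chains the RCQ pages into a ring: the last
 * CQE of each page is overlaid with a next-page pointer, and the final page
 * points back at the first one (i % NUM_RCQ_RINGS wraps to 0).
 */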
4058 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4060 union host_hc_status_block *sb;
4061 struct bnx2x_fastpath *fp = &bp->fp[index];
4064 int rx_ring_size = 0;
4066 if (!bp->rx_ring_size &&
4067 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4068 rx_ring_size = MIN_RX_SIZE_NONTPA;
4069 bp->rx_ring_size = rx_ring_size;
4070 } else if (!bp->rx_ring_size) {
4071 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4073 if (CHIP_IS_E3(bp)) {
4074 u32 cfg = SHMEM_RD(bp,
4075 dev_info.port_hw_config[BP_PORT(bp)].
4078 /* Decrease ring size for 1G functions */
4079 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4080 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4084 /* allocate at least number of buffers required by FW */
4085 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4086 MIN_RX_SIZE_TPA, rx_ring_size);
4088 bp->rx_ring_size = rx_ring_size;
4089 } else /* if rx_ring_size specified - use it */
4090 rx_ring_size = bp->rx_ring_size;
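/* Summarizing the sizing above: storage-only MF functions get the bare
 * MIN_RX_SIZE_NONTPA, otherwise MAX_RX_AVAIL is split evenly between the RX
 * queues (possibly trimmed for 1G SerDes ports) and clamped from below by
 * the FW minimum; an explicitly configured bp->rx_ring_size is used as-is.
 */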
4093 sb = &bnx2x_fp(bp, index, status_blk);
4095 if (!IS_FCOE_IDX(index)) {
4097 if (!CHIP_IS_E1x(bp))
4098 BNX2X_PCI_ALLOC(sb->e2_sb,
4099 &bnx2x_fp(bp, index, status_blk_mapping),
4100 sizeof(struct host_hc_status_block_e2));
4102 BNX2X_PCI_ALLOC(sb->e1x_sb,
4103 &bnx2x_fp(bp, index, status_blk_mapping),
4104 sizeof(struct host_hc_status_block_e1x));
4107 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4108 * set shortcuts for it.
4110 if (!IS_FCOE_IDX(index))
4111 set_sb_shortcuts(bp, index);
4114 if (!skip_tx_queue(bp, index)) {
4115 /* fastpath tx rings: tx_buf tx_desc */
4116 for_each_cos_in_tx_queue(fp, cos) {
4117 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4120 "allocating tx memory of fp %d cos %d\n",
4123 BNX2X_ALLOC(txdata->tx_buf_ring,
4124 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4125 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4126 &txdata->tx_desc_mapping,
4127 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4132 if (!skip_rx_queue(bp, index)) {
4133 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4134 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4135 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4136 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4137 &bnx2x_fp(bp, index, rx_desc_mapping),
4138 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4140 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4141 &bnx2x_fp(bp, index, rx_comp_mapping),
4142 sizeof(struct eth_fast_path_rx_cqe) *
4146 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4147 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4148 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4149 &bnx2x_fp(bp, index, rx_sge_mapping),
4150 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4152 bnx2x_set_next_page_rx_bd(fp);
4155 bnx2x_set_next_page_rx_cq(fp);
4158 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4159 if (ring_size < rx_ring_size)
4165 /* handles low memory cases */
4167 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4169 /* FW will drop all packets if queue is not big enough;
4170 * in these cases we disable the queue.
4171 * Min size is different for OOO, TPA and non-TPA queues
4173 if (ring_size < (fp->disable_tpa ?
4174 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4175 /* release memory allocated for this queue */
4176 bnx2x_free_fp_mem_at(bp, index);
4182 int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4186 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4187 /* we will fail load process instead of mark
4195 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4199 /* 1. Allocate FP for leading - fatal if error
4200 * 2. Allocate RSS - fix number of queues if error
4204 if (bnx2x_alloc_fp_mem_at(bp, 0))
4208 for_each_nondefault_eth_queue(bp, i)
4209 if (bnx2x_alloc_fp_mem_at(bp, i))
4212 /* handle memory failures */
4213 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4214 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4217 bnx2x_shrink_eth_fp(bp, delta);
4218 if (CNIC_SUPPORT(bp))
4219 /* move non eth FPs next to last eth FP
4220 * must be done in that order
4221 * FCOE_IDX < FWD_IDX < OOO_IDX
4224 /* move FCoE fp even if NO_FCOE_FLAG is on */
4225 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4226 bp->num_ethernet_queues -= delta;
4227 bp->num_queues = bp->num_ethernet_queues +
4228 bp->num_cnic_queues;
4229 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4230 bp->num_queues + delta, bp->num_queues);
4236 void bnx2x_free_mem_bp(struct bnx2x *bp)
4238 kfree(bp->fp->tpa_info);
4241 kfree(bp->fp_stats);
4242 kfree(bp->bnx2x_txq);
4243 kfree(bp->msix_table);
4247 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4249 struct bnx2x_fastpath *fp;
4250 struct msix_entry *tbl;
4251 struct bnx2x_ilt *ilt;
4252 int msix_table_size = 0;
4253 int fp_array_size, txq_array_size;
4257 * The biggest MSI-X table we might need is the maximum number of fast
4258 * path IGU SBs plus default SB (for PF).
4260 msix_table_size = bp->igu_sb_cnt;
4263 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4265 /* fp array: RSS plus CNIC related L2 queues */
4266 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4267 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
4269 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
4272 for (i = 0; i < fp_array_size; i++) {
4274 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4275 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4276 if (!(fp[i].tpa_info))
4282 /* allocate sp objs */
4283 bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
4288 /* allocate fp_stats */
4289 bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
4294 /* Allocate memory for the transmission queues array */
4296 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4297 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4299 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4305 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4308 bp->msix_table = tbl;
4311 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4318 bnx2x_free_mem_bp(bp);
4323 int bnx2x_reload_if_running(struct net_device *dev)
4325 struct bnx2x *bp = netdev_priv(dev);
4327 if (unlikely(!netif_running(dev)))
4330 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4331 return bnx2x_nic_load(bp, LOAD_NORMAL);
4334 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4336 u32 sel_phy_idx = 0;
4337 if (bp->link_params.num_phys <= 1)
4340 if (bp->link_vars.link_up) {
4341 sel_phy_idx = EXT_PHY1;
4342 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4343 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4344 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4345 sel_phy_idx = EXT_PHY2;
4348 switch (bnx2x_phy_selection(&bp->link_params)) {
4349 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4350 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4351 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4352 sel_phy_idx = EXT_PHY1;
4354 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4355 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4356 sel_phy_idx = EXT_PHY2;
4364 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4366 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4368 * The selected active PHY is always the one after swapping (in case PHY
4369 * swapping is enabled). So when swapping is enabled, we need to reverse
4373 if (bp->link_params.multi_phy_config &
4374 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4375 if (sel_phy_idx == EXT_PHY1)
4376 sel_phy_idx = EXT_PHY2;
4377 else if (sel_phy_idx == EXT_PHY2)
4378 sel_phy_idx = EXT_PHY1;
4380 return LINK_CONFIG_IDX(sel_phy_idx);
4383 #ifdef NETDEV_FCOE_WWNN
4384 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4386 struct bnx2x *bp = netdev_priv(dev);
4387 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4390 case NETDEV_FCOE_WWNN:
4391 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4392 cp->fcoe_wwn_node_name_lo);
4394 case NETDEV_FCOE_WWPN:
4395 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4396 cp->fcoe_wwn_port_name_lo);
4399 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4407 /* called with rtnl_lock */
4408 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4410 struct bnx2x *bp = netdev_priv(dev);
4412 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4413 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4417 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4418 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4419 BNX2X_ERR("Can't support requested MTU size\n");
4423 /* This does not race with packet allocation
4424 * because the actual alloc size is
4425 * only updated as part of load
4429 return bnx2x_reload_if_running(dev);
4432 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4433 netdev_features_t features)
4435 struct bnx2x *bp = netdev_priv(dev);
4437 /* TPA requires Rx CSUM offloading */
4438 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
4439 features &= ~NETIF_F_LRO;
4440 features &= ~NETIF_F_GRO;
4446 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4448 struct bnx2x *bp = netdev_priv(dev);
4449 u32 flags = bp->flags;
4450 bool bnx2x_reload = false;
4452 if (features & NETIF_F_LRO)
4453 flags |= TPA_ENABLE_FLAG;
4455 flags &= ~TPA_ENABLE_FLAG;
4457 if (features & NETIF_F_GRO)
4458 flags |= GRO_ENABLE_FLAG;
4460 flags &= ~GRO_ENABLE_FLAG;
4462 if (features & NETIF_F_LOOPBACK) {
4463 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4464 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4465 bnx2x_reload = true;
4468 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4469 bp->link_params.loopback_mode = LOOPBACK_NONE;
4470 bnx2x_reload = true;
4474 if (flags ^ bp->flags) {
4476 bnx2x_reload = true;
4480 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4481 return bnx2x_reload_if_running(dev);
4482 /* else: bnx2x_nic_load() will be called at end of recovery */
4488 void bnx2x_tx_timeout(struct net_device *dev)
4490 struct bnx2x *bp = netdev_priv(dev);
4492 #ifdef BNX2X_STOP_ON_ERROR
4497 smp_mb__before_clear_bit();
4498 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4499 smp_mb__after_clear_bit();
4501 /* This allows the netif to be shutdown gracefully before resetting */
4502 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4505 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4507 struct net_device *dev = pci_get_drvdata(pdev);
4511 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4514 bp = netdev_priv(dev);
4518 pci_save_state(pdev);
4520 if (!netif_running(dev)) {
4525 netif_device_detach(dev);
4527 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4529 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4536 int bnx2x_resume(struct pci_dev *pdev)
4538 struct net_device *dev = pci_get_drvdata(pdev);
4543 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4546 bp = netdev_priv(dev);
4548 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4549 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4555 pci_restore_state(pdev);
4557 if (!netif_running(dev)) {
4562 bnx2x_set_power_state(bp, PCI_D0);
4563 netif_device_attach(dev);
4565 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4573 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4576 /* ustorm cxt validation */
4577 cxt->ustorm_ag_context.cdu_usage =
4578 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4579 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4580 /* xcontext validation */
4581 cxt->xstorm_ag_context.cdu_reserved =
4582 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4583 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4586 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4587 u8 fw_sb_id, u8 sb_index,
4591 u32 addr = BAR_CSTRORM_INTMEM +
4592 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4593 REG_WR8(bp, addr, ticks);
4595 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4596 port, fw_sb_id, sb_index, ticks);
4599 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4600 u16 fw_sb_id, u8 sb_index,
4603 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4604 u32 addr = BAR_CSTRORM_INTMEM +
4605 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4606 u16 flags = REG_RD16(bp, addr);
4608 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4609 flags |= enable_flag;
4610 REG_WR16(bp, addr, flags);
4612 "port %x fw_sb_id %d sb_index %d disable %d\n",
4613 port, fw_sb_id, sb_index, disable);
4616 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4617 u8 sb_index, u8 disable, u16 usec)
4619 int port = BP_PORT(bp);
4620 u8 ticks = usec / BNX2X_BTR;
4622 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4624 disable = disable ? 1 : (usec ? 0 : 1);
4625 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
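/* A quick sanity example for the conversion above, assuming BNX2X_BTR is the
 * driver's 4 usec base tick resolution: a 48 usec coalescing interval maps
 * to 12 ticks, while usec == 0 forces the disable path for this SB index.
 */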