/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>
/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}
static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}
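
/* Illustrative sketch (not part of the original driver): the queue
 * constructor flow further down re-enables a VF interrupt line by acking
 * its status block through the PF, e.g.:
 *
 *	bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, sb_idx),
 *			    USTORM_ID, 0, IGU_INT_ENABLE, 0);
 *
 * The sb_id_and_flags word packs index/segment/update/op exactly as the
 * shifts above show; the ctl word routes the data write to the VF's
 * function ID via the GRC.
 */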
/* VFOP - VF slow-path operation support */

#define BNX2X_VFOP_FILTER_ADD_CNT_MAX		0x10000
/* VFOP operations states */
enum bnx2x_vfop_qctor_state {
	BNX2X_VFOP_QCTOR_INIT,
	BNX2X_VFOP_QCTOR_SETUP,
	BNX2X_VFOP_QCTOR_INT_EN
};

enum bnx2x_vfop_qdtor_state {
	BNX2X_VFOP_QDTOR_HALT,
	BNX2X_VFOP_QDTOR_TERMINATE,
	BNX2X_VFOP_QDTOR_CFCDEL,
	BNX2X_VFOP_QDTOR_DONE
};

enum bnx2x_vfop_vlan_mac_state {
	BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
	BNX2X_VFOP_VLAN_MAC_CLEAR,
	BNX2X_VFOP_VLAN_MAC_CHK_DONE,
	BNX2X_VFOP_MAC_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST_0
};

enum bnx2x_vfop_qsetup_state {
	BNX2X_VFOP_QSETUP_CTOR,
	BNX2X_VFOP_QSETUP_VLAN0,
	BNX2X_VFOP_QSETUP_DONE
};

enum bnx2x_vfop_mcast_state {
	BNX2X_VFOP_MCAST_DEL,
	BNX2X_VFOP_MCAST_ADD,
	BNX2X_VFOP_MCAST_CHK_DONE
};

enum bnx2x_vfop_qflr_state {
	BNX2X_VFOP_QFLR_CLR_VLAN,
	BNX2X_VFOP_QFLR_CLR_MAC,
	BNX2X_VFOP_QFLR_TERMINATE,
	BNX2X_VFOP_QFLR_DONE
};

enum bnx2x_vfop_flr_state {
	BNX2X_VFOP_FLR_QUEUES,
	BNX2X_VFOP_FLR_HW
};

enum bnx2x_vfop_close_state {
	BNX2X_VFOP_CLOSE_QUEUES,
	BNX2X_VFOP_CLOSE_HW
};

enum bnx2x_vfop_rxmode_state {
	BNX2X_VFOP_RXMODE_CONFIG,
	BNX2X_VFOP_RXMODE_DONE
};

enum bnx2x_vfop_qteardown_state {
	BNX2X_VFOP_QTEARDOWN_RXMODE,
	BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
	BNX2X_VFOP_QTEARDOWN_CLR_MAC,
	BNX2X_VFOP_QTEARDOWN_QDTOR,
	BNX2X_VFOP_QTEARDOWN_DONE
};

enum bnx2x_vfop_rss_state {
	BNX2X_VFOP_RSS_CONFIG,
	BNX2X_VFOP_RSS_DONE
};
#define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)
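
/* All VFOP state handlers below share one shape: the handler is invoked,
 * advances vfop->state, issues an asynchronous ramrod, and is re-entered
 * on completion via its 'done' callback until bnx2x_vfop_end() is reached.
 * A minimal sketch of the convention (names as used throughout this file):
 *
 *	switch (state) {
 *	case SOME_STATE:
 *		vfop->state = NEXT_STATE;
 *		vfop->rc = bnx2x_queue_state_change(bp, q_params);
 *		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 *	default:
 *		bnx2x_vfop_default(state);
 *	}
 *
 * bnx2x_vfop_finalize() hides the goto logic to the op_err/op_done/
 * op_pending labels, which is why the cases appear to fall through.
 */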
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}
void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vfop_qctor_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

	/* Setup-op pause params:
	 * Nothing to do, the pause thresholds are set by default to 0 which
	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) to interfere with another queue (another VF)
	 */
	if (vf->cfg_flags & VF_CFG_FW_FC)
		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
			  vf->abs_vfid);
	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}
/* VFOP queue construction */
static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qctor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QCTOR_INIT:

		/* has this queue already been opened? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
			DP(BNX2X_MSG_IOV,
			   "Entered qctor but queue was already up. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_SETUP;

		q_params->cmd = BNX2X_Q_CMD_INIT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_SETUP:
		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;

		/* copy pre-prepared setup params to the queue-state params */
		vfop->op_p->qctor.qstate.params.setup =
			vfop->op_p->qctor.prep_qsetup;

		q_params->cmd = BNX2X_Q_CMD_SETUP;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_INT_EN:

		/* enable interrupts */
		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}
static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qctor.qid = qid;
		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);

		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
				 bnx2x_vfop_qctor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
					     cmd->block);
	}
	return -ENOMEM;
}
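
/* Usage sketch (illustrative): a caller constructs a VF queue by chaining
 * this command with its own handler as the completion callback, e.g. as
 * bnx2x_vfop_qsetup() does further down:
 *
 *	struct bnx2x_vfop_cmd cmd = {
 *		.done = bnx2x_vfop_qsetup,
 *		.block = false,
 *	};
 *	vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
 */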
/* VFOP queue destruction */
static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qdtor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QDTOR_HALT:

		/* has this queue already been stopped? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
			DP(BNX2X_MSG_IOV,
			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");

			/* next state */
			vfop->state = BNX2X_VFOP_QDTOR_DONE;

			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;

		q_params->cmd = BNX2X_Q_CMD_HALT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_TERMINATE:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;

		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_CFCDEL:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_DONE;

		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
op_done:
	case BNX2X_VFOP_QDTOR_DONE:
		/* invalidate the context */
		if (qdtor->cxt) {
			qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
			qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
		}
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}
static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_queue_state_params *qstate =
			&vf->op_params.qctor.qstate;

		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qdtor.qid = qid;
		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);

		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
				 bnx2x_vfop_qdtor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
					     cmd->block);
	}
	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n",
	   vf->abs_vfid, vfop->rc);
	return -ENOMEM;
}
static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		/* the first igu entry belonging to VFs of this PF */
		if (!BP_VFDB(bp)->first_vf_igu_entry)
			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;

		/* the first igu entry belonging to this VF */
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;

		++vf_sb_count(vf);
	}
	BP_VFDB(bp)->vf_sbs_pool++;
}
/* VFOP MAC/VLAN helpers */
static inline void bnx2x_vfop_credit(struct bnx2x *bp,
				     struct bnx2x_vfop *vfop,
				     struct bnx2x_vlan_mac_obj *obj)
{
	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;

	/* update credit only if there is no error
	 * and a valid credit counter
	 */
	if (!vfop->rc && args->credit) {
		struct list_head *pos;
		int read_lock;
		int cnt = 0;

		read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
		if (read_lock)
			DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");

		list_for_each(pos, &obj->head)
			cnt++;

		if (!read_lock)
			bnx2x_vlan_mac_h_read_unlock(bp, obj);

		atomic_set(args->credit, cnt);
	}
}
static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
				   struct bnx2x_vfop_filter *pos,
				   struct bnx2x_vlan_mac_data *user_req)
{
	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
		BNX2X_VLAN_MAC_DEL;

	switch (pos->type) {
	case BNX2X_VFOP_FILTER_MAC:
		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
		break;
	case BNX2X_VFOP_FILTER_VLAN:
		user_req->u.vlan.vlan = pos->vid;
		break;
	default:
		BNX2X_ERR("Invalid filter type, skipping\n");
		return 1;
	}
	return 0;
}
static int
bnx2x_vfop_config_vlan0(struct bnx2x *bp,
			struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
			bool add)
{
	int rc;

	vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
		BNX2X_VLAN_MAC_DEL;
	vlan_mac->user_req.u.vlan.vlan = 0;

	rc = bnx2x_config_vlan_mac(bp, vlan_mac);
	if (rc == -EEXIST)
		rc = 0;
	return rc;
}
static int bnx2x_vfop_config_list(struct bnx2x *bp,
				  struct bnx2x_vfop_filters *filters,
				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
{
	struct bnx2x_vfop_filter *pos, *tmp;
	struct list_head rollback_list, *filters_list = &filters->head;
	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
	int rc = 0, cnt = 0;

	INIT_LIST_HEAD(&rollback_list);

	list_for_each_entry_safe(pos, tmp, filters_list, link) {
		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
			continue;

		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (rc >= 0) {
			cnt += pos->add ? 1 : -1;
			list_move(&pos->link, &rollback_list);
			rc = 0;
		} else if (rc == -EEXIST) {
			rc = 0;
		} else {
			BNX2X_ERR("Failed to add a new vlan_mac command\n");
			break;
		}
	}

	/* rollback if error or too many rules added */
	if (rc || cnt > filters->add_cnt) {
		BNX2X_ERR("error or too many rules added. Performing rollback\n");
		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
			pos->add = !pos->add;	/* reverse op */
			bnx2x_vfop_set_user_req(bp, pos, user_req);
			bnx2x_config_vlan_mac(bp, vlan_mac);
			list_del(&pos->link);
		}
		cnt = 0;
		if (!rc)
			rc = -EINVAL;
	}
	filters->add_cnt = cnt;
	return rc;
}
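
/* Illustrative sketch (assumes the bnx2x_vfop_filters/bnx2x_vfop_filter
 * layout from bnx2x_sriov.h): callers such as the PF-VF mailbox build the
 * list that bnx2x_vfop_config_list() consumes roughly like so, one entry
 * per requested rule:
 *
 *	fl->type = BNX2X_VFOP_FILTER_VLAN;
 *	fl->add = true;
 *	fl->vid = vid;
 *	list_add_tail(&fl->link, &filters->head);
 *
 * On error the function reverses every rule it already applied (the
 * rollback_list) so the hardware is left unchanged.
 */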
/* VFOP set VLAN/MAC */
static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;

	enum bnx2x_vfop_vlan_mac_state state = vfop->state;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	bnx2x_vfop_reset_wq(vf);

	switch (state) {
	case BNX2X_VFOP_VLAN_MAC_CLEAR:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do delete */
		vfop->rc = obj->delete_all(bp, obj,
					   &vlan_mac->user_req.vlan_mac_flags,
					   &vlan_mac->ramrod_flags);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do config */
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (vfop->rc == -EEXIST)
			vfop->rc = 0;

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
		vfop->rc = !!obj->raw.check_pending(&obj->raw);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MAC_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (vfop->rc)
			goto op_err;

		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;

		/* remove vlan0 - could be no-op */
		vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
		if (vfop->rc)
			goto op_err;

		/* Do vlan list config. if this operation fails we try to
		 * restore vlan0 to keep the queue in working order
		 */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (!vfop->rc) {
			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */

	case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		if (list_empty(&obj->head))
			/* add vlan0 */
			vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
op_done:
	kfree(filters);
	bnx2x_vfop_credit(bp, vfop, obj);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}
struct bnx2x_vfop_vlan_mac_flags {
	bool drv_only;
	bool dont_consume;
	bool single_cmd;
	bool add;
};

static void
bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
				struct bnx2x_vfop_vlan_mac_flags *flags)
{
	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;

	memset(ramrod, 0, sizeof(*ramrod));

	/* ramrod flags */
	if (flags->drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
	if (flags->single_cmd)
		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);

	/* mac_vlan flags */
	if (flags->dont_consume)
		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);

	/* cmd */
	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
}

static void
bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
			   struct bnx2x_vfop_vlan_mac_flags *flags)
{
	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
}
static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct bnx2x_vfop_cmd *cmd,
				     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single */
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false /* don't care */,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    struct bnx2x_vfop_filters *macs,
			    int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = macs,
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care since only the items in the
				       * filters list affect the sp operation,
				       * not the list itself
				       */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    int qid, u16 vid, bool add)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = false,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = add,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
		ramrod->user_req.u.vlan.vlan = vid;

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      struct bnx2x_vfop_cmd *cmd,
				      int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false,	/* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
			     struct bnx2x_virtf *vf,
			     struct bnx2x_vfop_cmd *cmd,
			     struct bnx2x_vfop_filters *vlans,
			     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = vlans,
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false,	/* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
			atomic_read(filters.credit);

		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
/* VFOP queue setup (queue constructor + set vlan 0) */
static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qctor.qid;
	enum bnx2x_vfop_qsetup_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_qsetup,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QSETUP_CTOR:
		/* init the queue ctor command */
		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QSETUP_VLAN0:
		/* skip if non-leading or FPGA/EMU */
		if (qid)
			goto op_done;

		/* init the queue set-vlan command (for vlan 0) */
		vfop->state = BNX2X_VFOP_QSETUP_DONE;
		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QSETUP_DONE:
		vf->cfg_flags |= VF_CFG_VLAN;
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}
int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qctor.qid = qid;

		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
				 bnx2x_vfop_qsetup, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
					     cmd->block);
	}
	return -ENOMEM;
}
/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qflr_state state = vfop->state;
	struct bnx2x_queue_state_params *qstate;
	struct bnx2x_vfop_cmd cmd;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qflr;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QFLR_CLR_VLAN:
		/* vlan-clear-all: driver-only, don't consume credit */
		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)))
			vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid,
							      true);
		if (vfop->rc)
			goto op_err;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QFLR_CLR_MAC:
		/* mac-clear-all: driver-only, consume credit */
		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)))
			vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid,
							     true);
		DP(BNX2X_MSG_IOV,
		   "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
		   vf->abs_vfid, vfop->rc);
		if (vfop->rc)
			goto op_err;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QFLR_TERMINATE:
		qstate = &vfop->op_p->qctor.qstate;
		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		vfop->state = BNX2X_VFOP_QFLR_DONE;

		DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
		   vf->abs_vfid, qstate->q_obj->state);

		if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
			qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
			qstate->cmd = BNX2X_Q_CMD_TERMINATE;
			vfop->rc = bnx2x_queue_state_change(bp, qstate);
			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
		} else {
			goto op_done;
		}

op_err:
	BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QFLR_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}
static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
			       struct bnx2x_virtf *vf,
			       struct bnx2x_vfop_cmd *cmd,
			       int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
				 bnx2x_vfop_qflr, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
					     cmd->block);
	}
	return -ENOMEM;
}
/* VFOP multi-casts */
static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
	enum bnx2x_vfop_mcast_state state = vfop->state;
	int i;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_MCAST_DEL:
		/* clear existing mcasts */
		vfop->state = BNX2X_VFOP_MCAST_ADD;
		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_MCAST_ADD:
		if (raw->check_pending(raw))
			goto op_pending;

		if (args->mc_num) {
			/* update mcast list on the ramrod params */
			INIT_LIST_HEAD(&mcast->mcast_list);
			for (i = 0; i < args->mc_num; i++)
				list_add_tail(&(args->mc[i].link),
					      &mcast->mcast_list);
			/* add new mcasts */
			vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
			vfop->rc = bnx2x_config_mcast(bp, mcast,
						      BNX2X_MCAST_CMD_ADD);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MCAST_CHK_DONE:
		vfop->rc = raw->check_pending(raw) ? 1 : 0;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
op_done:
	kfree(args->mc);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}
int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 bnx2x_mac_addr_t *mcasts,
			 int mcast_num, bool drv_only)
{
	struct bnx2x_vfop *vfop = NULL;
	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
					   NULL;

	if (!mc_sz || mc) {
		vfop = bnx2x_vfop_add(bp, vf);
		if (vfop) {
			int i;
			struct bnx2x_mcast_ramrod_params *ramrod =
				&vf->op_params.mcast;

			/* set ramrod params */
			memset(ramrod, 0, sizeof(*ramrod));
			ramrod->mcast_obj = &vf->mcast_obj;
			if (drv_only)
				set_bit(RAMROD_DRV_CLR_ONLY,
					&ramrod->ramrod_flags);

			/* copy mcasts pointers */
			vfop->args.mc_list.mc_num = mcast_num;
			vfop->args.mc_list.mc = mc;
			for (i = 0; i < mcast_num; i++)
				mc[i].mac = mcasts[i];

			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
					 bnx2x_vfop_mcast, cmd->done);
			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
						     cmd->block);
		} else {
			kfree(mc);
		}
	}
	return -ENOMEM;
}
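
/* Usage sketch (illustrative): passing a NULL list with mcast_num == 0
 * performs a delete-only pass, which is how the FLR flow below clears a
 * VF's multicast configuration:
 *
 *	vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, true);
 */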
/* VFOP rx-mode */
static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
	enum bnx2x_vfop_rxmode_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RXMODE_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RXMODE_DONE;

		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
		BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RXMODE_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}
int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid, unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_rx_mode_ramrod_params *ramrod =
			&vf->op_params.rx_mode;

		memset(ramrod, 0, sizeof(*ramrod));

		/* Prepare ramrod parameters */
		ramrod->cid = vfq->cid;
		ramrod->cl_id = vfq_cl_id(vf, vfq);
		ramrod->rx_mode_obj = &bp->rx_mode_obj;
		ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);

		ramrod->rx_accept_flags = accept_flags;
		ramrod->tx_accept_flags = accept_flags;
		ramrod->pstate = &vf->filter_state;
		ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

		set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
		set_bit(RAMROD_RX, &ramrod->ramrod_flags);
		set_bit(RAMROD_TX, &ramrod->ramrod_flags);

		ramrod->rdata =
			bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
		ramrod->rdata_mapping =
			bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);

		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
				 bnx2x_vfop_rxmode, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
					     cmd->block);
	}
	return -ENOMEM;
}
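
/* Usage sketch (illustrative; accept-flag names assumed from the driver's
 * BNX2X_ACCEPT_* enum): a typical caller ORs the desired bits into
 * accept_flags before invoking the command, while the teardown flow below
 * passes 0 ("drop all"):
 *
 *	unsigned long accept_flags = 0;
 *	__set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
 *	__set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
 *	rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, accept_flags);
 */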
/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
 * queue destructor)
 */
static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qteardown_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qdown;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QTEARDOWN_RXMODE:
		/* Drop all */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
		/* vlan-clear-all: don't consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
		/* mac-clear-all: consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_QDTOR:
		/* run the queue destruction flow */
		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
		DP(BNX2X_MSG_IOV, "returned from cmd\n");
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);

	case BNX2X_VFOP_QTEARDOWN_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}
int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	/* for non-leading queues skip directly to the qdtor state */
	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(qid == LEADING_IDX ?
				 BNX2X_VFOP_QTEARDOWN_RXMODE :
				 BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
				 cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
					     cmd->block);
	}

	return -ENOMEM;
}
/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}
static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}
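
/* Worked example: for abs_vfid 40 on path 1, was_err_group =
 * (2 * 1 + 40) >> 5 = 1, so the VF's "was error" bit lives in
 * PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR and the write above clears
 * bit (40 & 0x1f) = 8 of that register.
 */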
static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}
void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset the VF in the IGU; interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		return false;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}
/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct vf_pf_resc_request *resc = &vf->alloc_resc;
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
	resc->num_sbs = vf->sb_count;
}
/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, vf);
	vf->state = VF_FREE;
}
static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}
static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_flr_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_flr,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_FLR_QUEUES:
		/* the cleanup operations are valid if and only if the VF
		 * was first acquired.
		 */
		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
						       qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}
		/* remove multicasts */
		vfop->state = BNX2X_VFOP_FLR_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
						0, true);
		if (vfop->rc)
			goto op_err;
		return;
	case BNX2X_VFOP_FLR_HW:

		/* dispatch final cleanup and wait for HW queues to flush */
		bnx2x_vf_flr_clnup_hw(bp, vf);

		/* release VF resources */
		bnx2x_vf_free_resc(bp, vf);

		/* re-open the mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	vf->flr_clnup_stage = VF_FLR_ACK;
	bnx2x_vfop_end(bp, vf, vfop);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}
static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
			      struct bnx2x_virtf *vf,
			      vfop_handler_t done)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1; /* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
				 bnx2x_vfop_flr, done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
	}
	return -ENOMEM;
}
static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
{
	int i = prev_vf ? prev_vf->index + 1 : 0;
	struct bnx2x_virtf *vf;

	/* find next VF to cleanup */
next_vf_to_clean:
	for (;
	     i < BNX2X_NR_VIRTFN(bp) &&
	     (bnx2x_vf(bp, i, state) != VF_RESET ||
	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
	     i++)
		;

	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
	   BNX2X_NR_VIRTFN(bp));

	if (i < BNX2X_NR_VIRTFN(bp)) {
		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
				  vf->abs_vfid);

			/* mark the VF to be ACKED and continue */
			vf->flr_clnup_stage = VF_FLR_ACK;
			goto next_vf_to_clean;
		}
		return;
	}

	/* we are done, update vf records */
	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);

		if (vf->flr_clnup_stage != VF_FLR_ACK)
			continue;

		vf->flr_clnup_stage = VF_FLR_EPILOG;
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those we never opened, since otherwise the MCP will interrupt us
	 * immediately again if we only ack some of the bits, resulting in an
	 * endless loop. This can happen for example in KVM where an
	 * 'all ones' FLR request is sometimes given by the hypervisor.
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}
void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = VF_FLR_CLN;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp, NULL);
}
/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it's > 0, the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size, although the two are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the number of VF allowed doorbells to the full DQ range */
	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}
void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}
static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}
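
/* Worked example (per the SR-IOV spec routing-ID arithmetic; values
 * illustrative): with offset 64, stride 1 and the PF at devfn 0 on bus 1,
 * VF 3 sits at devfn (0 + 64 + 3) & 0xff = 0x43 on bus 1 + (67 >> 8) = 1.
 * The two helpers above split that same sum into its bus and devfn parts.
 */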
static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}
static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}
static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid, current_pf = 0;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (fid & IGU_FID_ENCODE_IS_PF)
			current_pf = fid & IGU_FID_PF_NUM_MASK;
		else if (current_pf == BP_ABS_FUNC(bp))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));
		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
}
static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}
static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}
static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}
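
/* Worked example for the first_vf_in_pf math above (values illustrative):
 * if the GRC field reads 9 on path 1, first_vf_in_pf =
 * 9 * 8 - 64 * 1 = 8, i.e. this PF's VFs start at absolute VF id 8 on
 * that path (BNX2X_MAX_NUM_OF_VFS is 64 per path).
 */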
/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify is pf */
	if (IS_VF(bp))
		return 0;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	err = -EIO;
	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hyper-visor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0)
		goto failed;

	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);

	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
	   num_vfs_param, iov->nr_virtfn);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(
		BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
		GFP_KERNEL);

	DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);

	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}
void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
	pci_disable_sriov(bp->pdev);
	DP(BNX2X_MSG_IOV, "sriov disabled\n");

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}
void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);

	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
		       BP_VF_BULLETIN_DMA(bp)->mapping,
		       BP_VF_BULLETIN_DMA(bp)->size);
}
int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
			tot_size);
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
			tot_size);
	BP_VF_MBX_DMA(bp)->size = tot_size;

	/* allocate local bulletin boards */
	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr,
			&BP_VF_BULLETIN_DMA(bp)->mapping, tot_size);
	BP_VF_BULLETIN_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}
static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),
			     q_type);

	DP(BNX2X_MSG_IOV,
	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
	   vf->abs_vfid, q->sp_obj.func_id, q->cid);
}
/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* let FLR complete ... */
	msleep(100);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, vf);

		/* queues are initialized during VF-ACQUIRE */

		/* reserve the vf vlan credit */
		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));

		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			 MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, vfid);
		vf->devfn = bnx2x_vf_devfn(bp, vfid);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
	}

	return 0;
}
/* called by bnx2x_chip_cleanup */
int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return 0;

	/* release all the VFs */
	for_each_vf(bp, i)
		bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */

	return 0;
}
/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}
static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
}

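/* VF CIDs are carved out of a contiguous range starting at
 * BNX2X_FIRST_VF_CID, with every VF owning a window of
 * (1 << BNX2X_VF_CID_WND) connections, so a CID decodes as:
 *
 *	qidx     = cid & ((1 << BNX2X_VF_CID_WND) - 1);
 *	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS - 1);
 *
 * For illustration, assuming a 4-bit window: CID 0x25 inside the VF range
 * would map to abs_vfid 2, queue 5. The decode relies on abs_vfid being
 * encoded verbatim in the CID and on at most 64 VFs per path (see
 * bnx2x_iov_eq_sp_event() below).
 */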
void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					struct bnx2x_vf_queue *vfq,
					union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;

	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
					   &ramrod_flags);
		break;
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
					    &ramrod_flags);
		break;
	default:
		BNX2X_ERR("Unsupported classification command: %d\n",
			  elem->message.data.eth_event.echo);
		return;
	}
	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}

void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
			       struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}
}

void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
				 struct bnx2x_virtf *vf)
{
	smp_mb__before_clear_bit();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_clear_bit();
}

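/* Ramrods issued on behalf of a VF complete on the PF's event queue.
 * bnx2x_iov_eq_sp_event() below claims an element when its CID (or, for
 * FLR/MALICIOUS events, its vf_id) belongs to a VF and dispatches it to
 * the per-type handlers above; it returns 0 when the element was consumed
 * here and 1 when regular (non-IOV) EQ handling should process it.
 */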
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
		cid = (elem->message.data.eth_event.echo &
		       BNX2X_SWCID_MASK);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
		   abs_vfid, elem->message.data.malicious_vf_event.err_id);
		goto get_vf;
	default:
		return 1;
	}

	/* check if the cid is in the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
		return 1;
	}

	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
get_vf:
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf) {
		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
			  cid, abs_vfid);
		return 0;
	}

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       &vfq_get(vf, qidx)->sp_obj,
						       BNX2X_Q_CMD_CFC_DEL);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
		break;
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n",
		   abs_vfid, elem->message.data.malicious_vf_event.err_id);
		/* Do nothing for now */
		break;
	}
	/* SRIOV: reschedule any 'in_progress' operations */
	bnx2x_iov_sp_event(bp, cid, false);

	return 0;
}

static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
{
	/* extract the vf from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
}

2467 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
2468 struct bnx2x_queue_sp_obj **q_obj)
2470 struct bnx2x_virtf *vf;
2475 vf = bnx2x_vf_by_cid(bp, vf_cid);
2478 /* extract queue index from vf_cid - relies on the following:
2479 * 1. vfid on cid reflects the true abs_vfid
2480 * 2. The max number of VFs (per path) is 64
2482 int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
2483 *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
2485 BNX2X_ERR("No vf matching cid %d\n", vf_cid);
void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
{
	struct bnx2x_virtf *vf;

	/* check if the cid is in the VF range */
	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);
	if (vf) {
		/* set in_progress flag */
		atomic_set(&vf->op_in_progress, 1);
		if (queue_work)
			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}
}

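/* VF statistics are gathered by the PF: for every active VF queue a
 * STATS_TYPE_QUEUE query is appended to the PF's firmware stats request,
 * with the DMA address pointing into the VF's own fw_stat buffer
 * (fw_stat_map + queue index * stats_stride).
 */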
void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index, num_queues_req;
	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;
	u8 stats_count = 0;
	bool is_fcoe = false;

	if (!IS_SRIOV(bp))
		return;

	if (!NO_FCOE(bp))
		is_fcoe = true;

	/* fcoe adds one global request and one queue request */
	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
		(is_fcoe ? 0 : 1);

	DP(BNX2X_MSG_IOV,
	   "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
	   BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
	   first_queue_query_index + num_queues_req);

	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
		num_queues_req * sizeof(struct per_queue_stats);

	cur_query_entry = &bp->fw_stats_req->
		query[first_queue_query_index + num_queues_req];

	for_each_vf(bp, i) {
		int j;
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (vf->state != VF_ENABLED) {
			DP(BNX2X_MSG_IOV,
			   "vf %d not enabled so no stats for it\n",
			   vf->abs_vfid);
			continue;
		}

		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
		for_each_vfq(vf, j) {
			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);

			dma_addr_t q_stats_addr =
				vf->fw_stat_map + j * vf->stats_stride;

			/* collect stats from active queues only */
			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
			    BNX2X_Q_LOGICAL_STATE_STOPPED)
				continue;

			/* create stats query entry for this queue */
			cur_query_entry->kind = STATS_TYPE_QUEUE;
			cur_query_entry->index = vfq_stat_id(vf, rxq);
			cur_query_entry->funcID =
				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
			cur_query_entry->address.hi =
				cpu_to_le32(U64_HI(q_stats_addr));
			cur_query_entry->address.lo =
				cpu_to_le32(U64_LO(q_stats_addr));
			DP(BNX2X_MSG_IOV,
			   "added address %x %x for vf %d queue %d client %d\n",
			   cur_query_entry->address.hi,
			   cur_query_entry->address.lo, cur_query_entry->funcID,
			   j, cur_query_entry->index);
			cur_query_entry++;
			cur_data_offset += sizeof(struct per_queue_stats);
			stats_count++;

			/* all stats are coalesced to the leading queue */
			if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
				break;
		}
	}
	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}

void bnx2x_iov_sp_task(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;
	/* Iterate over all VFs and invoke state transition for VFs with
	 * 'in-progress' slow-path operations
	 */
	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (!vf) {
			BNX2X_ERR("VF was null! skipping...\n");
			continue;
		}

		if (!list_empty(&vf->op_list_head) &&
		    atomic_read(&vf->op_in_progress)) {
			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
		}
	}
}

struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
{
	int i;
	struct bnx2x_virtf *vf = NULL;

	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);
		if (stat_id >= vf->igu_base_id &&
		    stat_id < vf->igu_base_id + vf_sb_count(vf))
			break;
	}
	return vf;
}

/* VF API helpers */
static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
				u8 enable)
{
	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;

	REG_WR(bp, reg, val);
}

static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;

	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
}

static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 val;

	/* clear the VF configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
		     BNX2X_VF_MAX_QUEUES);
}

int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct vf_pf_resc_request *req_resc)
{
	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);

	return ((req_resc->num_rxqs <= rxq_cnt) &&
		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
}

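/* VF state machine: FREE/RESET --ACQUIRE--> ACQUIRED --INIT--> ENABLED.
 * bnx2x_vf_acquire() validates the resource request against the statically
 * provisioned maximums and, on first acquisition, binds the VF's queue
 * structures to their CIDs and CDU contexts. A repeated ACQUIRE from the
 * ACQUIRED state must not request more than what was already granted.
 */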
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
		     struct vf_pf_resc_request *resc)
{
	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
		BNX2X_CIDS_PER_VF;

	union cdu_context *base_cxt = (union cdu_context *)
		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
		(base_vf_cid & (ILT_PAGE_CIDS-1));
	int i;

	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the already acquired
	 * resources. Verify that the requested numbers do not exceed the
	 * already acquired numbers.
	 */
	if (vf->state == VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
		   vf->abs_vfid);

		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
				  vf->abs_vfid);
			return -EINVAL;
		}
		return 0;
	}

	/* Otherwise vf state must be 'free' or 'reset' */
	if (vf->state != VF_FREE && vf->state != VF_RESET) {
		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
			  vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* static allocation:
	 * the global maximums are fixed per VF. Fail the request if the
	 * requested numbers exceed these globals
	 */
	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
		DP(BNX2X_MSG_IOV,
		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
		/* set the max resource in the vf */
		return -ENOMEM;
	}

	/* Set resources counters - 0 request means max available */
	vf_sb_count(vf) = resc->num_sbs;
	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	if (resc->num_mac_filters)
		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
	if (resc->num_vlan_filters)
		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;

	DP(BNX2X_MSG_IOV,
	   "Fulfilling vf request: sb count %d, rx_count %d, tx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
	   vf_sb_count(vf), vf_rxq_count(vf),
	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
	   vf_vlan_rules_cnt(vf));

	/* Initialize the queues */
	if (!vf->vfqs) {
		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
		return -EINVAL;
	}

	for_each_vfq(vf, i) {
		struct bnx2x_vf_queue *q = vfq_get(vf, i);

		if (!q) {
			BNX2X_ERR("q number %d was not allocated\n", i);
			return -EINVAL;
		}

		q->index = i;
		q->cxt = &((base_cxt + i)->eth);
		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;

		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
		   vf->abs_vfid, i, q->index, q->cid, q->cxt);

		/* init SP objects */
		bnx2x_vfq_init(bp, vf, q);
	}
	vf->state = VF_ACQUIRED;
	return 0;
}

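/* bnx2x_vf_init() completes the ACQUIRED -> ENABLED transition: it sets up
 * the VF's status blocks and FW function context, opens the hardware (GRC
 * access, IGU, queue protection table) and finally posts the bulletin
 * board so the VF can pick up its configuration.
 */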
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{
	struct bnx2x_func_init_params func_init = {0};
	u16 flags = 0;
	int i;

	/* the sb resources are initialized at this point, do the
	 * FW/HW initializations
	 */
	for_each_vf_sb(vf, i)
		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));

	if (vf->state != VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
		   vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* let FLR complete ... */
	msleep(100);

	/* FLR cleanup epilogue */
	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
		return -EBUSY;

	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);

	if (vf->cfg_flags & VF_CFG_STATS)
		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);

	if (vf->cfg_flags & VF_CFG_TPA)
		flags |= FUNC_FLG_TPA;

	if (is_vf_multi(vf))
		flags |= FUNC_FLG_RSS;

	/* function setup */
	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
	func_init.fw_stat_map = vf->fw_stat_map;
	func_init.spq_map = vf->spq_map;
	func_init.spq_prod = 0;
	bnx2x_func_init(bp, &func_init);

	bnx2x_vf_enable_access(bp, vf->abs_vfid);
	bnx2x_vf_enable_traffic(bp, vf);

	/* queue protection table */
	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), true);

	vf->state = VF_ENABLED;

	/* update vf bulletin board */
	bnx2x_post_vf_bulletin(bp, vf->index);

	return 0;
}

/* VFOP close (teardown the queues, delete mcasts and close HW) */
static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_close_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_close,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_CLOSE_QUEUES:

		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}

		/* remove multicasts */
		vfop->state = BNX2X_VFOP_CLOSE_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_CLOSE_HW:

		/* disable the interrupts */
		DP(BNX2X_MSG_IOV, "disabling igu\n");
		bnx2x_vf_igu_disable(bp, vf);

		/* disable the VF */
		DP(BNX2X_MSG_IOV, "clearing qtbl\n");
		bnx2x_vf_clr_qtbl(bp, vf);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	vf->state = VF_ACQUIRED;
	DP(BNX2X_MSG_IOV, "set state to acquired\n");
	bnx2x_vfop_end(bp, vf, vfop);
}

int bnx2x_vfop_close_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1; /* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
				 bnx2x_vfop_close, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
					     cmd->block);
	}
	return -ENOMEM;
}

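/* The vfop handlers above and below share a pattern: each slow-path
 * operation is a small state machine whose transition function is
 * re-entered via the 'done' callback of any sub-command it issues, until
 * it reaches op_done/op_err and calls bnx2x_vfop_end(). A qid of -1 in the
 * args (see bnx2x_vfop_close_cmd() above) means "iterate over all queues",
 * with the qid pre-incremented on every pass.
 */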
/* VF release can be called either: 1. The VF was acquired but
 * not enabled 2. the vf was enabled or in the process of being
 * enabled
 */
static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_release,
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
	   vf->state == VF_FREE ? "Free" :
	   vf->state == VF_ACQUIRED ? "Acquired" :
	   vf->state == VF_ENABLED ? "Enabled" :
	   vf->state == VF_RESET ? "Reset" :
	   "Unknown");

	switch (vf->state) {
	case VF_ENABLED:
		vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
		if (vfop->rc)
			goto op_err;
		return;

	case VF_ACQUIRED:
		DP(BNX2X_MSG_IOV, "about to free resources\n");
		bnx2x_vf_free_resc(bp, vf);
		DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
		goto op_done;

	case VF_FREE:
	case VF_RESET:
		/* do nothing */
		goto op_done;
	default:
		bnx2x_vfop_default(vf->state);
	}
op_err:
	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
}

static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	enum bnx2x_vfop_rss_state state;

	if (!vfop) {
		BNX2X_ERR("vfop was null\n");
		return;
	}

	state = vfop->state;
	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RSS_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RSS_DONE;
		bnx2x_config_rss(bp, &vfop->op_p->rss);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
		BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RSS_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

int bnx2x_vfop_release_cmd(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		bnx2x_vfop_opset(-1, /* use vf->state */
				 bnx2x_vfop_release, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
		       struct bnx2x_virtf *vf,
		       struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
				 cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VF release ~ VF close + VF release-resources
 * Release is the ultimate SW shutdown and is called whenever an
 * irrecoverable error is encountered.
 */
void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
{
	struct bnx2x_vfop_cmd cmd = {
		.done = NULL,
		.block = block,
	};
	int rc;

	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);

	rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
	if (rc)
		WARN(rc,
		     "VF[%d] Failed to allocate resources for release op - rc=%d\n",
		     vf->abs_vfid, rc);
}

static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
				     struct bnx2x_virtf *vf, u32 *sbdf)
{
	*sbdf = vf->devfn | (vf->bus << 8);
}

static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
				     struct bnx2x_vf_bar_info *bar_info)
{
	int n;

	bar_info->nr_bars = bp->vfdb->sriov.nres;
	for (n = 0; n < bar_info->nr_bars; n++)
		bar_info->bars[n] = vf->bars[n];
}

void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      enum channel_tlvs tlv)
{
	/* we don't lock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(tlv)) {
		BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
		return;
	}

	/* lock the channel */
	mutex_lock(&vf->op_mutex);

	/* record the locking op */
	vf->op_current = tlv;

	/* log the lock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
	   vf->abs_vfid, tlv);
}

void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
				enum channel_tlvs expected_tlv)
{
	enum channel_tlvs current_tlv;

	if (!vf) {
		BNX2X_ERR("VF was %p\n", vf);
		return;
	}

	current_tlv = vf->op_current;

	/* we don't unlock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(expected_tlv))
		return;

	WARN(expected_tlv != vf->op_current,
	     "lock mismatch: expected %d found %d", expected_tlv,
	     vf->op_current);

	/* clear the locking op */
	vf->op_current = CHANNEL_TLV_NONE;

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, current_tlv);
}

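/* Entry point for the PCI 'sriov_numvfs' sysfs attribute, e.g.
 *
 *	echo 2 > /sys/bus/pci/devices/<pf-bdf>/sriov_numvfs
 *
 * lands here with num_vfs_param == 2; writing 0 disables SR-IOV.
 */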
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));

	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
	   num_vfs_param, BNX2X_NR_VIRTFN(bp));

	/* HW channel is only operational when PF is up */
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
		return -EINVAL;
	}

	/* we are always bound by the total_vfs in the configuration space */
	if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
			  num_vfs_param, BNX2X_NR_VIRTFN(bp));
		num_vfs_param = BNX2X_NR_VIRTFN(bp);
	}

	bp->requested_nr_virtfn = num_vfs_param;
	if (num_vfs_param == 0) {
		pci_disable_sriov(dev);
		return 0;
	} else {
		return bnx2x_enable_sriov(bp);
	}
}

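/* Enabling SR-IOV: the PF statically splits its pool of VF status blocks
 * between the requested VFs, rewrites the IGU CAM entries accordingly,
 * re-learns the resulting layout, and only then calls pci_enable_sriov(),
 * which probes the VFs and in turn triggers their ACQUIRE requests.
 */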
#define IGU_ENTRY_SIZE 4

int bnx2x_enable_sriov(struct bnx2x *bp)
{
	int rc = 0, req_vfs = bp->requested_nr_virtfn;
	int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
	u32 igu_entry, address;
	u16 num_vf_queues;

	first_vf = bp->vfdb->sriov.first_vf_in_pf;

	/* statically distribute vf sb pool between VFs */
	num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
			      BP_VFDB(bp)->vf_sbs_pool / req_vfs);

	/* zero previous values learned from igu cam */
	for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		vf->sb_count = 0;
		vf_sb_count(BP_VF(bp, vf_idx)) = 0;
	}
	bp->vfdb->vf_sbs_pool = 0;

	/* prepare IGU cam */
	sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
	address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
		for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
			igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
				vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
				IGU_REG_MAPPING_MEMORY_VALID;
			DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
			   sb_idx, vf_idx);
			REG_WR(bp, address, igu_entry);
			sb_idx++;
			address += IGU_ENTRY_SIZE;
		}
	}

	/* Reinitialize vf database according to igu cam */
	bnx2x_get_vf_igu_cam_info(bp);

	DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
	   BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);

	qcount = 0;
	for_each_vf(bp, vf_idx) {
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];
		qcount += vf_sb_count(vf);
	}

	/* prepare msix vectors in VF configuration space */
	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
		       num_vf_queues);
	}
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* enable sriov. This will probe all the VFs, and consequently cause
	 * the "acquire" messages to appear on the VF PF channel.
	 */
	DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
	pci_disable_sriov(bp->pdev);
	rc = pci_enable_sriov(bp->pdev, req_vfs);
	if (rc) {
		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
		return rc;
	}
	DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
	return req_vfs;
}

void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
{
	int vfidx;
	struct pf_vf_bulletin_content *bulletin;

	DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
	for_each_vf(bp, vfidx) {
		bulletin = BP_VF_BULLETIN(bp, vfidx);
		if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
			bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
	}
}

void bnx2x_disable_sriov(struct bnx2x *bp)
{
	pci_disable_sriov(bp->pdev);
}

int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf,
		      struct pf_vf_bulletin_content **bulletin)
{
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("vf ndo called while PF is down\n");
		return -EINVAL;
	}

	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("vf ndo called while sriov is disabled\n");
		return -EINVAL;
	}

	if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
			  vfidx, BNX2X_NR_VIRTFN(bp));
		return -EINVAL;
	}

	/* init members */
	*vf = BP_VF(bp, vfidx);
	*bulletin = BP_VF_BULLETIN(bp, vfidx);

	if (!*vf) {
		BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	if (!(*vf)->vfqs) {
		BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	if (!*bulletin) {
		BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	return 0;
}

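/* .ndo_get_vf_config handler, reached e.g. via 'ip link show' on the PF
 * netdev. For an enabled VF the mac/vlan are read back from the leading
 * queue's classification objects; otherwise the values last posted to the
 * bulletin board (or zeros) are reported.
 */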
int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
			struct ifla_vf_info *ivi)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;
	struct bnx2x_vlan_mac_obj *mac_obj;
	struct bnx2x_vlan_mac_obj *vlan_obj;
	int rc;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;
	mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
	if (!mac_obj || !vlan_obj) {
		BNX2X_ERR("VF partially initialized\n");
		return -EINVAL;
	}

	ivi->vf = vfidx;
	ivi->qos = 0;
	ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
	ivi->spoofchk = 1; /* always enabled */
	if (vf->state == VF_ENABLED) {
		/* mac and vlan are in vlan_mac objects */
		if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)))
			mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
						0, ETH_ALEN);
		if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
			vlan_obj->get_n_elements(bp, vlan_obj, 1,
						 (u8 *)&ivi->vlan, 0,
						 VLAN_HLEN);
	} else {
		/* mac */
		if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
			/* mac configured by ndo so it's in the bulletin board */
			memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
		else
			/* function has not been loaded yet. Show mac as 0s */
			memset(&ivi->mac, 0, ETH_ALEN);

		/* vlan */
		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
			/* vlan configured by ndo so it's in the bulletin board */
			memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
		else
			/* function has not been loaded yet. Show vlans as 0s */
			memset(&ivi->vlan, 0, VLAN_HLEN);
	}

	return 0;
}

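/* .ndo_set_vf_mac handler, reached e.g. via
 *
 *	ip link set <pf-dev> vf <n> mac <address>
 *
 * The cases it must handle are spelled out below.
 */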
/* New mac for VF. Consider these cases:
 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
 *    supply at acquire.
 * 2. VF has already been acquired but has not yet initialized - store in local
 *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
 *    will configure this mac when it is ready.
 * 3. VF has already initialized but has not yet set up a queue - post the new
 *    mac on VF's bulletin board right now. VF will configure this mac when it
 *    is ready.
 * 4. VF has already set a queue - delete any macs already configured for this
 *    queue and manually configure the new mac.
 * In any event, once this function has been called, refuse any attempts by the
 * VF to configure any mac for itself except for this mac. In case of a race
 * where the VF fails to see the new post on its bulletin board before sending a
 * mac configuration request, the PF will simply fail the request and the VF can
 * try again after consulting its bulletin board.
 */
int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;
	if (!is_valid_ether_addr(mac)) {
		BNX2X_ERR("mac address invalid\n");
		return -EINVAL;
	}

	/* update PF's copy of the VF's bulletin. Will no longer accept mac
	 * configuration requests from the vf unless they match this mac
	 */
	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
	memcpy(bulletin->mac, mac, ETH_ALEN);

	/* Post update on VF's bulletin board */
	rc = bnx2x_post_vf_bulletin(bp, vfidx);
	if (rc) {
		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
		return rc;
	}

	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the mac in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		struct bnx2x_vlan_mac_obj *mac_obj =
			&bnx2x_leading_vfq(vf, mac_obj);

		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
		if (rc)
			return rc;

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);

		/* remove existing eth macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete eth macs\n");
			rc = -EINVAL;
			goto out;
		}

		/* remove existing uc list macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete uc_list macs\n");
			rc = -EINVAL;
			goto out;
		}

		/* configure the new mac to device */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
				  BNX2X_ETH_MAC, &ramrod_flags);

out:
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
	}

	return 0;
}

int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;

	if (vlan > 4095) {
		BNX2X_ERR("illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
	   vfidx, vlan, 0);

	/* update PF's copy of the VF's bulletin. No point in posting the vlan
	 * to the VF since it doesn't have anything to do with it. But it is
	 * useful to store it here in case the VF is not up yet, since we can
	 * only configure the vlan later when it comes up.
	 */
	bulletin->valid_bitmap |= 1 << VLAN_VALID;
	bulletin->vlan = vlan;

	/* is vf initialized and queue set up? */
	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the vlan in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		unsigned long vlan_mac_flags = 0;
		struct bnx2x_vlan_mac_obj *vlan_obj =
			&bnx2x_leading_vfq(vf, vlan_obj);
		struct bnx2x_vlan_mac_ramrod_params ramrod_param;
		struct bnx2x_queue_state_params q_params = {NULL};
		struct bnx2x_queue_update_params *update_params;

		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
		if (rc)
			return rc;
		memset(&ramrod_param, 0, sizeof(ramrod_param));

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

		/* remove existing vlans */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
					  &ramrod_flags);
		if (rc) {
			BNX2X_ERR("failed to delete vlans\n");
			rc = -EINVAL;
			goto out;
		}

		/* send queue update ramrod to configure default vlan and
		 * silent vlan removal
		 */
		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
		q_params.cmd = BNX2X_Q_CMD_UPDATE;
		q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
		update_params = &q_params.params.update;
		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
			  &update_params->update_flags);
		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			  &update_params->update_flags);

		if (vlan == 0) {
			/* if vlan is 0 then we want to leave the VF traffic
			 * untagged, and leave the incoming traffic untouched
			 * (i.e. do not remove any vlan tags).
			 */
			__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				    &update_params->update_flags);
			__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				    &update_params->update_flags);
		} else {
			/* configure the new vlan to device */
			__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
			ramrod_param.vlan_mac_obj = vlan_obj;
			ramrod_param.ramrod_flags = ramrod_flags;
			ramrod_param.user_req.u.vlan.vlan = vlan;
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
			rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
			if (rc) {
				BNX2X_ERR("failed to configure vlan\n");
				rc = -EINVAL;
				goto out;
			}

			/* configure default vlan to vf queue and set silent
			 * vlan removal (the vf remains unaware of this vlan).
			 */
			update_params = &q_params.params.update;
			__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				  &update_params->update_flags);
			__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				  &update_params->update_flags);
			update_params->def_vlan = vlan;
		}

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to configure default VLAN\n");
			goto out;
		}

		/* clear the flag indicating that this VF needs its vlan
		 * (it will only be set if the HV configured the vlan before
		 * the VF was up and we were called because the VF came up
		 * later)
		 */
out:
		vf->cfg_flags &= ~VF_CFG_VLAN;
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
	}

	return 0;
}

/* crc is the first field in the bulletin board. Compute the crc over the
 * entire bulletin board excluding the crc field itself. Use the length field
 * as the Bulletin Board was posted by a PF with possibly a different version
 * from the vf which will sample it. Therefore, the length is computed by the
 * PF and then used blindly by the VF.
 */
u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
			  struct pf_vf_bulletin_content *bulletin)
{
	return crc32(BULLETIN_CRC_SEED,
		     ((u8 *)bulletin) + sizeof(bulletin->crc),
		     bulletin->length - sizeof(bulletin->crc));
}

/* Check for new posts on the bulletin board */
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
{
	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
	int attempts;

	/* bulletin board hasn't changed since last sample */
	if (bp->old_bulletin.version == bulletin.version)
		return PFVF_BULLETIN_UNCHANGED;

	/* validate crc of new bulletin board */
	if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
		/* sampling the structure mid-post may result in corrupted
		 * data; validate the crc to ensure coherency.
		 */
		for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
			bulletin = bp->pf2vf_bulletin->content;
			if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
								  &bulletin))
				break;
			BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
				  bulletin.crc,
				  bnx2x_crc_vf_bulletin(bp, &bulletin));
		}
		if (attempts >= BULLETIN_ATTEMPTS) {
			BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
				  attempts);
			return PFVF_BULLETIN_CRC_ERR;
		}
	}

	/* the mac address in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
	    memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
		/* update new mac to net device */
		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
	}

	/* the vlan in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << VLAN_VALID)
		memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);

	/* copy new bulletin board to bp */
	bp->old_bulletin = bulletin;

	return PFVF_BULLETIN_UPDATED;
}

void bnx2x_timer_sriov(struct bnx2x *bp)
{
	bnx2x_sample_bulletin(bp);

	/* if channel is down we need to self destruct */
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
	}
}

void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
	/* vf doorbells are embedded within the regview */
	return bp->regview + PXP_VF_ADDR_DB_START;
}

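/* Note: BNX2X_PCI_ALLOC() bails out to the local alloc_mem_err label on
 * allocation failure, which is why bnx2x_vf_pci_alloc() below has no
 * explicit error checks around the allocations.
 */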
int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
	mutex_init(&bp->vf2pf_mutex);

	/* allocate vf2pf mailbox for vf to pf channel */
	BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
			sizeof(struct bnx2x_vf_mbx_msg));

	/* allocate pf 2 vf bulletin board */
	BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
			sizeof(union pf_vf_bulletin));

	return 0;

alloc_mem_err:
	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
		       sizeof(struct bnx2x_vf_mbx_msg));
	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
		       sizeof(union pf_vf_bulletin));
	return -ENOMEM;
}

int bnx2x_open_epilog(struct bnx2x *bp)
{
	/* Enable sriov via delayed work. This must be done via delayed work
	 * because it causes the probe of the vf devices to be run, which
	 * invokes register_netdevice, which must have the rtnl lock taken.
	 * As we are holding the lock right now, that could only work if the
	 * probe would not take the lock. However, as the probe of the vf may
	 * be called from other contexts as well (such as when passthrough to
	 * a VM fails) it can't assume the lock is being held for it. Using
	 * delayed work here allows the probe code to simply take the lock
	 * (i.e. wait for it to be released if it is being held). We only want
	 * to do this if the number of VFs was set before the PF driver was
	 * loaded.
	 */
	if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
	}

	return 0;
}

void bnx2x_iov_channel_down(struct bnx2x *bp)
{
	int vf_idx;
	struct pf_vf_bulletin_content *bulletin;

	if (!IS_SRIOV(bp))
		return;

	for_each_vf(bp, vf_idx) {
		/* locate this VF's bulletin board and update the channel down
		 * bit
		 */
		bulletin = BP_VF_BULLETIN(bp, vf_idx);
		bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;

		/* update vf bulletin board */
		bnx2x_post_vf_bulletin(bp, vf_idx);
	}
}