/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

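/* Note: the PF has no direct access to a VF's IGU BAR, so a VF status block
 * is acked on the VF's behalf by composing an IGU producer-update command
 * and writing it through the GRC command registers, with the VF's function
 * id encoded in the control word.
 */
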
static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}

/* VFOP - VF slow-path operation support */

#define BNX2X_VFOP_FILTER_ADD_CNT_MAX		0x10000

/* VFOP operations states */
enum bnx2x_vfop_qctor_state {
	BNX2X_VFOP_QCTOR_INIT,
	BNX2X_VFOP_QCTOR_SETUP,
	BNX2X_VFOP_QCTOR_INT_EN
};

enum bnx2x_vfop_qdtor_state {
	BNX2X_VFOP_QDTOR_HALT,
	BNX2X_VFOP_QDTOR_TERMINATE,
	BNX2X_VFOP_QDTOR_CFCDEL,
	BNX2X_VFOP_QDTOR_DONE
};

enum bnx2x_vfop_vlan_mac_state {
	BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
	BNX2X_VFOP_VLAN_MAC_CLEAR,
	BNX2X_VFOP_VLAN_MAC_CHK_DONE,
	BNX2X_VFOP_MAC_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST_0
};

enum bnx2x_vfop_qsetup_state {
	BNX2X_VFOP_QSETUP_CTOR,
	BNX2X_VFOP_QSETUP_VLAN0,
	BNX2X_VFOP_QSETUP_DONE
};

enum bnx2x_vfop_mcast_state {
	BNX2X_VFOP_MCAST_DEL,
	BNX2X_VFOP_MCAST_ADD,
	BNX2X_VFOP_MCAST_CHK_DONE
};

enum bnx2x_vfop_qflr_state {
	BNX2X_VFOP_QFLR_CLR_VLAN,
	BNX2X_VFOP_QFLR_CLR_MAC,
	BNX2X_VFOP_QFLR_TERMINATE,
	BNX2X_VFOP_QFLR_DONE
};

enum bnx2x_vfop_flr_state {
	BNX2X_VFOP_FLR_QUEUES,
	BNX2X_VFOP_FLR_HW
};

enum bnx2x_vfop_close_state {
	BNX2X_VFOP_CLOSE_QUEUES,
	BNX2X_VFOP_CLOSE_HW
};

enum bnx2x_vfop_rxmode_state {
	BNX2X_VFOP_RXMODE_CONFIG,
	BNX2X_VFOP_RXMODE_DONE
};

enum bnx2x_vfop_qteardown_state {
	BNX2X_VFOP_QTEARDOWN_RXMODE,
	BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
	BNX2X_VFOP_QTEARDOWN_CLR_MAC,
	BNX2X_VFOP_QTEARDOWN_QDTOR,
	BNX2X_VFOP_QTEARDOWN_DONE
};

#define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)

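/* Each VFOP is a small state machine: a *_cmd wrapper allocates the op,
 * seeds its first state with bnx2x_vfop_opset() and kicks it via
 * bnx2x_vfop_transition(). The state handler is then re-entered on each
 * ramrod completion (or rescheduled for synchronous steps) until
 * bnx2x_vfop_end() fires the caller's 'done' callback.
 */
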
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vfop_qctor_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

	/* Setup-op pause params:
	 * Nothing to do, the pause thresholds are set by default to 0 which
	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) interfering with another queue (another VF).
	 */
	if (vf->cfg_flags & VF_CFG_FW_FC)
		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
			  vf->abs_vfid);
	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	if (vfq_is_leading(q)) {
		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
		__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
	}

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}

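/* bnx2x_vfop_finalize() decides how each switch case in the handlers below
 * ends: a negative rc jumps to the op_err label, a positive rc means a
 * ramrod is outstanding and the handler returns until its completion
 * re-enters it at the next state, and on synchronous success the op either
 * completes (VFOP_DONE) or is rescheduled to run its next state (VFOP_CONT).
 */
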
/* VFOP queue construction */
static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qctor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QCTOR_INIT:

		/* has this queue already been opened? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
			DP(BNX2X_MSG_IOV,
			   "Entered qctor but queue was already up. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_SETUP;

		q_params->cmd = BNX2X_Q_CMD_INIT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_SETUP:
		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;

		/* copy pre-prepared setup params to the queue-state params */
		vfop->op_p->qctor.qstate.params.setup =
			vfop->op_p->qctor.prep_qsetup;

		q_params->cmd = BNX2X_Q_CMD_SETUP;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_INT_EN:

		/* enable interrupts */
		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qctor.qid = qid;
		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);

		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
				 bnx2x_vfop_qctor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue destruction */
static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qdtor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QDTOR_HALT:

		/* has this queue already been stopped? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
			DP(BNX2X_MSG_IOV,
			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;

		q_params->cmd = BNX2X_Q_CMD_HALT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_TERMINATE:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;

		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_CFCDEL:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_DONE;

		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
op_done:
	case BNX2X_VFOP_QDTOR_DONE:
		/* invalidate the context */
		qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
		qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_queue_state_params *qstate =
			&vf->op_params.qctor.qstate;

		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qdtor.qid = qid;
		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);

		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
				 bnx2x_vfop_qdtor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
					     cmd->block);
	}
	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid);
	return -ENOMEM;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;
		++vf_sb_count(vf);
	}
}

/* VFOP MAC/VLAN helpers */
static inline void bnx2x_vfop_credit(struct bnx2x *bp,
				     struct bnx2x_vfop *vfop,
				     struct bnx2x_vlan_mac_obj *obj)
{
	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;

	/* update credit only if there is no error
	 * and a valid credit counter
	 */
	if (!vfop->rc && args->credit) {
		int cnt = 0;
		struct list_head *pos;

		list_for_each(pos, &obj->head)
			cnt++;

		atomic_set(args->credit, cnt);
	}
}

static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
				   struct bnx2x_vfop_filter *pos,
				   struct bnx2x_vlan_mac_data *user_req)
{
	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
		BNX2X_VLAN_MAC_DEL;

	switch (pos->type) {
	case BNX2X_VFOP_FILTER_MAC:
		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
		break;
	case BNX2X_VFOP_FILTER_VLAN:
		user_req->u.vlan.vlan = pos->vid;
		break;
	default:
		BNX2X_ERR("Invalid filter type, skipping\n");
		return 1;
	}
	return 0;
}

static int bnx2x_vfop_config_list(struct bnx2x *bp,
				  struct bnx2x_vfop_filters *filters,
				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
{
	struct bnx2x_vfop_filter *pos, *tmp;
	struct list_head rollback_list, *filters_list = &filters->head;
	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
	int rc = 0, cnt = 0;

	INIT_LIST_HEAD(&rollback_list);

	list_for_each_entry_safe(pos, tmp, filters_list, link) {
		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
			continue;

		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (rc >= 0) {
			cnt += pos->add ? 1 : -1;
			list_move(&pos->link, &rollback_list);
			rc = 0;
		} else if (rc == -EEXIST) {
			rc = 0;
		} else {
			BNX2X_ERR("Failed to add a new vlan_mac command\n");
			break;
		}
	}

	/* rollback if error or too many rules added */
	if (rc || cnt > filters->add_cnt) {
		BNX2X_ERR("error or too many rules added. Performing rollback\n");
		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
			pos->add = !pos->add;	/* reverse op */
			bnx2x_vfop_set_user_req(bp, pos, user_req);
			bnx2x_config_vlan_mac(bp, vlan_mac);
			list_del(&pos->link);
		}
		cnt = 0;
		if (!rc)
			rc = -EINVAL;
	}
	filters->add_cnt = cnt;
	return rc;
}

/* VFOP set VLAN/MAC */
static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;

	enum bnx2x_vfop_vlan_mac_state state = vfop->state;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	bnx2x_vfop_reset_wq(vf);

	switch (state) {
	case BNX2X_VFOP_VLAN_MAC_CLEAR:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do delete */
		vfop->rc = obj->delete_all(bp, obj,
					   &vlan_mac->user_req.vlan_mac_flags,
					   &vlan_mac->ramrod_flags);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do config */
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (vfop->rc == -EEXIST)
			vfop->rc = 0;

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
		vfop->rc = !!obj->raw.check_pending(&obj->raw);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MAC_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (vfop->rc)
			goto op_err;

		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (!vfop->rc) {
			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
op_done:
	kfree(filters);
	bnx2x_vfop_credit(bp, vfop, obj);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

struct bnx2x_vfop_vlan_mac_flags {
	bool drv_only;
	bool dont_consume;
	bool single_cmd;
	bool add;
};

static void
bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
				struct bnx2x_vfop_vlan_mac_flags *flags)
{
	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;

	memset(ramrod, 0, sizeof(*ramrod));

	/* ramrod flags */
	if (flags->drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
	if (flags->single_cmd)
		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);

	/* mac_vlan flags */
	if (flags->dont_consume)
		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);

	/* cmd */
	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
}

static void
bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
			   struct bnx2x_vfop_vlan_mac_flags *flags)
{
	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
}

static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct bnx2x_vfop_cmd *cmd,
				     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single */
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false /* don't care */,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    struct bnx2x_vfop_filters *macs,
			    int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = macs,
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care since only the items in the
				       * filters list affect the sp operation,
				       * not the list itself
				       */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    int qid, u16 vid, bool add)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL, /* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = false,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = add,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
		ramrod->user_req.u.vlan.vlan = vid;

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      struct bnx2x_vfop_cmd *cmd,
				      int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL, /* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false, /* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
			     struct bnx2x_virtf *vf,
			     struct bnx2x_vfop_cmd *cmd,
			     struct bnx2x_vfop_filters *vlans,
			     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = vlans,
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
			atomic_read(filters.credit);

		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue setup (queue constructor + set vlan 0) */
static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qctor.qid;
	enum bnx2x_vfop_qsetup_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_qsetup,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QSETUP_CTOR:
		/* init the queue ctor command */
		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QSETUP_VLAN0:
		/* skip if non-leading or FPGA/EMU*/
		if (qid)
			goto op_done;

		/* init the queue set-vlan command (for vlan 0) */
		vfop->state = BNX2X_VFOP_QSETUP_DONE;
		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QSETUP_DONE:
		vf->cfg_flags |= VF_CFG_VLAN;
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qctor.qid = qid;

		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
				 bnx2x_vfop_qsetup, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
					     cmd->block);
	}
	return -ENOMEM;
}

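/* Note: the FLR flow below runs its vlan/mac delete-all commands with
 * drv_only == true - the device was just reset by the FLR, so only the
 * driver-side object state needs cleaning (RAMROD_DRV_CLR_ONLY) rather
 * than sending ramrods to queues that no longer exist in hardware.
 */
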
/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qflr_state state = vfop->state;
	struct bnx2x_queue_state_params *qstate;
	struct bnx2x_vfop_cmd cmd;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qflr;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QFLR_CLR_VLAN:
		/* vlan-clear-all: driver-only, don't consume credit */
		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QFLR_CLR_MAC:
		/* mac-clear-all: driver only consume credit */
		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
		DP(BNX2X_MSG_IOV,
		   "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
		   vf->abs_vfid, vfop->rc);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QFLR_TERMINATE:
		qstate = &vfop->op_p->qctor.qstate;
		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		vfop->state = BNX2X_VFOP_QFLR_DONE;

		DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
		   vf->abs_vfid, qstate->q_obj->state);

		if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
			qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
			qstate->cmd = BNX2X_Q_CMD_TERMINATE;
			vfop->rc = bnx2x_queue_state_change(bp, qstate);
			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
		} else {
			goto op_done;
		}

op_err:
	BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QFLR_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
			       struct bnx2x_virtf *vf,
			       struct bnx2x_vfop_cmd *cmd,
			       int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
				 bnx2x_vfop_qflr, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP multi-casts */
static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
	enum bnx2x_vfop_mcast_state state = vfop->state;
	int i;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_MCAST_DEL:
		/* clear existing mcasts */
		vfop->state = BNX2X_VFOP_MCAST_ADD;
		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_MCAST_ADD:
		if (raw->check_pending(raw))
			goto op_pending;

		if (args->mc_num) {
			/* update mcast list on the ramrod params */
			INIT_LIST_HEAD(&mcast->mcast_list);
			for (i = 0; i < args->mc_num; i++)
				list_add_tail(&(args->mc[i].link),
					      &mcast->mcast_list);
			/* add new mcasts */
			vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
			vfop->rc = bnx2x_config_mcast(bp, mcast,
						      BNX2X_MCAST_CMD_ADD);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MCAST_CHK_DONE:
		vfop->rc = raw->check_pending(raw) ? 1 : 0;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
op_done:
	kfree(args->mc);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 bnx2x_mac_addr_t *mcasts,
			 int mcast_num, bool drv_only)
{
	struct bnx2x_vfop *vfop = NULL;
	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
					   NULL;

	if (!mc_sz || mc) {
		vfop = bnx2x_vfop_add(bp, vf);
		if (vfop) {
			int i;
			struct bnx2x_mcast_ramrod_params *ramrod =
				&vf->op_params.mcast;

			/* set ramrod params */
			memset(ramrod, 0, sizeof(*ramrod));
			ramrod->mcast_obj = &vf->mcast_obj;
			if (drv_only)
				set_bit(RAMROD_DRV_CLR_ONLY,
					&ramrod->ramrod_flags);

			/* copy mcasts pointers */
			vfop->args.mc_list.mc_num = mcast_num;
			vfop->args.mc_list.mc = mc;
			for (i = 0; i < mcast_num; i++)
				mc[i].mac = mcasts[i];

			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
					 bnx2x_vfop_mcast, cmd->done);
			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
						     cmd->block);
		}
		kfree(mc);
	}
	return -ENOMEM;
}

/* VFOP rx-mode */
static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
	enum bnx2x_vfop_rxmode_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RXMODE_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RXMODE_DONE;

		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
		BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RXMODE_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid, unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_rx_mode_ramrod_params *ramrod =
			&vf->op_params.rx_mode;

		memset(ramrod, 0, sizeof(*ramrod));

		/* Prepare ramrod parameters */
		ramrod->cid = vfq->cid;
		ramrod->cl_id = vfq_cl_id(vf, vfq);
		ramrod->rx_mode_obj = &bp->rx_mode_obj;
		ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);

		ramrod->rx_accept_flags = accept_flags;
		ramrod->tx_accept_flags = accept_flags;
		ramrod->pstate = &vf->filter_state;
		ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

		set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
		set_bit(RAMROD_RX, &ramrod->ramrod_flags);
		set_bit(RAMROD_TX, &ramrod->ramrod_flags);

		ramrod->rdata =
			bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
		ramrod->rdata_mapping =
			bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);

		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
				 bnx2x_vfop_rxmode, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
 * queue destructor)
 */
static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qteardown_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qdown;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QTEARDOWN_RXMODE:
		/* Drop all */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
		/* vlan-clear-all: don't consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
		/* mac-clear-all: consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_QDTOR:
		/* run the queue destruction flow */
		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
		DP(BNX2X_MSG_IOV, "returned from cmd\n");
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);

	case BNX2X_VFOP_QTEARDOWN_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
				 bnx2x_vfop_qdown, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
					     cmd->block);
	}

	return -ENOMEM;
}

/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

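/* Each PGLUE "was error" register covers a window of 32 VFs: the group
 * index below selects the register and the low 5 bits of abs_vfid select
 * the bit to clear.
 */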
static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors*/
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		goto unknown_dev;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
unknown_dev:
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
{
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
}

static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, &vf->alloc_resc);
	vf->state = VF_FREE;
}

static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}

static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_flr_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_flr,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_FLR_QUEUES:
		/* the cleanup operations are valid if and only if the VF
		 * was first acquired.
		 */
		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
						       qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}
		/* remove multicasts */
		vfop->state = BNX2X_VFOP_FLR_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
						0, true);
		if (vfop->rc)
			goto op_err;
		return;
	case BNX2X_VFOP_FLR_HW:

		/* dispatch final cleanup and wait for HW queues to flush */
		bnx2x_vf_flr_clnup_hw(bp, vf);

		/* release VF resources */
		bnx2x_vf_free_resc(bp, vf);

		/* re-open the mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	vf->flr_clnup_stage = VF_FLR_ACK;
	bnx2x_vfop_end(bp, vf, vfop);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}

static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
			      struct bnx2x_virtf *vf,
			      vfop_handler_t done)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1; /* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
				 bnx2x_vfop_flr, done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
	}
	return -ENOMEM;
}

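/* bnx2x_vf_flr_clnup() passes itself as the 'done' callback of the per-VF
 * FLR state machine above, so when one VF finishes its cleanup the scan
 * below is re-entered and moves on to the next VF marked for FLR.
 */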
static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
{
	int i = prev_vf ? prev_vf->index + 1 : 0;
	struct bnx2x_virtf *vf;

	/* find next VF to cleanup */
next_vf_to_clean:
	for (;
	     i < BNX2X_NR_VIRTFN(bp) &&
	     (bnx2x_vf(bp, i, state) != VF_RESET ||
	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
	     i++)
		;

	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
	   BNX2X_NR_VIRTFN(bp));

	if (i < BNX2X_NR_VIRTFN(bp)) {
		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
				  vf->abs_vfid);

			/* mark the VF to be ACKED and continue */
			vf->flr_clnup_stage = VF_FLR_ACK;
			goto next_vf_to_clean;
		}
		return;
	}

	/* we are done, update vf records */
	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);

		if (vf->flr_clnup_stage != VF_FLR_ACK)
			continue;

		vf->flr_clnup_stage = VF_FLR_EPILOG;
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the vfs for which an flr was requested, even
	 * if amongst them there are such that we never opened, since the mcp
	 * will interrupt us immediately again if we only ack some of the bits,
	 * resulting in an endless loop. This can happen for example in KVM
	 * where an 'all ones' flr request is sometimes given by the hypervisor.
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}

void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = VF_FLR_CLN;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs*/
	bnx2x_vf_flr_clnup(bp, NULL);
}

/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflects the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it is > 0 the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size although the two are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the number of VF allowed doorbells to the full DQ range */
	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}

static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (!(fid & IGU_FID_ENCODE_IS_PF))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));

		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}

static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
{
	int i;
	u8 queue_count = 0;

	if (IS_SRIOV(bp))
		for_each_vf(bp, i)
			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);

	return queue_count;
}

/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i, qcount;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify is pf */
	if (IS_VF(bp))
		return 0;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	err = -EIO;
	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hyper-visor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs*/
	if (iov->total == 0)
		goto failed;

	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);

	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
	   num_vfs_param, iov->nr_virtfn);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* get the total queue count and allocate the global queue arrays */
	qcount = bnx2x_iov_get_max_queue_count(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
				 GFP_KERNEL);
	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}

void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
	pci_disable_sriov(bp->pdev);
	DP(BNX2X_MSG_IOV, "sriov disabled\n");

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);

	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
		       BP_VF_BULLETIN_DMA(bp)->mapping,
		       BP_VF_BULLETIN_DMA(bp)->size);
}

int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
			tot_size);
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
			tot_size);
	BP_VF_MBX_DMA(bp)->size = tot_size;

	/* allocate local bulletin boards */
	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr,
			&BP_VF_BULLETIN_DMA(bp)->mapping, tot_size);
	BP_VF_BULLETIN_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}

static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),
			     q_type);

	DP(BNX2X_MSG_IOV,
	   "initialized vf %d's queue object. func id set to %d\n",
	   vf->abs_vfid, q->sp_obj.func_id);

	/* mac/vlan objects are per queue, but only those
	 * that belong to the leading queue are initialized
	 */
	if (vfq_is_leading(q)) {
		/* mac */
		bnx2x_init_mac_obj(bp, &q->mac_obj,
				   cl_id, q->cid, func_id,
				   bnx2x_vf_sp(bp, vf, mac_rdata),
				   bnx2x_vf_sp_map(bp, vf, mac_rdata),
				   BNX2X_FILTER_MAC_PENDING,
				   &vf->filter_state,
				   BNX2X_OBJ_TYPE_RX_TX,
				   &bp->macs_pool);
		/* vlan */
		bnx2x_init_vlan_obj(bp, &q->vlan_obj,
				    cl_id, q->cid, func_id,
				    bnx2x_vf_sp(bp, vf, vlan_rdata),
				    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
				    BNX2X_FILTER_VLAN_PENDING,
				    &vf->filter_state,
				    BNX2X_OBJ_TYPE_RX_TX,
				    &bp->vlans_pool);

		/* mcast */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
				     q->cid, func_id, func_id,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		vf->leading_rss = cl_id;
	}
}

/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid, qcount, i;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* let FLR complete ... */
	msleep(100);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, &vf->alloc_resc);

		/* queues are initialized during VF-ACQUIRE */

		/* reserve the vf vlan credit */
		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));

		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	qcount = 0;
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, i);
		vf->devfn = bnx2x_vf_devfn(bp, i);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);

		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];
		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
	}

	return 0;
}

/* called by bnx2x_chip_cleanup */
int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return 0;

	/* release all the VFs */
	for_each_vf(bp, i)
		bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */

	return 0;
}

/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}

static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
}

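/* EQ entry completion handlers: these run from the PF's event queue. The
 * echo/cid carried in the EQE is used to resolve the VF and its queue, and
 * the corresponding VF object (mac/vlan/mcast/rx-mode state) is advanced
 * just as the PF's own objects are for its own ramrods.
 */
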
static
void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					struct bnx2x_vf_queue *vfq,
					union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;

	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
					   &ramrod_flags);
		break;
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
					    &ramrod_flags);
		break;
	default:
		BNX2X_ERR("Unsupported classification command: %d\n",
			  elem->message.data.eth_event.echo);
		return;
	}
	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}


void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
			       struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}
}

void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
				 struct bnx2x_virtf *vf)
{
	smp_mb__before_clear_bit();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_clear_bit();
}

int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
		cid = (elem->message.data.eth_event.echo &
		       BNX2X_SWCID_MASK);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
		   abs_vfid, elem->message.data.malicious_vf_event.err_id);
		goto get_vf;
	default:
		return 1;
	}

	/* check if the cid is the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
		return 1;
	}

	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
get_vf:
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf) {
		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
			  cid, abs_vfid);
		return 0;
	}

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       &vfq_get(vf,
								qidx)->sp_obj,
						       BNX2X_Q_CMD_CFC_DEL);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
		break;
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n",
		   abs_vfid, elem->message.data.malicious_vf_event.err_id);
		/* Do nothing for now */
		break;
	}
	/* SRIOV: reschedule any 'in_progress' operations */
	bnx2x_iov_sp_event(bp, cid, false);

	return 0;
}
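
/* A VF cid encodes both the VF and the queue: the low BNX2X_VF_CID_WND bits
 * select the queue within the VF and the next six bits select the VF (at
 * most 64 VFs per path). Purely as an illustration, if BNX2X_VF_CID_WND were
 * 2, cid bits [1:0] would be the queue index and bits [7:2] the abs_vfid -
 * exactly the masking done above and in bnx2x_vf_by_cid() below.
 */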

static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
{
	/* extract the vf from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
}

void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj)
{
	struct bnx2x_virtf *vf;

	if (!IS_SRIOV(bp))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);

	if (vf) {
		/* extract queue index from vf_cid - relies on the following:
		 * 1. vfid on cid reflects the true abs_vfid
		 * 2. The max number of VFs (per path) is 64
		 */
		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
	} else {
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
	}
}

void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
{
	struct bnx2x_virtf *vf;

	/* check if the cid is the VF range */
	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);
	if (vf) {
		/* set in_progress flag */
		atomic_set(&vf->op_in_progress, 1);
		if (queue_work)
			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}
}

void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index, num_queues_req;
	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;
	u8 stats_count = 0;
	bool is_fcoe = false;

	if (!IS_SRIOV(bp))
		return;

	if (!NO_FCOE(bp))
		is_fcoe = true;

	/* fcoe adds one global request and one queue request */
	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
		(is_fcoe ? 0 : 1);

	DP(BNX2X_MSG_IOV,
	   "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
	   BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
	   first_queue_query_index + num_queues_req);

	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
		num_queues_req * sizeof(struct per_queue_stats);

	cur_query_entry = &bp->fw_stats_req->
		query[first_queue_query_index + num_queues_req];

	for_each_vf(bp, i) {
		int j;
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (vf->state != VF_ENABLED) {
			DP(BNX2X_MSG_IOV,
			   "vf %d not enabled so no stats for it\n",
			   vf->abs_vfid);
			continue;
		}

		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
		for_each_vfq(vf, j) {
			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);

			/* collect stats for active queues only */
			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
			    BNX2X_Q_LOGICAL_STATE_STOPPED)
				continue;

			/* create stats query entry for this queue */
			cur_query_entry->kind = STATS_TYPE_QUEUE;
			cur_query_entry->index = vfq_cl_id(vf, rxq);
			cur_query_entry->funcID =
				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
			cur_query_entry->address.hi =
				cpu_to_le32(U64_HI(vf->fw_stat_map));
			cur_query_entry->address.lo =
				cpu_to_le32(U64_LO(vf->fw_stat_map));
			DP(BNX2X_MSG_IOV,
			   "added address %x %x for vf %d queue %d client %d\n",
			   cur_query_entry->address.hi,
			   cur_query_entry->address.lo, cur_query_entry->funcID,
			   j, cur_query_entry->index);
			cur_query_entry++;
			cur_data_offset += sizeof(struct per_queue_stats);
			stats_count++;
		}
	}
	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}
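
/* The PF's own queue queries occupy the first slots of fw_stats_req; the
 * loop above appends one STATS_TYPE_QUEUE entry per active VF queue after
 * them, and cmd_num grows by the number of entries actually added so the
 * FW only scans valid slots.
 */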

void bnx2x_iov_sp_task(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;
	/* Iterate over all VFs and invoke state transition for VFs with
	 * 'in-progress' slow-path operations
	 */
	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (!list_empty(&vf->op_list_head) &&
		    atomic_read(&vf->op_in_progress)) {
			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
		}
	}
}

struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
{
	int i;
	struct bnx2x_virtf *vf = NULL;

	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);
		if (stat_id >= vf->igu_base_id &&
		    stat_id < vf->igu_base_id + vf_sb_count(vf))
			break;
	}
	return vf;
}

/* VF API helpers */
static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
				u8 enable)
{
	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;

	REG_WR(bp, reg, val);
}
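
/* Each queue-zone permission entry holds the owning VF id in its low six
 * bits; bit 6 appears to act as a 'valid' flag here (set on enable, cleared
 * together with the whole entry on disable), so a zeroed entry grants no VF
 * access to that queue zone.
 */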

static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;

	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
}

static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 val;

	/* clear the VF configuration while pretending to be the VF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
		     BNX2X_VF_MAX_QUEUES);
}

static
int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct vf_pf_resc_request *req_resc)
{
	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);

	return ((req_resc->num_rxqs <= rxq_cnt) &&
		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
}

/* CORE VF API */
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
		     struct vf_pf_resc_request *resc)
{
	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
		BNX2X_CIDS_PER_VF;

	union cdu_context *base_cxt = (union cdu_context *)
		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
		(base_vf_cid & (ILT_PAGE_CIDS-1));
	int i;

	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the already acquired
	 * resources. Verify that the requested numbers do not exceed
	 * the already acquired numbers.
	 */
	if (vf->state == VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
		   vf->abs_vfid);

		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
				  vf->abs_vfid);
			return -EINVAL;
		}
		return 0;
	}

	/* Otherwise vf state must be 'free' or 'reset' */
	if (vf->state != VF_FREE && vf->state != VF_RESET) {
		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
			  vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* static allocation:
	 * the global maximum numbers are fixed per VF. Fail the request if
	 * the requested numbers exceed these globals.
	 */
	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
		DP(BNX2X_MSG_IOV,
		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
		/* set the max resource in the vf */
		return -ENOMEM;
	}

	/* Set resources counters - 0 request means max available */
	vf_sb_count(vf) = resc->num_sbs;
	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	if (resc->num_mac_filters)
		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
	if (resc->num_vlan_filters)
		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;

	DP(BNX2X_MSG_IOV,
	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
	   vf_sb_count(vf), vf_rxq_count(vf),
	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
	   vf_vlan_rules_cnt(vf));

	/* Initialize the queues */
	if (!vf->vfqs) {
		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
		return -EINVAL;
	}

	for_each_vfq(vf, i) {
		struct bnx2x_vf_queue *q = vfq_get(vf, i);

		if (!q) {
			DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
			return -EINVAL;
		}

		q->index = i;
		q->cxt = &((base_cxt + i)->eth);
		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;

		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
		   vf->abs_vfid, i, q->index, q->cid, q->cxt);

		/* init SP objects */
		bnx2x_vfq_init(bp, vf, q);
	}
	vf->state = VF_ACQUIRED;
	return 0;
}
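
/* For orientation: with BNX2X_CIDS_PER_VF cids statically carved out per VF,
 * VF 'n' (relative to first_vf_in_pf) owns the contiguous range
 * [BNX2X_FIRST_VF_CID + n * BNX2X_CIDS_PER_VF,
 *  BNX2X_FIRST_VF_CID + (n + 1) * BNX2X_CIDS_PER_VF), and queue i of that VF
 * maps to cid BNX2X_FIRST_VF_CID + base_vf_cid + i, as assigned above.
 */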

int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{
	struct bnx2x_func_init_params func_init = {0};
	u16 flags = 0;
	int i;

	/* the sb resources are initialized at this point, do the
	 * FW/HW initializations
	 */
	for_each_vf_sb(vf, i)
		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));

	/* Sanity checks */
	if (vf->state != VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
		   vf->abs_vfid, vf->state);
		return -EINVAL;
	}
	/* let FLR complete ... */
	msleep(100);

	/* FLR cleanup epilogue */
	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
		return -EBUSY;

	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);

	/* vf init */
	if (vf->cfg_flags & VF_CFG_STATS)
		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);

	if (vf->cfg_flags & VF_CFG_TPA)
		flags |= FUNC_FLG_TPA;

	if (is_vf_multi(vf))
		flags |= FUNC_FLG_RSS;

	/* function setup */
	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
	func_init.fw_stat_map = vf->fw_stat_map;
	func_init.spq_map = vf->spq_map;
	func_init.spq_prod = 0;
	bnx2x_func_init(bp, &func_init);

	/* Enable the vf */
	bnx2x_vf_enable_access(bp, vf->abs_vfid);
	bnx2x_vf_enable_traffic(bp, vf);

	/* queue protection table */
	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), true);

	vf->state = VF_ENABLED;

	/* update vf bulletin board */
	bnx2x_post_vf_bulletin(bp, vf->index);

	return 0;
}

/* VFOP close (teardown the queues, delete mcasts and close HW) */
static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_close_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_close,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_CLOSE_QUEUES:

		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}

		/* remove multicasts */
		vfop->state = BNX2X_VFOP_CLOSE_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_CLOSE_HW:

		/* disable the interrupts */
		DP(BNX2X_MSG_IOV, "disabling igu\n");
		bnx2x_vf_igu_disable(bp, vf);

		/* disable the VF */
		DP(BNX2X_MSG_IOV, "clearing qtbl\n");
		bnx2x_vf_clr_qtbl(bp, vf);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	vf->state = VF_ACQUIRED;
	DP(BNX2X_MSG_IOV, "set state to acquired\n");
	bnx2x_vfop_end(bp, vf, vfop);
}

int bnx2x_vfop_close_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1; /* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
				 bnx2x_vfop_close, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
					     cmd->block);
	}
	return -ENOMEM;
}
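
/* A vfop is a small per-VF state machine: each command arms vfop->state and
 * re-enters through its .done callback until it reaches op_done/op_err. The
 * 'qid = -1' idiom above makes the first BNX2X_VFOP_CLOSE_QUEUES transition
 * pre-increment to queue 0, so the same code path tears down every queue.
 */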

/* VF release can be called either: 1. The VF was acquired but
 * not enabled 2. the vf was enabled or in the process of being
 * enabled
 */
static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_release,
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
	   vf->state == VF_FREE ? "Free" :
	   vf->state == VF_ACQUIRED ? "Acquired" :
	   vf->state == VF_ENABLED ? "Enabled" :
	   vf->state == VF_RESET ? "Reset" :
	   "Unknown");

	switch (vf->state) {
	case VF_ENABLED:
		vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
		if (vfop->rc)
			goto op_err;
		return;

	case VF_ACQUIRED:
		DP(BNX2X_MSG_IOV, "about to free resources\n");
		bnx2x_vf_free_resc(bp, vf);
		DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
		goto op_done;

	case VF_FREE:
	case VF_RESET:
		/* do nothing */
		goto op_done;
	default:
		bnx2x_vfop_default(vf->state);
	}
op_err:
	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
}

int bnx2x_vfop_release_cmd(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		bnx2x_vfop_opset(-1, /* use vf->state */
				 bnx2x_vfop_release, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VF release ~ VF close + VF release-resources
 * Release is the ultimate SW shutdown and is called whenever an
 * irrecoverable error is encountered.
 */
void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
{
	struct bnx2x_vfop_cmd cmd = {
		.done = NULL,
		.block = block,
	};
	int rc;
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);

	rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
	if (rc)
		DP(BNX2X_MSG_IOV,
		   "VF[%d] Failed to allocate resources for release op - rc=%d\n",
		   vf->abs_vfid, rc);
}

static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
				     struct bnx2x_virtf *vf, u32 *sbdf)
{
	*sbdf = vf->devfn | (vf->bus << 8);
}

static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
				     struct bnx2x_vf_bar_info *bar_info)
{
	int n;

	bar_info->nr_bars = bp->vfdb->sriov.nres;
	for (n = 0; n < bar_info->nr_bars; n++)
		bar_info->bars[n] = vf->bars[n];
}

void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      enum channel_tlvs tlv)
{
	/* lock the channel */
	mutex_lock(&vf->op_mutex);

	/* record the locking op */
	vf->op_current = tlv;

	/* log the lock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
	   vf->abs_vfid, tlv);
}

void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
				enum channel_tlvs expected_tlv)
{
	WARN(expected_tlv != vf->op_current,
	     "lock mismatch: expected %d found %d", expected_tlv,
	     vf->op_current);

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, vf->op_current);

	/* clear the locking op */
	vf->op_current = CHANNEL_TLV_NONE;
}
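
/* PF-side flows bracket their configuration with
 * bnx2x_lock_vf_pf_channel(bp, vf, <tlv>) ... bnx2x_unlock_vf_pf_channel()
 * using the same TLV (see bnx2x_set_vf_mac/bnx2x_set_vf_vlan below), so a
 * mismatched unlock trips the WARN above and pinpoints the offending flow.
 */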

int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));

	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
	   num_vfs_param, BNX2X_NR_VIRTFN(bp));

	/* HW channel is only operational when PF is up */
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
		return -EINVAL;
	}

	/* we are always bound by the total_vfs in the configuration space */
	if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
			  num_vfs_param, BNX2X_NR_VIRTFN(bp));
		num_vfs_param = BNX2X_NR_VIRTFN(bp);
	}

	bp->requested_nr_virtfn = num_vfs_param;
	if (num_vfs_param == 0) {
		pci_disable_sriov(dev);
		return 0;
	} else {
		return bnx2x_enable_sriov(bp);
	}
}

int bnx2x_enable_sriov(struct bnx2x *bp)
{
	int rc = 0, req_vfs = bp->requested_nr_virtfn;

	rc = pci_enable_sriov(bp->pdev, req_vfs);
	if (rc) {
		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
		return rc;
	}
	DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
	return req_vfs;
}

void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
{
	int vfidx;
	struct pf_vf_bulletin_content *bulletin;

	DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
	for_each_vf(bp, vfidx) {
		bulletin = BP_VF_BULLETIN(bp, vfidx);
		if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
			bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
	}
}

void bnx2x_disable_sriov(struct bnx2x *bp)
{
	pci_disable_sriov(bp->pdev);
}

static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
			     struct bnx2x_virtf **vf,
			     struct pf_vf_bulletin_content **bulletin)
{
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("vf ndo called while PF is down\n");
		return -EINVAL;
	}

	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("vf ndo called while sriov is disabled\n");
		return -EINVAL;
	}

	if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
			  vfidx, BNX2X_NR_VIRTFN(bp));
		return -EINVAL;
	}

	/* init members */
	*vf = BP_VF(bp, vfidx);
	*bulletin = BP_VF_BULLETIN(bp, vfidx);

	if (!*vf) {
		BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	if (!*bulletin) {
		BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	return 0;
}

int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
			struct ifla_vf_info *ivi)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;
	struct bnx2x_vlan_mac_obj *mac_obj;
	struct bnx2x_vlan_mac_obj *vlan_obj;
	int rc;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;
	mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
	vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
	if (!mac_obj || !vlan_obj) {
		BNX2X_ERR("VF partially initialized\n");
		return -EINVAL;
	}

	ivi->vf = vfidx;
	ivi->qos = 0;
	ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
	ivi->spoofchk = 1; /* always enabled */
	if (vf->state == VF_ENABLED) {
		/* mac and vlan are in vlan_mac objects */
		mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
					0, ETH_ALEN);
		vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
					 0, VLAN_HLEN);
	} else {
		/* mac */
		if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
			/* mac configured by ndo so it's in the bulletin board */
			memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
		else
			/* function has not been loaded yet. Show mac as 0s */
			memset(&ivi->mac, 0, ETH_ALEN);

		/* vlan */
		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
			/* vlan configured by ndo so it's in the bulletin board */
			memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
		else
			/* function has not been loaded yet. Show vlans as 0s */
			memset(&ivi->vlan, 0, VLAN_HLEN);
	}

	return 0;
}

/* New mac for VF. Consider these cases:
 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
 *    supply at acquire.
 * 2. VF has already been acquired but has not yet initialized - store in local
 *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
 *    will configure this mac when it is ready.
 * 3. VF has already initialized but has not yet setup a queue - post the new
 *    mac on VF's bulletin board right now. VF will configure this mac when it
 *    is ready.
 * 4. VF has already set a queue - delete any macs already configured for this
 *    queue and manually config the new mac.
 * In any event, once this function has been called refuse any attempts by the
 * VF to configure any mac for itself except for this mac. In case of a race
 * where the VF fails to see the new post on its bulletin board before sending a
 * mac configuration request, the PF will simply fail the request and VF can try
 * again after consulting its bulletin board.
 */
int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;
	if (!is_valid_ether_addr(mac)) {
		BNX2X_ERR("mac address invalid\n");
		return -EINVAL;
	}

	/* update PF's copy of the VF's bulletin. Will no longer accept mac
	 * configuration requests from the vf unless they match this mac
	 */
	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
	memcpy(bulletin->mac, mac, ETH_ALEN);

	/* Post update on VF's bulletin board */
	rc = bnx2x_post_vf_bulletin(bp, vfidx);
	if (rc) {
		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
		return rc;
	}

	/* is vf initialized and queue set up? */
	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the mac in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);

		/* remove existing eth macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete eth macs\n");
			return -EINVAL;
		}

		/* remove existing uc list macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete uc_list macs\n");
			return -EINVAL;
		}

		/* configure the new mac to device */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
				  BNX2X_ETH_MAC, &ramrod_flags);

		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
	}

	return 0;
}
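
/* This is the .ndo_set_vf_mac handler; it is typically reached via
 * 'ip link set <pf-dev> vf <n> mac <addr>' on the hypervisor.
 */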

int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;

	if (vlan > 4095) {
		BNX2X_ERR("illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
	   vfidx, vlan, 0);

	/* update PF's copy of the VF's bulletin. No point in posting the vlan
	 * to the VF since it doesn't have anything to do with it. But it's
	 * useful to store it here in case the VF is not up yet, so we can
	 * configure the vlan later when it comes up.
	 */
	bulletin->valid_bitmap |= 1 << VLAN_VALID;
	bulletin->vlan = vlan;

	/* is vf initialized and queue set up? */
	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the vlan in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		unsigned long vlan_mac_flags = 0;
		struct bnx2x_vlan_mac_obj *vlan_obj =
			&bnx2x_vfq(vf, 0, vlan_obj);
		struct bnx2x_vlan_mac_ramrod_params ramrod_param;
		struct bnx2x_queue_state_params q_params = {NULL};
		struct bnx2x_queue_update_params *update_params;

		memset(&ramrod_param, 0, sizeof(ramrod_param));

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

		/* remove existing vlans */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
					  &ramrod_flags);
		if (rc) {
			BNX2X_ERR("failed to delete vlans\n");
			return -EINVAL;
		}

		/* send queue update ramrod to configure default vlan and silent
		 * vlan removal
		 */
		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
		q_params.cmd = BNX2X_Q_CMD_UPDATE;
		q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
		update_params = &q_params.params.update;
		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
			  &update_params->update_flags);
		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			  &update_params->update_flags);

		if (vlan == 0) {
			/* if vlan is 0 then we want to leave the VF traffic
			 * untagged, and leave the incoming traffic untouched
			 * (i.e. do not remove any vlan tags).
			 */
			__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				    &update_params->update_flags);
			__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				    &update_params->update_flags);
		} else {
			/* configure the new vlan to device */
			__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
			ramrod_param.vlan_mac_obj = vlan_obj;
			ramrod_param.ramrod_flags = ramrod_flags;
			ramrod_param.user_req.u.vlan.vlan = vlan;
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
			rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
			if (rc) {
				BNX2X_ERR("failed to configure vlan\n");
				return -EINVAL;
			}

			/* configure default vlan to vf queue and set silent
			 * vlan removal (the vf remains unaware of this vlan).
			 */
			update_params = &q_params.params.update;
			__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				  &update_params->update_flags);
			__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				  &update_params->update_flags);
			update_params->def_vlan = vlan;
		}

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to configure default VLAN\n");
			return rc;
		}

		/* clear the flag indicating that this VF needs its vlan
		 * (will only be set if the HV configured the VLAN before the
		 * VF was up and we were called because the VF came up later)
		 */
		vf->cfg_flags &= ~VF_CFG_VLAN;

		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
	}
	return 0;
}
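
/* This is the .ndo_set_vf_vlan handler (e.g. 'ip link set <pf-dev> vf <n>
 * vlan <vid>'). The default-vlan + silent-removal combination tags and strips
 * traffic in hardware while the VF itself stays unaware of the VLAN.
 */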

/* crc is the first field in the bulletin board. Compute the crc over the
 * entire bulletin board excluding the crc field itself. Use the length field
 * as the Bulletin Board was posted by a PF with possibly a different version
 * from the vf which will sample it. Therefore, the length is computed by the
 * PF and then used blindly by the VF.
 */
u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
			  struct pf_vf_bulletin_content *bulletin)
{
	return crc32(BULLETIN_CRC_SEED,
		     ((u8 *)bulletin) + sizeof(bulletin->crc),
		     bulletin->length - sizeof(bulletin->crc));
}

/* Check for new posts on the bulletin board */
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
{
	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
	int attempts;

	/* bulletin board hasn't changed since last sample */
	if (bp->old_bulletin.version == bulletin.version)
		return PFVF_BULLETIN_UNCHANGED;

	/* validate crc of new bulletin board */
	if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
		/* sampling the structure mid-post may result in corrupted
		 * data; validate the crc to ensure coherency.
		 */
		for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
			bulletin = bp->pf2vf_bulletin->content;
			if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
								  &bulletin))
				break;
			BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
				  bulletin.crc,
				  bnx2x_crc_vf_bulletin(bp, &bulletin));
		}
		if (attempts >= BULLETIN_ATTEMPTS) {
			BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
				  attempts);
			return PFVF_BULLETIN_CRC_ERR;
		}
	}

	/* the mac address in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
	    memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
		/* update new mac to net device */
		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
	}

	/* the vlan in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << VLAN_VALID)
		memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);

	/* copy new bulletin board to bp */
	bp->old_bulletin = bulletin;

	return PFVF_BULLETIN_UPDATED;
}
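
/* The version/crc dance above guards against torn reads: the PF may rewrite
 * the board while the VF samples it, so the VF only consumes a snapshot whose
 * embedded crc matches, retrying up to BULLETIN_ATTEMPTS times.
 */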

void bnx2x_timer_sriov(struct bnx2x *bp)
{
	bnx2x_sample_bulletin(bp);

	/* if channel is down we need to self destruct */
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
	}
}

void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
	/* vf doorbells are embedded within the regview */
	return bp->regview + PXP_VF_ADDR_DB_START;
}

int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
	mutex_init(&bp->vf2pf_mutex);

	/* allocate vf2pf mailbox for vf to pf channel */
	BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
			sizeof(struct bnx2x_vf_mbx_msg));

	/* allocate pf 2 vf bulletin board */
	BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
			sizeof(union pf_vf_bulletin));

	return 0;

alloc_mem_err:
	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
		       sizeof(struct bnx2x_vf_mbx_msg));
	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
		       sizeof(union pf_vf_bulletin));
	return -ENOMEM;
}
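
/* BNX2X_PCI_ALLOC allocates a zeroed coherent DMA region and jumps to the
 * local alloc_mem_err label on failure, which is why the unwind path above
 * frees both regions unconditionally.
 */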

int bnx2x_open_epilog(struct bnx2x *bp)
{
	/* Enable sriov via delayed work. This must be done via delayed work
	 * because it causes the probe of the vf devices to be run, which
	 * invokes register_netdevice, which must have the rtnl lock held.
	 * As we are holding the lock right now, that could only work if the
	 * probe would not take the lock. However, as the probe of the vf may
	 * be called from other contexts as well (such as when passthrough to
	 * a VM fails) it can't assume the lock is being held for it. Using
	 * delayed work here allows the probe code to simply take the lock
	 * (i.e. wait for it to be released if it is being held). We only want
	 * to do this if the number of VFs was set before the PF driver was
	 * loaded.
	 */
	if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
	}

	return 0;
}

void bnx2x_iov_channel_down(struct bnx2x *bp)
{
	int vf_idx;
	struct pf_vf_bulletin_content *bulletin;

	if (!IS_SRIOV(bp))
		return;

	for_each_vf(bp, vf_idx) {
		/* locate this VF's bulletin board and update the channel down
		 * flag
		 */
		bulletin = BP_VF_BULLETIN(bp, vf_idx);
		bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;

		/* update vf bulletin board */
		bnx2x_post_vf_bulletin(bp, vf_idx);
	}
}