1 /* bnx2x_sp.c: Broadcom Everest network driver.
3 * Copyright (c) 2011-2012 Broadcom Corporation
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16 * Written by: Vladislav Zolotarov
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include <linux/module.h>
23 #include <linux/crc32.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/crc32c.h>
28 #include "bnx2x_cmn.h"
31 #define BNX2X_MAX_EMUL_MULTI 16
33 #define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
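/* Note: with ETH_ALEN == 6 and sizeof(u32) == 4, ALIGN(6, 4) == 8, so
 * MAC_LEADING_ZERO_CNT == 2: each MAC copied out by bnx2x_get_n_elements()
 * below is prefixed with two zero bytes to keep entries u32-aligned in the
 * caller's buffer.
 */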
35 /**** Exe Queue interfaces ****/
38 * bnx2x_exe_queue_init - init the Exe Queue object
40 * @o: pointer to the object
42 * @owner: pointer to the owner
43 * @validate: validate function pointer
44 * @optimize: optimize function pointer
45 * @exec: execute function pointer
46 * @get: get function pointer
48 static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
49 struct bnx2x_exe_queue_obj *o,
51 union bnx2x_qable_obj *owner,
52 exe_q_validate validate,
54 exe_q_optimize optimize,
58 memset(o, 0, sizeof(*o));
60 INIT_LIST_HEAD(&o->exe_queue);
61 INIT_LIST_HEAD(&o->pending_comp);
63 spin_lock_init(&o->lock);
65 o->exe_chunk_len = exe_len;
68 /* Owner specific callbacks */
69 o->validate = validate;
71 o->optimize = optimize;
75 DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
79 static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
80 struct bnx2x_exeq_elem *elem)
82 DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
86 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
88 struct bnx2x_exeq_elem *elem;
91 spin_lock_bh(&o->lock);
93 list_for_each_entry(elem, &o->exe_queue, link)
96 spin_unlock_bh(&o->lock);
102 * bnx2x_exe_queue_add - add a new element to the execution queue
106 * @cmd: new command to add
107 * @restore: true - do not optimize the command
109 * If the element is optimized or is illegal, frees it.
111 static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
112 struct bnx2x_exe_queue_obj *o,
113 struct bnx2x_exeq_elem *elem,
118 spin_lock_bh(&o->lock);
121 /* Try to optimize this element away against a pending opposite command */
122 rc = o->optimize(bp, o->owner, elem);
126 /* Check if this request is ok */
127 rc = o->validate(bp, o->owner, elem);
129 DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
134 /* If so, add it to the execution queue */
135 list_add_tail(&elem->link, &o->exe_queue);
137 spin_unlock_bh(&o->lock);
142 bnx2x_exe_queue_free_elem(bp, elem);
144 spin_unlock_bh(&o->lock);
150 static inline void __bnx2x_exe_queue_reset_pending(
152 struct bnx2x_exe_queue_obj *o)
154 struct bnx2x_exeq_elem *elem;
156 while (!list_empty(&o->pending_comp)) {
157 elem = list_first_entry(&o->pending_comp,
158 struct bnx2x_exeq_elem, link);
160 list_del(&elem->link);
161 bnx2x_exe_queue_free_elem(bp, elem);
165 static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
166 struct bnx2x_exe_queue_obj *o)
169 spin_lock_bh(&o->lock);
171 __bnx2x_exe_queue_reset_pending(bp, o);
173 spin_unlock_bh(&o->lock);
178 * bnx2x_exe_queue_step - execute one execution chunk atomically
182 * @ramrod_flags: flags
184 * (Atomicity is ensured using the exe_queue->lock).
186 static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
187 struct bnx2x_exe_queue_obj *o,
188 unsigned long *ramrod_flags)
190 struct bnx2x_exeq_elem *elem, spacer;
193 memset(&spacer, 0, sizeof(spacer));
195 spin_lock_bh(&o->lock);
198 * Next step should not be performed until the current is finished,
199 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
200 * properly clear object internals without sending any command to the FW
201 * which also implies there won't be any completion to clear the
204 if (!list_empty(&o->pending_comp)) {
205 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
206 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
207 __bnx2x_exe_queue_reset_pending(bp, o);
209 spin_unlock_bh(&o->lock);
215 * Run through the pending commands list and create a next
218 while (!list_empty(&o->exe_queue)) {
219 elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
221 WARN_ON(!elem->cmd_len);
223 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
224 cur_len += elem->cmd_len;
226 * Prevent both lists from being empty when moving an
227 * element. This allows calling
228 * bnx2x_exe_queue_empty() without locking.
230 list_add_tail(&spacer.link, &o->pending_comp);
232 list_move_tail(&elem->link, &o->pending_comp);
233 list_del(&spacer.link);
240 spin_unlock_bh(&o->lock);
244 rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
247 * In case of an error return the commands back to the queue
248 * and reset the pending_comp.
250 list_splice_init(&o->pending_comp, &o->exe_queue);
253 * If zero is returned, it means there are no outstanding pending
254 * completions and we may dismiss the pending list.
256 __bnx2x_exe_queue_reset_pending(bp, o);
258 spin_unlock_bh(&o->lock);
262 static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
264 bool empty = list_empty(&o->exe_queue);
266 /* Don't reorder!!! */
269 return empty && list_empty(&o->pending_comp);
272 static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
275 DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
276 return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
279 /************************ raw_obj functions ***********************************/
280 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
282 return !!test_bit(o->state, o->pstate);
285 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
287 smp_mb__before_clear_bit();
288 clear_bit(o->state, o->pstate);
289 smp_mb__after_clear_bit();
292 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
294 smp_mb__before_clear_bit();
295 set_bit(o->state, o->pstate);
296 smp_mb__after_clear_bit();
300 * bnx2x_state_wait - wait until the given bit(state) is cleared
303 * @state: state which is to be cleared
304 * @pstate: pointer to the state buffer
307 static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
308 unsigned long *pstate)
310 /* can take a while if any port is running */
314 if (CHIP_REV_IS_EMUL(bp))
317 DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
321 if (!test_bit(state, pstate)) {
322 #ifdef BNX2X_STOP_ON_ERROR
323 DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
328 usleep_range(1000, 1000);
335 BNX2X_ERR("timeout waiting for state %d\n", state);
336 #ifdef BNX2X_STOP_ON_ERROR
343 static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
345 return bnx2x_state_wait(bp, raw->state, raw->pstate);
348 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
349 /* credit handling callbacks */
350 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
352 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
356 return mp->get_entry(mp, offset);
359 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
361 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
365 return mp->get(mp, 1);
368 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
370 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
374 return vp->get_entry(vp, offset);
377 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
379 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
383 return vp->get(vp, 1);
386 static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
388 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
389 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
394 if (!vp->get(vp, 1)) {
402 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
404 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
406 return mp->put_entry(mp, offset);
409 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
411 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
413 return mp->put(mp, 1);
416 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
418 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
420 return vp->put_entry(vp, offset);
423 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
425 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
427 return vp->put(vp, 1);
430 static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
432 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
433 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
438 if (!vp->put(vp, 1)) {
446 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
449 struct bnx2x_vlan_mac_registry_elem *pos;
454 list_for_each_entry(pos, &o->head, link) {
456 /* place leading zeroes in buffer */
457 memset(next, 0, MAC_LEADING_ZERO_CNT);
459 /* place mac after leading zeroes */
460 memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
463 /* calculate address of next element and
467 next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
469 DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
470 counter, next, pos->u.mac.mac);
473 return counter * ETH_ALEN;
476 /* check_add() callbacks */
477 static int bnx2x_check_mac_add(struct bnx2x *bp,
478 struct bnx2x_vlan_mac_obj *o,
479 union bnx2x_classification_ramrod_data *data)
481 struct bnx2x_vlan_mac_registry_elem *pos;
483 DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
485 if (!is_valid_ether_addr(data->mac.mac))
488 /* Check if a requested MAC already exists */
489 list_for_each_entry(pos, &o->head, link)
490 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
496 static int bnx2x_check_vlan_add(struct bnx2x *bp,
497 struct bnx2x_vlan_mac_obj *o,
498 union bnx2x_classification_ramrod_data *data)
500 struct bnx2x_vlan_mac_registry_elem *pos;
502 DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
504 list_for_each_entry(pos, &o->head, link)
505 if (data->vlan.vlan == pos->u.vlan.vlan)
511 static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
512 struct bnx2x_vlan_mac_obj *o,
513 union bnx2x_classification_ramrod_data *data)
515 struct bnx2x_vlan_mac_registry_elem *pos;
517 DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
518 data->vlan_mac.mac, data->vlan_mac.vlan);
520 list_for_each_entry(pos, &o->head, link)
521 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
522 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
530 /* check_del() callbacks */
531 static struct bnx2x_vlan_mac_registry_elem *
532 bnx2x_check_mac_del(struct bnx2x *bp,
533 struct bnx2x_vlan_mac_obj *o,
534 union bnx2x_classification_ramrod_data *data)
536 struct bnx2x_vlan_mac_registry_elem *pos;
538 DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
540 list_for_each_entry(pos, &o->head, link)
541 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
547 static struct bnx2x_vlan_mac_registry_elem *
548 bnx2x_check_vlan_del(struct bnx2x *bp,
549 struct bnx2x_vlan_mac_obj *o,
550 union bnx2x_classification_ramrod_data *data)
552 struct bnx2x_vlan_mac_registry_elem *pos;
554 DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
556 list_for_each_entry(pos, &o->head, link)
557 if (data->vlan.vlan == pos->u.vlan.vlan)
563 static struct bnx2x_vlan_mac_registry_elem *
564 bnx2x_check_vlan_mac_del(struct bnx2x *bp,
565 struct bnx2x_vlan_mac_obj *o,
566 union bnx2x_classification_ramrod_data *data)
568 struct bnx2x_vlan_mac_registry_elem *pos;
570 DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
571 data->vlan_mac.mac, data->vlan_mac.vlan);
573 list_for_each_entry(pos, &o->head, link)
574 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
575 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
582 /* check_move() callback */
583 static bool bnx2x_check_move(struct bnx2x *bp,
584 struct bnx2x_vlan_mac_obj *src_o,
585 struct bnx2x_vlan_mac_obj *dst_o,
586 union bnx2x_classification_ramrod_data *data)
588 struct bnx2x_vlan_mac_registry_elem *pos;
591 /* Check if we can delete the requested configuration from the first
594 pos = src_o->check_del(bp, src_o, data);
596 /* check if configuration can be added */
597 rc = dst_o->check_add(bp, dst_o, data);
599 /* If this classification cannot be added (is already set)
600 * or can't be deleted - return an error.
608 static bool bnx2x_check_move_always_err(
610 struct bnx2x_vlan_mac_obj *src_o,
611 struct bnx2x_vlan_mac_obj *dst_o,
612 union bnx2x_classification_ramrod_data *data)
618 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
620 struct bnx2x_raw_obj *raw = &o->raw;
623 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
624 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
625 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
627 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
628 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
629 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
635 void bnx2x_set_mac_in_nig(struct bnx2x *bp,
636 bool add, unsigned char *dev_addr, int index)
639 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
640 NIG_REG_LLH0_FUNC_MEM;
642 if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
645 if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
648 DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
649 (add ? "ADD" : "DELETE"), index);
652 /* LLH_FUNC_MEM is a u64 WB register */
653 reg_offset += 8*index;
655 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
656 (dev_addr[4] << 8) | dev_addr[5]);
657 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
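	/* For example, for dev_addr 00:11:22:33:44:55 this packs
	 * wb_data[0] = 0x22334455 and wb_data[1] = 0x00000011.
	 */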
659 REG_WR_DMAE(bp, reg_offset, wb_data, 2);
662 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
663 NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
667 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
670 * @o: queue for which we want to configure this rule
671 * @add: if true the command is an ADD command, DEL otherwise
672 * @opcode: CLASSIFY_RULE_OPCODE_XXX
673 * @hdr: pointer to a header to setup
676 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
677 struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
678 struct eth_classify_cmd_header *hdr)
680 struct bnx2x_raw_obj *raw = &o->raw;
682 hdr->client_id = raw->cl_id;
683 hdr->func_id = raw->func_id;
685 /* Rx and/or Tx (internal switching) configuration? */
686 hdr->cmd_general_data |=
687 bnx2x_vlan_mac_get_rx_tx_flag(o);
690 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
692 hdr->cmd_general_data |=
693 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
697 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
699 * @cid: connection id
700 * @type: BNX2X_FILTER_XXX_PENDING
701 * @hdr: pointer to the header to setup
704 * Currently we always configure one rule and the echo field to contain a CID and an
707 static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
708 struct eth_classify_header *hdr, int rule_cnt)
710 hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
711 hdr->rule_cnt = (u8)rule_cnt;
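/* Illustrative decode of the echo value built above (a sketch, assuming
 * BNX2X_SWCID_MASK == (1 << BNX2X_SWCID_SHIFT) - 1 as the encoding implies),
 * e.g. in a completion handler:
 *
 *	u32 cid  = echo & BNX2X_SWCID_MASK;
 *	int type = echo >> BNX2X_SWCID_SHIFT;
 */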
715 /* hw_config() callbacks */
716 static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
717 struct bnx2x_vlan_mac_obj *o,
718 struct bnx2x_exeq_elem *elem, int rule_idx,
721 struct bnx2x_raw_obj *raw = &o->raw;
722 struct eth_classify_rules_ramrod_data *data =
723 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
724 int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
725 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
726 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
727 unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
728 u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
731 * Set LLH CAM entry: currently only iSCSI and ETH MACs are
732 * relevant. In addition, the current implementation is tuned for a
735 * When a PF configuration with multiple unicast ETH MACs in switch-
736 * independent mode is required (NetQ, multiple netdev MACs,
737 * etc.), consider better utilisation of the 8 per-function MAC
738 * entries in the LLH register. There are also
739 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that bring the
740 * total number of CAM entries to 16.
742 * Currently we won't configure NIG for MACs other than a primary ETH
743 * MAC and iSCSI L2 MAC.
745 * If this MAC is moving from one Queue to another, no need to change
748 if (cmd != BNX2X_VLAN_MAC_MOVE) {
749 if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
750 bnx2x_set_mac_in_nig(bp, add, mac,
751 BNX2X_LLH_CAM_ISCSI_ETH_LINE);
752 else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
753 bnx2x_set_mac_in_nig(bp, add, mac,
754 BNX2X_LLH_CAM_ETH_LINE);
757 /* Reset the ramrod data buffer for the first rule */
759 memset(data, 0, sizeof(*data));
761 /* Setup a command header */
762 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
763 &rule_entry->mac.header);
765 DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
766 (add ? "add" : "delete"), mac, raw->cl_id);
768 /* Set a MAC itself */
769 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
770 &rule_entry->mac.mac_mid,
771 &rule_entry->mac.mac_lsb, mac);
773 /* MOVE: Add a rule that will add this MAC to the target Queue */
774 if (cmd == BNX2X_VLAN_MAC_MOVE) {
778 /* Setup ramrod data */
779 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
780 elem->cmd_data.vlan_mac.target_obj,
781 true, CLASSIFY_RULE_OPCODE_MAC,
782 &rule_entry->mac.header);
784 /* Set a MAC itself */
785 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
786 &rule_entry->mac.mac_mid,
787 &rule_entry->mac.mac_lsb, mac);
790 /* Set the ramrod data header */
791 /* TODO: take this to the higher level in order to prevent multiple
793 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
798 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
803 * @cam_offset: offset in cam memory
804 * @hdr: pointer to a header to setup
808 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
809 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
810 struct mac_configuration_hdr *hdr)
812 struct bnx2x_raw_obj *r = &o->raw;
815 hdr->offset = (u8)cam_offset;
816 hdr->client_id = 0xff;
817 hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
820 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
821 struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
822 u16 vlan_id, struct mac_configuration_entry *cfg_entry)
824 struct bnx2x_raw_obj *r = &o->raw;
825 u32 cl_bit_vec = (1 << r->cl_id);
827 cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
828 cfg_entry->pf_id = r->func_id;
829 cfg_entry->vlan_id = cpu_to_le16(vlan_id);
832 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
833 T_ETH_MAC_COMMAND_SET);
834 SET_FLAG(cfg_entry->flags,
835 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
837 /* Set a MAC in a ramrod data */
838 bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
839 &cfg_entry->middle_mac_addr,
840 &cfg_entry->lsb_mac_addr, mac);
842 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
843 T_ETH_MAC_COMMAND_INVALIDATE);
846 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
847 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
848 u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
850 struct mac_configuration_entry *cfg_entry = &config->config_table[0];
851 struct bnx2x_raw_obj *raw = &o->raw;
853 bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
855 bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
858 DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
859 (add ? "setting" : "clearing"),
860 mac, raw->cl_id, cam_offset);
864 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
867 * @o: bnx2x_vlan_mac_obj
868 * @elem: bnx2x_exeq_elem
869 * @rule_idx: rule_idx
870 * @cam_offset: cam_offset
872 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
873 struct bnx2x_vlan_mac_obj *o,
874 struct bnx2x_exeq_elem *elem, int rule_idx,
877 struct bnx2x_raw_obj *raw = &o->raw;
878 struct mac_configuration_cmd *config =
879 (struct mac_configuration_cmd *)(raw->rdata);
881 * 57710 and 57711 do not support MOVE command,
882 * so it's either ADD or DEL
884 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
887 /* Reset the ramrod data buffer */
888 memset(config, 0, sizeof(*config));
890 bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
892 elem->cmd_data.vlan_mac.u.mac.mac, 0,
893 ETH_VLAN_FILTER_ANY_VLAN, config);
896 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
897 struct bnx2x_vlan_mac_obj *o,
898 struct bnx2x_exeq_elem *elem, int rule_idx,
901 struct bnx2x_raw_obj *raw = &o->raw;
902 struct eth_classify_rules_ramrod_data *data =
903 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
904 int rule_cnt = rule_idx + 1;
905 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
906 int cmd = elem->cmd_data.vlan_mac.cmd;
907 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
908 u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
910 /* Reset the ramrod data buffer for the first rule */
912 memset(data, 0, sizeof(*data));
914 /* Set a rule header */
915 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
916 &rule_entry->vlan.header);
918 DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
921 /* Set a VLAN itself */
922 rule_entry->vlan.vlan = cpu_to_le16(vlan);
924 /* MOVE: Add a rule that will add this VLAN to the target Queue */
925 if (cmd == BNX2X_VLAN_MAC_MOVE) {
929 /* Setup ramrod data */
930 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
931 elem->cmd_data.vlan_mac.target_obj,
932 true, CLASSIFY_RULE_OPCODE_VLAN,
933 &rule_entry->vlan.header);
935 /* Set a VLAN itself */
936 rule_entry->vlan.vlan = cpu_to_le16(vlan);
939 /* Set the ramrod data header */
940 /* TODO: take this to the higher level in order to prevent multiple
942 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
946 static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
947 struct bnx2x_vlan_mac_obj *o,
948 struct bnx2x_exeq_elem *elem,
949 int rule_idx, int cam_offset)
951 struct bnx2x_raw_obj *raw = &o->raw;
952 struct eth_classify_rules_ramrod_data *data =
953 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
954 int rule_cnt = rule_idx + 1;
955 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
956 int cmd = elem->cmd_data.vlan_mac.cmd;
957 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
958 u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
959 u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
962 /* Reset the ramrod data buffer for the first rule */
964 memset(data, 0, sizeof(*data));
966 /* Set a rule header */
967 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
968 &rule_entry->pair.header);
970 /* Set the VLAN and MAC themselves */
971 rule_entry->pair.vlan = cpu_to_le16(vlan);
972 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
973 &rule_entry->pair.mac_mid,
974 &rule_entry->pair.mac_lsb, mac);
976 /* MOVE: Add a rule that will add this VLAN-MAC pair to the target Queue */
977 if (cmd == BNX2X_VLAN_MAC_MOVE) {
981 /* Setup ramrod data */
982 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
983 elem->cmd_data.vlan_mac.target_obj,
984 true, CLASSIFY_RULE_OPCODE_PAIR,
985 &rule_entry->pair.header);
987 /* Set the VLAN and MAC for the second (target) rule */
988 rule_entry->pair.vlan = cpu_to_le16(vlan);
989 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
990 &rule_entry->pair.mac_mid,
991 &rule_entry->pair.mac_lsb, mac);
994 /* Set the ramrod data header */
995 /* TODO: take this to the higher level in order to prevent multiple
997 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1002 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
1004 * @bp: device handle
1005 * @o: bnx2x_vlan_mac_obj
1006 * @elem: bnx2x_exeq_elem
1007 * @rule_idx: rule_idx
1008 * @cam_offset: cam_offset
1010 static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
1011 struct bnx2x_vlan_mac_obj *o,
1012 struct bnx2x_exeq_elem *elem,
1013 int rule_idx, int cam_offset)
1015 struct bnx2x_raw_obj *raw = &o->raw;
1016 struct mac_configuration_cmd *config =
1017 (struct mac_configuration_cmd *)(raw->rdata);
1019 * 57710 and 57711 do not support MOVE command,
1020 * so it's either ADD or DEL
1022 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1025 /* Reset the ramrod data buffer */
1026 memset(config, 0, sizeof(*config));
1028 bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
1030 elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1031 elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1032 ETH_VLAN_FILTER_CLASSIFY, config);
1035 #define list_next_entry(pos, member) \
1036 list_entry((pos)->member.next, typeof(*(pos)), member)
1039 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1041 * @bp: device handle
1042 * @p: command parameters
1043 * @ppos: pointer to the cookie
1045 * reconfigure next MAC/VLAN/VLAN-MAC element from the
1046 * previously configured elements list.
1048 * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags is taken
1051 * pointer to the cookie that should be given back in the next call to make
1052 * the function handle the next element. If *ppos is set to NULL it will restart the
1053 * iterator. If the returned *ppos == NULL, the last element has been
1057 static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1058 struct bnx2x_vlan_mac_ramrod_params *p,
1059 struct bnx2x_vlan_mac_registry_elem **ppos)
1061 struct bnx2x_vlan_mac_registry_elem *pos;
1062 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1064 /* If list is empty - there is nothing to do here */
1065 if (list_empty(&o->head)) {
1070 /* make a step... */
1072 *ppos = list_first_entry(&o->head,
1073 struct bnx2x_vlan_mac_registry_elem,
1076 *ppos = list_next_entry(*ppos, link);
1080 /* If it's the last step - return NULL */
1081 if (list_is_last(&pos->link, &o->head))
1084 /* Prepare a 'user_req' */
1085 memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1087 /* Set the command */
1088 p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1090 /* Set vlan_mac_flags */
1091 p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1093 /* Set a restore bit */
1094 __set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1096 return bnx2x_config_vlan_mac(bp, p);
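/* A usage sketch (not part of this file; "mac_obj" stands for any previously
 * initialized bnx2x_vlan_mac_obj): callers drive the restore iterator with a
 * NULL cookie and stop once it comes back NULL again:
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	struct bnx2x_vlan_mac_ramrod_params p;
 *	int rc;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *		if (rc < 0)
 *			break;
 *	} while (pos);
 */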
1100 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1101 * pointer to an element matching specific criteria and NULL if such an element
1102 * hasn't been found.
1104 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1105 struct bnx2x_exe_queue_obj *o,
1106 struct bnx2x_exeq_elem *elem)
1108 struct bnx2x_exeq_elem *pos;
1109 struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1111 /* Check pending for execution commands */
1112 list_for_each_entry(pos, &o->exe_queue, link)
1113 if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1115 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1121 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1122 struct bnx2x_exe_queue_obj *o,
1123 struct bnx2x_exeq_elem *elem)
1125 struct bnx2x_exeq_elem *pos;
1126 struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1128 /* Check pending for execution commands */
1129 list_for_each_entry(pos, &o->exe_queue, link)
1130 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1132 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1138 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1139 struct bnx2x_exe_queue_obj *o,
1140 struct bnx2x_exeq_elem *elem)
1142 struct bnx2x_exeq_elem *pos;
1143 struct bnx2x_vlan_mac_ramrod_data *data =
1144 &elem->cmd_data.vlan_mac.u.vlan_mac;
1146 /* Check pending for execution commands */
1147 list_for_each_entry(pos, &o->exe_queue, link)
1148 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1150 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1157 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1159 * @bp: device handle
1160 * @qo: bnx2x_qable_obj
1161 * @elem: bnx2x_exeq_elem
1163 * Checks that the requested configuration can be added. If yes and if
1164 * requested, consume CAM credit.
1166 * The 'validate' is run after the 'optimize'.
1169 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1170 union bnx2x_qable_obj *qo,
1171 struct bnx2x_exeq_elem *elem)
1173 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1174 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1177 /* Check the registry */
1178 rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
1180 DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
1185 * Check if there is a pending ADD command for this
1186 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1188 if (exeq->get(exeq, elem)) {
1189 DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1194 * TODO: Check the pending MOVE from other objects where this
1195 * object is a destination object.
1198 /* Consume the credit if not requested not to */
1199 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1200 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1208 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1210 * @bp: device handle
1211 * @qo: queueable object to check
1212 * @elem: element that needs to be deleted
1214 * Checks that the requested configuration can be deleted. If yes and if
1215 * requested, returns a CAM credit.
1217 * The 'validate' is run after the 'optimize'.
1219 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1220 union bnx2x_qable_obj *qo,
1221 struct bnx2x_exeq_elem *elem)
1223 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1224 struct bnx2x_vlan_mac_registry_elem *pos;
1225 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1226 struct bnx2x_exeq_elem query_elem;
1228 /* If this classification cannot be deleted (doesn't exist)
1229 * - return a BNX2X_EXIST.
1231 pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1233 DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
1238 * Check if there are pending DEL or MOVE commands for this
1239 * MAC/VLAN/VLAN-MAC. Return an error if so.
1241 memcpy(&query_elem, elem, sizeof(query_elem));
1243 /* Check for MOVE commands */
1244 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1245 if (exeq->get(exeq, &query_elem)) {
1246 BNX2X_ERR("There is a pending MOVE command already\n");
1250 /* Check for DEL commands */
1251 if (exeq->get(exeq, elem)) {
1252 DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1256 /* Return the credit to the credit pool if not requested not to */
1257 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1258 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1259 o->put_credit(o))) {
1260 BNX2X_ERR("Failed to return a credit\n");
1268 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
1270 * @bp: device handle
1271 * @qo: queueable object to check (source)
1272 * @elem: element that needs to be moved
1274 * Checks that the requested configuration can be moved. If yes and if
1275 * requested, returns a CAM credit.
1277 * The 'validate' is run after the 'optimize'.
1279 static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1280 union bnx2x_qable_obj *qo,
1281 struct bnx2x_exeq_elem *elem)
1283 struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1284 struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1285 struct bnx2x_exeq_elem query_elem;
1286 struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1287 struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1290 * Check if we can perform this operation based on the current registry
1293 if (!src_o->check_move(bp, src_o, dest_o,
1294 &elem->cmd_data.vlan_mac.u)) {
1295 DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
1300 * Check if there is an already pending DEL or MOVE command for the
1301 * source object or ADD command for a destination object. Return an
1304 memcpy(&query_elem, elem, sizeof(query_elem));
1306 /* Check DEL on source */
1307 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1308 if (src_exeq->get(src_exeq, &query_elem)) {
1309 BNX2X_ERR("There is a pending DEL command on the source queue already\n");
1313 /* Check MOVE on source */
1314 if (src_exeq->get(src_exeq, elem)) {
1315 DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1319 /* Check ADD on destination */
1320 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1321 if (dest_exeq->get(dest_exeq, &query_elem)) {
1322 BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
1326 /* Consume the credit if not requested not to */
1327 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1328 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1329 dest_o->get_credit(dest_o)))
1332 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1333 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1334 src_o->put_credit(src_o))) {
1335 /* return the credit taken from dest... */
1336 dest_o->put_credit(dest_o);
1343 static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1344 union bnx2x_qable_obj *qo,
1345 struct bnx2x_exeq_elem *elem)
1347 switch (elem->cmd_data.vlan_mac.cmd) {
1348 case BNX2X_VLAN_MAC_ADD:
1349 return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1350 case BNX2X_VLAN_MAC_DEL:
1351 return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1352 case BNX2X_VLAN_MAC_MOVE:
1353 return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1359 static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1360 union bnx2x_qable_obj *qo,
1361 struct bnx2x_exeq_elem *elem)
1365 /* If consumption wasn't required, nothing to do */
1366 if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1367 &elem->cmd_data.vlan_mac.vlan_mac_flags))
1370 switch (elem->cmd_data.vlan_mac.cmd) {
1371 case BNX2X_VLAN_MAC_ADD:
1372 case BNX2X_VLAN_MAC_MOVE:
1373 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1375 case BNX2X_VLAN_MAC_DEL:
1376 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1389 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1391 * @bp: device handle
1392 * @o: bnx2x_vlan_mac_obj
1395 static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1396 struct bnx2x_vlan_mac_obj *o)
1399 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1400 struct bnx2x_raw_obj *raw = &o->raw;
1403 /* Wait for the current command to complete */
1404 rc = raw->wait_comp(bp, raw);
1408 /* Wait until there are no pending commands */
1409 if (!bnx2x_exe_queue_empty(exeq))
1410 usleep_range(1000, 1000);
1419 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1421 * @bp: device handle
1422 * @o: bnx2x_vlan_mac_obj
1424 * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk
1427 static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1428 struct bnx2x_vlan_mac_obj *o,
1429 union event_ring_elem *cqe,
1430 unsigned long *ramrod_flags)
1432 struct bnx2x_raw_obj *r = &o->raw;
1435 /* Reset pending list */
1436 bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1439 r->clear_pending(r);
1441 /* If ramrod failed this is most likely a SW bug */
1442 if (cqe->message.error)
1445 /* Run the next bulk of pending commands if requested */
1446 if (test_bit(RAMROD_CONT, ramrod_flags)) {
1447 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1452 /* If there is more work to do return PENDING */
1453 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1460 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1462 * @bp: device handle
1463 * @qo: bnx2x_qable_obj
1464 * @elem: bnx2x_exeq_elem
1466 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1467 union bnx2x_qable_obj *qo,
1468 struct bnx2x_exeq_elem *elem)
1470 struct bnx2x_exeq_elem query, *pos;
1471 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1472 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1474 memcpy(&query, elem, sizeof(query));
1476 switch (elem->cmd_data.vlan_mac.cmd) {
1477 case BNX2X_VLAN_MAC_ADD:
1478 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1480 case BNX2X_VLAN_MAC_DEL:
1481 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1484 /* Don't handle anything other than ADD or DEL */
1488 /* If we found the appropriate element - delete it */
1489 pos = exeq->get(exeq, &query);
1492 /* Return the credit of the optimized command */
1493 if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1494 &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1495 if ((query.cmd_data.vlan_mac.cmd ==
1496 BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1497 BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
1499 } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1500 BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
1505 DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1506 (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1509 list_del(&pos->link);
1510 bnx2x_exe_queue_free_elem(bp, pos);
1518 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1520 * @bp: device handle
1526 * prepare a registry element according to the current command request.
1528 static inline int bnx2x_vlan_mac_get_registry_elem(
1530 struct bnx2x_vlan_mac_obj *o,
1531 struct bnx2x_exeq_elem *elem,
1533 struct bnx2x_vlan_mac_registry_elem **re)
1535 int cmd = elem->cmd_data.vlan_mac.cmd;
1536 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1538 /* Allocate a new registry element if needed. */
1540 ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1541 reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1545 /* Get a new CAM offset */
1546 if (!o->get_cam_offset(o, ®_elem->cam_offset)) {
1548 * This should never happen, because we have checked the
1549 * CAM availability in the 'validate'.
1556 DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1558 /* Set a VLAN-MAC data */
1559 memcpy(®_elem->u, &elem->cmd_data.vlan_mac.u,
1560 sizeof(reg_elem->u));
1562 /* Copy the flags (needed for DEL and RESTORE flows) */
1563 reg_elem->vlan_mac_flags =
1564 elem->cmd_data.vlan_mac.vlan_mac_flags;
1565 } else /* DEL, RESTORE */
1566 reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1573 * bnx2x_execute_vlan_mac - execute vlan mac command
1575 * @bp: device handle
1580 * go and send a ramrod!
1582 static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1583 union bnx2x_qable_obj *qo,
1584 struct list_head *exe_chunk,
1585 unsigned long *ramrod_flags)
1587 struct bnx2x_exeq_elem *elem;
1588 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1589 struct bnx2x_raw_obj *r = &o->raw;
1591 bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1592 bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1593 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1597 * If DRIVER_ONLY execution is requested, clean up the registry
1598 * and exit. Otherwise send a ramrod to FW.
1601 WARN_ON(r->check_pending(r));
1606 /* Fill the ramrod data */
1607 list_for_each_entry(elem, exe_chunk, link) {
1608 cmd = elem->cmd_data.vlan_mac.cmd;
1610 * We will add to the target object in MOVE command, so
1611 * change the object for a CAM search.
1613 if (cmd == BNX2X_VLAN_MAC_MOVE)
1614 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1618 rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1626 /* Push a new entry into the registry */
1628 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1629 (cmd == BNX2X_VLAN_MAC_MOVE)))
1630 list_add(®_elem->link, &cam_obj->head);
1632 /* Configure a single command in a ramrod data buffer */
1633 o->set_one_rule(bp, o, elem, idx,
1634 reg_elem->cam_offset);
1636 /* MOVE command consumes 2 entries in the ramrod data */
1637 if (cmd == BNX2X_VLAN_MAC_MOVE)
1644 * No need for an explicit memory barrier here as long as we would
1645 * need to ensure the ordering of writing to the SPQ element
1646 * and updating of the SPQ producer which involves a memory
1647 * read and we will have to put a full memory barrier there
1648 * (inside bnx2x_sp_post()).
1651 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1652 U64_HI(r->rdata_mapping),
1653 U64_LO(r->rdata_mapping),
1654 ETH_CONNECTION_TYPE);
1659 /* Now that we are done with the ramrod, clean up the registry */
1660 list_for_each_entry(elem, exe_chunk, link) {
1661 cmd = elem->cmd_data.vlan_mac.cmd;
1662 if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1663 (cmd == BNX2X_VLAN_MAC_MOVE)) {
1664 reg_elem = o->check_del(bp, o,
1665 &elem->cmd_data.vlan_mac.u);
1669 o->put_cam_offset(o, reg_elem->cam_offset);
1670 list_del(®_elem->link);
1681 r->clear_pending(r);
1683 /* Cleanup a registry in case of a failure */
1684 list_for_each_entry(elem, exe_chunk, link) {
1685 cmd = elem->cmd_data.vlan_mac.cmd;
1687 if (cmd == BNX2X_VLAN_MAC_MOVE)
1688 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1692 /* Delete all entries newly added above */
1694 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1695 (cmd == BNX2X_VLAN_MAC_MOVE))) {
1696 reg_elem = o->check_del(bp, cam_obj,
1697 &elem->cmd_data.vlan_mac.u);
1699 list_del(®_elem->link);
1708 static inline int bnx2x_vlan_mac_push_new_cmd(
1710 struct bnx2x_vlan_mac_ramrod_params *p)
1712 struct bnx2x_exeq_elem *elem;
1713 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1714 bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1716 /* Allocate the execution queue element */
1717 elem = bnx2x_exe_queue_alloc_elem(bp);
1721 /* Set the command 'length' */
1722 switch (p->user_req.cmd) {
1723 case BNX2X_VLAN_MAC_MOVE:
1730 /* Fill the object specific info */
1731 memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1733 /* Try to add a new command to the pending list */
1734 return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1738 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1740 * @bp: device handle
1744 int bnx2x_config_vlan_mac(
1746 struct bnx2x_vlan_mac_ramrod_params *p)
1749 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1750 unsigned long *ramrod_flags = &p->ramrod_flags;
1751 bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1752 struct bnx2x_raw_obj *raw = &o->raw;
1755 * Add new elements to the execution list for commands that require it.
1758 rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1764 * If nothing further will be executed in this iteration we want to
1765 * return PENDING if there are pending commands
1767 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1770 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
1771 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
1772 raw->clear_pending(raw);
1775 /* Execute commands if required */
1776 if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1777 test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1778 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1784 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1785 * then the user wants to wait until the last command is done.
1787 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1789 * Wait for at most the current exe_queue length worth of iterations plus
1790 * one (for the current pending command).
1792 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1794 while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1797 /* Wait for the current command to complete */
1798 rc = raw->wait_comp(bp, raw);
1802 /* Make a next step */
1803 rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
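/* A hedged usage sketch (the "mac_obj" and "addr" identifiers are
 * placeholders, not defined in this file): adding one unicast MAC and waiting
 * for the ramrod completion could look like:
 *
 *	struct bnx2x_vlan_mac_ramrod_params p;
 *	int rc;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *	__set_bit(BNX2X_ETH_MAC, &p.user_req.vlan_mac_flags);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = bnx2x_config_vlan_mac(bp, &p);
 *	if (rc < 0)
 *		BNX2X_ERR("Failed to configure MAC: %d\n", rc);
 */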
1818 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1820 * @bp: device handle
1823 * @ramrod_flags: execution flags to be used for this deletion
1825 * Returns zero if the last operation has completed successfully and there are no
1826 * more elements left, a positive value if the last operation has completed
1827 * successfully and there are more previously configured elements, and a negative
1828 * value if the current operation has failed.
1830 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1831 struct bnx2x_vlan_mac_obj *o,
1832 unsigned long *vlan_mac_flags,
1833 unsigned long *ramrod_flags)
1835 struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1837 struct bnx2x_vlan_mac_ramrod_params p;
1838 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1839 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1841 /* Clear pending commands first */
1843 spin_lock_bh(&exeq->lock);
1845 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1846 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1848 rc = exeq->remove(bp, exeq->owner, exeq_pos);
1850 BNX2X_ERR("Failed to remove command\n");
1851 spin_unlock_bh(&exeq->lock);
1854 list_del(&exeq_pos->link);
1858 spin_unlock_bh(&exeq->lock);
1860 /* Prepare a command request */
1861 memset(&p, 0, sizeof(p));
1863 p.ramrod_flags = *ramrod_flags;
1864 p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1867 * Add all but the last VLAN-MAC to the execution queue without actually
1868 * executing anything.
1870 __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1871 __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1872 __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1874 list_for_each_entry(pos, &o->head, link) {
1875 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1876 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1877 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1878 rc = bnx2x_config_vlan_mac(bp, &p);
1880 BNX2X_ERR("Failed to add a new DEL command\n");
1886 p.ramrod_flags = *ramrod_flags;
1887 __set_bit(RAMROD_CONT, &p.ramrod_flags);
1889 return bnx2x_config_vlan_mac(bp, &p);
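/* Illustrative only (the flag choice is an example, "mac_obj" a placeholder):
 * flushing every MAC that was registered with the BNX2X_ETH_MAC flag and
 * waiting for the last ramrod:
 *
 *	unsigned long vlan_mac_flags = 0, ramrod_flags = 0;
 *	int rc;
 *
 *	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
 *	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 *	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
 */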
1892 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1893 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1894 unsigned long *pstate, bnx2x_obj_type type)
1896 raw->func_id = func_id;
1900 raw->rdata_mapping = rdata_mapping;
1902 raw->pstate = pstate;
1903 raw->obj_type = type;
1904 raw->check_pending = bnx2x_raw_check_pending;
1905 raw->clear_pending = bnx2x_raw_clear_pending;
1906 raw->set_pending = bnx2x_raw_set_pending;
1907 raw->wait_comp = bnx2x_raw_wait;
1910 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1911 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1912 int state, unsigned long *pstate, bnx2x_obj_type type,
1913 struct bnx2x_credit_pool_obj *macs_pool,
1914 struct bnx2x_credit_pool_obj *vlans_pool)
1916 INIT_LIST_HEAD(&o->head);
1918 o->macs_pool = macs_pool;
1919 o->vlans_pool = vlans_pool;
1921 o->delete_all = bnx2x_vlan_mac_del_all;
1922 o->restore = bnx2x_vlan_mac_restore;
1923 o->complete = bnx2x_complete_vlan_mac;
1924 o->wait = bnx2x_wait_vlan_mac;
1926 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1927 state, pstate, type);
1931 void bnx2x_init_mac_obj(struct bnx2x *bp,
1932 struct bnx2x_vlan_mac_obj *mac_obj,
1933 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1934 dma_addr_t rdata_mapping, int state,
1935 unsigned long *pstate, bnx2x_obj_type type,
1936 struct bnx2x_credit_pool_obj *macs_pool)
1938 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1940 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1941 rdata_mapping, state, pstate, type,
1944 /* CAM credit pool handling */
1945 mac_obj->get_credit = bnx2x_get_credit_mac;
1946 mac_obj->put_credit = bnx2x_put_credit_mac;
1947 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1948 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1950 if (CHIP_IS_E1x(bp)) {
1951 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
1952 mac_obj->check_del = bnx2x_check_mac_del;
1953 mac_obj->check_add = bnx2x_check_mac_add;
1954 mac_obj->check_move = bnx2x_check_move_always_err;
1955 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1958 bnx2x_exe_queue_init(bp,
1959 &mac_obj->exe_queue, 1, qable_obj,
1960 bnx2x_validate_vlan_mac,
1961 bnx2x_remove_vlan_mac,
1962 bnx2x_optimize_vlan_mac,
1963 bnx2x_execute_vlan_mac,
1964 bnx2x_exeq_get_mac);
1966 mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
1967 mac_obj->check_del = bnx2x_check_mac_del;
1968 mac_obj->check_add = bnx2x_check_mac_add;
1969 mac_obj->check_move = bnx2x_check_move;
1970 mac_obj->ramrod_cmd =
1971 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1972 mac_obj->get_n_elements = bnx2x_get_n_elements;
1975 bnx2x_exe_queue_init(bp,
1976 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1977 qable_obj, bnx2x_validate_vlan_mac,
1978 bnx2x_remove_vlan_mac,
1979 bnx2x_optimize_vlan_mac,
1980 bnx2x_execute_vlan_mac,
1981 bnx2x_exeq_get_mac);
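/* Wiring sketch (the object, state and pool names below are placeholders
 * from the wider driver, not defined in this file): a per-queue MAC object is
 * typically initialized once, e.g.:
 *
 *	bnx2x_init_mac_obj(bp, &mac_obj, cl_id, cid, BP_FUNC(bp),
 *			   rdata, rdata_mapping, BNX2X_FILTER_MAC_PENDING,
 *			   &bp->sp_state, BNX2X_OBJ_TYPE_RX_TX, &bp->macs_pool);
 */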
1985 void bnx2x_init_vlan_obj(struct bnx2x *bp,
1986 struct bnx2x_vlan_mac_obj *vlan_obj,
1987 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1988 dma_addr_t rdata_mapping, int state,
1989 unsigned long *pstate, bnx2x_obj_type type,
1990 struct bnx2x_credit_pool_obj *vlans_pool)
1992 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1994 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1995 rdata_mapping, state, pstate, type, NULL,
1998 vlan_obj->get_credit = bnx2x_get_credit_vlan;
1999 vlan_obj->put_credit = bnx2x_put_credit_vlan;
2000 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
2001 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
2003 if (CHIP_IS_E1x(bp)) {
2004 BNX2X_ERR("Do not support chips others than E2 and newer\n");
2007 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
2008 vlan_obj->check_del = bnx2x_check_vlan_del;
2009 vlan_obj->check_add = bnx2x_check_vlan_add;
2010 vlan_obj->check_move = bnx2x_check_move;
2011 vlan_obj->ramrod_cmd =
2012 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2015 bnx2x_exe_queue_init(bp,
2016 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2017 qable_obj, bnx2x_validate_vlan_mac,
2018 bnx2x_remove_vlan_mac,
2019 bnx2x_optimize_vlan_mac,
2020 bnx2x_execute_vlan_mac,
2021 bnx2x_exeq_get_vlan);
2025 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2026 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2027 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2028 dma_addr_t rdata_mapping, int state,
2029 unsigned long *pstate, bnx2x_obj_type type,
2030 struct bnx2x_credit_pool_obj *macs_pool,
2031 struct bnx2x_credit_pool_obj *vlans_pool)
2033 union bnx2x_qable_obj *qable_obj =
2034 (union bnx2x_qable_obj *)vlan_mac_obj;
2036 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2037 rdata_mapping, state, pstate, type,
2038 macs_pool, vlans_pool);
2040 /* CAM pool handling */
2041 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2042 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2044 * CAM offset is relevant for 57710 and 57711 chips only which have a
2045 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2046 * will be taken from MACs' pool object only.
2048 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2049 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2051 if (CHIP_IS_E1(bp)) {
2052 BNX2X_ERR("Do not support chips others than E2\n");
2054 } else if (CHIP_IS_E1H(bp)) {
2055 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
2056 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2057 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2058 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
2059 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2062 bnx2x_exe_queue_init(bp,
2063 &vlan_mac_obj->exe_queue, 1, qable_obj,
2064 bnx2x_validate_vlan_mac,
2065 bnx2x_remove_vlan_mac,
2066 bnx2x_optimize_vlan_mac,
2067 bnx2x_execute_vlan_mac,
2068 bnx2x_exeq_get_vlan_mac);
2070 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
2071 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2072 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2073 vlan_mac_obj->check_move = bnx2x_check_move;
2074 vlan_mac_obj->ramrod_cmd =
2075 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2078 bnx2x_exe_queue_init(bp,
2079 &vlan_mac_obj->exe_queue,
2080 CLASSIFY_RULES_COUNT,
2081 qable_obj, bnx2x_validate_vlan_mac,
2082 bnx2x_remove_vlan_mac,
2083 bnx2x_optimize_vlan_mac,
2084 bnx2x_execute_vlan_mac,
2085 bnx2x_exeq_get_vlan_mac);
2090 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2091 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2092 struct tstorm_eth_mac_filter_config *mac_filters,
2095 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2097 u32 addr = BAR_TSTRORM_INTMEM +
2098 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2100 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2103 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2104 struct bnx2x_rx_mode_ramrod_params *p)
2106 /* update the bp MAC filter structure */
2107 u32 mask = (1 << p->cl_id);
2109 struct tstorm_eth_mac_filter_config *mac_filters =
2110 (struct tstorm_eth_mac_filter_config *)p->rdata;
2112 /* initial setting is drop-all */
2113 u8 drop_all_ucast = 1, drop_all_mcast = 1;
2114 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2115 u8 unmatched_unicast = 0;
2117 /* In e1x we only take into account the rx accept flag since tx switching
2119 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2120 /* accept matched ucast */
2123 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2124 /* accept matched mcast */
2127 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2128 /* accept all ucast */
2132 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2133 /* accept all mcast */
2137 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2138 /* accept (all) bcast */
2140 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2141 /* accept unmatched unicasts */
2142 unmatched_unicast = 1;
2144 mac_filters->ucast_drop_all = drop_all_ucast ?
2145 mac_filters->ucast_drop_all | mask :
2146 mac_filters->ucast_drop_all & ~mask;
2148 mac_filters->mcast_drop_all = drop_all_mcast ?
2149 mac_filters->mcast_drop_all | mask :
2150 mac_filters->mcast_drop_all & ~mask;
2152 mac_filters->ucast_accept_all = accp_all_ucast ?
2153 mac_filters->ucast_accept_all | mask :
2154 mac_filters->ucast_accept_all & ~mask;
2156 mac_filters->mcast_accept_all = accp_all_mcast ?
2157 mac_filters->mcast_accept_all | mask :
2158 mac_filters->mcast_accept_all & ~mask;
2160 mac_filters->bcast_accept_all = accp_all_bcast ?
2161 mac_filters->bcast_accept_all | mask :
2162 mac_filters->bcast_accept_all & ~mask;
2164 mac_filters->unmatched_unicast = unmatched_unicast ?
2165 mac_filters->unmatched_unicast | mask :
2166 mac_filters->unmatched_unicast & ~mask;
2168 DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
2169 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2170 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2171 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2172 mac_filters->bcast_accept_all);
2174 /* write the MAC filter structure */
2175 __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2177 /* The operation is completed */
2178 clear_bit(p->state, p->pstate);
2179 smp_mb__after_clear_bit();
2184 /* Setup ramrod data */
2185 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2186 struct eth_classify_header *hdr,
2190 hdr->rule_cnt = rule_cnt;
2193 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2194 unsigned long accept_flags,
2195 struct eth_filter_rules_cmd *cmd,
2196 bool clear_accept_all)
2200 /* start with 'drop-all' */
2201 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2202 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2205 if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
2206 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2208 if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
2209 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2211 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
2212 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2213 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2216 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
2217 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2218 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2220 if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
2221 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2223 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
2224 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2225 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2227 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
2228 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2231 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2232 if (clear_accept_all) {
2233 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2234 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2235 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2236 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2239 cmd->state = cpu_to_le16(state);
2243 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2244 struct bnx2x_rx_mode_ramrod_params *p)
2246 struct eth_filter_rules_ramrod_data *data = p->rdata;
2250 /* Reset the ramrod data buffer */
2251 memset(data, 0, sizeof(*data));
2253 /* Setup ramrod data */
2255 /* Tx (internal switching) */
2256 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2257 data->rules[rule_idx].client_id = p->cl_id;
2258 data->rules[rule_idx].func_id = p->func_id;
2260 data->rules[rule_idx].cmd_general_data =
2261 ETH_FILTER_RULES_CMD_TX_CMD;
2263 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2264 &(data->rules[rule_idx++]), false);
2268 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2269 data->rules[rule_idx].client_id = p->cl_id;
2270 data->rules[rule_idx].func_id = p->func_id;
2272 data->rules[rule_idx].cmd_general_data =
2273 ETH_FILTER_RULES_CMD_RX_CMD;
2275 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2276 &(data->rules[rule_idx++]), false);
2281 * If FCoE Queue configuration has been requested, configure the Rx and
2282 * internal switching modes for this queue in separate rules.
2284 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2285 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2287 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2288 /* Tx (internal switching) */
2289 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2290 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2291 data->rules[rule_idx].func_id = p->func_id;
2293 data->rules[rule_idx].cmd_general_data =
2294 ETH_FILTER_RULES_CMD_TX_CMD;
2296 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2297 &(data->rules[rule_idx++]),
2302 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2303 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2304 data->rules[rule_idx].func_id = p->func_id;
2306 data->rules[rule_idx].cmd_general_data =
2307 ETH_FILTER_RULES_CMD_RX_CMD;
2309 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2310 &(data->rules[rule_idx++]),
2316 * Set the ramrod header (most importantly - the number of rules to configure).
2319 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2321 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2322 data->header.rule_cnt, p->rx_accept_flags,
2323 p->tx_accept_flags);
2326 * No need for an explicit memory barrier here as long as we
2327 * ensure the ordering of writing to the SPQ element
2328 * and updating of the SPQ producer which involves a memory
2329 * read. If the memory read is removed we will have to put a
2330 * full memory barrier there (inside bnx2x_sp_post()).
2334 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2335 U64_HI(p->rdata_mapping),
2336 U64_LO(p->rdata_mapping),
2337 ETH_CONNECTION_TYPE);
2341 /* Ramrod completion is pending */
2345 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2346 struct bnx2x_rx_mode_ramrod_params *p)
2348 return bnx2x_state_wait(bp, p->state, p->pstate);
2351 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2352 struct bnx2x_rx_mode_ramrod_params *p)
2358 int bnx2x_config_rx_mode(struct bnx2x *bp,
2359 struct bnx2x_rx_mode_ramrod_params *p)
2363 /* Configure the new classification in the chip */
2364 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2368 /* Wait for a ramrod completion if it was requested */
2369 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2370 rc = p->rx_mode_obj->wait_comp(bp, p);
2378 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2379 struct bnx2x_rx_mode_obj *o)
2381 if (CHIP_IS_E1x(bp)) {
2382 o->wait_comp = bnx2x_empty_rx_mode_wait;
2383 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2385 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2386 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2390 /********************* Multicast verbs: SET, CLEAR ****************************/
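/* A multicast MAC is mapped to an approximate-match "bin" by taking the top
 * byte of the CRC32C of the address, i.e. one of 256 possible bins. Multiple
 * MACs may therefore share a bin, which is why the approximate-match
 * registry tracks the set bins rather than individual addresses.
 */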
2391 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2393 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2396 struct bnx2x_mcast_mac_elem {
2397 struct list_head link;
2399 u8 pad[2]; /* For a natural alignment of the following buffer */
2402 struct bnx2x_pending_mcast_cmd {
2403 struct list_head link;
2404 int type; /* BNX2X_MCAST_CMD_X */
2406 struct list_head macs_head;
2407 u32 macs_num; /* Needed for DEL command */
2408 int next_bin; /* Needed for RESTORE flow with approx match */
2411 bool done; /* set to true when the command has been handled.
2412 * Practically used in 57712 handling only, where one pending
2413 * command may be handled in a few operations. Since for
2414 * other chips every operation is completed in a
2415 * single ramrod, there is no need to use this field.
2419 static int bnx2x_mcast_wait(struct bnx2x *bp,
2420 struct bnx2x_mcast_obj *o)
2422 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2423 o->raw.wait_comp(bp, &o->raw))
2429 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2430 struct bnx2x_mcast_obj *o,
2431 struct bnx2x_mcast_ramrod_params *p,
2435 struct bnx2x_pending_mcast_cmd *new_cmd;
2436 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2437 struct bnx2x_mcast_list_elem *pos;
2438 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2439 p->mcast_list_len : 0);
2441 /* If the command is empty ("handle pending commands only"), break */
2442 if (!p->mcast_list_len)
2445 total_sz = sizeof(*new_cmd) +
2446 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2448 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2449 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2454 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2455 cmd, macs_list_len);
2457 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2459 new_cmd->type = cmd;
2460 new_cmd->done = false;
2463 case BNX2X_MCAST_CMD_ADD:
2464 cur_mac = (struct bnx2x_mcast_mac_elem *)
2465 ((u8 *)new_cmd + sizeof(*new_cmd));
2467 /* Push the MACs of the current command into the pending command
2470 list_for_each_entry(pos, &p->mcast_list, link) {
2471 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2472 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2478 case BNX2X_MCAST_CMD_DEL:
2479 new_cmd->data.macs_num = p->mcast_list_len;
2482 case BNX2X_MCAST_CMD_RESTORE:
2483 new_cmd->data.next_bin = 0;
2488 BNX2X_ERR("Unknown command: %d\n", cmd);
2492 /* Push the new pending command to the tail of the pending list: FIFO */
2493 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2501 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2504 * @last: index to start looking from (inclusive)
2506 * returns the next found (set) bin or a negative value if none is found.
2508 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2510 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2512 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2513 if (o->registry.aprox_match.vec[i])
2514 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2515 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2516 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2529 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2533 * returns the index of the found bin or -1 if none is found
2535 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2537 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2540 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2545 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2547 struct bnx2x_raw_obj *raw = &o->raw;
2550 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2551 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2552 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2554 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2555 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2556 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
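/* Fill one multicast rule in the ramrod data: ADD/RESTORE rules set a bin
 * (derived from the MAC or taken from the restore data) while DEL rules
 * clear the first currently set bin. The registry bin vector is updated on
 * ADD and DEL as well.
 */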
2561 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2562 struct bnx2x_mcast_obj *o, int idx,
2563 union bnx2x_mcast_config_data *cfg_data,
2566 struct bnx2x_raw_obj *r = &o->raw;
2567 struct eth_multicast_rules_ramrod_data *data =
2568 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2569 u8 func_id = r->func_id;
2570 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2573 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2574 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2576 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2578 /* Get a bin and update the bins' vector */
2580 case BNX2X_MCAST_CMD_ADD:
2581 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2582 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2585 case BNX2X_MCAST_CMD_DEL:
2586 /* If there were no more bins to clear
2587 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2588 * clear any (0xff) bin.
2589 * See bnx2x_mcast_validate_e2() for an explanation of when this may happen.
2592 bin = bnx2x_mcast_clear_first_bin(o);
2595 case BNX2X_MCAST_CMD_RESTORE:
2596 bin = cfg_data->bin;
2600 BNX2X_ERR("Unknown command: %d\n", cmd);
2604 DP(BNX2X_MSG_SP, "%s bin %d\n",
2605 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2606 "Setting" : "Clearing"), bin);
2608 data->rules[idx].bin_id = (u8)bin;
2609 data->rules[idx].func_id = func_id;
2610 data->rules[idx].engine_id = o->engine_id;
2614 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2616 * @bp: device handle
2618 * @start_bin: index in the registry to start from (inclusive)
2619 * @rdata_idx: index in the ramrod data to start from
2621 * returns last handled bin index or -1 if all bins have been handled
2623 static inline int bnx2x_mcast_handle_restore_cmd_e2(
2624 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2627 int cur_bin, cnt = *rdata_idx;
2628 union bnx2x_mcast_config_data cfg_data = {0};
2630 /* go through the registry and configure the bins from it */
2631 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2632 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2634 cfg_data.bin = (u8)cur_bin;
2635 o->set_one_rule(bp, o, cnt, &cfg_data,
2636 BNX2X_MCAST_CMD_RESTORE);
2640 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2642 /* Break if we reached the maximum number
2645 if (cnt >= o->max_cmd_len)
2654 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2655 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2658 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2659 int cnt = *line_idx;
2660 union bnx2x_mcast_config_data cfg_data = {0};
2662 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2665 cfg_data.mac = &pmac_pos->mac[0];
2666 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2670 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2673 list_del(&pmac_pos->link);
2675 /* Break if we reached the maximum number
2678 if (cnt >= o->max_cmd_len)
2684 /* if no more MACs to configure - we are done */
2685 if (list_empty(&cmd_pos->data.macs_head))
2686 cmd_pos->done = true;
2689 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2690 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2693 int cnt = *line_idx;
2695 while (cmd_pos->data.macs_num) {
2696 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2700 cmd_pos->data.macs_num--;
2702 DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
2703 cmd_pos->data.macs_num, cnt);
2705 /* Break if we reached the maximum
2708 if (cnt >= o->max_cmd_len)
2714 /* If we cleared all bins - we are done */
2715 if (!cmd_pos->data.macs_num)
2716 cmd_pos->done = true;
2719 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2720 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2723 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2726 if (cmd_pos->data.next_bin < 0)
2727 /* If o->hdl_restore returned -1 we are done */
2728 cmd_pos->done = true;
2730 /* Start from the next bin next time */
2731 cmd_pos->data.next_bin++;
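/* Drain the pending command FIFO into the ramrod data: each command is
 * handled until it is done or until o->max_cmd_len rule lines have been
 * consumed; fully handled commands are removed from the list and freed.
 * Returns the number of rule lines that were filled.
 */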
2734 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2735 struct bnx2x_mcast_ramrod_params *p)
2737 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2739 struct bnx2x_mcast_obj *o = p->mcast_obj;
2741 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2743 switch (cmd_pos->type) {
2744 case BNX2X_MCAST_CMD_ADD:
2745 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2748 case BNX2X_MCAST_CMD_DEL:
2749 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2752 case BNX2X_MCAST_CMD_RESTORE:
2753 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2758 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2762 /* If the command has been completed - remove it from the list
2763 * and free the memory
2765 if (cmd_pos->done) {
2766 list_del(&cmd_pos->link);
2770 /* Break if we reached the maximum number of rules */
2771 if (cnt >= o->max_cmd_len)
2778 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2779 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2782 struct bnx2x_mcast_list_elem *mlist_pos;
2783 union bnx2x_mcast_config_data cfg_data = {0};
2784 int cnt = *line_idx;
2786 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2787 cfg_data.mac = mlist_pos->mac;
2788 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2792 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2799 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2800 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2803 int cnt = *line_idx, i;
2805 for (i = 0; i < p->mcast_list_len; i++) {
2806 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2810 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2811 p->mcast_list_len - i - 1);
2818 * bnx2x_mcast_handle_current_cmd - handle the current (not yet pending) command
2820 * @bp: device handle
2823 * @start_cnt: first line in the ramrod data that may be used
2825 * This function is called iff there is enough room for the current command in the ramrod data.
2827 * Returns number of lines filled in the ramrod data in total.
2829 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2830 struct bnx2x_mcast_ramrod_params *p, int cmd,
2833 struct bnx2x_mcast_obj *o = p->mcast_obj;
2834 int cnt = start_cnt;
2836 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2839 case BNX2X_MCAST_CMD_ADD:
2840 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2843 case BNX2X_MCAST_CMD_DEL:
2844 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2847 case BNX2X_MCAST_CMD_RESTORE:
2848 o->hdl_restore(bp, o, 0, &cnt);
2852 BNX2X_ERR("Unknown command: %d\n", cmd);
2856 /* The current command has been handled */
2857 p->mcast_list_len = 0;
2862 static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2863 struct bnx2x_mcast_ramrod_params *p,
2866 struct bnx2x_mcast_obj *o = p->mcast_obj;
2867 int reg_sz = o->get_registry_size(o);
2870 /* DEL command deletes all currently configured MACs */
2871 case BNX2X_MCAST_CMD_DEL:
2872 o->set_registry_size(o, 0);
2875 /* RESTORE command will restore the entire multicast configuration */
2876 case BNX2X_MCAST_CMD_RESTORE:
2877 /* Here we set the approximate amount of work to do, which in
2878 * fact may be smaller, as some MACs in postponed ADD
2879 * command(s) scheduled before this command may fall into
2880 * the same bin, so the actual number of bins set in the
2881 * registry would be less than we estimate here. See
2882 * bnx2x_mcast_set_one_rule_e2() for further details.
2884 p->mcast_list_len = reg_sz;
2887 case BNX2X_MCAST_CMD_ADD:
2888 case BNX2X_MCAST_CMD_CONT:
2889 /* Here we assume that all new MACs will fall into new bins.
2890 * However we will correct the real registry size after we
2891 * handle all pending commands.
2893 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2897 BNX2X_ERR("Unknown command: %d\n", cmd);
2902 /* Increase the total number of MACs pending to be configured */
2903 o->total_pending_num += p->mcast_list_len;
2908 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2909 struct bnx2x_mcast_ramrod_params *p,
2912 struct bnx2x_mcast_obj *o = p->mcast_obj;
2914 o->set_registry_size(o, old_num_bins);
2915 o->total_pending_num -= p->mcast_list_len;
2919 * bnx2x_mcast_set_rdata_hdr_e2 - sets the header values
2921 * @bp: device handle
2923 * @len: number of rules to handle
2925 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2926 struct bnx2x_mcast_ramrod_params *p,
2929 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2930 struct eth_multicast_rules_ramrod_data *data =
2931 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2933 data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
2934 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
2935 data->header.rule_cnt = len;
2939 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2941 * @bp: device handle
2944 * Recalculate the actual number of set bins in the registry using Brian
2945 * Kernighan's algorithm: its execution complexity is proportional to the number of set bins.
2947 * returns 0 for compliance with bnx2x_mcast_refresh_registry_e1().
2949 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2950 struct bnx2x_mcast_obj *o)
2955 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2956 elem = o->registry.aprox_match.vec[i];
2961 o->set_registry_size(o, cnt);
2966 static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2967 struct bnx2x_mcast_ramrod_params *p,
2970 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2971 struct bnx2x_mcast_obj *o = p->mcast_obj;
2972 struct eth_multicast_rules_ramrod_data *data =
2973 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2976 /* Reset the ramrod data buffer */
2977 memset(data, 0, sizeof(*data));
2979 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2981 /* If there are no more pending commands - clear SCHEDULED state */
2982 if (list_empty(&o->pending_cmds_head))
2985 /* The below may be true iff there was enough room in ramrod
2986 * data for all pending commands and for the current
2987 * command. Otherwise the current command would have been added
2988 * to the pending commands and p->mcast_list_len would have been zeroed.
2991 if (p->mcast_list_len > 0)
2992 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2994 /* We've pulled out some MACs - update the total number of
2997 o->total_pending_num -= cnt;
3000 WARN_ON(o->total_pending_num < 0);
3001 WARN_ON(cnt > o->max_cmd_len);
3003 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
3005 /* Update the registry size if there are no more pending operations.
3007 * We don't want to change the value of the registry size if there are
3008 * pending operations because we want it to always be equal to the
3009 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
3010 * set bins after the last requested operation in order to properly
3011 * evaluate the size of the next DEL/RESTORE operation.
3013 * Note that we update the registry itself during command(s) handling
3014 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
3015 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3016 * with a limited number of update commands (per MAC/bin), and we don't
3017 * know in this scope what the actual state of bins configuration is
3018 * going to be after this ramrod.
3020 if (!o->total_pending_num)
3021 bnx2x_mcast_refresh_registry_e2(bp, o);
3024 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3025 * RAMROD_PENDING status immediately.
3027 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3028 raw->clear_pending(raw);
3032 * No need for an explicit memory barrier here as long as we
3033 * ensure the ordering of writing to the SPQ element
3034 * and updating of the SPQ producer which involves a memory
3035 * read. If the memory read is removed we will have to put a
3036 * full memory barrier there (inside bnx2x_sp_post()).
3040 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3041 raw->cid, U64_HI(raw->rdata_mapping),
3042 U64_LO(raw->rdata_mapping),
3043 ETH_CONNECTION_TYPE);
3047 /* Ramrod completion is pending */
3052 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3053 struct bnx2x_mcast_ramrod_params *p,
3056 /* Mark that there is work to do */
3057 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3058 p->mcast_list_len = 1;
3063 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3064 struct bnx2x_mcast_ramrod_params *p,
3070 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3072 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3075 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3076 struct bnx2x_mcast_obj *o,
3077 struct bnx2x_mcast_ramrod_params *p,
3080 struct bnx2x_mcast_list_elem *mlist_pos;
3083 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3084 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3085 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3087 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3088 mlist_pos->mac, bit);
3090 /* bookkeeping... */
3091 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3096 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3097 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3102 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3104 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3105 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3106 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3110 /* On 57711 we write the multicast MACs' approximate match
3111 * table directly into the TSTORM's internal RAM, so we don't
3112 * need any tricks to make it work.
3114 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3115 struct bnx2x_mcast_ramrod_params *p,
3119 struct bnx2x_mcast_obj *o = p->mcast_obj;
3120 struct bnx2x_raw_obj *r = &o->raw;
3122 /* If CLEAR_ONLY has been requested - clear the registry
3123 * and clear a pending bit.
3125 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3126 u32 mc_filter[MC_HASH_SIZE] = {0};
3128 /* Set the multicast filter bits before writing it into
3129 * the internal memory.
3132 case BNX2X_MCAST_CMD_ADD:
3133 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3136 case BNX2X_MCAST_CMD_DEL:
3138 "Invalidating multicast MACs configuration\n");
3140 /* clear the registry */
3141 memset(o->registry.aprox_match.vec, 0,
3142 sizeof(o->registry.aprox_match.vec));
3145 case BNX2X_MCAST_CMD_RESTORE:
3146 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3150 BNX2X_ERR("Unknown command: %d\n", cmd);
3154 /* Set the mcast filter in the internal memory */
3155 for (i = 0; i < MC_HASH_SIZE; i++)
3156 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3158 /* clear the registry */
3159 memset(o->registry.aprox_match.vec, 0,
3160 sizeof(o->registry.aprox_match.vec));
3163 r->clear_pending(r);
3168 static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3169 struct bnx2x_mcast_ramrod_params *p,
3172 struct bnx2x_mcast_obj *o = p->mcast_obj;
3173 int reg_sz = o->get_registry_size(o);
3176 /* DEL command deletes all currently configured MACs */
3177 case BNX2X_MCAST_CMD_DEL:
3178 o->set_registry_size(o, 0);
3181 /* RESTORE command will restore the entire multicast configuration */
3182 case BNX2X_MCAST_CMD_RESTORE:
3183 p->mcast_list_len = reg_sz;
3184 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3185 cmd, p->mcast_list_len);
3188 case BNX2X_MCAST_CMD_ADD:
3189 case BNX2X_MCAST_CMD_CONT:
3190 /* Multicast MACs on 57710 are configured as unicast MACs and
3191 * there is only a limited number of CAM entries for that
3194 if (p->mcast_list_len > o->max_cmd_len) {
3195 BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3199 /* Every configured MAC should be cleared if DEL command is
3200 * called. Only the last ADD command is relevant, since
3201 * every ADD command overrides the previous configuration.
3203 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3204 if (p->mcast_list_len > 0)
3205 o->set_registry_size(o, p->mcast_list_len);
3210 BNX2X_ERR("Unknown command: %d\n", cmd);
3215 /* We want to ensure that commands are executed one by one for 57710.
3216 * Therefore each non-empty command will consume o->max_cmd_len.
3218 if (p->mcast_list_len)
3219 o->total_pending_num += o->max_cmd_len;
3224 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3225 struct bnx2x_mcast_ramrod_params *p,
3228 struct bnx2x_mcast_obj *o = p->mcast_obj;
3230 o->set_registry_size(o, old_num_macs);
3232 /* If the current command hasn't been handled yet, being
3233 * here means that it is meant to be dropped and we have to
3234 * update the number of outstanding MACs accordingly.
3236 if (p->mcast_list_len)
3237 o->total_pending_num -= o->max_cmd_len;
3240 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3241 struct bnx2x_mcast_obj *o, int idx,
3242 union bnx2x_mcast_config_data *cfg_data,
3245 struct bnx2x_raw_obj *r = &o->raw;
3246 struct mac_configuration_cmd *data =
3247 (struct mac_configuration_cmd *)(r->rdata);
3250 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3251 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3252 &data->config_table[idx].middle_mac_addr,
3253 &data->config_table[idx].lsb_mac_addr,
3256 data->config_table[idx].vlan_id = 0;
3257 data->config_table[idx].pf_id = r->func_id;
3258 data->config_table[idx].clients_bit_vector =
3259 cpu_to_le32(1 << r->cl_id);
3261 SET_FLAG(data->config_table[idx].flags,
3262 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3263 T_ETH_MAC_COMMAND_SET);
3268 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3270 * @bp: device handle
3272 * @len: number of rules to handle
3274 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3275 struct bnx2x_mcast_ramrod_params *p,
3278 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3279 struct mac_configuration_cmd *data =
3280 (struct mac_configuration_cmd *)(r->rdata);
3282 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3283 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3284 BNX2X_MAX_MULTICAST*(1 + r->func_id));
3286 data->hdr.offset = offset;
3287 data->hdr.client_id = 0xff;
3288 data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
3289 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
3290 data->hdr.length = len;
3294 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3296 * @bp: device handle
3298 * @start_idx: index in the registry to start from
3299 * @rdata_idx: index in the ramrod data to start from
3301 * restore command for 57710 is like all other commands - always a stand alone
3302 * command - start_idx and rdata_idx will always be 0. This function will always succeed.
3304 * returns -1 to comply with 57712 variant.
3306 static inline int bnx2x_mcast_handle_restore_cmd_e1(
3307 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3310 struct bnx2x_mcast_mac_elem *elem;
3312 union bnx2x_mcast_config_data cfg_data = {0};
3314 /* go through the registry and configure the MACs from it. */
3315 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3316 cfg_data.mac = &elem->mac[0];
3317 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3321 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3331 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3332 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3334 struct bnx2x_pending_mcast_cmd *cmd_pos;
3335 struct bnx2x_mcast_mac_elem *pmac_pos;
3336 struct bnx2x_mcast_obj *o = p->mcast_obj;
3337 union bnx2x_mcast_config_data cfg_data = {0};
3341 /* If nothing to be done - return */
3342 if (list_empty(&o->pending_cmds_head))
3345 /* Handle the first command */
3346 cmd_pos = list_first_entry(&o->pending_cmds_head,
3347 struct bnx2x_pending_mcast_cmd, link);
3349 switch (cmd_pos->type) {
3350 case BNX2X_MCAST_CMD_ADD:
3351 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3352 cfg_data.mac = &pmac_pos->mac[0];
3353 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3357 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3362 case BNX2X_MCAST_CMD_DEL:
3363 cnt = cmd_pos->data.macs_num;
3364 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3367 case BNX2X_MCAST_CMD_RESTORE:
3368 o->hdl_restore(bp, o, 0, &cnt);
3372 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3376 list_del(&cmd_pos->link);
3383 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3390 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3391 __le16 *fw_lo, u8 *mac)
3393 mac[1] = ((u8 *)fw_hi)[0];
3394 mac[0] = ((u8 *)fw_hi)[1];
3395 mac[3] = ((u8 *)fw_mid)[0];
3396 mac[2] = ((u8 *)fw_mid)[1];
3397 mac[5] = ((u8 *)fw_lo)[0];
3398 mac[4] = ((u8 *)fw_lo)[1];
3402 * bnx2x_mcast_refresh_registry_e1 - update the exact-match registry from the ramrod data
3404 * @bp: device handle
3407 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3408 * and update the registry correspondingly: if ADD - allocate memory and add
3409 * the entries to the registry (list), if DELETE - clear the registry and free the memory.
3412 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3413 struct bnx2x_mcast_obj *o)
3415 struct bnx2x_raw_obj *raw = &o->raw;
3416 struct bnx2x_mcast_mac_elem *elem;
3417 struct mac_configuration_cmd *data =
3418 (struct mac_configuration_cmd *)(raw->rdata);
3420 /* If first entry contains a SET bit - the command was ADD,
3421 * otherwise - DEL_ALL
3423 if (GET_FLAG(data->config_table[0].flags,
3424 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3425 int i, len = data->hdr.length;
3427 /* Break if it was a RESTORE command */
3428 if (!list_empty(&o->registry.exact_match.macs))
3431 elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3433 BNX2X_ERR("Failed to allocate registry memory\n");
3437 for (i = 0; i < len; i++, elem++) {
3438 bnx2x_get_fw_mac_addr(
3439 &data->config_table[i].msb_mac_addr,
3440 &data->config_table[i].middle_mac_addr,
3441 &data->config_table[i].lsb_mac_addr,
3443 DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3445 list_add_tail(&elem->link,
3446 &o->registry.exact_match.macs);
3449 elem = list_first_entry(&o->registry.exact_match.macs,
3450 struct bnx2x_mcast_mac_elem, link);
3451 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3453 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3459 static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3460 struct bnx2x_mcast_ramrod_params *p,
3463 struct bnx2x_mcast_obj *o = p->mcast_obj;
3464 struct bnx2x_raw_obj *raw = &o->raw;
3465 struct mac_configuration_cmd *data =
3466 (struct mac_configuration_cmd *)(raw->rdata);
3469 /* Reset the ramrod data buffer */
3470 memset(data, 0, sizeof(*data));
3472 /* First set all entries as invalid */
3473 for (i = 0; i < o->max_cmd_len ; i++)
3474 SET_FLAG(data->config_table[i].flags,
3475 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3476 T_ETH_MAC_COMMAND_INVALIDATE);
3478 /* Handle pending commands first */
3479 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3481 /* If there are no more pending commands - clear SCHEDULED state */
3482 if (list_empty(&o->pending_cmds_head))
3485 /* The below may be true iff there were no pending commands */
3487 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3489 /* For 57710 every command has o->max_cmd_len length to ensure that
3490 * commands are done one at a time.
3492 o->total_pending_num -= o->max_cmd_len;
3496 WARN_ON(cnt > o->max_cmd_len);
3498 /* Set ramrod header (in particular, a number of entries to update) */
3499 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3501 /* Update the registry: we need the registry contents to always be up
3502 * to date in order to be able to execute a RESTORE opcode. Here
3503 * we use the fact that for 57710 we send one command at a time,
3504 * hence we may take the registry update out of the command handling
3505 * and do it in a simpler way here.
3507 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3512 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3513 * RAMROD_PENDING status immediately.
3515 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3516 raw->clear_pending(raw);
3520 * No need for an explicit memory barrier here as long as we
3521 * ensure the ordering of writing to the SPQ element
3522 * and updating of the SPQ producer which involves a memory
3523 * read. If the memory read is removed we will have to put a
3524 * full memory barrier there (inside bnx2x_sp_post()).
3528 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3529 U64_HI(raw->rdata_mapping),
3530 U64_LO(raw->rdata_mapping),
3531 ETH_CONNECTION_TYPE);
3535 /* Ramrod completion is pending */
3541 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3543 return o->registry.exact_match.num_macs_set;
3546 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3548 return o->registry.aprox_match.num_bins_set;
3551 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3554 o->registry.exact_match.num_macs_set = n;
3557 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3560 o->registry.aprox_match.num_bins_set = n;
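/* Top-level multicast configuration entry point: validate the command,
 * enqueue it on the pending list if it can't be completed in this
 * iteration, otherwise send the ramrod (which also drains previously
 * pending work) and optionally wait for its completion. On failure the
 * registry size is reverted to the value saved before validation.
 */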
3563 int bnx2x_config_mcast(struct bnx2x *bp,
3564 struct bnx2x_mcast_ramrod_params *p,
3567 struct bnx2x_mcast_obj *o = p->mcast_obj;
3568 struct bnx2x_raw_obj *r = &o->raw;
3569 int rc = 0, old_reg_size;
3571 /* This is needed to recover number of currently configured mcast macs
3572 * in case of failure.
3574 old_reg_size = o->get_registry_size(o);
3576 /* Do some calculations and checks */
3577 rc = o->validate(bp, p, cmd);
3581 /* Return if there is no work to do */
3582 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3585 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3586 o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3588 /* Enqueue the current command to the pending list if we can't complete
3589 * it in the current iteration
3591 if (r->check_pending(r) ||
3592 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3593 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3597 /* Since the current command is now on the pending command list, we
3598 * don't need to handle it separately.
3600 p->mcast_list_len = 0;
3603 if (!r->check_pending(r)) {
3605 /* Set 'pending' state */
3608 /* Configure the new classification in the chip */
3609 rc = o->config_mcast(bp, p, cmd);
3613 /* Wait for a ramrod completion if it was requested */
3614 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3615 rc = o->wait_comp(bp, o);
3621 r->clear_pending(r);
3624 o->revert(bp, p, old_reg_size);
3629 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3631 smp_mb__before_clear_bit();
3632 clear_bit(o->sched_state, o->raw.pstate);
3633 smp_mb__after_clear_bit();
3636 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3638 smp_mb__before_clear_bit();
3639 set_bit(o->sched_state, o->raw.pstate);
3640 smp_mb__after_clear_bit();
3643 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3645 return !!test_bit(o->sched_state, o->raw.pstate);
3648 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3650 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3653 void bnx2x_init_mcast_obj(struct bnx2x *bp,
3654 struct bnx2x_mcast_obj *mcast_obj,
3655 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3656 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3657 int state, unsigned long *pstate, bnx2x_obj_type type)
3659 memset(mcast_obj, 0, sizeof(*mcast_obj));
3661 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3662 rdata, rdata_mapping, state, pstate, type);
3664 mcast_obj->engine_id = engine_id;
3666 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3668 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3669 mcast_obj->check_sched = bnx2x_mcast_check_sched;
3670 mcast_obj->set_sched = bnx2x_mcast_set_sched;
3671 mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3673 if (CHIP_IS_E1(bp)) {
3674 mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3675 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3676 mcast_obj->hdl_restore =
3677 bnx2x_mcast_handle_restore_cmd_e1;
3678 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3680 if (CHIP_REV_IS_SLOW(bp))
3681 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3683 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3685 mcast_obj->wait_comp = bnx2x_mcast_wait;
3686 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3687 mcast_obj->validate = bnx2x_mcast_validate_e1;
3688 mcast_obj->revert = bnx2x_mcast_revert_e1;
3689 mcast_obj->get_registry_size =
3690 bnx2x_mcast_get_registry_size_exact;
3691 mcast_obj->set_registry_size =
3692 bnx2x_mcast_set_registry_size_exact;
3694 /* 57710 is the only chip that uses the exact match for mcast
3697 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3699 } else if (CHIP_IS_E1H(bp)) {
3700 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
3701 mcast_obj->enqueue_cmd = NULL;
3702 mcast_obj->hdl_restore = NULL;
3703 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3705 /* 57711 doesn't send a ramrod, so it has unlimited credit
3708 mcast_obj->max_cmd_len = -1;
3709 mcast_obj->wait_comp = bnx2x_mcast_wait;
3710 mcast_obj->set_one_rule = NULL;
3711 mcast_obj->validate = bnx2x_mcast_validate_e1h;
3712 mcast_obj->revert = bnx2x_mcast_revert_e1h;
3713 mcast_obj->get_registry_size =
3714 bnx2x_mcast_get_registry_size_aprox;
3715 mcast_obj->set_registry_size =
3716 bnx2x_mcast_set_registry_size_aprox;
3718 mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3719 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3720 mcast_obj->hdl_restore =
3721 bnx2x_mcast_handle_restore_cmd_e2;
3722 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3723 /* TODO: There should be a proper HSI define for this number!!!
3725 mcast_obj->max_cmd_len = 16;
3726 mcast_obj->wait_comp = bnx2x_mcast_wait;
3727 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3728 mcast_obj->validate = bnx2x_mcast_validate_e2;
3729 mcast_obj->revert = bnx2x_mcast_revert_e2;
3730 mcast_obj->get_registry_size =
3731 bnx2x_mcast_get_registry_size_aprox;
3732 mcast_obj->set_registry_size =
3733 bnx2x_mcast_set_registry_size_aprox;
3737 /*************************** Credit handling **********************************/
3740 * atomic_add_ifless - add if the result is less than a given value.
3742 * @v: pointer of type atomic_t
3743 * @a: the amount to add to v...
3744 * @u: ...if (v + a) is less than u.
3746 * returns true if (v + a) was less than u, and false otherwise.
3749 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3755 if (unlikely(c + a >= u))
3758 old = atomic_cmpxchg((v), c, c + a);
3759 if (likely(old == c))
3768 * atomic_dec_ifmoe - dec if the result is greater than or equal to a given value.
3770 * @v: pointer of type atomic_t
3771 * @a: the amount to dec from v...
3772 * @u: ...if (v - a) is greater than or equal to u.
3774 * returns true if (v - a) was greater than or equal to u, and false
3777 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3783 if (unlikely(c - a < u))
3786 old = atomic_cmpxchg((v), c, c - a);
3787 if (likely(old == c))
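/* Credit pool get/put: "get" atomically consumes cnt credits only if that
 * would not drive the counter below zero, while "put" returns credits but
 * is capped so that the credit never exceeds pool_sz. Both are built on the
 * cmpxchg-based helpers above.
 */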
3795 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3800 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3806 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3812 /* Don't allow a refill if credit + cnt > pool_sz */
3813 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3820 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3825 cur_credit = atomic_read(&o->credit);
3830 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3837 static bool bnx2x_credit_pool_get_entry(
3838 struct bnx2x_credit_pool_obj *o,
3845 /* Find "internal cam-offset" then add to base for this object... */
3846 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3848 /* Skip the current vector if there are no free entries in it */
3849 if (!o->pool_mirror[vec])
3852 /* If we've got here we are going to find a free entry */
3853 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3854 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3856 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3858 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3859 *offset = o->base_pool_offset + idx;
3867 static bool bnx2x_credit_pool_put_entry(
3868 struct bnx2x_credit_pool_obj *o,
3871 if (offset < o->base_pool_offset)
3874 offset -= o->base_pool_offset;
3876 if (offset >= o->pool_sz)
3879 /* Return the entry to the pool */
3880 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3885 static bool bnx2x_credit_pool_put_entry_always_true(
3886 struct bnx2x_credit_pool_obj *o,
3892 static bool bnx2x_credit_pool_get_entry_always_true(
3893 struct bnx2x_credit_pool_obj *o,
3900 * bnx2x_init_credit_pool - initialize credit pool internals.
3903 * @base: Base entry in the CAM to use.
3904 * @credit: pool size.
3906 * If base is negative no CAM entries handling will be performed.
3907 * If credit is negative pool operations will always succeed (unlimited pool).
3910 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3911 int base, int credit)
3913 /* Zero the object first */
3914 memset(p, 0, sizeof(*p));
3916 /* Set the table to all 1s */
3917 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3919 /* Init a pool as full */
3920 atomic_set(&p->credit, credit);
3922 /* The total pool size */
3923 p->pool_sz = credit;
3925 p->base_pool_offset = base;
3927 /* Commit the change */
3930 p->check = bnx2x_credit_pool_check;
3932 /* if pool credit is negative - disable the checks */
3934 p->put = bnx2x_credit_pool_put;
3935 p->get = bnx2x_credit_pool_get;
3936 p->put_entry = bnx2x_credit_pool_put_entry;
3937 p->get_entry = bnx2x_credit_pool_get_entry;
3939 p->put = bnx2x_credit_pool_always_true;
3940 p->get = bnx2x_credit_pool_always_true;
3941 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3942 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3945 /* If base is negative - disable entries handling */
3947 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3948 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
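/* MAC credit pool sizing differs per chip family: on 57710 the multicast
 * CAM entries are carved out of the per-function share, on 57711 the CAM
 * credit is divided between the active functions, and on 57712 and newer
 * only a credit count is tracked (no CAM entry bookkeeping), hence the
 * pool is initialized with base = -1.
 */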
3952 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3953 struct bnx2x_credit_pool_obj *p, u8 func_id,
3956 /* TODO: this will be defined in consts as well... */
3957 #define BNX2X_CAM_SIZE_EMUL 5
3961 if (CHIP_IS_E1(bp)) {
3962 /* In E1, Multicast is saved in cam... */
3963 if (!CHIP_REV_IS_SLOW(bp))
3964 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3966 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3968 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3970 } else if (CHIP_IS_E1H(bp)) {
3971 /* CAM credit is equally divided between all active functions
3974 if ((func_num > 0)) {
3975 if (!CHIP_REV_IS_SLOW(bp))
3976 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3978 cam_sz = BNX2X_CAM_SIZE_EMUL;
3979 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3981 /* this should never happen! Block MAC operations. */
3982 bnx2x_init_credit_pool(p, 0, 0);
3988 * CAM credit is equally divided between all active functions
3991 if ((func_num > 0)) {
3992 if (!CHIP_REV_IS_SLOW(bp))
3993 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3995 cam_sz = BNX2X_CAM_SIZE_EMUL;
3998 * No need for CAM entries handling for 57712 and newer.
4001 bnx2x_init_credit_pool(p, -1, cam_sz);
4003 /* this should never happen! Block MAC operations. */
4004 bnx2x_init_credit_pool(p, 0, 0);
4010 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4011 struct bnx2x_credit_pool_obj *p,
4015 if (CHIP_IS_E1x(bp)) {
4017 * There is no VLAN credit in HW on 57710 and 57711; only
4018 * MAC / MAC-VLAN can be set
4020 bnx2x_init_credit_pool(p, 0, -1);
4023 * CAM credit is equally divided between all active functions
4027 int credit = MAX_VLAN_CREDIT_E2 / func_num;
4028 bnx2x_init_credit_pool(p, func_id * credit, credit);
4030 /* this should never happen! Block VLAN operations. */
4031 bnx2x_init_credit_pool(p, 0, 0);
4035 /****************** RSS Configuration ******************/
4037 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4039 * @bp: driver handle
4040 * @p: pointer to rss configuration
4042 * Prints it when NETIF_MSG_IFUP debug level is configured.
4044 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4045 struct bnx2x_config_rss_params *p)
4049 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4050 DP(BNX2X_MSG_SP, "0x0000: ");
4051 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4052 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4054 /* Print 4 bytes in a line */
4055 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4056 (((i + 1) & 0x3) == 0)) {
4057 DP_CONT(BNX2X_MSG_SP, "\n");
4058 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4062 DP_CONT(BNX2X_MSG_SP, "\n");
4066 * bnx2x_setup_rss - configure RSS
4068 * @bp: device handle
4069 * @p: rss configuration
4071 * Sends an RSS UPDATE ramrod to perform the configuration.
4073 static int bnx2x_setup_rss(struct bnx2x *bp,
4074 struct bnx2x_config_rss_params *p)
4076 struct bnx2x_rss_config_obj *o = p->rss_obj;
4077 struct bnx2x_raw_obj *r = &o->raw;
4078 struct eth_rss_update_ramrod_data *data =
4079 (struct eth_rss_update_ramrod_data *)(r->rdata);
4083 memset(data, 0, sizeof(*data));
4085 DP(BNX2X_MSG_SP, "Configuring RSS\n");
4087 /* Set an echo field */
4088 data->echo = (r->cid & BNX2X_SWCID_MASK) |
4089 (r->state << BNX2X_SWCID_SHIFT);
4092 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4093 rss_mode = ETH_RSS_MODE_DISABLED;
4094 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4095 rss_mode = ETH_RSS_MODE_REGULAR;
4097 data->rss_mode = rss_mode;
4099 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4101 /* RSS capabilities */
4102 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4103 data->capabilities |=
4104 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4106 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4107 data->capabilities |=
4108 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4110 if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4111 data->capabilities |=
4112 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4114 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4115 data->capabilities |=
4116 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4118 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4119 data->capabilities |=
4120 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4122 if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4123 data->capabilities |=
4124 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4127 data->rss_result_mask = p->rss_result_mask;
4130 data->rss_engine_id = o->engine_id;
4132 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4134 /* Indirection table */
4135 memcpy(data->indirection_table, p->ind_table,
4136 T_ETH_INDIRECTION_TABLE_SIZE);
4138 /* Remember the last configuration */
4139 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4141 /* Print the indirection table */
4142 if (netif_msg_ifup(bp))
4143 bnx2x_debug_print_ind_table(bp, p);
4146 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4147 memcpy(&data->rss_key[0], &p->rss_key[0],
4148 sizeof(data->rss_key));
4149 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4153 * No need for an explicit memory barrier here as long as we
4154 * ensure the ordering of writing to the SPQ element
4155 * and updating of the SPQ producer which involves a memory
4156 * read. If the memory read is removed we will have to put a
4157 * full memory barrier there (inside bnx2x_sp_post()).
4161 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4162 U64_HI(r->rdata_mapping),
4163 U64_LO(r->rdata_mapping),
4164 ETH_CONNECTION_TYPE);
4172 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4175 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
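/* Configure RSS: unless only a driver cleanup was requested, mark the
 * ramrod as pending, send the RSS update ramrod via o->config_rss() and,
 * if RAMROD_COMP_WAIT is set, wait for the completion.
 */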
4178 int bnx2x_config_rss(struct bnx2x *bp,
4179 struct bnx2x_config_rss_params *p)
4182 struct bnx2x_rss_config_obj *o = p->rss_obj;
4183 struct bnx2x_raw_obj *r = &o->raw;
4185 /* Do nothing if only driver cleanup was requested */
4186 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4191 rc = o->config_rss(bp, p);
4193 r->clear_pending(r);
4197 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4198 rc = r->wait_comp(bp, r);
4204 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4205 struct bnx2x_rss_config_obj *rss_obj,
4206 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4207 void *rdata, dma_addr_t rdata_mapping,
4208 int state, unsigned long *pstate,
4209 bnx2x_obj_type type)
4211 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4212 rdata_mapping, state, pstate, type);
4214 rss_obj->engine_id = engine_id;
4215 rss_obj->config_rss = bnx2x_setup_rss;
4218 /********************** Queue state object ***********************************/
4221 * bnx2x_queue_state_change - perform Queue state change transition
4223 * @bp: device handle
4224 * @params: parameters to perform the transition
4226 * returns 0 in case of successfully completed transition, negative error
4227 * code in case of failure, positive (EBUSY) value if there is a completion
4228 * that is still pending (possible only if RAMROD_COMP_WAIT is
4229 * not set in params->ramrod_flags for asynchronous commands).
4232 int bnx2x_queue_state_change(struct bnx2x *bp,
4233 struct bnx2x_queue_state_params *params)
4235 struct bnx2x_queue_sp_obj *o = params->q_obj;
4236 int rc, pending_bit;
4237 unsigned long *pending = &o->pending;
4239 /* Check that the requested transition is legal */
4240 if (o->check_transition(bp, o, params))
4243 /* Set "pending" bit */
4244 pending_bit = o->set_pending(o, params);
4246 /* Don't send a command if only driver cleanup was requested */
4247 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4248 o->complete_cmd(bp, o, pending_bit);
4251 rc = o->send_cmd(bp, params);
4253 o->next_state = BNX2X_Q_STATE_MAX;
4254 clear_bit(pending_bit, pending);
4255 smp_mb__after_clear_bit();
4259 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4260 rc = o->wait_comp(bp, o, pending_bit);
4268 return !!test_bit(pending_bit, pending);
4272 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4273 struct bnx2x_queue_state_params *params)
4275 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4277 /* ACTIVATE and DEACTIVATE commands are implemented on top of
4280 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4281 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4282 bit = BNX2X_Q_CMD_UPDATE;
4286 set_bit(bit, &obj->pending);
4290 static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4291 struct bnx2x_queue_sp_obj *o,
4292 enum bnx2x_queue_cmd cmd)
4294 return bnx2x_state_wait(bp, cmd, &o->pending);
4298 * bnx2x_queue_comp_cmd - complete the state change command.
4300 * @bp: device handle
4304 * Checks that the completion that has arrived is an expected one.
4306 static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4307 struct bnx2x_queue_sp_obj *o,
4308 enum bnx2x_queue_cmd cmd)
4310 unsigned long cur_pending = o->pending;
4312 if (!test_and_clear_bit(cmd, &cur_pending)) {
4313 BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4314 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
4315 o->state, cur_pending, o->next_state);
4319 if (o->next_tx_only >= o->max_cos)
4320 /* >= because tx only must always be smaller than cos since the
4321 * primary connection supports COS 0
4323 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4324 o->next_tx_only, o->max_cos);
4327 "Completing command %d for queue %d, setting state to %d\n",
4328 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4330 if (o->next_tx_only) /* print num tx-only if any exist */
4331 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4332 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4334 o->state = o->next_state;
4335 o->num_tx_only = o->next_tx_only;
4336 o->next_state = BNX2X_Q_STATE_MAX;
4338 /* It's important that o->state and o->next_state are
4339 * updated before o->pending.
4343 clear_bit(cmd, &o->pending);
4344 smp_mb__after_clear_bit();
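/* The helpers below translate the driver's queue setup parameters into the
 * client_init ramrod data: the general, Tx, Rx and flow-control (pause)
 * parts are filled from the corresponding bnx2x_*_setup_params structures
 * and the BNX2X_Q_FLG_* flags.
 */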
4349 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4350 struct bnx2x_queue_state_params *cmd_params,
4351 struct client_init_ramrod_data *data)
4353 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4357 /* IPv6 TPA supported for E2 and above only */
4358 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4359 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4362 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4363 struct bnx2x_queue_sp_obj *o,
4364 struct bnx2x_general_setup_params *params,
4365 struct client_init_general_data *gen_data,
4366 unsigned long *flags)
4368 gen_data->client_id = o->cl_id;
4370 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4371 gen_data->statistics_counter_id =
4373 gen_data->statistics_en_flg = 1;
4374 gen_data->statistics_zero_flg =
4375 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4377 gen_data->statistics_counter_id =
4378 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4380 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4381 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4382 gen_data->sp_client_id = params->spcl_id;
4383 gen_data->mtu = cpu_to_le16(params->mtu);
4384 gen_data->func_id = o->func_id;
4387 gen_data->cos = params->cos;
4389 gen_data->traffic_type =
4390 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4391 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4393 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4394 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4397 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4398 struct bnx2x_txq_setup_params *params,
4399 struct client_init_tx_data *tx_data,
4400 unsigned long *flags)
4402 tx_data->enforce_security_flg =
4403 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4404 tx_data->default_vlan =
4405 cpu_to_le16(params->default_vlan);
4406 tx_data->default_vlan_flg =
4407 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4408 tx_data->tx_switching_flg =
4409 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4410 tx_data->anti_spoofing_flg =
4411 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4412 tx_data->force_default_pri_flg =
4413 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4415 tx_data->tx_status_block_id = params->fw_sb_id;
4416 tx_data->tx_sb_index_number = params->sb_cq_index;
4417 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4419 tx_data->tx_bd_page_base.lo =
4420 cpu_to_le32(U64_LO(params->dscr_map));
4421 tx_data->tx_bd_page_base.hi =
4422 cpu_to_le32(U64_HI(params->dscr_map));
4424 /* Don't configure any Tx switching mode during queue SETUP */
4428 static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4429 struct rxq_pause_params *params,
4430 struct client_init_rx_data *rx_data)
4432 /* flow control data */
4433 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4434 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4435 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4436 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4437 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4438 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4439 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4442 static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4443 struct bnx2x_rxq_setup_params *params,
4444 struct client_init_rx_data *rx_data,
4445 unsigned long *flags)
4447 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4448 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4449 rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4450 CLIENT_INIT_RX_DATA_TPA_MODE;
4451 rx_data->vmqueue_mode_en_flg = 0;
4453 rx_data->cache_line_alignment_log_size =
4454 params->cache_line_log;
4455 rx_data->enable_dynamic_hc =
4456 test_bit(BNX2X_Q_FLG_DHC, flags);
4457 rx_data->max_sges_for_packet = params->max_sges_pkt;
4458 rx_data->client_qzone_id = params->cl_qzone_id;
4459 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4461 /* Always start in DROP_ALL mode */
4462 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4463 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4465 /* We don't set drop flags */
4466 rx_data->drop_ip_cs_err_flg = 0;
4467 rx_data->drop_tcp_cs_err_flg = 0;
4468 rx_data->drop_ttl0_flg = 0;
4469 rx_data->drop_udp_cs_err_flg = 0;
4470 rx_data->inner_vlan_removal_enable_flg =
4471 test_bit(BNX2X_Q_FLG_VLAN, flags);
4472 rx_data->outer_vlan_removal_enable_flg =
4473 test_bit(BNX2X_Q_FLG_OV, flags);
4474 rx_data->status_block_id = params->fw_sb_id;
4475 rx_data->rx_sb_index_number = params->sb_cq_index;
4476 rx_data->max_tpa_queues = params->max_tpa_queues;
4477 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4478 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4479 rx_data->bd_page_base.lo =
4480 cpu_to_le32(U64_LO(params->dscr_map));
4481 rx_data->bd_page_base.hi =
4482 cpu_to_le32(U64_HI(params->dscr_map));
4483 rx_data->sge_page_base.lo =
4484 cpu_to_le32(U64_LO(params->sge_map));
4485 rx_data->sge_page_base.hi =
4486 cpu_to_le32(U64_HI(params->sge_map));
4487 rx_data->cqe_page_base.lo =
4488 cpu_to_le32(U64_LO(params->rcq_map));
4489 rx_data->cqe_page_base.hi =
4490 cpu_to_le32(U64_HI(params->rcq_map));
4491 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4493 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4494 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4495 rx_data->is_approx_mcast = 1;
4498 rx_data->rss_engine_id = params->rss_engine_id;
4500 /* silent vlan removal */
4501 rx_data->silent_vlan_removal_flg =
4502 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4503 rx_data->silent_vlan_value =
4504 cpu_to_le16(params->silent_removal_value);
4505 rx_data->silent_vlan_mask =
4506 cpu_to_le16(params->silent_removal_mask);
4510 /* initialize the general, tx and rx parts of a queue object */
4511 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4512 struct bnx2x_queue_state_params *cmd_params,
4513 struct client_init_ramrod_data *data)
4515 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4516 &cmd_params->params.setup.gen_params,
4518 &cmd_params->params.setup.flags);
4520 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4521 &cmd_params->params.setup.txq_params,
4523 &cmd_params->params.setup.flags);
4525 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4526 &cmd_params->params.setup.rxq_params,
4528 &cmd_params->params.setup.flags);
4530 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4531 &cmd_params->params.setup.pause_params,
4535 /* initialize the general and tx parts of a tx-only queue object */
4536 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4537 struct bnx2x_queue_state_params *cmd_params,
4538 struct tx_queue_init_ramrod_data *data)
4540 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4541 &cmd_params->params.tx_only.gen_params,
4543 &cmd_params->params.tx_only.flags);
4545 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4546 &cmd_params->params.tx_only.txq_params,
4548 &cmd_params->params.tx_only.flags);
4550 DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
4551 cmd_params->q_obj->cids[0],
4552 data->tx.tx_bd_page_base.lo,
4553 data->tx.tx_bd_page_base.hi);
4557 * bnx2x_q_init - init HW/FW queue
4559 * @bp: device handle
4562 * HW/FW initial Queue configuration:
4563 * - HC: Rx and Tx
4564 * - CDU context validation
4567 static inline int bnx2x_q_init(struct bnx2x *bp,
4568 struct bnx2x_queue_state_params *params)
4570 struct bnx2x_queue_sp_obj *o = params->q_obj;
4571 struct bnx2x_queue_init_params *init = &params->params.init;
4575 /* Tx HC configuration */
4576 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4577 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4578 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4580 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4581 init->tx.sb_cq_index,
4582 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4586 /* Rx HC configuration */
4587 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4588 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4589 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4591 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4592 init->rx.sb_cq_index,
4593 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4597 /* Set CDU context validation values */
4598 for (cos = 0; cos < o->max_cos; cos++) {
4599 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4601 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4602 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4605 /* As no ramrod is sent, complete the command immediately */
4606 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
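/*
 * Example (illustrative sketch, not part of the driver): callers normally
 * reach bnx2x_q_init() through the generic queue state-change entry point
 * declared in bnx2x_sp.h rather than calling it directly. The fp/context
 * names below are placeholders for a caller's fastpath bookkeeping:
 *
 *	struct bnx2x_queue_state_params q_params = {NULL};
 *
 *	q_params.q_obj = &fp->q_obj;
 *	q_params.cmd = BNX2X_Q_CMD_INIT;
 *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *	q_params.params.init.tx.fw_sb_id = fp->fw_sb_id;
 *	q_params.params.init.rx.fw_sb_id = fp->fw_sb_id;
 *	q_params.params.init.cxts[0] = &context->eth;
 *
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 */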
4614 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4615 struct bnx2x_queue_state_params *params)
4617 struct bnx2x_queue_sp_obj *o = params->q_obj;
4618 struct client_init_ramrod_data *rdata =
4619 (struct client_init_ramrod_data *)o->rdata;
4620 dma_addr_t data_mapping = o->rdata_mapping;
4621 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4623 /* Clear the ramrod data */
4624 memset(rdata, 0, sizeof(*rdata));
4626 /* Fill the ramrod data */
4627 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4630 * No need for an explicit memory barrier here as long as we would
4631 * need to ensure the ordering of writing to the SPQ element
4632 * and updating of the SPQ producer which involves a memory
4633 * read and we will have to put a full memory barrier there
4634 * (inside bnx2x_sp_post()).
4637 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4638 U64_HI(data_mapping),
4639 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4642 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4643 struct bnx2x_queue_state_params *params)
4645 struct bnx2x_queue_sp_obj *o = params->q_obj;
4646 struct client_init_ramrod_data *rdata =
4647 (struct client_init_ramrod_data *)o->rdata;
4648 dma_addr_t data_mapping = o->rdata_mapping;
4649 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4651 /* Clear the ramrod data */
4652 memset(rdata, 0, sizeof(*rdata));
4654 /* Fill the ramrod data */
4655 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4656 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4659 * No need for an explicit memory barrier here as long as we would
4660 * need to ensure the ordering of writing to the SPQ element
4661 * and updating of the SPQ producer which involves a memory
4662 * read and we will have to put a full memory barrier there
4663 * (inside bnx2x_sp_post()).
4666 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4667 U64_HI(data_mapping),
4668 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4671 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4672 struct bnx2x_queue_state_params *params)
4674 struct bnx2x_queue_sp_obj *o = params->q_obj;
4675 struct tx_queue_init_ramrod_data *rdata =
4676 (struct tx_queue_init_ramrod_data *)o->rdata;
4677 dma_addr_t data_mapping = o->rdata_mapping;
4678 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4679 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4680 &params->params.tx_only;
4681 u8 cid_index = tx_only_params->cid_index;
4684 if (cid_index >= o->max_cos) {
4685 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4686 o->cl_id, cid_index);
4690 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4691 tx_only_params->gen_params.cos,
4692 tx_only_params->gen_params.spcl_id);
4694 /* Clear the ramrod data */
4695 memset(rdata, 0, sizeof(*rdata));
4697 /* Fill the ramrod data */
4698 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4700 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4701 o->cids[cid_index], rdata->general.client_id,
4702 rdata->general.sp_client_id, rdata->general.cos);
4705 * No need for an explicit memory barrier here as long as we would
4706 * need to ensure the ordering of writing to the SPQ element
4707 * and updating of the SPQ producer which involves a memory
4708 * read and we will have to put a full memory barrier there
4709 * (inside bnx2x_sp_post()).
4712 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4713 U64_HI(data_mapping),
4714 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4717 static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4718 struct bnx2x_queue_sp_obj *obj,
4719 struct bnx2x_queue_update_params *params,
4720 struct client_update_ramrod_data *data)
4722 /* Client ID of the client to update */
4723 data->client_id = obj->cl_id;
4725 /* Function ID of the client to update */
4726 data->func_id = obj->func_id;
4728 /* Default VLAN value */
4729 data->default_vlan = cpu_to_le16(params->def_vlan);
4731 /* Inner VLAN stripping */
4732 data->inner_vlan_removal_enable_flg =
4733 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4734 data->inner_vlan_removal_change_flg =
4735 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4736 &params->update_flags);
4738 /* Outer VLAN stripping */
4739 data->outer_vlan_removal_enable_flg =
4740 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4741 data->outer_vlan_removal_change_flg =
4742 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4743 &params->update_flags);
4745 /* Drop packets with a source MAC that doesn't belong to this Queue */
4748 data->anti_spoofing_enable_flg =
4749 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4750 data->anti_spoofing_change_flg =
4751 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4753 /* Activate/Deactivate */
4754 data->activate_flg =
4755 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4756 data->activate_change_flg =
4757 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4759 /* Enable default VLAN */
4760 data->default_vlan_enable_flg =
4761 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4762 data->default_vlan_change_flg =
4763 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4764 &params->update_flags);
4766 /* silent vlan removal */
4767 data->silent_vlan_change_flg =
4768 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4769 &params->update_flags);
4770 data->silent_vlan_removal_flg =
4771 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4772 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4773 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
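/*
 * Example (illustrative sketch, not part of the driver): each UPDATE
 * property is carried as a value bit plus a matching *_CHNG bit, so the
 * ramrod only touches fields whose change flag is set. Enabling
 * anti-spoofing while leaving everything else untouched might look like
 * this (fp->q_obj is a placeholder for the caller's queue object):
 *
 *	struct bnx2x_queue_state_params q_params = {NULL};
 *
 *	q_params.q_obj = &fp->q_obj;
 *	q_params.cmd = BNX2X_Q_CMD_UPDATE;
 *	__set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF,
 *		  &q_params.params.update.update_flags);
 *	__set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
 *		  &q_params.params.update.update_flags);
 *
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 */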
4776 static inline int bnx2x_q_send_update(struct bnx2x *bp,
4777 struct bnx2x_queue_state_params *params)
4779 struct bnx2x_queue_sp_obj *o = params->q_obj;
4780 struct client_update_ramrod_data *rdata =
4781 (struct client_update_ramrod_data *)o->rdata;
4782 dma_addr_t data_mapping = o->rdata_mapping;
4783 struct bnx2x_queue_update_params *update_params =
4784 &params->params.update;
4785 u8 cid_index = update_params->cid_index;
4787 if (cid_index >= o->max_cos) {
4788 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4789 o->cl_id, cid_index);
4794 /* Clear the ramrod data */
4795 memset(rdata, 0, sizeof(*rdata));
4797 /* Fill the ramrod data */
4798 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4801 * No need for an explicit memory barrier here as long as we would
4802 * need to ensure the ordering of writing to the SPQ element
4803 * and updating of the SPQ producer which involves a memory
4804 * read and we will have to put a full memory barrier there
4805 * (inside bnx2x_sp_post()).
4808 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4809 o->cids[cid_index], U64_HI(data_mapping),
4810 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4814 * bnx2x_q_send_deactivate - send DEACTIVATE command
4816 * @bp: device handle
4819 * implemented using the UPDATE command.
4821 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4822 struct bnx2x_queue_state_params *params)
4824 struct bnx2x_queue_update_params *update = &params->params.update;
4826 memset(update, 0, sizeof(*update));
4828 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4830 return bnx2x_q_send_update(bp, params);
4834 * bnx2x_q_send_activate - send ACTIVATE command
4836 * @bp: device handle
4839 * implemented using the UPDATE command.
4841 static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4842 struct bnx2x_queue_state_params *params)
4844 struct bnx2x_queue_update_params *update = &params->params.update;
4846 memset(update, 0, sizeof(*update));
4848 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4849 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4851 return bnx2x_q_send_update(bp, params);
4854 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4855 struct bnx2x_queue_state_params *params)
4857 /* TODO: Not implemented yet. */
4861 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4862 struct bnx2x_queue_state_params *params)
4864 struct bnx2x_queue_sp_obj *o = params->q_obj;
4866 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4867 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4868 ETH_CONNECTION_TYPE);
4871 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4872 struct bnx2x_queue_state_params *params)
4874 struct bnx2x_queue_sp_obj *o = params->q_obj;
4875 u8 cid_idx = params->params.cfc_del.cid_index;
4877 if (cid_idx >= o->max_cos) {
4878 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4883 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4884 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4887 static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4888 struct bnx2x_queue_state_params *params)
4890 struct bnx2x_queue_sp_obj *o = params->q_obj;
4891 u8 cid_index = params->params.terminate.cid_index;
4893 if (cid_index >= o->max_cos) {
4894 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4895 o->cl_id, cid_index);
4899 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4900 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4903 static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4904 struct bnx2x_queue_state_params *params)
4906 struct bnx2x_queue_sp_obj *o = params->q_obj;
4908 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4909 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4910 ETH_CONNECTION_TYPE);
4913 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4914 struct bnx2x_queue_state_params *params)
4916 switch (params->cmd) {
4917 case BNX2X_Q_CMD_INIT:
4918 return bnx2x_q_init(bp, params);
4919 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4920 return bnx2x_q_send_setup_tx_only(bp, params);
4921 case BNX2X_Q_CMD_DEACTIVATE:
4922 return bnx2x_q_send_deactivate(bp, params);
4923 case BNX2X_Q_CMD_ACTIVATE:
4924 return bnx2x_q_send_activate(bp, params);
4925 case BNX2X_Q_CMD_UPDATE:
4926 return bnx2x_q_send_update(bp, params);
4927 case BNX2X_Q_CMD_UPDATE_TPA:
4928 return bnx2x_q_send_update_tpa(bp, params);
4929 case BNX2X_Q_CMD_HALT:
4930 return bnx2x_q_send_halt(bp, params);
4931 case BNX2X_Q_CMD_CFC_DEL:
4932 return bnx2x_q_send_cfc_del(bp, params);
4933 case BNX2X_Q_CMD_TERMINATE:
4934 return bnx2x_q_send_terminate(bp, params);
4935 case BNX2X_Q_CMD_EMPTY:
4936 return bnx2x_q_send_empty(bp, params);
4938 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4943 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4944 struct bnx2x_queue_state_params *params)
4946 switch (params->cmd) {
4947 case BNX2X_Q_CMD_SETUP:
4948 return bnx2x_q_send_setup_e1x(bp, params);
4949 case BNX2X_Q_CMD_INIT:
4950 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4951 case BNX2X_Q_CMD_DEACTIVATE:
4952 case BNX2X_Q_CMD_ACTIVATE:
4953 case BNX2X_Q_CMD_UPDATE:
4954 case BNX2X_Q_CMD_UPDATE_TPA:
4955 case BNX2X_Q_CMD_HALT:
4956 case BNX2X_Q_CMD_CFC_DEL:
4957 case BNX2X_Q_CMD_TERMINATE:
4958 case BNX2X_Q_CMD_EMPTY:
4959 return bnx2x_queue_send_cmd_cmn(bp, params);
4961 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4966 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4967 struct bnx2x_queue_state_params *params)
4969 switch (params->cmd) {
4970 case BNX2X_Q_CMD_SETUP:
4971 return bnx2x_q_send_setup_e2(bp, params);
4972 case BNX2X_Q_CMD_INIT:
4973 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4974 case BNX2X_Q_CMD_DEACTIVATE:
4975 case BNX2X_Q_CMD_ACTIVATE:
4976 case BNX2X_Q_CMD_UPDATE:
4977 case BNX2X_Q_CMD_UPDATE_TPA:
4978 case BNX2X_Q_CMD_HALT:
4979 case BNX2X_Q_CMD_CFC_DEL:
4980 case BNX2X_Q_CMD_TERMINATE:
4981 case BNX2X_Q_CMD_EMPTY:
4982 return bnx2x_queue_send_cmd_cmn(bp, params);
4984 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4990 * bnx2x_queue_chk_transition - check state machine of a regular Queue
4992 * @bp: device handle
4997 * It both checks if the requested command is legal in a current
4998 * state and, if it's legal, sets a `next_state' in the object
4999 * that will be used in the completion flow to set the `state'
5002 * returns 0 if a requested command is a legal transition,
5003 * -EINVAL otherwise.
5005 static int bnx2x_queue_chk_transition(struct bnx2x *bp,
5006 struct bnx2x_queue_sp_obj *o,
5007 struct bnx2x_queue_state_params *params)
5009 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
5010 enum bnx2x_queue_cmd cmd = params->cmd;
5011 struct bnx2x_queue_update_params *update_params =
5012 &params->params.update;
5013 u8 next_tx_only = o->num_tx_only;
5016 * Forget all pending-for-completion commands if a driver-only state
5017 * transition has been requested.
5019 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5021 o->next_state = BNX2X_Q_STATE_MAX;
5025 * Don't allow a next state transition if we are in the middle of
5032 case BNX2X_Q_STATE_RESET:
5033 if (cmd == BNX2X_Q_CMD_INIT)
5034 next_state = BNX2X_Q_STATE_INITIALIZED;
5037 case BNX2X_Q_STATE_INITIALIZED:
5038 if (cmd == BNX2X_Q_CMD_SETUP) {
5039 if (test_bit(BNX2X_Q_FLG_ACTIVE,
5040 &params->params.setup.flags))
5041 next_state = BNX2X_Q_STATE_ACTIVE;
5043 next_state = BNX2X_Q_STATE_INACTIVE;
5047 case BNX2X_Q_STATE_ACTIVE:
5048 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5049 next_state = BNX2X_Q_STATE_INACTIVE;
5051 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5052 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5053 next_state = BNX2X_Q_STATE_ACTIVE;
5055 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5056 next_state = BNX2X_Q_STATE_MULTI_COS;
5060 else if (cmd == BNX2X_Q_CMD_HALT)
5061 next_state = BNX2X_Q_STATE_STOPPED;
5063 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5064 /* If "active" state change is requested, update the
5065 * state accordingly.
5067 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5068 &update_params->update_flags) &&
5069 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5070 &update_params->update_flags))
5071 next_state = BNX2X_Q_STATE_INACTIVE;
5073 next_state = BNX2X_Q_STATE_ACTIVE;
5077 case BNX2X_Q_STATE_MULTI_COS:
5078 if (cmd == BNX2X_Q_CMD_TERMINATE)
5079 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5081 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5082 next_state = BNX2X_Q_STATE_MULTI_COS;
5083 next_tx_only = o->num_tx_only + 1;
5086 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5087 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5088 next_state = BNX2X_Q_STATE_MULTI_COS;
5090 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5091 /* If "active" state change is requested, update the
5092 * state accordingly.
5094 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5095 &update_params->update_flags) &&
5096 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5097 &update_params->update_flags))
5098 next_state = BNX2X_Q_STATE_INACTIVE;
5100 next_state = BNX2X_Q_STATE_MULTI_COS;
5104 case BNX2X_Q_STATE_MCOS_TERMINATED:
5105 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5106 next_tx_only = o->num_tx_only - 1;
5107 if (next_tx_only == 0)
5108 next_state = BNX2X_Q_STATE_ACTIVE;
5110 next_state = BNX2X_Q_STATE_MULTI_COS;
5114 case BNX2X_Q_STATE_INACTIVE:
5115 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5116 next_state = BNX2X_Q_STATE_ACTIVE;
5118 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5119 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5120 next_state = BNX2X_Q_STATE_INACTIVE;
5122 else if (cmd == BNX2X_Q_CMD_HALT)
5123 next_state = BNX2X_Q_STATE_STOPPED;
5125 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5126 /* If "active" state change is requested, update the
5127 * state accordingly.
5129 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5130 &update_params->update_flags) &&
5131 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5132 &update_params->update_flags)){
5133 if (o->num_tx_only == 0)
5134 next_state = BNX2X_Q_STATE_ACTIVE;
5135 else /* tx only queues exist for this queue */
5136 next_state = BNX2X_Q_STATE_MULTI_COS;
5138 next_state = BNX2X_Q_STATE_INACTIVE;
5142 case BNX2X_Q_STATE_STOPPED:
5143 if (cmd == BNX2X_Q_CMD_TERMINATE)
5144 next_state = BNX2X_Q_STATE_TERMINATED;
5147 case BNX2X_Q_STATE_TERMINATED:
5148 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5149 next_state = BNX2X_Q_STATE_RESET;
5153 BNX2X_ERR("Illegal state: %d\n", state);
5156 /* Transition is assured */
5157 if (next_state != BNX2X_Q_STATE_MAX) {
5158 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5159 state, cmd, next_state);
5160 o->next_state = next_state;
5161 o->next_tx_only = next_tx_only;
5165 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
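/*
 * Example (illustrative sketch, not part of the driver): tearing a queue
 * down follows the legal path enforced above - HALT from ACTIVE/INACTIVE,
 * then TERMINATE, then CFC_DEL back to RESET (fp->q_obj is a placeholder):
 *
 *	struct bnx2x_queue_state_params q_params = {NULL};
 *
 *	q_params.q_obj = &fp->q_obj;
 *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *
 *	q_params.cmd = BNX2X_Q_CMD_HALT;
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 *
 *	q_params.cmd = BNX2X_Q_CMD_TERMINATE;
 *	q_params.params.terminate.cid_index = 0;
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 *
 *	q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
 *	q_params.params.cfc_del.cid_index = 0;
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 */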
5170 void bnx2x_init_queue_obj(struct bnx2x *bp,
5171 struct bnx2x_queue_sp_obj *obj,
5172 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5174 dma_addr_t rdata_mapping, unsigned long type)
5176 memset(obj, 0, sizeof(*obj));
5178 /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5179 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5181 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5182 obj->max_cos = cid_cnt;
5184 obj->func_id = func_id;
5186 obj->rdata_mapping = rdata_mapping;
5188 obj->next_state = BNX2X_Q_STATE_MAX;
5190 if (CHIP_IS_E1x(bp))
5191 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5193 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5195 obj->check_transition = bnx2x_queue_chk_transition;
5197 obj->complete_cmd = bnx2x_queue_comp_cmd;
5198 obj->wait_comp = bnx2x_queue_wait_comp;
5199 obj->set_pending = bnx2x_queue_set_pending;
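/*
 * Example (illustrative sketch, not part of the driver): a queue object is
 * set up once per fastpath queue before any state-change commands are
 * issued. The fp, rdata and rdata_mapping names are placeholders for the
 * caller's fastpath structure and slow-path ramrod buffer:
 *
 *	unsigned long q_type = 0;
 *	u32 cids[BNX2X_MULTI_TX_COS];
 *
 *	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
 *	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
 *	cids[0] = fp->cid;
 *
 *	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, 1, BP_FUNC(bp),
 *			     rdata, rdata_mapping, q_type);
 */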
5202 /********************** Function state object *********************************/
5203 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5204 struct bnx2x_func_sp_obj *o)
5206 /* in the middle of a transaction - return INVALID state */
5208 return BNX2X_F_STATE_MAX;
5211 * ensure the order of reading of o->pending and o->state
5212 * o->pending should be read first
5219 static int bnx2x_func_wait_comp(struct bnx2x *bp,
5220 struct bnx2x_func_sp_obj *o,
5221 enum bnx2x_func_cmd cmd)
5223 return bnx2x_state_wait(bp, cmd, &o->pending);
5227 * bnx2x_func_state_change_comp - complete the state machine transition
5229 * @bp: device handle
5233 * Called on state change transition. Completes the state
5234 * machine transition only - no HW interaction.
5236 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5237 struct bnx2x_func_sp_obj *o,
5238 enum bnx2x_func_cmd cmd)
5240 unsigned long cur_pending = o->pending;
5242 if (!test_and_clear_bit(cmd, &cur_pending)) {
5243 BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5244 cmd, BP_FUNC(bp), o->state,
5245 cur_pending, o->next_state);
5250 "Completing command %d for func %d, setting state to %d\n",
5251 cmd, BP_FUNC(bp), o->next_state);
5253 o->state = o->next_state;
5254 o->next_state = BNX2X_F_STATE_MAX;
5256 /* It's important that o->state and o->next_state are
5257 * updated before o->pending.
5261 clear_bit(cmd, &o->pending);
5262 smp_mb__after_clear_bit();
5268 * bnx2x_func_comp_cmd - complete the state change command
5270 * @bp: device handle
5274 * Checks that the arrived completion is expected.
5276 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5277 struct bnx2x_func_sp_obj *o,
5278 enum bnx2x_func_cmd cmd)
5280 /* Complete the state machine part first, check if it's a
5283 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5288 * bnx2x_func_chk_transition - perform function state machine transition
5290 * @bp: device handle
5294 * It both checks if the requested command is legal in a current
5295 * state and, if it's legal, sets a `next_state' in the object
5296 * that will be used in the completion flow to set the `state'
5299 * returns 0 if a requested command is a legal transition,
5300 * -EINVAL otherwise.
5302 static int bnx2x_func_chk_transition(struct bnx2x *bp,
5303 struct bnx2x_func_sp_obj *o,
5304 struct bnx2x_func_state_params *params)
5306 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5307 enum bnx2x_func_cmd cmd = params->cmd;
5310 * Forget all pending-for-completion commands if a driver-only state
5311 * transition has been requested.
5313 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5315 o->next_state = BNX2X_F_STATE_MAX;
5319 * Don't allow a next state transition if we are in the middle of
5326 case BNX2X_F_STATE_RESET:
5327 if (cmd == BNX2X_F_CMD_HW_INIT)
5328 next_state = BNX2X_F_STATE_INITIALIZED;
5331 case BNX2X_F_STATE_INITIALIZED:
5332 if (cmd == BNX2X_F_CMD_START)
5333 next_state = BNX2X_F_STATE_STARTED;
5335 else if (cmd == BNX2X_F_CMD_HW_RESET)
5336 next_state = BNX2X_F_STATE_RESET;
5339 case BNX2X_F_STATE_STARTED:
5340 if (cmd == BNX2X_F_CMD_STOP)
5341 next_state = BNX2X_F_STATE_INITIALIZED;
5342 /* afex ramrods can be sent only in started mode, and only
5343 * if not pending for function_stop ramrod completion
5344 * for these events - next state remains STARTED.
5346 else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5347 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5348 next_state = BNX2X_F_STATE_STARTED;
5350 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5351 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5352 next_state = BNX2X_F_STATE_STARTED;
5354 /* Switch_update ramrod can be sent in either started or
5355 * tx_stopped state, and it doesn't change the state.
5357 else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5358 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5359 next_state = BNX2X_F_STATE_STARTED;
5361 else if (cmd == BNX2X_F_CMD_TX_STOP)
5362 next_state = BNX2X_F_STATE_TX_STOPPED;
5365 case BNX2X_F_STATE_TX_STOPPED:
5366 if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5367 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5368 next_state = BNX2X_F_STATE_TX_STOPPED;
5370 else if (cmd == BNX2X_F_CMD_TX_START)
5371 next_state = BNX2X_F_STATE_STARTED;
5375 BNX2X_ERR("Unknown state: %d\n", state);
5378 /* Transition is assured */
5379 if (next_state != BNX2X_F_STATE_MAX) {
5380 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5381 state, cmd, next_state);
5382 o->next_state = next_state;
5386 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
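/*
 * For reference, the transitions accepted above form the following
 * function lifecycle:
 *
 *	RESET --HW_INIT--> INITIALIZED --START--> STARTED
 *	STARTED --TX_STOP--> TX_STOPPED --TX_START--> STARTED
 *	STARTED --STOP--> INITIALIZED --HW_RESET--> RESET
 *
 * AFEX_UPDATE, AFEX_VIFLISTS and SWITCH_UPDATE are accepted only while no
 * STOP is pending; they leave the state unchanged (SWITCH_UPDATE is also
 * accepted in TX_STOPPED).
 */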
5393 * bnx2x_func_init_func - performs HW init at function stage
5395 * @bp: device handle
5398 * Init HW when the current phase is
5399 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize FUNCTION-only HW blocks.
5402 static inline int bnx2x_func_init_func(struct bnx2x *bp,
5403 const struct bnx2x_func_sp_drv_ops *drv)
5405 return drv->init_hw_func(bp);
5409 * bnx2x_func_init_port - performs HW init at port stage
5411 * @bp: device handle
5414 * Init HW when the current phase is
5415 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5416 * FUNCTION-only HW blocks.
5419 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5420 const struct bnx2x_func_sp_drv_ops *drv)
5422 int rc = drv->init_hw_port(bp);
5426 return bnx2x_func_init_func(bp, drv);
5430 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5432 * @bp: device handle
5435 * Init HW when the current phase is
5436 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5437 * PORT-only and FUNCTION-only HW blocks.
5439 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5440 const struct bnx2x_func_sp_drv_ops *drv)
5442 int rc = drv->init_hw_cmn_chip(bp);
5446 return bnx2x_func_init_port(bp, drv);
5450 * bnx2x_func_init_cmn - performs HW init at common stage
5452 * @bp: device handle
5455 * Init HW when the current phase is
5456 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5457 * PORT-only and FUNCTION-only HW blocks.
5459 static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5460 const struct bnx2x_func_sp_drv_ops *drv)
5462 int rc = drv->init_hw_cmn(bp);
5466 return bnx2x_func_init_port(bp, drv);
5469 static int bnx2x_func_hw_init(struct bnx2x *bp,
5470 struct bnx2x_func_state_params *params)
5472 u32 load_code = params->params.hw_init.load_phase;
5473 struct bnx2x_func_sp_obj *o = params->f_obj;
5474 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5477 DP(BNX2X_MSG_SP, "function %d load_code %x\n",
5478 BP_ABS_FUNC(bp), load_code);
5480 /* Prepare buffers for unzipping the FW */
5481 rc = drv->gunzip_init(bp);
5486 rc = drv->init_fw(bp);
5488 BNX2X_ERR("Error loading firmware\n");
5492 /* Handle the beginning of COMMON_XXX phases separately... */
5493 switch (load_code) {
5494 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5495 rc = bnx2x_func_init_cmn_chip(bp, drv);
5500 case FW_MSG_CODE_DRV_LOAD_COMMON:
5501 rc = bnx2x_func_init_cmn(bp, drv);
5506 case FW_MSG_CODE_DRV_LOAD_PORT:
5507 rc = bnx2x_func_init_port(bp, drv);
5512 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5513 rc = bnx2x_func_init_func(bp, drv);
5519 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5524 drv->gunzip_end(bp);
5526 /* In case of success, complete the command immediately: no ramrods have been sent. */
5530 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
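/*
 * Example (illustrative sketch, not part of the driver): HW_INIT is driven
 * through bnx2x_func_state_change() below with the load phase returned by
 * the MCP; bp->func_obj is a placeholder for the caller's function object:
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_HW_INIT;
 *	func_params.params.hw_init.load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
 *
 *	rc = bnx2x_func_state_change(bp, &func_params);
 */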
5536 * bnx2x_func_reset_func - reset HW at function stage
5538 * @bp: device handle
5541 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5542 * FUNCTION-only HW blocks.
5544 static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5545 const struct bnx2x_func_sp_drv_ops *drv)
5547 drv->reset_hw_func(bp);
5551 * bnx2x_func_reset_port - reset HW at port stage
5553 * @bp: device handle
5556 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5557 * FUNCTION-only and PORT-only HW blocks.
5561 * It's important to call reset_port before reset_func() as the last thing
5562 * reset_func does is pf_disable() thus disabling PGLUE_B, which
5563 * makes any further DMAE transactions impossible.
5565 static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5566 const struct bnx2x_func_sp_drv_ops *drv)
5568 drv->reset_hw_port(bp);
5569 bnx2x_func_reset_func(bp, drv);
5573 * bnx2x_func_reset_cmn - reset HW at common stage
5575 * @bp: device handle
5578 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5579 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5580 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5582 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5583 const struct bnx2x_func_sp_drv_ops *drv)
5585 bnx2x_func_reset_port(bp, drv);
5586 drv->reset_hw_cmn(bp);
5590 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5591 struct bnx2x_func_state_params *params)
5593 u32 reset_phase = params->params.hw_reset.reset_phase;
5594 struct bnx2x_func_sp_obj *o = params->f_obj;
5595 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5597 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5600 switch (reset_phase) {
5601 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5602 bnx2x_func_reset_cmn(bp, drv);
5604 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5605 bnx2x_func_reset_port(bp, drv);
5607 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5608 bnx2x_func_reset_func(bp, drv);
5611 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5616 /* Complete the command immediately: no ramrods have been sent. */
5617 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5622 static inline int bnx2x_func_send_start(struct bnx2x *bp,
5623 struct bnx2x_func_state_params *params)
5625 struct bnx2x_func_sp_obj *o = params->f_obj;
5626 struct function_start_data *rdata =
5627 (struct function_start_data *)o->rdata;
5628 dma_addr_t data_mapping = o->rdata_mapping;
5629 struct bnx2x_func_start_params *start_params = &params->params.start;
5631 memset(rdata, 0, sizeof(*rdata));
5633 /* Fill the ramrod data with provided parameters */
5634 rdata->function_mode = (u8)start_params->mf_mode;
5635 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
5636 rdata->path_id = BP_PATH(bp);
5637 rdata->network_cos_mode = start_params->network_cos_mode;
5640 * No need for an explicit memory barrier here as long as we would
5641 * need to ensure the ordering of writing to the SPQ element
5642 * and updating of the SPQ producer which involves a memory
5643 * read and we will have to put a full memory barrier there
5644 * (inside bnx2x_sp_post()).
5647 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5648 U64_HI(data_mapping),
5649 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5652 static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5653 struct bnx2x_func_state_params *params)
5655 struct bnx2x_func_sp_obj *o = params->f_obj;
5656 struct function_update_data *rdata =
5657 (struct function_update_data *)o->rdata;
5658 dma_addr_t data_mapping = o->rdata_mapping;
5659 struct bnx2x_func_switch_update_params *switch_update_params =
5660 &params->params.switch_update;
5662 memset(rdata, 0, sizeof(*rdata));
5664 /* Fill the ramrod data with provided parameters */
5665 rdata->tx_switch_suspend_change_flg = 1;
5666 rdata->tx_switch_suspend = switch_update_params->suspend;
5667 rdata->echo = SWITCH_UPDATE;
5669 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5670 U64_HI(data_mapping),
5671 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5674 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5675 struct bnx2x_func_state_params *params)
5677 struct bnx2x_func_sp_obj *o = params->f_obj;
5678 struct function_update_data *rdata =
5679 (struct function_update_data *)o->afex_rdata;
5680 dma_addr_t data_mapping = o->afex_rdata_mapping;
5681 struct bnx2x_func_afex_update_params *afex_update_params =
5682 &params->params.afex_update;
5684 memset(rdata, 0, sizeof(*rdata));
5686 /* Fill the ramrod data with provided parameters */
5687 rdata->vif_id_change_flg = 1;
5688 rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5689 rdata->afex_default_vlan_change_flg = 1;
5690 rdata->afex_default_vlan =
5691 cpu_to_le16(afex_update_params->afex_default_vlan);
5692 rdata->allowed_priorities_change_flg = 1;
5693 rdata->allowed_priorities = afex_update_params->allowed_priorities;
5694 rdata->echo = AFEX_UPDATE;
5696 /* No need for an explicit memory barrier here as long as we would
5697 * need to ensure the ordering of writing to the SPQ element
5698 * and updating of the SPQ producer which involves a memory
5699 * read and we will have to put a full memory barrier there
5700 * (inside bnx2x_sp_post()).
5703 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5705 rdata->afex_default_vlan, rdata->allowed_priorities);
5707 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5708 U64_HI(data_mapping),
5709 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5713 inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5714 struct bnx2x_func_state_params *params)
5716 struct bnx2x_func_sp_obj *o = params->f_obj;
5717 struct afex_vif_list_ramrod_data *rdata =
5718 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5719 struct bnx2x_func_afex_viflists_params *afex_viflist_params =
5720 &params->params.afex_viflists;
5721 u64 *p_rdata = (u64 *)rdata;
5723 memset(rdata, 0, sizeof(*rdata));
5725 /* Fill the ramrod data with provided parameters */
5726 rdata->vif_list_index = afex_viflist_params->vif_list_index;
5727 rdata->func_bit_map = afex_viflist_params->func_bit_map;
5728 rdata->afex_vif_list_command =
5729 afex_viflist_params->afex_vif_list_command;
5730 rdata->func_to_clear = afex_viflist_params->func_to_clear;
5732 /* send the sub-command type in the echo field */
5733 rdata->echo = afex_viflist_params->afex_vif_list_command;
5735 /* No need for an explicit memory barrier here as long as we would
5736 * need to ensure the ordering of writing to the SPQ element
5737 * and updating of the SPQ producer which involves a memory
5738 * read and we will have to put a full memory barrier there
5739 * (inside bnx2x_sp_post()).
5742 DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5743 rdata->afex_vif_list_command, rdata->vif_list_index,
5744 rdata->func_bit_map, rdata->func_to_clear);
5746 /* this ramrod sends data directly and not through DMA mapping */
5747 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5748 U64_HI(*p_rdata), U64_LO(*p_rdata),
5749 NONE_CONNECTION_TYPE);
5752 static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5753 struct bnx2x_func_state_params *params)
5755 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5756 NONE_CONNECTION_TYPE);
5759 static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5760 struct bnx2x_func_state_params *params)
5762 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5763 NONE_CONNECTION_TYPE);
5765 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5766 struct bnx2x_func_state_params *params)
5768 struct bnx2x_func_sp_obj *o = params->f_obj;
5769 struct flow_control_configuration *rdata =
5770 (struct flow_control_configuration *)o->rdata;
5771 dma_addr_t data_mapping = o->rdata_mapping;
5772 struct bnx2x_func_tx_start_params *tx_start_params =
5773 &params->params.tx_start;
5776 memset(rdata, 0, sizeof(*rdata));
5778 rdata->dcb_enabled = tx_start_params->dcb_enabled;
5779 rdata->dcb_version = tx_start_params->dcb_version;
5780 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5782 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5783 rdata->traffic_type_to_priority_cos[i] =
5784 tx_start_params->traffic_type_to_priority_cos[i];
5786 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5787 U64_HI(data_mapping),
5788 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5791 static int bnx2x_func_send_cmd(struct bnx2x *bp,
5792 struct bnx2x_func_state_params *params)
5794 switch (params->cmd) {
5795 case BNX2X_F_CMD_HW_INIT:
5796 return bnx2x_func_hw_init(bp, params);
5797 case BNX2X_F_CMD_START:
5798 return bnx2x_func_send_start(bp, params);
5799 case BNX2X_F_CMD_STOP:
5800 return bnx2x_func_send_stop(bp, params);
5801 case BNX2X_F_CMD_HW_RESET:
5802 return bnx2x_func_hw_reset(bp, params);
5803 case BNX2X_F_CMD_AFEX_UPDATE:
5804 return bnx2x_func_send_afex_update(bp, params);
5805 case BNX2X_F_CMD_AFEX_VIFLISTS:
5806 return bnx2x_func_send_afex_viflists(bp, params);
5807 case BNX2X_F_CMD_TX_STOP:
5808 return bnx2x_func_send_tx_stop(bp, params);
5809 case BNX2X_F_CMD_TX_START:
5810 return bnx2x_func_send_tx_start(bp, params);
5811 case BNX2X_F_CMD_SWITCH_UPDATE:
5812 return bnx2x_func_send_switch_update(bp, params);
5814 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5819 void bnx2x_init_func_obj(struct bnx2x *bp,
5820 struct bnx2x_func_sp_obj *obj,
5821 void *rdata, dma_addr_t rdata_mapping,
5822 void *afex_rdata, dma_addr_t afex_rdata_mapping,
5823 struct bnx2x_func_sp_drv_ops *drv_iface)
5825 memset(obj, 0, sizeof(*obj));
5827 mutex_init(&obj->one_pending_mutex);
5830 obj->rdata_mapping = rdata_mapping;
5831 obj->afex_rdata = afex_rdata;
5832 obj->afex_rdata_mapping = afex_rdata_mapping;
5833 obj->send_cmd = bnx2x_func_send_cmd;
5834 obj->check_transition = bnx2x_func_chk_transition;
5835 obj->complete_cmd = bnx2x_func_comp_cmd;
5836 obj->wait_comp = bnx2x_func_wait_comp;
5838 obj->drv = drv_iface;
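/*
 * Example (illustrative sketch, not part of the driver): the chip-specific
 * HW init/reset and firmware helpers are wired in through a
 * bnx2x_func_sp_drv_ops table and handed to bnx2x_init_func_obj() once at
 * setup time. The my_* helpers and the rdata/afex_rdata buffers below are
 * placeholders:
 *
 *	static struct bnx2x_func_sp_drv_ops my_drv_ops = {
 *		.init_hw_cmn_chip = my_init_hw_cmn_chip,
 *		.init_hw_cmn      = my_init_hw_cmn,
 *		.init_hw_port     = my_init_hw_port,
 *		.init_hw_func     = my_init_hw_func,
 *		.reset_hw_cmn     = my_reset_hw_cmn,
 *		.reset_hw_port    = my_reset_hw_port,
 *		.reset_hw_func    = my_reset_hw_func,
 *		.gunzip_init      = my_gunzip_init,
 *		.gunzip_end       = my_gunzip_end,
 *		.init_fw          = my_init_fw,
 *	};
 *
 *	bnx2x_init_func_obj(bp, &bp->func_obj, rdata, rdata_mapping,
 *			    afex_rdata, afex_rdata_mapping, &my_drv_ops);
 */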
5842 * bnx2x_func_state_change - perform Function state change transition
5844 * @bp: device handle
5845 * @params: parameters to perform the transaction
5847 * returns 0 in case of successfully completed transition,
5848 * negative error code in case of failure, positive
5849 * (EBUSY) value if there is a completion that is
5850 * still pending (possible only if RAMROD_COMP_WAIT is
5851 * not set in params->ramrod_flags for asynchronous
5854 int bnx2x_func_state_change(struct bnx2x *bp,
5855 struct bnx2x_func_state_params *params)
5857 struct bnx2x_func_sp_obj *o = params->f_obj;
5859 enum bnx2x_func_cmd cmd = params->cmd;
5860 unsigned long *pending = &o->pending;
5862 mutex_lock(&o->one_pending_mutex);
5864 /* Check that the requested transition is legal */
5865 rc = o->check_transition(bp, o, params);
5866 if ((rc == -EBUSY) &&
5867 (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
5868 while ((rc == -EBUSY) && (--cnt > 0)) {
5869 mutex_unlock(&o->one_pending_mutex);
5871 mutex_lock(&o->one_pending_mutex);
5872 rc = o->check_transition(bp, o, params);
5875 mutex_unlock(&o->one_pending_mutex);
5876 BNX2X_ERR("timeout waiting for previous ramrod completion\n");
5880 mutex_unlock(&o->one_pending_mutex);
5884 /* Set "pending" bit */
5885 set_bit(cmd, pending);
5887 /* Don't send a command if only driver cleanup was requested */
5888 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5889 bnx2x_func_state_change_comp(bp, o, cmd);
5890 mutex_unlock(&o->one_pending_mutex);
5893 rc = o->send_cmd(bp, params);
5895 mutex_unlock(&o->one_pending_mutex);
5898 o->next_state = BNX2X_F_STATE_MAX;
5899 clear_bit(cmd, pending);
5900 smp_mb__after_clear_bit();
5904 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5905 rc = o->wait_comp(bp, o, cmd);
5913 return !!test_bit(cmd, pending);
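/*
 * Example (illustrative sketch, not part of the driver): a typical
 * synchronous caller sets RAMROD_COMP_WAIT so the routine above also waits
 * for the ramrod completion, e.g. when starting the function after HW init
 * (bp->func_obj is a placeholder):
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *	struct bnx2x_func_start_params *start = &func_params.params.start;
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_START;
 *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	start->mf_mode = 0;
 *	start->sd_vlan_tag = 0;
 *	start->network_cos_mode = 0;
 *
 *	rc = bnx2x_func_state_change(bp, &func_params);
 */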