Linux 3.9-rc8
[firefly-linux-kernel-4.4.55.git] / drivers / net / ethernet / broadcom / bnx2x / bnx2x_sp.c
1 /* bnx2x_sp.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2011-2013 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16  * Written by: Vladislav Zolotarov
17  *
18  */
19
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/module.h>
23 #include <linux/crc32.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/crc32c.h>
27 #include "bnx2x.h"
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_sp.h"
30
31 #define BNX2X_MAX_EMUL_MULTI            16
32
33 #define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
34
35 /**** Exe Queue interfaces ****/
36
/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @bp:         driver handle
 * @o:          pointer to the object
 * @exe_len:    length
 * @owner:      pointer to the owner
 * @validate:   validate function pointer
 * @remove:     remove function pointer
 * @optimize:   optimize function pointer
 * @exec:       execute function pointer
 * @get:        get function pointer
 */
48 static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
49                                         struct bnx2x_exe_queue_obj *o,
50                                         int exe_len,
51                                         union bnx2x_qable_obj *owner,
52                                         exe_q_validate validate,
53                                         exe_q_remove remove,
54                                         exe_q_optimize optimize,
55                                         exe_q_execute exec,
56                                         exe_q_get get)
57 {
58         memset(o, 0, sizeof(*o));
59
60         INIT_LIST_HEAD(&o->exe_queue);
61         INIT_LIST_HEAD(&o->pending_comp);
62
63         spin_lock_init(&o->lock);
64
65         o->exe_chunk_len = exe_len;
66         o->owner         = owner;
67
68         /* Owner specific callbacks */
69         o->validate      = validate;
70         o->remove        = remove;
71         o->optimize      = optimize;
72         o->execute       = exec;
73         o->get           = get;
74
75         DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
76            exe_len);
77 }
78
79 static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
80                                              struct bnx2x_exeq_elem *elem)
81 {
82         DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
83         kfree(elem);
84 }
85
86 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
87 {
88         struct bnx2x_exeq_elem *elem;
89         int cnt = 0;
90
91         spin_lock_bh(&o->lock);
92
93         list_for_each_entry(elem, &o->exe_queue, link)
94                 cnt++;
95
96         spin_unlock_bh(&o->lock);
97
98         return cnt;
99 }
100
101 /**
102  * bnx2x_exe_queue_add - add a new element to the execution queue
103  *
104  * @bp:         driver handle
105  * @o:          queue
106  * @cmd:        new command to add
107  * @restore:    true - do not optimize the command
108  *
109  * If the element is optimized or is illegal, frees it.
110  */
111 static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
112                                       struct bnx2x_exe_queue_obj *o,
113                                       struct bnx2x_exeq_elem *elem,
114                                       bool restore)
115 {
116         int rc;
117
118         spin_lock_bh(&o->lock);
119
120         if (!restore) {
121                 /* Try to cancel this element queue */
122                 rc = o->optimize(bp, o->owner, elem);
123                 if (rc)
124                         goto free_and_exit;
125
126                 /* Check if this request is ok */
127                 rc = o->validate(bp, o->owner, elem);
128                 if (rc) {
129                         DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
130                         goto free_and_exit;
131                 }
132         }
133
134         /* If so, add it to the execution queue */
135         list_add_tail(&elem->link, &o->exe_queue);
136
137         spin_unlock_bh(&o->lock);
138
139         return 0;
140
141 free_and_exit:
142         bnx2x_exe_queue_free_elem(bp, elem);
143
144         spin_unlock_bh(&o->lock);
145
146         return rc;
147
148 }
149
150 static inline void __bnx2x_exe_queue_reset_pending(
151         struct bnx2x *bp,
152         struct bnx2x_exe_queue_obj *o)
153 {
154         struct bnx2x_exeq_elem *elem;
155
156         while (!list_empty(&o->pending_comp)) {
157                 elem = list_first_entry(&o->pending_comp,
158                                         struct bnx2x_exeq_elem, link);
159
160                 list_del(&elem->link);
161                 bnx2x_exe_queue_free_elem(bp, elem);
162         }
163 }
164
165 static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
166                                                  struct bnx2x_exe_queue_obj *o)
167 {
168
169         spin_lock_bh(&o->lock);
170
171         __bnx2x_exe_queue_reset_pending(bp, o);
172
173         spin_unlock_bh(&o->lock);
174
175 }
176
/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp:                 driver handle
 * @o:                  queue
 * @ramrod_flags:       flags
 *
 * (Atomicity is ensured using the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	spin_lock_bh(&o->lock);

	/*
	 * Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the FW
	 * which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			/* A previous chunk is still in flight - tell the
			 * caller to retry after its completion arrives.
			 */
			spin_unlock_bh(&o->lock);
			return 1;
		}
	}

	/*
	 * Run through the pending commands list and create a next
	 * execution chunk: move elements into pending_comp until the
	 * chunk budget (exe_chunk_len) would be exceeded.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		WARN_ON(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/*
			 * Prevent from both lists being empty when moving an
			 * element. This will allow the call of
			 * bnx2x_exe_queue_empty() without locking.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_move_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Sanity check: nothing fitted in the chunk - nothing to execute */
	if (!cur_len) {
		spin_unlock_bh(&o->lock);
		return 0;
	}

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/*
		 *  In case of an error return the commands back to the queue
		 *  and reset the pending_comp.
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/*
		 * If zero is returned, means there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
	return rc;
}
261
static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
	/* Lock-free emptiness test. bnx2x_exe_queue_step() guarantees (via
	 * its "spacer" element) that both lists are never simultaneously
	 * empty while an element is in flight between them.
	 */
	bool empty = list_empty(&o->exe_queue);

	/* Don't reorder!!! Pairs with the mb() in bnx2x_exe_queue_step(). */
	mb();

	return empty && list_empty(&o->pending_comp);
}
271
272 static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
273         struct bnx2x *bp)
274 {
275         DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
276         return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
277 }
278
279 /************************ raw_obj functions ***********************************/
280 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
281 {
282         return !!test_bit(o->state, o->pstate);
283 }
284
static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
	/* Clear the pending-state bit with full barriers on both sides so
	 * the transition is visible to waiters (see bnx2x_state_wait()).
	 */
	smp_mb__before_clear_bit();
	clear_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}
291
static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
	/* Set the pending-state bit with full barriers on both sides.
	 * NOTE(review): the *_clear_bit barrier flavours are used around
	 * set_bit() as well - on this kernel they are the generic
	 * smp_mb__* bit-op barriers; confirm naming if ported forward.
	 */
	smp_mb__before_clear_bit();
	set_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}
298
299 /**
300  * bnx2x_state_wait - wait until the given bit(state) is cleared
301  *
302  * @bp:         device handle
303  * @state:      state which is to be cleared
304  * @state_p:    state buffer
305  *
306  */
307 static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
308                                    unsigned long *pstate)
309 {
310         /* can take a while if any port is running */
311         int cnt = 5000;
312
313
314         if (CHIP_REV_IS_EMUL(bp))
315                 cnt *= 20;
316
317         DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
318
319         might_sleep();
320         while (cnt--) {
321                 if (!test_bit(state, pstate)) {
322 #ifdef BNX2X_STOP_ON_ERROR
323                         DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
324 #endif
325                         return 0;
326                 }
327
328                 usleep_range(1000, 2000);
329
330                 if (bp->panic)
331                         return -EIO;
332         }
333
334         /* timeout! */
335         BNX2X_ERR("timeout waiting for state %d\n", state);
336 #ifdef BNX2X_STOP_ON_ERROR
337         bnx2x_panic();
338 #endif
339
340         return -EBUSY;
341 }
342
/* Sleep until the raw object's pending bit is cleared (or timeout) */
static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
}
347
348 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
349 /* credit handling callbacks */
350 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
351 {
352         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
353
354         WARN_ON(!mp);
355
356         return mp->get_entry(mp, offset);
357 }
358
359 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
360 {
361         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
362
363         WARN_ON(!mp);
364
365         return mp->get(mp, 1);
366 }
367
368 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
369 {
370         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
371
372         WARN_ON(!vp);
373
374         return vp->get_entry(vp, offset);
375 }
376
377 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
378 {
379         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
380
381         WARN_ON(!vp);
382
383         return vp->get(vp, 1);
384 }
385
386 static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
387 {
388         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
389         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
390
391         if (!mp->get(mp, 1))
392                 return false;
393
394         if (!vp->get(vp, 1)) {
395                 mp->put(mp, 1);
396                 return false;
397         }
398
399         return true;
400 }
401
402 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
403 {
404         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
405
406         return mp->put_entry(mp, offset);
407 }
408
409 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
410 {
411         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
412
413         return mp->put(mp, 1);
414 }
415
416 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
417 {
418         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
419
420         return vp->put_entry(vp, offset);
421 }
422
423 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
424 {
425         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
426
427         return vp->put(vp, 1);
428 }
429
430 static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
431 {
432         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
433         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
434
435         if (!mp->put(mp, 1))
436                 return false;
437
438         if (!vp->put(vp, 1)) {
439                 mp->get(mp, 1);
440                 return false;
441         }
442
443         return true;
444 }
445
446 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
447                                 int n, u8 *buf)
448 {
449         struct bnx2x_vlan_mac_registry_elem *pos;
450         u8 *next = buf;
451         int counter = 0;
452
453         /* traverse list */
454         list_for_each_entry(pos, &o->head, link) {
455                 if (counter < n) {
456                         /* place leading zeroes in buffer */
457                         memset(next, 0, MAC_LEADING_ZERO_CNT);
458
459                         /* place mac after leading zeroes*/
460                         memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
461                                ETH_ALEN);
462
463                         /* calculate address of next element and
464                          * advance counter
465                          */
466                         counter++;
467                         next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
468
469                         DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
470                            counter, next, pos->u.mac.mac);
471                 }
472         }
473         return counter * ETH_ALEN;
474 }
475
476 /* check_add() callbacks */
477 static int bnx2x_check_mac_add(struct bnx2x *bp,
478                                struct bnx2x_vlan_mac_obj *o,
479                                union bnx2x_classification_ramrod_data *data)
480 {
481         struct bnx2x_vlan_mac_registry_elem *pos;
482
483         DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
484
485         if (!is_valid_ether_addr(data->mac.mac))
486                 return -EINVAL;
487
488         /* Check if a requested MAC already exists */
489         list_for_each_entry(pos, &o->head, link)
490                 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
491                         return -EEXIST;
492
493         return 0;
494 }
495
496 static int bnx2x_check_vlan_add(struct bnx2x *bp,
497                                 struct bnx2x_vlan_mac_obj *o,
498                                 union bnx2x_classification_ramrod_data *data)
499 {
500         struct bnx2x_vlan_mac_registry_elem *pos;
501
502         DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
503
504         list_for_each_entry(pos, &o->head, link)
505                 if (data->vlan.vlan == pos->u.vlan.vlan)
506                         return -EEXIST;
507
508         return 0;
509 }
510
511 static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
512                                     struct bnx2x_vlan_mac_obj *o,
513                                    union bnx2x_classification_ramrod_data *data)
514 {
515         struct bnx2x_vlan_mac_registry_elem *pos;
516
517         DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
518            data->vlan_mac.mac, data->vlan_mac.vlan);
519
520         list_for_each_entry(pos, &o->head, link)
521                 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
522                     (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
523                              ETH_ALEN)))
524                         return -EEXIST;
525
526         return 0;
527 }
528
529
530 /* check_del() callbacks */
531 static struct bnx2x_vlan_mac_registry_elem *
532         bnx2x_check_mac_del(struct bnx2x *bp,
533                             struct bnx2x_vlan_mac_obj *o,
534                             union bnx2x_classification_ramrod_data *data)
535 {
536         struct bnx2x_vlan_mac_registry_elem *pos;
537
538         DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
539
540         list_for_each_entry(pos, &o->head, link)
541                 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
542                         return pos;
543
544         return NULL;
545 }
546
547 static struct bnx2x_vlan_mac_registry_elem *
548         bnx2x_check_vlan_del(struct bnx2x *bp,
549                              struct bnx2x_vlan_mac_obj *o,
550                              union bnx2x_classification_ramrod_data *data)
551 {
552         struct bnx2x_vlan_mac_registry_elem *pos;
553
554         DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
555
556         list_for_each_entry(pos, &o->head, link)
557                 if (data->vlan.vlan == pos->u.vlan.vlan)
558                         return pos;
559
560         return NULL;
561 }
562
563 static struct bnx2x_vlan_mac_registry_elem *
564         bnx2x_check_vlan_mac_del(struct bnx2x *bp,
565                                  struct bnx2x_vlan_mac_obj *o,
566                                  union bnx2x_classification_ramrod_data *data)
567 {
568         struct bnx2x_vlan_mac_registry_elem *pos;
569
570         DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
571            data->vlan_mac.mac, data->vlan_mac.vlan);
572
573         list_for_each_entry(pos, &o->head, link)
574                 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
575                     (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
576                              ETH_ALEN)))
577                         return pos;
578
579         return NULL;
580 }
581
582 /* check_move() callback */
583 static bool bnx2x_check_move(struct bnx2x *bp,
584                              struct bnx2x_vlan_mac_obj *src_o,
585                              struct bnx2x_vlan_mac_obj *dst_o,
586                              union bnx2x_classification_ramrod_data *data)
587 {
588         struct bnx2x_vlan_mac_registry_elem *pos;
589         int rc;
590
591         /* Check if we can delete the requested configuration from the first
592          * object.
593          */
594         pos = src_o->check_del(bp, src_o, data);
595
596         /*  check if configuration can be added */
597         rc = dst_o->check_add(bp, dst_o, data);
598
599         /* If this classification can not be added (is already set)
600          * or can't be deleted - return an error.
601          */
602         if (rc || !pos)
603                 return false;
604
605         return true;
606 }
607
/* check_move() callback for objects on which MOVE is never permitted */
static bool bnx2x_check_move_always_err(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *src_o,
	struct bnx2x_vlan_mac_obj *dst_o,
	union bnx2x_classification_ramrod_data *data)
{
	return false;
}
616
617
618 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
619 {
620         struct bnx2x_raw_obj *raw = &o->raw;
621         u8 rx_tx_flag = 0;
622
623         if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
624             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
625                 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
626
627         if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
628             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
629                 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
630
631         return rx_tx_flag;
632 }
633
634
/* Program (or disable) a per-function LLH CAM MAC entry in the NIG block */
void bnx2x_set_mac_in_nig(struct bnx2x *bp,
			  bool add, unsigned char *dev_addr, int index)
{
	u32 wb_data[2];
	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
			 NIG_REG_LLH0_FUNC_MEM;

	/* Only relevant for switch-independent and AFEX MF modes */
	if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
		return;

	if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
		return;

	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
			 (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a u64 WB register */
		reg_offset += 8*index;

		/* Pack the 6-byte MAC into two 32-bit words: word 0 holds
		 * the low four bytes, word 1 the high two.
		 */
		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] <<  8) |  dev_addr[5]);
		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);

		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
	}

	/* Write the enable bit; on DELETE only this bit is cleared and the
	 * CAM contents are left unchanged.
	 */
	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}
665
666 /**
667  * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
668  *
669  * @bp:         device handle
670  * @o:          queue for which we want to configure this rule
671  * @add:        if true the command is an ADD command, DEL otherwise
672  * @opcode:     CLASSIFY_RULE_OPCODE_XXX
673  * @hdr:        pointer to a header to setup
674  *
675  */
676 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
677         struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
678         struct eth_classify_cmd_header *hdr)
679 {
680         struct bnx2x_raw_obj *raw = &o->raw;
681
682         hdr->client_id = raw->cl_id;
683         hdr->func_id = raw->func_id;
684
685         /* Rx or/and Tx (internal switching) configuration ? */
686         hdr->cmd_general_data |=
687                 bnx2x_vlan_mac_get_rx_tx_flag(o);
688
689         if (add)
690                 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
691
692         hdr->cmd_general_data |=
693                 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
694 }
695
/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:        connection id
 * @type:       BNX2X_FILTER_XXX_PENDING
 * @hdr:        pointer to header to setup
 * @rule_cnt:   number of rules configured in the ramrod data buffer
 *
 * currently we always configure one rule and echo field to contain a CID and an
 * opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
				struct eth_classify_header *hdr, int rule_cnt)
{
	/* echo round-trips through the FW completion: SW CID in the low
	 * bits, pending-filter type above them
	 */
	hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
				(type << BNX2X_SWCID_SHIFT));
	hdr->rule_cnt = (u8)rule_cnt;
}
714
715
716 /* hw_config() callbacks */
/* hw_config() callback: fill one (or, for MOVE, two) MAC classification
 * rules in the E2 ramrod data buffer and update the NIG LLH CAM when the
 * MAC is an ETH or iSCSI primary MAC.
 */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 struct bnx2x_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/*
	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When multiple unicast ETH MACs PF configuration in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of 8 per function MAC
	 * entries in the LLH register. There is also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != BNX2X_VLAN_MAC_MOVE) {
		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ISCSI_ETH_LINE);
		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Setup a command header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
	   (add ? "add" : "delete"), mac, raw->cl_id);

	/* Set a MAC itself */
	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data: second rule ADDs the MAC on the
		 * destination object
		 */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_MAC,
					      &rule_entry->mac.header);

		/* Set a MAC itself */
		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
797
/**
 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @bp:         device handle
 * @o:          queue
 * @type:       pending-filter type (callers pass raw->state)
 * @cam_offset: offset in cam memory
 * @hdr:        pointer to a header to setup
 *
 * E1/E1H
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
	struct mac_configuration_hdr *hdr)
{
	struct bnx2x_raw_obj *r = &o->raw;

	/* A single CAM entry is configured per ramrod */
	hdr->length = 1;
	hdr->offset = (u8)cam_offset;
	/* client id is hard-coded to 0xff for E1x MAC configuration */
	hdr->client_id = cpu_to_le16(0xff);
	/* echo returns in the completion: SW CID plus the pending type */
	hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
				(type << BNX2X_SWCID_SHIFT));
}
821
/* Fill a single E1x mac_configuration_entry: on @add the entry is marked
 * SET and carries the MAC; otherwise it is marked INVALIDATE.
 */
static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
{
	struct bnx2x_raw_obj *r = &o->raw;
	/* bitmap with only this object's client bit set */
	u32 cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = cpu_to_le16(vlan_id);

	if (add) {
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
		/* opcode here is the VLAN filtering mode (ETH_VLAN_FILTER_*) */
		SET_FLAG(cfg_entry->flags,
			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);

		/* Set a MAC in a ramrod data */
		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);
}
847
/* Build a complete E1x MAC configuration ramrod: header plus the single
 * config-table entry.
 */
static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
{
	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
	struct bnx2x_raw_obj *raw = &o->raw;

	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
					 &config->hdr);
	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
					 cfg_entry);

	DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
			 (add ? "setting" : "clearing"),
			 mac, raw->cl_id, cam_offset);
}
864
865 /**
866  * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
867  *
868  * @bp:         device handle
869  * @o:          bnx2x_vlan_mac_obj
870  * @elem:       bnx2x_exeq_elem
871  * @rule_idx:   rule_idx
872  * @cam_offset: cam_offset
873  */
874 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
875                                   struct bnx2x_vlan_mac_obj *o,
876                                   struct bnx2x_exeq_elem *elem, int rule_idx,
877                                   int cam_offset)
878 {
879         struct bnx2x_raw_obj *raw = &o->raw;
880         struct mac_configuration_cmd *config =
881                 (struct mac_configuration_cmd *)(raw->rdata);
882         /*
883          * 57710 and 57711 do not support MOVE command,
884          * so it's either ADD or DEL
885          */
886         bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
887                 true : false;
888
889         /* Reset the ramrod data buffer */
890         memset(config, 0, sizeof(*config));
891
892         bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
893                                      cam_offset, add,
894                                      elem->cmd_data.vlan_mac.u.mac.mac, 0,
895                                      ETH_VLAN_FILTER_ANY_VLAN, config);
896 }
897
/**
 * bnx2x_set_one_vlan_e2 - fill a single VLAN rule ramrod data
 *
 * @bp:         device handle
 * @o:          vlan_mac object the rule belongs to
 * @elem:       command element to configure
 * @rule_idx:   index of this rule in the ramrod data buffer
 * @cam_offset: unused on E2 - classification is rule based, not CAM based
 *
 * MOVE consumes two consecutive rules: a DEL-like rule on the source
 * object followed by an ADD rule on the target object.
 */
static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
                                  struct bnx2x_vlan_mac_obj *o,
                                  struct bnx2x_exeq_elem *elem, int rule_idx,
                                  int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
                (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
        bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
        u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                memset(data, 0, sizeof(*data));

        /* Set a rule header */
        bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
                                      &rule_entry->vlan.header);

        DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
                         vlan);

        /* Set a VLAN itself */
        rule_entry->vlan.vlan = cpu_to_le16(vlan);

        /* MOVE: Add a rule that will add this VLAN to the target Queue */
        if (cmd == BNX2X_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data: an ADD rule on the target object */
                bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
                                        elem->cmd_data.vlan_mac.target_obj,
                                              true, CLASSIFY_RULE_OPCODE_VLAN,
                                              &rule_entry->vlan.header);

                /* Set a VLAN itself */
                rule_entry->vlan.vlan = cpu_to_le16(vlan);
        }

        /* Set the ramrod data header */
        /* TODO: take this to the higher level in order to prevent multiple
                 writing */
        bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}
947
/**
 * bnx2x_set_one_vlan_mac_e2 - fill a single VLAN-MAC pair rule ramrod data
 *
 * @bp:         device handle
 * @o:          vlan_mac object the rule belongs to
 * @elem:       command element to configure
 * @rule_idx:   index of this rule in the ramrod data buffer
 * @cam_offset: unused on E2 - classification is rule based, not CAM based
 *
 * MOVE consumes two consecutive rules: a DEL-like rule on the source
 * object followed by an ADD rule on the target object.
 */
static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
                                      struct bnx2x_vlan_mac_obj *o,
                                      struct bnx2x_exeq_elem *elem,
                                      int rule_idx, int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
                (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
        bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
        u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
        u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                memset(data, 0, sizeof(*data));

        /* Set a rule header */
        bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
                                      &rule_entry->pair.header);

        /* Set VLAN and MAC themselves */
        rule_entry->pair.vlan = cpu_to_le16(vlan);
        bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
                              &rule_entry->pair.mac_mid,
                              &rule_entry->pair.mac_lsb, mac);

        /* MOVE: Add a rule that will add this pair to the target Queue */
        if (cmd == BNX2X_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data: an ADD rule on the target object */
                bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
                                        elem->cmd_data.vlan_mac.target_obj,
                                              true, CLASSIFY_RULE_OPCODE_PAIR,
                                              &rule_entry->pair.header);

                /* Set VLAN and MAC for the second rule as well */
                rule_entry->pair.vlan = cpu_to_le16(vlan);
                bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
                                      &rule_entry->pair.mac_mid,
                                      &rule_entry->pair.mac_lsb, mac);
        }

        /* Set the ramrod data header */
        /* TODO: take this to the higher level in order to prevent multiple
                 writing */
        bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}
1002
/**
 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule (E1H) ramrod data
 *
 * @bp:         device handle
 * @o:          vlan_mac object the rule belongs to
 * @elem:       command element to configure
 * @rule_idx:   unused on E1H - a single entry per ramrod
 * @cam_offset: CAM line the entry is written to
 */
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
                                       struct bnx2x_vlan_mac_obj *o,
                                       struct bnx2x_exeq_elem *elem,
                                       int rule_idx, int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct mac_configuration_cmd *config =
                (struct mac_configuration_cmd *)(raw->rdata);
        /*
         * 57710 and 57711 do not support MOVE command,
         * so it's either ADD or DEL
         */
        bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
                true : false;

        /* Reset the ramrod data buffer */
        memset(config, 0, sizeof(*config));

        bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
                                     cam_offset, add,
                                     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
                                     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
                                     ETH_VLAN_FILTER_CLASSIFY, config);
}
1036
/* Locally defined helper: return the list entry following @pos.
 * The caller must ensure @pos is not the tail (see bnx2x_vlan_mac_restore).
 */
#define list_next_entry(pos, member) \
        list_entry((pos)->member.next, typeof(*(pos)), member)
1039
/**
 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *
 * @bp:         device handle
 * @p:          command parameters
 * @ppos:       pointer to the cookie
 *
 * reconfigure next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
 * into an account
 *
 * pointer to the cookie - it should be given back in the next call to make
 * function handle the next element. If *ppos is set to NULL it will restart
 * the iterator. If returned *ppos == NULL this means that the last element
 * has been handled.
 *
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
                           struct bnx2x_vlan_mac_ramrod_params *p,
                           struct bnx2x_vlan_mac_registry_elem **ppos)
{
        struct bnx2x_vlan_mac_registry_elem *pos;
        struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

        /* If list is empty - there is nothing to do here */
        if (list_empty(&o->head)) {
                *ppos = NULL;
                return 0;
        }

        /* make a step: restart from the head or advance the cookie */
        if (*ppos == NULL)
                *ppos = list_first_entry(&o->head,
                                         struct bnx2x_vlan_mac_registry_elem,
                                         link);
        else
                *ppos = list_next_entry(*ppos, link);

        pos = *ppos;

        /* If it's the last step - return NULL */
        if (list_is_last(&pos->link, &o->head))
                *ppos = NULL;

        /* Prepare a 'user_req' from the stored registry element */
        memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

        /* Set the command */
        p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

        /* Set vlan_mac_flags */
        p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

        /* Set a restore bit */
        __set_bit(RAMROD_RESTORE, &p->ramrod_flags);

        return bnx2x_config_vlan_mac(bp, p);
}
1100
1101 /*
1102  * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1103  * pointer to an element with a specific criteria and NULL if such an element
1104  * hasn't been found.
1105  */
1106 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1107         struct bnx2x_exe_queue_obj *o,
1108         struct bnx2x_exeq_elem *elem)
1109 {
1110         struct bnx2x_exeq_elem *pos;
1111         struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1112
1113         /* Check pending for execution commands */
1114         list_for_each_entry(pos, &o->exe_queue, link)
1115                 if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1116                               sizeof(*data)) &&
1117                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1118                         return pos;
1119
1120         return NULL;
1121 }
1122
1123 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1124         struct bnx2x_exe_queue_obj *o,
1125         struct bnx2x_exeq_elem *elem)
1126 {
1127         struct bnx2x_exeq_elem *pos;
1128         struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1129
1130         /* Check pending for execution commands */
1131         list_for_each_entry(pos, &o->exe_queue, link)
1132                 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1133                               sizeof(*data)) &&
1134                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1135                         return pos;
1136
1137         return NULL;
1138 }
1139
1140 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1141         struct bnx2x_exe_queue_obj *o,
1142         struct bnx2x_exeq_elem *elem)
1143 {
1144         struct bnx2x_exeq_elem *pos;
1145         struct bnx2x_vlan_mac_ramrod_data *data =
1146                 &elem->cmd_data.vlan_mac.u.vlan_mac;
1147
1148         /* Check pending for execution commands */
1149         list_for_each_entry(pos, &o->exe_queue, link)
1150                 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1151                               sizeof(*data)) &&
1152                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1153                         return pos;
1154
1155         return NULL;
1156 }
1157
/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp:         device handle
 * @qo:         bnx2x_qable_obj
 * @elem:       bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 * Returns 0 on success, -EEXIST if an identical ADD is already pending,
 * -EINVAL if no CAM credit is available, or the registry check error.
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
                                              union bnx2x_qable_obj *qo,
                                              struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
        int rc;

        /* Check the registry */
        rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
        if (rc) {
                DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
                return rc;
        }

        /*
         * Check if there is a pending ADD command for this
         * MAC/VLAN/VLAN-MAC. Return an error if there is.
         */
        if (exeq->get(exeq, elem)) {
                DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
                return -EEXIST;
        }

        /*
         * TODO: Check the pending MOVE from other objects where this
         * object is a destination object.
         */

        /* Consume the credit if not requested not to */
        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
            o->get_credit(o)))
                return -EINVAL;

        return 0;
}
1208
/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp:         device handle
 * @qo:         queueable object to check
 * @elem:       element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
                                              union bnx2x_qable_obj *qo,
                                              struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
        struct bnx2x_vlan_mac_registry_elem *pos;
        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
        struct bnx2x_exeq_elem query_elem;

        /* If this classification can not be deleted (doesn't exist)
         * - return -EEXIST.
         */
        pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
        if (!pos) {
                DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
                return -EEXIST;
        }

        /*
         * Check if there are pending DEL or MOVE commands for this
         * MAC/VLAN/VLAN-MAC. Return an error if so.
         */
        memcpy(&query_elem, elem, sizeof(query_elem));

        /* Check for MOVE commands */
        query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
        if (exeq->get(exeq, &query_elem)) {
                BNX2X_ERR("There is a pending MOVE command already\n");
                return -EINVAL;
        }

        /* Check for DEL commands */
        if (exeq->get(exeq, elem)) {
                DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
                return -EEXIST;
        }

        /* Return the credit to the credit pool if not requested not to */
        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
            o->put_credit(o))) {
                BNX2X_ERR("Failed to return a credit\n");
                return -EINVAL;
        }

        return 0;
}
1268
/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp:         device handle
 * @qo:         queueable object to check (source)
 * @elem:       element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, consumes a CAM credit on the destination and returns one on
 * the source.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
                                               union bnx2x_qable_obj *qo,
                                               struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
        struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
        struct bnx2x_exeq_elem query_elem;
        struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
        struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

        /*
         * Check if we can perform this operation based on the current registry
         * state.
         */
        if (!src_o->check_move(bp, src_o, dest_o,
                               &elem->cmd_data.vlan_mac.u)) {
                DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
                return -EINVAL;
        }

        /*
         * Check if there is an already pending DEL or MOVE command for the
         * source object or ADD command for a destination object. Return an
         * error if so.
         */
        memcpy(&query_elem, elem, sizeof(query_elem));

        /* Check DEL on source */
        query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
        if (src_exeq->get(src_exeq, &query_elem)) {
                BNX2X_ERR("There is a pending DEL command on the source queue already\n");
                return -EINVAL;
        }

        /* Check MOVE on source */
        if (src_exeq->get(src_exeq, elem)) {
                DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
                return -EEXIST;
        }

        /* Check ADD on destination */
        query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
        if (dest_exeq->get(dest_exeq, &query_elem)) {
                BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
                return -EINVAL;
        }

        /* Consume the credit if not requested not to */
        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
            dest_o->get_credit(dest_o)))
                return -EINVAL;

        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
            src_o->put_credit(src_o))) {
                /* return the credit taken from dest... */
                dest_o->put_credit(dest_o);
                return -EINVAL;
        }

        return 0;
}
1344
1345 static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1346                                    union bnx2x_qable_obj *qo,
1347                                    struct bnx2x_exeq_elem *elem)
1348 {
1349         switch (elem->cmd_data.vlan_mac.cmd) {
1350         case BNX2X_VLAN_MAC_ADD:
1351                 return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1352         case BNX2X_VLAN_MAC_DEL:
1353                 return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1354         case BNX2X_VLAN_MAC_MOVE:
1355                 return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1356         default:
1357                 return -EINVAL;
1358         }
1359 }
1360
1361 static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1362                                   union bnx2x_qable_obj *qo,
1363                                   struct bnx2x_exeq_elem *elem)
1364 {
1365         int rc = 0;
1366
1367         /* If consumption wasn't required, nothing to do */
1368         if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1369                      &elem->cmd_data.vlan_mac.vlan_mac_flags))
1370                 return 0;
1371
1372         switch (elem->cmd_data.vlan_mac.cmd) {
1373         case BNX2X_VLAN_MAC_ADD:
1374         case BNX2X_VLAN_MAC_MOVE:
1375                 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1376                 break;
1377         case BNX2X_VLAN_MAC_DEL:
1378                 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1379                 break;
1380         default:
1381                 return -EINVAL;
1382         }
1383
1384         if (rc != true)
1385                 return -EINVAL;
1386
1387         return 0;
1388 }
1389
1390 /**
1391  * bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes.
1392  *
1393  * @bp:         device handle
1394  * @o:          bnx2x_vlan_mac_obj
1395  *
1396  */
1397 static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1398                                struct bnx2x_vlan_mac_obj *o)
1399 {
1400         int cnt = 5000, rc;
1401         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1402         struct bnx2x_raw_obj *raw = &o->raw;
1403
1404         while (cnt--) {
1405                 /* Wait for the current command to complete */
1406                 rc = raw->wait_comp(bp, raw);
1407                 if (rc)
1408                         return rc;
1409
1410                 /* Wait until there are no pending commands */
1411                 if (!bnx2x_exe_queue_empty(exeq))
1412                         usleep_range(1000, 2000);
1413                 else
1414                         return 0;
1415         }
1416
1417         return -EBUSY;
1418 }
1419
/**
 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @bp:           device handle
 * @o:            bnx2x_vlan_mac_obj
 * @cqe:          completion element of the finished ramrod
 * @ramrod_flags: if RAMROD_CONT is set, the next execution chunk is
 *                scheduled from here
 *
 * Returns a negative value on failure, 1 if more commands are still
 * pending in the execution queue and 0 when all work is done.
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
                                   struct bnx2x_vlan_mac_obj *o,
                                   union event_ring_elem *cqe,
                                   unsigned long *ramrod_flags)
{
        struct bnx2x_raw_obj *r = &o->raw;
        int rc;

        /* Reset pending list */
        bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

        /* Clear pending */
        r->clear_pending(r);

        /* If ramrod failed this is most likely a SW bug */
        if (cqe->message.error)
                return -EINVAL;

        /* Run the next bulk of pending commands if requested */
        if (test_bit(RAMROD_CONT, ramrod_flags)) {
                rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
                if (rc < 0)
                        return rc;
        }

        /* If there is more work to do return PENDING */
        if (!bnx2x_exe_queue_empty(&o->exe_queue))
                return 1;

        return 0;
}
1460
1461 /**
1462  * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1463  *
1464  * @bp:         device handle
1465  * @o:          bnx2x_qable_obj
1466  * @elem:       bnx2x_exeq_elem
1467  */
1468 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1469                                    union bnx2x_qable_obj *qo,
1470                                    struct bnx2x_exeq_elem *elem)
1471 {
1472         struct bnx2x_exeq_elem query, *pos;
1473         struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1474         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1475
1476         memcpy(&query, elem, sizeof(query));
1477
1478         switch (elem->cmd_data.vlan_mac.cmd) {
1479         case BNX2X_VLAN_MAC_ADD:
1480                 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1481                 break;
1482         case BNX2X_VLAN_MAC_DEL:
1483                 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1484                 break;
1485         default:
1486                 /* Don't handle anything other than ADD or DEL */
1487                 return 0;
1488         }
1489
1490         /* If we found the appropriate element - delete it */
1491         pos = exeq->get(exeq, &query);
1492         if (pos) {
1493
1494                 /* Return the credit of the optimized command */
1495                 if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1496                               &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1497                         if ((query.cmd_data.vlan_mac.cmd ==
1498                              BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1499                                 BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
1500                                 return -EINVAL;
1501                         } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1502                                 BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
1503                                 return -EINVAL;
1504                         }
1505                 }
1506
1507                 DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1508                            (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1509                            "ADD" : "DEL");
1510
1511                 list_del(&pos->link);
1512                 bnx2x_exe_queue_free_elem(bp, pos);
1513                 return 1;
1514         }
1515
1516         return 0;
1517 }
1518
/**
 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
 *
 * @bp:      device handle
 * @o:       vlan_mac object whose registry and CAM pool are used
 * @elem:    command the element is prepared for
 * @restore: true when running a RESTORE flow (element already registered)
 * @re:      output - the prepared (ADD/MOVE) or looked-up (DEL/RESTORE)
 *           registry element
 *
 * prepare a registry element according to the current command request:
 * allocate a fresh element and a CAM offset for a non-restore ADD/MOVE,
 * otherwise find the existing element in the registry.
 */
static inline int bnx2x_vlan_mac_get_registry_elem(
        struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *o,
        struct bnx2x_exeq_elem *elem,
        bool restore,
        struct bnx2x_vlan_mac_registry_elem **re)
{
        enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
        struct bnx2x_vlan_mac_registry_elem *reg_elem;

        /* Allocate a new registry element if needed. */
        if (!restore &&
            ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
                reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
                if (!reg_elem)
                        return -ENOMEM;

                /* Get a new CAM offset */
                if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
                        /*
                         * This should never happen, because we have checked
                         * the CAM availability in the 'validate'.
                         */
                        WARN_ON(1);
                        kfree(reg_elem);
                        return -EINVAL;
                }

                DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);

                /* Set a VLAN-MAC data */
                memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
                          sizeof(reg_elem->u));

                /* Copy the flags (needed for DEL and RESTORE flows) */
                reg_elem->vlan_mac_flags =
                        elem->cmd_data.vlan_mac.vlan_mac_flags;
        } else /* DEL, RESTORE */
                reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);

        *re = reg_elem;
        return 0;
}
1573
/**
 * bnx2x_execute_vlan_mac - execute vlan mac command
 *
 * @bp:           device handle
 * @qo:           object the commands are executed on
 * @exe_chunk:    list of commands to post in a single ramrod
 * @ramrod_flags: RAMROD_RESTORE and RAMROD_DRV_CLR_ONLY are honoured here
 *
 * go and send a ramrod! Updates the object's registry to mirror the
 * commands being posted; rolls the registry back on failure.
 *
 * Returns 1 when a ramrod was posted (completion pending), 0 for the
 * driver-only flow and a negative value on error.
 */
static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
                                  union bnx2x_qable_obj *qo,
                                  struct list_head *exe_chunk,
                                  unsigned long *ramrod_flags)
{
        struct bnx2x_exeq_elem *elem;
        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
        struct bnx2x_raw_obj *r = &o->raw;
        int rc, idx = 0;
        bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
        bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
        struct bnx2x_vlan_mac_registry_elem *reg_elem;
        enum bnx2x_vlan_mac_cmd cmd;

        /*
         * If DRIVER_ONLY execution is requested, cleanup a registry
         * and exit. Otherwise send a ramrod to FW.
         */
        if (!drv_only) {
                WARN_ON(r->check_pending(r));

                /* Set pending */
                r->set_pending(r);

                /* Fill the ramrod data */
                list_for_each_entry(elem, exe_chunk, link) {
                        cmd = elem->cmd_data.vlan_mac.cmd;
                        /*
                         * We will add to the target object in MOVE command, so
                         * change the object for a CAM search.
                         */
                        if (cmd == BNX2X_VLAN_MAC_MOVE)
                                cam_obj = elem->cmd_data.vlan_mac.target_obj;
                        else
                                cam_obj = o;

                        rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
                                                              elem, restore,
                                                              &reg_elem);
                        if (rc)
                                goto error_exit;

                        WARN_ON(!reg_elem);

                        /* Push a new entry into the registry */
                        if (!restore &&
                            ((cmd == BNX2X_VLAN_MAC_ADD) ||
                            (cmd == BNX2X_VLAN_MAC_MOVE)))
                                list_add(&reg_elem->link, &cam_obj->head);

                        /* Configure a single command in a ramrod data buffer */
                        o->set_one_rule(bp, o, elem, idx,
                                        reg_elem->cam_offset);

                        /* MOVE command consumes 2 entries in the ramrod data */
                        if (cmd == BNX2X_VLAN_MAC_MOVE)
                                idx += 2;
                        else
                                idx++;
                }

                /*
                 *  No need for an explicit memory barrier here as long we would
                 *  need to ensure the ordering of writing to the SPQ element
                 *  and updating of the SPQ producer which involves a memory
                 *  read and we will have to put a full memory barrier there
                 *  (inside bnx2x_sp_post()).
                 */

                rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
                                   U64_HI(r->rdata_mapping),
                                   U64_LO(r->rdata_mapping),
                                   ETH_CONNECTION_TYPE);
                if (rc)
                        goto error_exit;
        }

        /* Now, when we are done with the ramrod - clean up the registry:
         * drop the entries removed by DEL/MOVE and recycle their CAM lines.
         */
        list_for_each_entry(elem, exe_chunk, link) {
                cmd = elem->cmd_data.vlan_mac.cmd;
                if ((cmd == BNX2X_VLAN_MAC_DEL) ||
                    (cmd == BNX2X_VLAN_MAC_MOVE)) {
                        reg_elem = o->check_del(bp, o,
                                                &elem->cmd_data.vlan_mac.u);

                        WARN_ON(!reg_elem);

                        o->put_cam_offset(o, reg_elem->cam_offset);
                        list_del(&reg_elem->link);
                        kfree(reg_elem);
                }
        }

        if (!drv_only)
                return 1;
        else
                return 0;

error_exit:
        r->clear_pending(r);

        /* Cleanup a registry in case of a failure */
        list_for_each_entry(elem, exe_chunk, link) {
                cmd = elem->cmd_data.vlan_mac.cmd;

                if (cmd == BNX2X_VLAN_MAC_MOVE)
                        cam_obj = elem->cmd_data.vlan_mac.target_obj;
                else
                        cam_obj = o;

                /* Delete all newly added above entries */
                if (!restore &&
                    ((cmd == BNX2X_VLAN_MAC_ADD) ||
                    (cmd == BNX2X_VLAN_MAC_MOVE))) {
                        reg_elem = o->check_del(bp, cam_obj,
                                                &elem->cmd_data.vlan_mac.u);
                        if (reg_elem) {
                                list_del(&reg_elem->link);
                                kfree(reg_elem);
                        }
                }
        }

        return rc;
}
1709
1710 static inline int bnx2x_vlan_mac_push_new_cmd(
1711         struct bnx2x *bp,
1712         struct bnx2x_vlan_mac_ramrod_params *p)
1713 {
1714         struct bnx2x_exeq_elem *elem;
1715         struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1716         bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1717
1718         /* Allocate the execution queue element */
1719         elem = bnx2x_exe_queue_alloc_elem(bp);
1720         if (!elem)
1721                 return -ENOMEM;
1722
1723         /* Set the command 'length' */
1724         switch (p->user_req.cmd) {
1725         case BNX2X_VLAN_MAC_MOVE:
1726                 elem->cmd_len = 2;
1727                 break;
1728         default:
1729                 elem->cmd_len = 1;
1730         }
1731
1732         /* Fill the object specific info */
1733         memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1734
1735         /* Try to add a new command to the pending list */
1736         return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1737 }
1738
/**
 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @bp:   device handle
 * @p:    command parameters: the vlan_mac object to operate on, the user
 *        request and the RAMROD_XXX execution flags
 *
 */
int bnx2x_config_vlan_mac(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	int rc = 0;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	/* RAMROD_CONT means "only drive the already-queued commands";
	 * no new command is pushed in that case.
	 */
	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
	struct bnx2x_raw_obj *raw = &o->raw;

	/*
	 * Add new elements to the execution list for commands that require it.
	 */
	if (!cont) {
		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
		if (rc)
			return rc;
	}

	/*
	 * If nothing will be executed further in this iteration we want to
	 * return PENDING (rc == 1) if there are pending commands
	 */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		rc = 1;

	/* Driver-only cleanup: drop the pending bit without touching HW */
	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/*
	 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then user want to wait until the last command is done.
	 */
	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/*
		 * Wait maximum for the current exe_queue length iterations plus
		 * one (for the current pending command).
		 */
		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;

		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(bp, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
						  ramrod_flags);
			if (rc < 0)
				return rc;
		}

		/* Queue fully drained (or iteration budget spent) */
		return 0;
	}

	/* 0 = all done, 1 = commands still pending, <0 never reaches here */
	return rc;
}
1816
1817
1818
/**
 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @bp:                 device handle
 * @o:                  vlan_mac object whose entries are to be deleted
 * @vlan_mac_flags:     selects which configured elements to delete
 * @ramrod_flags:       execution flags to be used for this deletion
 *
 * Returns 0 if the last operation has completed successfully and there are
 * no more elements left, a positive value if the last operation has completed
 * successfully and there are more previously configured elements, or a
 * negative value if the current operation has failed.
 */
1832 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1833                                   struct bnx2x_vlan_mac_obj *o,
1834                                   unsigned long *vlan_mac_flags,
1835                                   unsigned long *ramrod_flags)
1836 {
1837         struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1838         int rc = 0;
1839         struct bnx2x_vlan_mac_ramrod_params p;
1840         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1841         struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1842
1843         /* Clear pending commands first */
1844
1845         spin_lock_bh(&exeq->lock);
1846
1847         list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1848                 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1849                     *vlan_mac_flags) {
1850                         rc = exeq->remove(bp, exeq->owner, exeq_pos);
1851                         if (rc) {
1852                                 BNX2X_ERR("Failed to remove command\n");
1853                                 spin_unlock_bh(&exeq->lock);
1854                                 return rc;
1855                         }
1856                         list_del(&exeq_pos->link);
1857                 }
1858         }
1859
1860         spin_unlock_bh(&exeq->lock);
1861
1862         /* Prepare a command request */
1863         memset(&p, 0, sizeof(p));
1864         p.vlan_mac_obj = o;
1865         p.ramrod_flags = *ramrod_flags;
1866         p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1867
1868         /*
1869          * Add all but the last VLAN-MAC to the execution queue without actually
1870          * execution anything.
1871          */
1872         __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1873         __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1874         __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1875
1876         list_for_each_entry(pos, &o->head, link) {
1877                 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1878                         p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1879                         memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1880                         rc = bnx2x_config_vlan_mac(bp, &p);
1881                         if (rc < 0) {
1882                                 BNX2X_ERR("Failed to add a new DEL command\n");
1883                                 return rc;
1884                         }
1885                 }
1886         }
1887
1888         p.ramrod_flags = *ramrod_flags;
1889         __set_bit(RAMROD_CONT, &p.ramrod_flags);
1890
1891         return bnx2x_config_vlan_mac(bp, &p);
1892 }
1893
1894 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1895         u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1896         unsigned long *pstate, bnx2x_obj_type type)
1897 {
1898         raw->func_id = func_id;
1899         raw->cid = cid;
1900         raw->cl_id = cl_id;
1901         raw->rdata = rdata;
1902         raw->rdata_mapping = rdata_mapping;
1903         raw->state = state;
1904         raw->pstate = pstate;
1905         raw->obj_type = type;
1906         raw->check_pending = bnx2x_raw_check_pending;
1907         raw->clear_pending = bnx2x_raw_clear_pending;
1908         raw->set_pending = bnx2x_raw_set_pending;
1909         raw->wait_comp = bnx2x_raw_wait;
1910 }
1911
1912 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1913         u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1914         int state, unsigned long *pstate, bnx2x_obj_type type,
1915         struct bnx2x_credit_pool_obj *macs_pool,
1916         struct bnx2x_credit_pool_obj *vlans_pool)
1917 {
1918         INIT_LIST_HEAD(&o->head);
1919
1920         o->macs_pool = macs_pool;
1921         o->vlans_pool = vlans_pool;
1922
1923         o->delete_all = bnx2x_vlan_mac_del_all;
1924         o->restore = bnx2x_vlan_mac_restore;
1925         o->complete = bnx2x_complete_vlan_mac;
1926         o->wait = bnx2x_wait_vlan_mac;
1927
1928         bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1929                            state, pstate, type);
1930 }
1931
1932
1933 void bnx2x_init_mac_obj(struct bnx2x *bp,
1934                         struct bnx2x_vlan_mac_obj *mac_obj,
1935                         u8 cl_id, u32 cid, u8 func_id, void *rdata,
1936                         dma_addr_t rdata_mapping, int state,
1937                         unsigned long *pstate, bnx2x_obj_type type,
1938                         struct bnx2x_credit_pool_obj *macs_pool)
1939 {
1940         union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1941
1942         bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1943                                    rdata_mapping, state, pstate, type,
1944                                    macs_pool, NULL);
1945
1946         /* CAM credit pool handling */
1947         mac_obj->get_credit = bnx2x_get_credit_mac;
1948         mac_obj->put_credit = bnx2x_put_credit_mac;
1949         mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1950         mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1951
1952         if (CHIP_IS_E1x(bp)) {
1953                 mac_obj->set_one_rule      = bnx2x_set_one_mac_e1x;
1954                 mac_obj->check_del         = bnx2x_check_mac_del;
1955                 mac_obj->check_add         = bnx2x_check_mac_add;
1956                 mac_obj->check_move        = bnx2x_check_move_always_err;
1957                 mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
1958
1959                 /* Exe Queue */
1960                 bnx2x_exe_queue_init(bp,
1961                                      &mac_obj->exe_queue, 1, qable_obj,
1962                                      bnx2x_validate_vlan_mac,
1963                                      bnx2x_remove_vlan_mac,
1964                                      bnx2x_optimize_vlan_mac,
1965                                      bnx2x_execute_vlan_mac,
1966                                      bnx2x_exeq_get_mac);
1967         } else {
1968                 mac_obj->set_one_rule      = bnx2x_set_one_mac_e2;
1969                 mac_obj->check_del         = bnx2x_check_mac_del;
1970                 mac_obj->check_add         = bnx2x_check_mac_add;
1971                 mac_obj->check_move        = bnx2x_check_move;
1972                 mac_obj->ramrod_cmd        =
1973                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1974                 mac_obj->get_n_elements    = bnx2x_get_n_elements;
1975
1976                 /* Exe Queue */
1977                 bnx2x_exe_queue_init(bp,
1978                                      &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1979                                      qable_obj, bnx2x_validate_vlan_mac,
1980                                      bnx2x_remove_vlan_mac,
1981                                      bnx2x_optimize_vlan_mac,
1982                                      bnx2x_execute_vlan_mac,
1983                                      bnx2x_exeq_get_mac);
1984         }
1985 }
1986
1987 void bnx2x_init_vlan_obj(struct bnx2x *bp,
1988                          struct bnx2x_vlan_mac_obj *vlan_obj,
1989                          u8 cl_id, u32 cid, u8 func_id, void *rdata,
1990                          dma_addr_t rdata_mapping, int state,
1991                          unsigned long *pstate, bnx2x_obj_type type,
1992                          struct bnx2x_credit_pool_obj *vlans_pool)
1993 {
1994         union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1995
1996         bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1997                                    rdata_mapping, state, pstate, type, NULL,
1998                                    vlans_pool);
1999
2000         vlan_obj->get_credit = bnx2x_get_credit_vlan;
2001         vlan_obj->put_credit = bnx2x_put_credit_vlan;
2002         vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
2003         vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
2004
2005         if (CHIP_IS_E1x(bp)) {
2006                 BNX2X_ERR("Do not support chips others than E2 and newer\n");
2007                 BUG();
2008         } else {
2009                 vlan_obj->set_one_rule      = bnx2x_set_one_vlan_e2;
2010                 vlan_obj->check_del         = bnx2x_check_vlan_del;
2011                 vlan_obj->check_add         = bnx2x_check_vlan_add;
2012                 vlan_obj->check_move        = bnx2x_check_move;
2013                 vlan_obj->ramrod_cmd        =
2014                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2015
2016                 /* Exe Queue */
2017                 bnx2x_exe_queue_init(bp,
2018                                      &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2019                                      qable_obj, bnx2x_validate_vlan_mac,
2020                                      bnx2x_remove_vlan_mac,
2021                                      bnx2x_optimize_vlan_mac,
2022                                      bnx2x_execute_vlan_mac,
2023                                      bnx2x_exeq_get_vlan);
2024         }
2025 }
2026
2027 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2028                              struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2029                              u8 cl_id, u32 cid, u8 func_id, void *rdata,
2030                              dma_addr_t rdata_mapping, int state,
2031                              unsigned long *pstate, bnx2x_obj_type type,
2032                              struct bnx2x_credit_pool_obj *macs_pool,
2033                              struct bnx2x_credit_pool_obj *vlans_pool)
2034 {
2035         union bnx2x_qable_obj *qable_obj =
2036                 (union bnx2x_qable_obj *)vlan_mac_obj;
2037
2038         bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2039                                    rdata_mapping, state, pstate, type,
2040                                    macs_pool, vlans_pool);
2041
2042         /* CAM pool handling */
2043         vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2044         vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2045         /*
2046          * CAM offset is relevant for 57710 and 57711 chips only which have a
2047          * single CAM for both MACs and VLAN-MAC pairs. So the offset
2048          * will be taken from MACs' pool object only.
2049          */
2050         vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2051         vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2052
2053         if (CHIP_IS_E1(bp)) {
2054                 BNX2X_ERR("Do not support chips others than E2\n");
2055                 BUG();
2056         } else if (CHIP_IS_E1H(bp)) {
2057                 vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
2058                 vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
2059                 vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
2060                 vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
2061                 vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2062
2063                 /* Exe Queue */
2064                 bnx2x_exe_queue_init(bp,
2065                                      &vlan_mac_obj->exe_queue, 1, qable_obj,
2066                                      bnx2x_validate_vlan_mac,
2067                                      bnx2x_remove_vlan_mac,
2068                                      bnx2x_optimize_vlan_mac,
2069                                      bnx2x_execute_vlan_mac,
2070                                      bnx2x_exeq_get_vlan_mac);
2071         } else {
2072                 vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
2073                 vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
2074                 vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
2075                 vlan_mac_obj->check_move        = bnx2x_check_move;
2076                 vlan_mac_obj->ramrod_cmd        =
2077                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2078
2079                 /* Exe Queue */
2080                 bnx2x_exe_queue_init(bp,
2081                                      &vlan_mac_obj->exe_queue,
2082                                      CLASSIFY_RULES_COUNT,
2083                                      qable_obj, bnx2x_validate_vlan_mac,
2084                                      bnx2x_remove_vlan_mac,
2085                                      bnx2x_optimize_vlan_mac,
2086                                      bnx2x_execute_vlan_mac,
2087                                      bnx2x_exeq_get_vlan_mac);
2088         }
2089
2090 }
2091
2092 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2093 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2094                         struct tstorm_eth_mac_filter_config *mac_filters,
2095                         u16 pf_id)
2096 {
2097         size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2098
2099         u32 addr = BAR_TSTRORM_INTMEM +
2100                         TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2101
2102         __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2103 }
2104
/* Configure the rx_mode for E1x chips by writing drop-all/accept-all masks
 * directly into TSTORM internal memory; completes synchronously (no ramrod).
 */
static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
				 struct bnx2x_rx_mode_ramrod_params *p)
{
	/* update the bp MAC filter structure */
	u32 mask = (1 << p->cl_id);

	struct tstorm_eth_mac_filter_config *mac_filters =
		(struct tstorm_eth_mac_filter_config *)p->rdata;

	/* initial setting is drop-all */
	u8 drop_all_ucast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	/* In e1x we only take the Rx accept flags into account since Tx
	 * switching isn't enabled.
	 */
	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
		/* accept matched ucast */
		drop_all_ucast = 0;

	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
		/* accept matched mcast */
		drop_all_mcast = 0;

	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
		/* accept (all) bcast */
		accp_all_bcast = 1;
	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
		/* accept unmatched unicasts */
		unmatched_unicast = 1;

	/* For each filter: set or clear this client's bit in the per-PF
	 * mask depending on the flags computed above.
	 */
	mac_filters->ucast_drop_all = drop_all_ucast ?
		mac_filters->ucast_drop_all | mask :
		mac_filters->ucast_drop_all & ~mask;

	mac_filters->mcast_drop_all = drop_all_mcast ?
		mac_filters->mcast_drop_all | mask :
		mac_filters->mcast_drop_all & ~mask;

	mac_filters->ucast_accept_all = accp_all_ucast ?
		mac_filters->ucast_accept_all | mask :
		mac_filters->ucast_accept_all & ~mask;

	mac_filters->mcast_accept_all = accp_all_mcast ?
		mac_filters->mcast_accept_all | mask :
		mac_filters->mcast_accept_all & ~mask;

	mac_filters->bcast_accept_all = accp_all_bcast ?
		mac_filters->bcast_accept_all | mask :
		mac_filters->bcast_accept_all & ~mask;

	mac_filters->unmatched_unicast = unmatched_unicast ?
		mac_filters->unmatched_unicast | mask :
		mac_filters->unmatched_unicast & ~mask;

	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
	   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
	   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
	   mac_filters->bcast_accept_all);

	/* write the MAC filter structure */
	__storm_memset_mac_filters(bp, mac_filters, p->func_id);

	/* The operation is completed */
	clear_bit(p->state, p->pstate);
	smp_mb__after_clear_bit();

	return 0;
}
2185
2186 /* Setup ramrod data */
2187 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2188                                 struct eth_classify_header *hdr,
2189                                 u8 rule_cnt)
2190 {
2191         hdr->echo = cpu_to_le32(cid);
2192         hdr->rule_cnt = rule_cnt;
2193 }
2194
2195 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2196                                 unsigned long *accept_flags,
2197                                 struct eth_filter_rules_cmd *cmd,
2198                                 bool clear_accept_all)
2199 {
2200         u16 state;
2201
2202         /* start with 'drop-all' */
2203         state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2204                 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2205
2206         if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2207                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2208
2209         if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2210                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2211
2212         if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2213                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2214                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2215         }
2216
2217         if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2218                 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2219                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2220         }
2221
2222         if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2223                 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2224
2225         if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2226                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2227                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2228         }
2229
2230         if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2231                 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2232
2233         /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2234         if (clear_accept_all) {
2235                 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2236                 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2237                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2238                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2239         }
2240
2241         cmd->state = cpu_to_le16(state);
2242
2243 }
2244
2245 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2246                                 struct bnx2x_rx_mode_ramrod_params *p)
2247 {
2248         struct eth_filter_rules_ramrod_data *data = p->rdata;
2249         int rc;
2250         u8 rule_idx = 0;
2251
2252         /* Reset the ramrod data buffer */
2253         memset(data, 0, sizeof(*data));
2254
2255         /* Setup ramrod data */
2256
2257         /* Tx (internal switching) */
2258         if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2259                 data->rules[rule_idx].client_id = p->cl_id;
2260                 data->rules[rule_idx].func_id = p->func_id;
2261
2262                 data->rules[rule_idx].cmd_general_data =
2263                         ETH_FILTER_RULES_CMD_TX_CMD;
2264
2265                 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2266                                                &(data->rules[rule_idx++]),
2267                                                false);
2268         }
2269
2270         /* Rx */
2271         if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2272                 data->rules[rule_idx].client_id = p->cl_id;
2273                 data->rules[rule_idx].func_id = p->func_id;
2274
2275                 data->rules[rule_idx].cmd_general_data =
2276                         ETH_FILTER_RULES_CMD_RX_CMD;
2277
2278                 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2279                                                &(data->rules[rule_idx++]),
2280                                                false);
2281         }
2282
2283
2284         /*
2285          * If FCoE Queue configuration has been requested configure the Rx and
2286          * internal switching modes for this queue in separate rules.
2287          *
2288          * FCoE queue shell never be set to ACCEPT_ALL packets of any sort:
2289          * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2290          */
2291         if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2292                 /*  Tx (internal switching) */
2293                 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2294                         data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2295                         data->rules[rule_idx].func_id = p->func_id;
2296
2297                         data->rules[rule_idx].cmd_general_data =
2298                                                 ETH_FILTER_RULES_CMD_TX_CMD;
2299
2300                         bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2301                                                        &(data->rules[rule_idx]),
2302                                                        true);
2303                         rule_idx++;
2304                 }
2305
2306                 /* Rx */
2307                 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2308                         data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2309                         data->rules[rule_idx].func_id = p->func_id;
2310
2311                         data->rules[rule_idx].cmd_general_data =
2312                                                 ETH_FILTER_RULES_CMD_RX_CMD;
2313
2314                         bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2315                                                        &(data->rules[rule_idx]),
2316                                                        true);
2317                         rule_idx++;
2318                 }
2319         }
2320
2321         /*
2322          * Set the ramrod header (most importantly - number of rules to
2323          * configure).
2324          */
2325         bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2326
2327         DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2328                          data->header.rule_cnt, p->rx_accept_flags,
2329                          p->tx_accept_flags);
2330
2331         /*
2332          *  No need for an explicit memory barrier here as long we would
2333          *  need to ensure the ordering of writing to the SPQ element
2334          *  and updating of the SPQ producer which involves a memory
2335          *  read and we will have to put a full memory barrier there
2336          *  (inside bnx2x_sp_post()).
2337          */
2338
2339         /* Send a ramrod */
2340         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2341                            U64_HI(p->rdata_mapping),
2342                            U64_LO(p->rdata_mapping),
2343                            ETH_CONNECTION_TYPE);
2344         if (rc)
2345                 return rc;
2346
2347         /* Ramrod completion is pending */
2348         return 1;
2349 }
2350
2351 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2352                                       struct bnx2x_rx_mode_ramrod_params *p)
2353 {
2354         return bnx2x_state_wait(bp, p->state, p->pstate);
2355 }
2356
/* E1x configures rx_mode synchronously - there is nothing to wait for */
static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
				    struct bnx2x_rx_mode_ramrod_params *p)
{
	return 0;
}
2363
2364 int bnx2x_config_rx_mode(struct bnx2x *bp,
2365                          struct bnx2x_rx_mode_ramrod_params *p)
2366 {
2367         int rc;
2368
2369         /* Configure the new classification in the chip */
2370         rc = p->rx_mode_obj->config_rx_mode(bp, p);
2371         if (rc < 0)
2372                 return rc;
2373
2374         /* Wait for a ramrod completion if was requested */
2375         if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2376                 rc = p->rx_mode_obj->wait_comp(bp, p);
2377                 if (rc)
2378                         return rc;
2379         }
2380
2381         return rc;
2382 }
2383
2384 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2385                             struct bnx2x_rx_mode_obj *o)
2386 {
2387         if (CHIP_IS_E1x(bp)) {
2388                 o->wait_comp      = bnx2x_empty_rx_mode_wait;
2389                 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2390         } else {
2391                 o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
2392                 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2393         }
2394 }
2395
2396 /********************* Multicast verbs: SET, CLEAR ****************************/
2397 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2398 {
2399         return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2400 }
2401
/* One MAC address queued on a pending ADD command's macs_head list. */
struct bnx2x_mcast_mac_elem {
	struct list_head link;
	u8 mac[ETH_ALEN];
	u8 pad[2]; /* For a natural alignment of the following buffer */
};
2407
/* A multicast command that could not be executed immediately and was
 * queued on bnx2x_mcast_obj::pending_cmds_head (FIFO order).
 */
struct bnx2x_pending_mcast_cmd {
	struct list_head link;
	int type; /* BNX2X_MCAST_CMD_X */
	union {
		struct list_head macs_head; /* MACs to add (ADD command) */
		u32 macs_num; /* Needed for DEL command */
		int next_bin; /* Needed for RESTORE flow with aprox match */
	} data;

	bool done; /* set to true, when the command has been handled,
		    * practically used in 57712 handling only, where one pending
		    * command may be handled in a few operations. As long as for
		    * other chips every operation handling is completed in a
		    * single ramrod, there is no need to utilize this field.
		    */
};
2424
2425 static int bnx2x_mcast_wait(struct bnx2x *bp,
2426                             struct bnx2x_mcast_obj *o)
2427 {
2428         if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2429                         o->raw.wait_comp(bp, &o->raw))
2430                 return -EBUSY;
2431
2432         return 0;
2433 }
2434
/**
 * bnx2x_mcast_enqueue_cmd - queue a multicast command for later handling
 *
 * @bp:         device handle
 * @o:          multicast object to queue the command on
 * @p:          ramrod parameters (MAC list and its length)
 * @cmd:        BNX2X_MCAST_CMD_ADD/DEL/RESTORE
 *
 * The command descriptor and (for ADD) copies of all its MACs are placed
 * in a single allocation: the MAC elements live in the memory directly
 * following the descriptor. The command is appended to
 * o->pending_cmds_head and the object is marked SCHEDULED.
 *
 * Returns 1 when a command was enqueued, 0 when there was nothing to do
 * (empty MAC list), negative errno on failure.
 */
static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
				   struct bnx2x_mcast_obj *o,
				   struct bnx2x_mcast_ramrod_params *p,
				   enum bnx2x_mcast_cmd cmd)
{
	int total_sz;
	struct bnx2x_pending_mcast_cmd *new_cmd;
	struct bnx2x_mcast_mac_elem *cur_mac = NULL;
	struct bnx2x_mcast_list_elem *pos;
	/* Only ADD carries a MAC list that must be copied */
	int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
			     p->mcast_list_len : 0);

	/* If the command is empty ("handle pending commands only"), break */
	if (!p->mcast_list_len)
		return 0;

	/* Descriptor plus the trailing MAC element array */
	total_sz = sizeof(*new_cmd) +
		macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);

	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
	new_cmd = kzalloc(total_sz, GFP_ATOMIC);

	if (!new_cmd)
		return -ENOMEM;

	DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
	   cmd, macs_list_len);

	INIT_LIST_HEAD(&new_cmd->data.macs_head);

	new_cmd->type = cmd;
	new_cmd->done = false;

	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		/* The MAC elements start right after the descriptor */
		cur_mac = (struct bnx2x_mcast_mac_elem *)
			  ((u8 *)new_cmd + sizeof(*new_cmd));

		/* Push the MACs of the current command into the pending command
		 * MACs list: FIFO
		 */
		list_for_each_entry(pos, &p->mcast_list, link) {
			memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
			list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
			cur_mac++;
		}

		break;

	case BNX2X_MCAST_CMD_DEL:
		new_cmd->data.macs_num = p->mcast_list_len;
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		new_cmd->data.next_bin = 0;
		break;

	default:
		kfree(new_cmd);
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* Push the new pending command to the tail of the pending list: FIFO */
	list_add_tail(&new_cmd->link, &o->pending_cmds_head);

	/* Mark the object as having scheduled work */
	o->set_sched(o);

	return 1;
}
2505
2506 /**
2507  * bnx2x_mcast_get_next_bin - get the next set bin (index)
2508  *
2509  * @o:
2510  * @last:       index to start looking from (including)
2511  *
2512  * returns the next found (set) bin or a negative value if none is found.
2513  */
2514 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2515 {
2516         int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2517
2518         for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2519                 if (o->registry.aprox_match.vec[i])
2520                         for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2521                                 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2522                                 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2523                                                        vec, cur_bit)) {
2524                                         return cur_bit;
2525                                 }
2526                         }
2527                 inner_start = 0;
2528         }
2529
2530         /* None found */
2531         return -1;
2532 }
2533
2534 /**
2535  * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2536  *
2537  * @o:
2538  *
2539  * returns the index of the found bin or -1 if none is found
2540  */
2541 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2542 {
2543         int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2544
2545         if (cur_bit >= 0)
2546                 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2547
2548         return cur_bit;
2549 }
2550
2551 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2552 {
2553         struct bnx2x_raw_obj *raw = &o->raw;
2554         u8 rx_tx_flag = 0;
2555
2556         if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2557             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2558                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2559
2560         if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2561             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2562                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2563
2564         return rx_tx_flag;
2565 }
2566
/**
 * bnx2x_mcast_set_one_rule_e2 - fill one rule in the E2 ramrod data
 *
 * @bp:         device handle
 * @o:          multicast object (raw obj + approximate-match registry)
 * @idx:        index of the rule to fill in the ramrod data buffer
 * @cfg_data:   MAC for ADD, bin for RESTORE; unused for DEL
 * @cmd:        BNX2X_MCAST_CMD_ADD/DEL/RESTORE
 *
 * Besides filling the rule, updates the registry as a side effect:
 * ADD sets the MAC's bin, DEL clears the first currently-set bin.
 */
static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
					struct bnx2x_mcast_obj *o, int idx,
					union bnx2x_mcast_config_data *cfg_data,
					enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_raw_obj *r = &o->raw;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
	u8 func_id = r->func_id;
	u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
	int bin;

	/* ADD and RESTORE both set a bin; only DEL clears one */
	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;

	data->rules[idx].cmd_general_data |= rx_tx_add_flag;

	/* Get a bin and update a bins' vector */
	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
		break;

	case BNX2X_MCAST_CMD_DEL:
		/* If there were no more bins to clear
		 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
		 * clear any (0xff) bin.
		 * See bnx2x_mcast_validate_e2() for explanation when it may
		 * happen.
		 */
		bin = bnx2x_mcast_clear_first_bin(o);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		bin = cfg_data->bin;
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return;
	}

	DP(BNX2X_MSG_SP, "%s bin %d\n",
			 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
			 "Setting"  : "Clearing"), bin);

	data->rules[idx].bin_id    = (u8)bin;
	data->rules[idx].func_id   = func_id;
	data->rules[idx].engine_id = o->engine_id;
}
2618
2619 /**
2620  * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2621  *
2622  * @bp:         device handle
2623  * @o:
2624  * @start_bin:  index in the registry to start from (including)
2625  * @rdata_idx:  index in the ramrod data to start from
2626  *
2627  * returns last handled bin index or -1 if all bins have been handled
2628  */
2629 static inline int bnx2x_mcast_handle_restore_cmd_e2(
2630         struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2631         int *rdata_idx)
2632 {
2633         int cur_bin, cnt = *rdata_idx;
2634         union bnx2x_mcast_config_data cfg_data = {NULL};
2635
2636         /* go through the registry and configure the bins from it */
2637         for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2638             cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2639
2640                 cfg_data.bin = (u8)cur_bin;
2641                 o->set_one_rule(bp, o, cnt, &cfg_data,
2642                                 BNX2X_MCAST_CMD_RESTORE);
2643
2644                 cnt++;
2645
2646                 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2647
2648                 /* Break if we reached the maximum number
2649                  * of rules.
2650                  */
2651                 if (cnt >= o->max_cmd_len)
2652                         break;
2653         }
2654
2655         *rdata_idx = cnt;
2656
2657         return cur_bin;
2658 }
2659
/* Consume a pending ADD command: emit one rule per queued MAC starting at
 * *line_idx, removing each handled MAC from the command's list. If the
 * ramrod data fills up, the remaining MACs stay queued for the next
 * ramrod; the command is marked done only when its list becomes empty.
 */
static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
	int *line_idx)
{
	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
	int cnt = *line_idx;
	union bnx2x_mcast_config_data cfg_data = {NULL};

	/* _safe: entries are unlinked while iterating */
	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
				 link) {

		cfg_data.mac = &pmac_pos->mac[0];
		o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);

		cnt++;

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
		   pmac_pos->mac);

		/* Unlink only; memory is freed with the command itself */
		list_del(&pmac_pos->link);

		/* Break if we reached the maximum number
		 * of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*line_idx = cnt;

	/* if no more MACs to configure - we are done */
	if (list_empty(&cmd_pos->data.macs_head))
		cmd_pos->done = true;
}
2694
2695 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2696         struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2697         int *line_idx)
2698 {
2699         int cnt = *line_idx;
2700
2701         while (cmd_pos->data.macs_num) {
2702                 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2703
2704                 cnt++;
2705
2706                 cmd_pos->data.macs_num--;
2707
2708                   DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
2709                                    cmd_pos->data.macs_num, cnt);
2710
2711                 /* Break if we reached the maximum
2712                  * number of rules.
2713                  */
2714                 if (cnt >= o->max_cmd_len)
2715                         break;
2716         }
2717
2718         *line_idx = cnt;
2719
2720         /* If we cleared all bins - we are done */
2721         if (!cmd_pos->data.macs_num)
2722                 cmd_pos->done = true;
2723 }
2724
2725 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2726         struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2727         int *line_idx)
2728 {
2729         cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2730                                                 line_idx);
2731
2732         if (cmd_pos->data.next_bin < 0)
2733                 /* If o->set_restore returned -1 we are done */
2734                 cmd_pos->done = true;
2735         else
2736                 /* Start from the next bin next time */
2737                 cmd_pos->data.next_bin++;
2738 }
2739
/* Drain the pending command FIFO into the current ramrod data buffer.
 *
 * Each command type is handled by its dedicated helper; fully handled
 * commands are unlinked and freed, partially handled ones stay queued
 * (their helpers track progress in cmd_pos->data).
 *
 * Returns the number of ramrod data lines filled, or -EINVAL on an
 * unknown command type.
 */
static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
	int cnt = 0;
	struct bnx2x_mcast_obj *o = p->mcast_obj;

	/* _safe: completed commands are removed while iterating */
	list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
				 link) {
		switch (cmd_pos->type) {
		case BNX2X_MCAST_CMD_ADD:
			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
			break;

		case BNX2X_MCAST_CMD_DEL:
			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
			break;

		case BNX2X_MCAST_CMD_RESTORE:
			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
							   &cnt);
			break;

		default:
			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
			return -EINVAL;
		}

		/* If the command has been completed - remove it from the list
		 * and free the memory
		 */
		if (cmd_pos->done) {
			list_del(&cmd_pos->link);
			kfree(cmd_pos);
		}

		/* Break if we reached the maximum number of rules */
		if (cnt >= o->max_cmd_len)
			break;
	}

	return cnt;
}
2783
2784 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2785         struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2786         int *line_idx)
2787 {
2788         struct bnx2x_mcast_list_elem *mlist_pos;
2789         union bnx2x_mcast_config_data cfg_data = {NULL};
2790         int cnt = *line_idx;
2791
2792         list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2793                 cfg_data.mac = mlist_pos->mac;
2794                 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2795
2796                 cnt++;
2797
2798                 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2799                    mlist_pos->mac);
2800         }
2801
2802         *line_idx = cnt;
2803 }
2804
2805 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2806         struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2807         int *line_idx)
2808 {
2809         int cnt = *line_idx, i;
2810
2811         for (i = 0; i < p->mcast_list_len; i++) {
2812                 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2813
2814                 cnt++;
2815
2816                 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2817                                  p->mcast_list_len - i - 1);
2818         }
2819
2820         *line_idx = cnt;
2821 }
2822
2823 /**
2824  * bnx2x_mcast_handle_current_cmd -
2825  *
2826  * @bp:         device handle
2827  * @p:
2828  * @cmd:
2829  * @start_cnt:  first line in the ramrod data that may be used
2830  *
2831  * This function is called iff there is enough place for the current command in
2832  * the ramrod data.
2833  * Returns number of lines filled in the ramrod data in total.
2834  */
2835 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2836                         struct bnx2x_mcast_ramrod_params *p,
2837                         enum bnx2x_mcast_cmd cmd,
2838                         int start_cnt)
2839 {
2840         struct bnx2x_mcast_obj *o = p->mcast_obj;
2841         int cnt = start_cnt;
2842
2843         DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2844
2845         switch (cmd) {
2846         case BNX2X_MCAST_CMD_ADD:
2847                 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2848                 break;
2849
2850         case BNX2X_MCAST_CMD_DEL:
2851                 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2852                 break;
2853
2854         case BNX2X_MCAST_CMD_RESTORE:
2855                 o->hdl_restore(bp, o, 0, &cnt);
2856                 break;
2857
2858         default:
2859                 BNX2X_ERR("Unknown command: %d\n", cmd);
2860                 return -EINVAL;
2861         }
2862
2863         /* The current command has been handled */
2864         p->mcast_list_len = 0;
2865
2866         return cnt;
2867 }
2868
/* Validate a multicast command on E2+ chips and account for the amount
 * of work it implies: sets p->mcast_list_len for DEL/RESTORE (whose list
 * is empty) and updates the registry size and o->total_pending_num.
 *
 * Returns 0 on success, -EINVAL on an unknown command.
 */
static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
	case BNX2X_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* Don't break - fall through to set mcast_list_len */

	/* RESTORE command will restore the entire multicast configuration */
	case BNX2X_MCAST_CMD_RESTORE:
		/* Here we set the approximate amount of work to do, which in
		 * fact may be only less as some MACs in postponed ADD
		 * command(s) scheduled before this command may fall into
		 * the same bin and the actual number of bins set in the
		 * registry would be less than we estimated here. See
		 * bnx2x_mcast_set_one_rule_e2() for further details.
		 */
		p->mcast_list_len = reg_sz;
		break;

	case BNX2X_MCAST_CMD_ADD:
	case BNX2X_MCAST_CMD_CONT:
		/* Here we assume that all new MACs will fall into new bins.
		 * However we will correct the real registry size after we
		 * handle all pending commands.
		 */
		o->set_registry_size(o, reg_sz + p->mcast_list_len);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;

	}

	/* Increase the total number of MACs pending to be configured */
	o->total_pending_num += p->mcast_list_len;

	return 0;
}
2914
/* Undo the accounting done by bnx2x_mcast_validate_e2() when the command
 * could not be submitted: restore the registry size and drop the
 * command's contribution to the pending counter.
 */
static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
				      struct bnx2x_mcast_ramrod_params *p,
				      int old_num_bins)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;

	o->set_registry_size(o, old_num_bins);
	o->total_pending_num -= p->mcast_list_len;
}
2924
2925 /**
2926  * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values
2927  *
2928  * @bp:         device handle
2929  * @p:
2930  * @len:        number of rules to handle
2931  */
2932 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2933                                         struct bnx2x_mcast_ramrod_params *p,
2934                                         u8 len)
2935 {
2936         struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2937         struct eth_multicast_rules_ramrod_data *data =
2938                 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2939
2940         data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
2941                                         (BNX2X_FILTER_MCAST_PENDING <<
2942                                          BNX2X_SWCID_SHIFT));
2943         data->header.rule_cnt = len;
2944 }
2945
2946 /**
2947  * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2948  *
2949  * @bp:         device handle
2950  * @o:
2951  *
2952  * Recalculate the actual number of set bins in the registry using Brian
2953  * Kernighan's algorithm: it's execution complexity is as a number of set bins.
2954  *
2955  * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
2956  */
2957 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2958                                                   struct bnx2x_mcast_obj *o)
2959 {
2960         int i, cnt = 0;
2961         u64 elem;
2962
2963         for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2964                 elem = o->registry.aprox_match.vec[i];
2965                 for (; elem; cnt++)
2966                         elem &= elem - 1;
2967         }
2968
2969         o->set_registry_size(o, cnt);
2970
2971         return 0;
2972 }
2973
/* Build and submit a multicast configuration ramrod on E2+ chips.
 *
 * Fills the ramrod data first from the pending-command FIFO and then,
 * if room remains, from the current command; updates the pending
 * counter and (when nothing is left pending) the registry size.
 *
 * Returns 1 when a ramrod was posted (completion pending), 0 when
 * RAMROD_DRV_CLR_ONLY suppressed the post, negative errno on failure.
 */
static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
	int cnt = 0, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be true iff there was enough room in ramrod
	 * data for all pending commands and for the current
	 * command. Otherwise the current command would have been added
	 * to the pending commands and p->mcast_list_len would have been
	 * zeroed.
	 */
	if (p->mcast_list_len > 0)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);

	/* We've pulled out some MACs - update the total number of
	 * outstanding.
	 */
	o->total_pending_num -= cnt;

	/* send a ramrod */
	WARN_ON(o->total_pending_num < 0);
	WARN_ON(cnt > o->max_cmd_len);

	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);

	/* Update a registry size if there are no more pending operations.
	 *
	 * We don't want to change the value of the registry size if there are
	 * pending operations because we want it to always be equal to the
	 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
	 * set bins after the last requested operation in order to properly
	 * evaluate the size of the next DEL/RESTORE operation.
	 *
	 * Note that we update the registry itself during command(s) handling
	 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
	 * with a limited amount of update commands (per MAC/bin) and we don't
	 * know in this scope what the actual state of bins configuration is
	 * going to be after this ramrod.
	 */
	if (!o->total_pending_num)
		bnx2x_mcast_refresh_registry_e2(bp, o);

	/*
	 * If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/*
		 *  No need for an explicit memory barrier here as long we would
		 *  need to ensure the ordering of writing to the SPQ element
		 *  and updating of the SPQ producer which involves a memory
		 *  read and we will have to put a full memory barrier there
		 *  (inside bnx2x_sp_post()).
		 */

		/* Send a ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
				   raw->cid, U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}
}
3059
3060 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3061                                     struct bnx2x_mcast_ramrod_params *p,
3062                                     enum bnx2x_mcast_cmd cmd)
3063 {
3064         /* Mark, that there is a work to do */
3065         if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3066                 p->mcast_list_len = 1;
3067
3068         return 0;
3069 }
3070
/* E1H keeps no accounting that would need reverting (see
 * bnx2x_mcast_validate_e1h()), so this is a no-op.
 */
static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
				       struct bnx2x_mcast_ramrod_params *p,
				       int old_num_bins)
{
	/* Do nothing */
}
3077
/* Set bit @bit in @filter - an array of u32 words (32 bins per word)
 * forming the 57711 multicast approximate-match filter image.
 */
#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
do { \
	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
} while (0)
3082
3083 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3084                                            struct bnx2x_mcast_obj *o,
3085                                            struct bnx2x_mcast_ramrod_params *p,
3086                                            u32 *mc_filter)
3087 {
3088         struct bnx2x_mcast_list_elem *mlist_pos;
3089         int bit;
3090
3091         list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3092                 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3093                 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3094
3095                 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3096                    mlist_pos->mac, bit);
3097
3098                 /* bookkeeping... */
3099                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3100                                   bit);
3101         }
3102 }
3103
3104 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3105         struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3106         u32 *mc_filter)
3107 {
3108         int bit;
3109
3110         for (bit = bnx2x_mcast_get_next_bin(o, 0);
3111              bit >= 0;
3112              bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3113                 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3114                 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3115         }
3116 }
3117
/* On 57711 we write the multicast MACs' approximate match
 * table directly into the TSTORM's internal RAM, so we don't
 * really need to handle any tricks to make it work.
 */
static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
				 struct bnx2x_mcast_ramrod_params *p,
				 enum bnx2x_mcast_cmd cmd)
{
	int i;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *r = &o->raw;

	/* Unless CLEAR_ONLY was requested, build the filter image and
	 * write it into the internal memory; CLEAR_ONLY only clears the
	 * SW registry (see the else branch below).
	 */
	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		u32 mc_filter[MC_HASH_SIZE] = {0};

		/* Set the multicast filter bits before writing it into
		 * the internal memory.
		 */
		switch (cmd) {
		case BNX2X_MCAST_CMD_ADD:
			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
			break;

		case BNX2X_MCAST_CMD_DEL:
			DP(BNX2X_MSG_SP,
			   "Invalidating multicast MACs configuration\n");

			/* clear the registry; the zeroed mc_filter below
			 * clears the HW table
			 */
			memset(o->registry.aprox_match.vec, 0,
			       sizeof(o->registry.aprox_match.vec));
			break;

		case BNX2X_MCAST_CMD_RESTORE:
			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
			break;

		default:
			BNX2X_ERR("Unknown command: %d\n", cmd);
			return -EINVAL;
		}

		/* Set the mcast filter in the internal memory */
		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
	} else
		/* clear the registry */
		memset(o->registry.aprox_match.vec, 0,
		       sizeof(o->registry.aprox_match.vec));

	/* We are done */
	r->clear_pending(r);

	return 0;
}
3175
/* Validate a multicast command on 57710 and account for the work it
 * implies. On E1, multicast MACs consume CAM entries, so an ADD larger
 * than o->max_cmd_len is rejected; each non-empty command reserves a
 * full o->max_cmd_len slice of the pending counter to keep commands
 * strictly serialized.
 *
 * Returns 0 on success, -EINVAL on an unknown or oversized command.
 */
static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
	case BNX2X_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* Don't break - fall through to set mcast_list_len */

	/* RESTORE command will restore the entire multicast configuration */
	case BNX2X_MCAST_CMD_RESTORE:
		p->mcast_list_len = reg_sz;
		  DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
				   cmd, p->mcast_list_len);
		break;

	case BNX2X_MCAST_CMD_ADD:
	case BNX2X_MCAST_CMD_CONT:
		/* Multicast MACs on 57710 are configured as unicast MACs and
		 * there is only a limited number of CAM entries for that
		 * matter.
		 */
		if (p->mcast_list_len > o->max_cmd_len) {
			BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
				  o->max_cmd_len);
			return -EINVAL;
		}
		/* Every configured MAC should be cleared if DEL command is
		 * called. Only the last ADD command is relevant as long as
		 * every ADD commands overrides the previous configuration.
		 */
		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
		if (p->mcast_list_len > 0)
			o->set_registry_size(o, p->mcast_list_len);

		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;

	}

	/* We want to ensure that commands are executed one by one for 57710.
	 * Therefore each none-empty command will consume o->max_cmd_len.
	 */
	if (p->mcast_list_len)
		o->total_pending_num += o->max_cmd_len;

	return 0;
}
3231
3232 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3233                                       struct bnx2x_mcast_ramrod_params *p,
3234                                       int old_num_macs)
3235 {
3236         struct bnx2x_mcast_obj *o = p->mcast_obj;
3237
3238         o->set_registry_size(o, old_num_macs);
3239
3240         /* If current command hasn't been handled yet and we are
3241          * here means that it's meant to be dropped and we have to
3242          * update the number of outstandling MACs accordingly.
3243          */
3244         if (p->mcast_list_len)
3245                 o->total_pending_num -= o->max_cmd_len;
3246 }
3247
3248 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3249                                         struct bnx2x_mcast_obj *o, int idx,
3250                                         union bnx2x_mcast_config_data *cfg_data,
3251                                         enum bnx2x_mcast_cmd cmd)
3252 {
3253         struct bnx2x_raw_obj *r = &o->raw;
3254         struct mac_configuration_cmd *data =
3255                 (struct mac_configuration_cmd *)(r->rdata);
3256
3257         /* copy mac */
3258         if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3259                 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3260                                       &data->config_table[idx].middle_mac_addr,
3261                                       &data->config_table[idx].lsb_mac_addr,
3262                                       cfg_data->mac);
3263
3264                 data->config_table[idx].vlan_id = 0;
3265                 data->config_table[idx].pf_id = r->func_id;
3266                 data->config_table[idx].clients_bit_vector =
3267                         cpu_to_le32(1 << r->cl_id);
3268
3269                 SET_FLAG(data->config_table[idx].flags,
3270                          MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3271                          T_ETH_MAC_COMMAND_SET);
3272         }
3273 }
3274
3275 /**
3276  * bnx2x_mcast_set_rdata_hdr_e1  - set header values in mac_configuration_cmd
3277  *
3278  * @bp:         device handle
3279  * @p:
3280  * @len:        number of rules to handle
3281  */
3282 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3283                                         struct bnx2x_mcast_ramrod_params *p,
3284                                         u8 len)
3285 {
3286         struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3287         struct mac_configuration_cmd *data =
3288                 (struct mac_configuration_cmd *)(r->rdata);
3289
3290         u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3291                      BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3292                      BNX2X_MAX_MULTICAST*(1 + r->func_id));
3293
3294         data->hdr.offset = offset;
3295         data->hdr.client_id = cpu_to_le16(0xff);
3296         data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3297                                      (BNX2X_FILTER_MCAST_PENDING <<
3298                                       BNX2X_SWCID_SHIFT));
3299         data->hdr.length = len;
3300 }
3301
3302 /**
3303  * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3304  *
3305  * @bp:         device handle
3306  * @o:
3307  * @start_idx:  index in the registry to start from
3308  * @rdata_idx:  index in the ramrod data to start from
3309  *
3310  * restore command for 57710 is like all other commands - always a stand alone
3311  * command - start_idx and rdata_idx will always be 0. This function will always
3312  * succeed.
3313  * returns -1 to comply with 57712 variant.
3314  */
3315 static inline int bnx2x_mcast_handle_restore_cmd_e1(
3316         struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3317         int *rdata_idx)
3318 {
3319         struct bnx2x_mcast_mac_elem *elem;
3320         int i = 0;
3321         union bnx2x_mcast_config_data cfg_data = {NULL};
3322
3323         /* go through the registry and configure the MACs from it. */
3324         list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3325                 cfg_data.mac = &elem->mac[0];
3326                 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3327
3328                 i++;
3329
3330                   DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3331                      cfg_data.mac);
3332         }
3333
3334         *rdata_idx = i;
3335
3336         return -1;
3337 }
3338
3339
3340 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3341         struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3342 {
3343         struct bnx2x_pending_mcast_cmd *cmd_pos;
3344         struct bnx2x_mcast_mac_elem *pmac_pos;
3345         struct bnx2x_mcast_obj *o = p->mcast_obj;
3346         union bnx2x_mcast_config_data cfg_data = {NULL};
3347         int cnt = 0;
3348
3349
3350         /* If nothing to be done - return */
3351         if (list_empty(&o->pending_cmds_head))
3352                 return 0;
3353
3354         /* Handle the first command */
3355         cmd_pos = list_first_entry(&o->pending_cmds_head,
3356                                    struct bnx2x_pending_mcast_cmd, link);
3357
3358         switch (cmd_pos->type) {
3359         case BNX2X_MCAST_CMD_ADD:
3360                 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3361                         cfg_data.mac = &pmac_pos->mac[0];
3362                         o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3363
3364                         cnt++;
3365
3366                         DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3367                            pmac_pos->mac);
3368                 }
3369                 break;
3370
3371         case BNX2X_MCAST_CMD_DEL:
3372                 cnt = cmd_pos->data.macs_num;
3373                 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3374                 break;
3375
3376         case BNX2X_MCAST_CMD_RESTORE:
3377                 o->hdl_restore(bp, o, 0, &cnt);
3378                 break;
3379
3380         default:
3381                 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3382                 return -EINVAL;
3383         }
3384
3385         list_del(&cmd_pos->link);
3386         kfree(cmd_pos);
3387
3388         return cnt;
3389 }
3390
3391 /**
3392  * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3393  *
3394  * @fw_hi:
3395  * @fw_mid:
3396  * @fw_lo:
3397  * @mac:
3398  */
3399 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3400                                          __le16 *fw_lo, u8 *mac)
3401 {
3402         mac[1] = ((u8 *)fw_hi)[0];
3403         mac[0] = ((u8 *)fw_hi)[1];
3404         mac[3] = ((u8 *)fw_mid)[0];
3405         mac[2] = ((u8 *)fw_mid)[1];
3406         mac[5] = ((u8 *)fw_lo)[0];
3407         mac[4] = ((u8 *)fw_lo)[1];
3408 }
3409
/**
 * bnx2x_mcast_refresh_registry_e1 - sync the exact-match registry with rdata
 *
 * @bp:         device handle
 * @o:          multicast object whose registry is refreshed
 *
 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
 * and update the registry correspondingly: if ADD - allocate a memory and add
 * the entries to the registry (list), if DELETE - clear the registry and free
 * the memory.
 *
 * Returns 0 on success, -ENOMEM if the registry allocation fails.
 */
static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
                                                  struct bnx2x_mcast_obj *o)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct bnx2x_mcast_mac_elem *elem;
        struct mac_configuration_cmd *data =
                        (struct mac_configuration_cmd *)(raw->rdata);

        /* If first entry contains a SET bit - the command was ADD,
         * otherwise - DEL_ALL
         */
        if (GET_FLAG(data->config_table[0].flags,
                        MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
                int i, len = data->hdr.length;

                /* Break if it was a RESTORE command: the registry already
                 * holds the entries being replayed.
                 */
                if (!list_empty(&o->registry.exact_match.macs))
                        return 0;

                /* All 'len' registry elements come from a single kcalloc()
                 * block; the DEL branch below relies on the first list entry
                 * being the base of this allocation.
                 */
                elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
                if (!elem) {
                        BNX2X_ERR("Failed to allocate registry memory\n");
                        return -ENOMEM;
                }

                /* Decode each firmware-format MAC from the ramrod data and
                 * append it, in order, to the registry list.
                 */
                for (i = 0; i < len; i++, elem++) {
                        bnx2x_get_fw_mac_addr(
                                &data->config_table[i].msb_mac_addr,
                                &data->config_table[i].middle_mac_addr,
                                &data->config_table[i].lsb_mac_addr,
                                elem->mac);
                        DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
                           elem->mac);
                        list_add_tail(&elem->link,
                                      &o->registry.exact_match.macs);
                }
        } else {
                /* The entries were added from one allocation, so freeing the
                 * first one releases the whole block.
                 * NOTE(review): assumes the registry is non-empty here -
                 * list_first_entry() on an empty list would produce a bogus
                 * pointer; confirm DEL is never executed with an empty
                 * registry.
                 */
                elem = list_first_entry(&o->registry.exact_match.macs,
                                        struct bnx2x_mcast_mac_elem, link);
                DP(BNX2X_MSG_SP, "Deleting a registry\n");
                kfree(elem);
                INIT_LIST_HEAD(&o->registry.exact_match.macs);
        }

        return 0;
}
3467
/* Configure multicast rules on 57710: build the mac_configuration_cmd ramrod
 * data from (at most one) pending command or the current command, refresh the
 * registry, and post the SET_MAC ramrod.
 *
 * Returns 1 if a ramrod completion is pending, 0 if done (CLEAR_ONLY), or a
 * negative error code.
 */
static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
                                struct bnx2x_mcast_ramrod_params *p,
                                enum bnx2x_mcast_cmd cmd)
{
        struct bnx2x_mcast_obj *o = p->mcast_obj;
        struct bnx2x_raw_obj *raw = &o->raw;
        struct mac_configuration_cmd *data =
                (struct mac_configuration_cmd *)(raw->rdata);
        int cnt = 0, i, rc;

        /* Reset the ramrod data buffer */
        memset(data, 0, sizeof(*data));

        /* First set all entries as invalid; set_one_rule() overwrites only
         * the slots that are actually used.
         */
        for (i = 0; i < o->max_cmd_len ; i++)
                SET_FLAG(data->config_table[i].flags,
                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                         T_ETH_MAC_COMMAND_INVALIDATE);

        /* Handle pending commands first */
        cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);

        /* If there are no more pending commands - clear SCHEDULED state */
        if (list_empty(&o->pending_cmds_head))
                o->clear_sched(o);

        /* The below may be true iff there were no pending commands */
        if (!cnt)
                cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);

        /* For 57710 every command has o->max_cmd_len length to ensure that
         * commands are done one at a time.
         */
        o->total_pending_num -= o->max_cmd_len;

        /* send a ramrod */

        WARN_ON(cnt > o->max_cmd_len);

        /* Set ramrod header (in particular, a number of entries to update) */
        bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);

        /* update a registry: we need the registry contents to be always up
         * to date in order to be able to execute a RESTORE opcode. Here
         * we use the fact that for 57710 we sent one command at a time
         * hence we may take the registry update out of the command handling
         * and do it in a simpler way here.
         */
        rc = bnx2x_mcast_refresh_registry_e1(bp, o);
        if (rc)
                return rc;

        /*
         * If CLEAR_ONLY was requested - don't send a ramrod and clear
         * RAMROD_PENDING status immediately.
         */
        if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
                raw->clear_pending(raw);
                return 0;
        } else {
                /*
                 *  No need for an explicit memory barrier here as long we would
                 *  need to ensure the ordering of writing to the SPQ element
                 *  and updating of the SPQ producer which involves a memory
                 *  read and we will have to put a full memory barrier there
                 *  (inside bnx2x_sp_post()).
                 */

                /* Send a ramrod */
                rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
                                   U64_HI(raw->rdata_mapping),
                                   U64_LO(raw->rdata_mapping),
                                   ETH_CONNECTION_TYPE);
                if (rc)
                        return rc;

                /* Ramrod completion is pending */
                return 1;
        }

}
3549
3550 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3551 {
3552         return o->registry.exact_match.num_macs_set;
3553 }
3554
3555 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3556 {
3557         return o->registry.aprox_match.num_bins_set;
3558 }
3559
3560 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3561                                                 int n)
3562 {
3563         o->registry.exact_match.num_macs_set = n;
3564 }
3565
3566 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3567                                                 int n)
3568 {
3569         o->registry.aprox_match.num_bins_set = n;
3570 }
3571
/* Top-level multicast configuration entry point: validate the command,
 * enqueue it if it can't run now, and kick the chip-specific config_mcast()
 * when no ramrod is currently pending.
 *
 * Returns a negative error code on failure, otherwise the config_mcast()
 * result (which may be positive when a ramrod completion is still pending).
 * On failure the registry size and pending credit are rolled back.
 */
int bnx2x_config_mcast(struct bnx2x *bp,
                       struct bnx2x_mcast_ramrod_params *p,
                       enum bnx2x_mcast_cmd cmd)
{
        struct bnx2x_mcast_obj *o = p->mcast_obj;
        struct bnx2x_raw_obj *r = &o->raw;
        int rc = 0, old_reg_size;

        /* This is needed to recover number of currently configured mcast macs
         * in case of failure.
         */
        old_reg_size = o->get_registry_size(o);

        /* Do some calculations and checks */
        rc = o->validate(bp, p, cmd);
        if (rc)
                return rc;

        /* Return if there is no work to do */
        if ((!p->mcast_list_len) && (!o->check_sched(o)))
                return 0;

        DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
           o->total_pending_num, p->mcast_list_len, o->max_cmd_len);

        /* Enqueue the current command to the pending list if we can't complete
         * it in the current iteration
         */
        if (r->check_pending(r) ||
            ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
                rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
                if (rc < 0)
                        goto error_exit1;

                /* As long as the current command is in a command list we
                 * don't need to handle it separately.
                 */
                p->mcast_list_len = 0;
        }

        if (!r->check_pending(r)) {

                /* Set 'pending' state */
                r->set_pending(r);

                /* Configure the new classification in the chip */
                rc = o->config_mcast(bp, p, cmd);
                if (rc < 0)
                        goto error_exit2;

                /* Wait for a ramrod completion if was requested */
                if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
                        rc = o->wait_comp(bp, o);
        }

        return rc;

error_exit2:
        /* config_mcast() failed after 'pending' was set - clear it */
        r->clear_pending(r);

error_exit1:
        /* Roll back the registry size and pending-credit accounting */
        o->revert(bp, p, old_reg_size);

        return rc;
}
3637
/* Clear the mcast SCHEDULED state bit, with barriers on both sides so the
 * bit change is ordered against surrounding accesses.
 */
static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
{
        smp_mb__before_clear_bit();
        clear_bit(o->sched_state, o->raw.pstate);
        smp_mb__after_clear_bit();
}
3644
/* Set the mcast SCHEDULED state bit.
 * NOTE(review): the *_clear_bit() barrier flavors are used around set_bit()
 * here - presumably intentional since no set_bit-specific flavor exists in
 * this kernel; confirm this matches the intended ordering.
 */
static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
{
        smp_mb__before_clear_bit();
        set_bit(o->sched_state, o->raw.pstate);
        smp_mb__after_clear_bit();
}
3651
3652 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3653 {
3654         return !!test_bit(o->sched_state, o->raw.pstate);
3655 }
3656
3657 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3658 {
3659         return o->raw.check_pending(&o->raw) || o->check_sched(o);
3660 }
3661
/* Initialize a multicast object: zero it, set up the raw sub-object, and
 * wire the chip-specific callback table (E1 / E1H / E2-and-newer variants).
 */
void bnx2x_init_mcast_obj(struct bnx2x *bp,
                          struct bnx2x_mcast_obj *mcast_obj,
                          u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
                          u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
                          int state, unsigned long *pstate, bnx2x_obj_type type)
{
        memset(mcast_obj, 0, sizeof(*mcast_obj));

        bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
                           rdata, rdata_mapping, state, pstate, type);

        mcast_obj->engine_id = engine_id;

        INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);

        /* Scheduling state handling is common to all chip variants */
        mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
        mcast_obj->check_sched = bnx2x_mcast_check_sched;
        mcast_obj->set_sched = bnx2x_mcast_set_sched;
        mcast_obj->clear_sched = bnx2x_mcast_clear_sched;

        if (CHIP_IS_E1(bp)) {
                /* 57710: exact-match MACs configured through the CAM */
                mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
                mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
                mcast_obj->hdl_restore       =
                        bnx2x_mcast_handle_restore_cmd_e1;
                mcast_obj->check_pending     = bnx2x_mcast_check_pending;

                /* Emulation has a much smaller CAM than real silicon */
                if (CHIP_REV_IS_SLOW(bp))
                        mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
                else
                        mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;

                mcast_obj->wait_comp         = bnx2x_mcast_wait;
                mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
                mcast_obj->validate          = bnx2x_mcast_validate_e1;
                mcast_obj->revert            = bnx2x_mcast_revert_e1;
                mcast_obj->get_registry_size =
                        bnx2x_mcast_get_registry_size_exact;
                mcast_obj->set_registry_size =
                        bnx2x_mcast_set_registry_size_exact;

                /* 57710 is the only chip that uses the exact match for mcast
                 * at the moment.
                 */
                INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);

        } else if (CHIP_IS_E1H(bp)) {
                /* 57711: approximate match via MC hash, no ramrod queue */
                mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
                mcast_obj->enqueue_cmd   = NULL;
                mcast_obj->hdl_restore   = NULL;
                mcast_obj->check_pending = bnx2x_mcast_check_pending;

                /* 57711 doesn't send a ramrod, so it has unlimited credit
                 * for one command.
                 */
                mcast_obj->max_cmd_len       = -1;
                mcast_obj->wait_comp         = bnx2x_mcast_wait;
                mcast_obj->set_one_rule      = NULL;
                mcast_obj->validate          = bnx2x_mcast_validate_e1h;
                mcast_obj->revert            = bnx2x_mcast_revert_e1h;
                mcast_obj->get_registry_size =
                        bnx2x_mcast_get_registry_size_aprox;
                mcast_obj->set_registry_size =
                        bnx2x_mcast_set_registry_size_aprox;
        } else {
                /* 57712 and newer: rule-based E2 flow */
                mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
                mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
                mcast_obj->hdl_restore       =
                        bnx2x_mcast_handle_restore_cmd_e2;
                mcast_obj->check_pending     = bnx2x_mcast_check_pending;
                /* TODO: There should be a proper HSI define for this number!!!
                 */
                mcast_obj->max_cmd_len       = 16;
                mcast_obj->wait_comp         = bnx2x_mcast_wait;
                mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
                mcast_obj->validate          = bnx2x_mcast_validate_e2;
                mcast_obj->revert            = bnx2x_mcast_revert_e2;
                mcast_obj->get_registry_size =
                        bnx2x_mcast_get_registry_size_aprox;
                mcast_obj->set_registry_size =
                        bnx2x_mcast_set_registry_size_aprox;
        }
}
3745
3746 /*************************** Credit handling **********************************/
3747
3748 /**
3749  * atomic_add_ifless - add if the result is less than a given value.
3750  *
3751  * @v:  pointer of type atomic_t
3752  * @a:  the amount to add to v...
3753  * @u:  ...if (v + a) is less than u.
3754  *
3755  * returns true if (v + a) was less than u, and false otherwise.
3756  *
3757  */
3758 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3759 {
3760         int c, old;
3761
3762         c = atomic_read(v);
3763         for (;;) {
3764                 if (unlikely(c + a >= u))
3765                         return false;
3766
3767                 old = atomic_cmpxchg((v), c, c + a);
3768                 if (likely(old == c))
3769                         break;
3770                 c = old;
3771         }
3772
3773         return true;
3774 }
3775
3776 /**
3777  * atomic_dec_ifmoe - dec if the result is more or equal than a given value.
3778  *
3779  * @v:  pointer of type atomic_t
3780  * @a:  the amount to dec from v...
3781  * @u:  ...if (v - a) is more or equal than u.
3782  *
3783  * returns true if (v - a) was more or equal than u, and false
3784  * otherwise.
3785  */
3786 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3787 {
3788         int c, old;
3789
3790         c = atomic_read(v);
3791         for (;;) {
3792                 if (unlikely(c - a < u))
3793                         return false;
3794
3795                 old = atomic_cmpxchg((v), c, c - a);
3796                 if (likely(old == c))
3797                         break;
3798                 c = old;
3799         }
3800
3801         return true;
3802 }
3803
3804 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3805 {
3806         bool rc;
3807
3808         smp_mb();
3809         rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3810         smp_mb();
3811
3812         return rc;
3813 }
3814
3815 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3816 {
3817         bool rc;
3818
3819         smp_mb();
3820
3821         /* Don't let to refill if credit + cnt > pool_sz */
3822         rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3823
3824         smp_mb();
3825
3826         return rc;
3827 }
3828
3829 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3830 {
3831         int cur_credit;
3832
3833         smp_mb();
3834         cur_credit = atomic_read(&o->credit);
3835
3836         return cur_credit;
3837 }
3838
3839 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3840                                           int cnt)
3841 {
3842         return true;
3843 }
3844
3845
3846 static bool bnx2x_credit_pool_get_entry(
3847         struct bnx2x_credit_pool_obj *o,
3848         int *offset)
3849 {
3850         int idx, vec, i;
3851
3852         *offset = -1;
3853
3854         /* Find "internal cam-offset" then add to base for this object... */
3855         for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3856
3857                 /* Skip the current vector if there are no free entries in it */
3858                 if (!o->pool_mirror[vec])
3859                         continue;
3860
3861                 /* If we've got here we are going to find a free entry */
3862                 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3863                       i < BIT_VEC64_ELEM_SZ; idx++, i++)
3864
3865                         if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3866                                 /* Got one!! */
3867                                 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3868                                 *offset = o->base_pool_offset + idx;
3869                                 return true;
3870                         }
3871         }
3872
3873         return false;
3874 }
3875
3876 static bool bnx2x_credit_pool_put_entry(
3877         struct bnx2x_credit_pool_obj *o,
3878         int offset)
3879 {
3880         if (offset < o->base_pool_offset)
3881                 return false;
3882
3883         offset -= o->base_pool_offset;
3884
3885         if (offset >= o->pool_sz)
3886                 return false;
3887
3888         /* Return the entry to the pool */
3889         BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3890
3891         return true;
3892 }
3893
3894 static bool bnx2x_credit_pool_put_entry_always_true(
3895         struct bnx2x_credit_pool_obj *o,
3896         int offset)
3897 {
3898         return true;
3899 }
3900
3901 static bool bnx2x_credit_pool_get_entry_always_true(
3902         struct bnx2x_credit_pool_obj *o,
3903         int *offset)
3904 {
3905         *offset = -1;
3906         return true;
3907 }
3908 /**
3909  * bnx2x_init_credit_pool - initialize credit pool internals.
3910  *
3911  * @p:
3912  * @base:       Base entry in the CAM to use.
3913  * @credit:     pool size.
3914  *
3915  * If base is negative no CAM entries handling will be performed.
3916  * If credit is negative pool operations will always succeed (unlimited pool).
3917  *
3918  */
3919 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3920                                           int base, int credit)
3921 {
3922         /* Zero the object first */
3923         memset(p, 0, sizeof(*p));
3924
3925         /* Set the table to all 1s */
3926         memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3927
3928         /* Init a pool as full */
3929         atomic_set(&p->credit, credit);
3930
3931         /* The total poll size */
3932         p->pool_sz = credit;
3933
3934         p->base_pool_offset = base;
3935
3936         /* Commit the change */
3937         smp_mb();
3938
3939         p->check = bnx2x_credit_pool_check;
3940
3941         /* if pool credit is negative - disable the checks */
3942         if (credit >= 0) {
3943                 p->put      = bnx2x_credit_pool_put;
3944                 p->get      = bnx2x_credit_pool_get;
3945                 p->put_entry = bnx2x_credit_pool_put_entry;
3946                 p->get_entry = bnx2x_credit_pool_get_entry;
3947         } else {
3948                 p->put      = bnx2x_credit_pool_always_true;
3949                 p->get      = bnx2x_credit_pool_always_true;
3950                 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3951                 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3952         }
3953
3954         /* If base is negative - disable entries handling */
3955         if (base < 0) {
3956                 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3957                 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3958         }
3959 }
3960
3961 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3962                                 struct bnx2x_credit_pool_obj *p, u8 func_id,
3963                                 u8 func_num)
3964 {
3965 /* TODO: this will be defined in consts as well... */
3966 #define BNX2X_CAM_SIZE_EMUL 5
3967
3968         int cam_sz;
3969
3970         if (CHIP_IS_E1(bp)) {
3971                 /* In E1, Multicast is saved in cam... */
3972                 if (!CHIP_REV_IS_SLOW(bp))
3973                         cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3974                 else
3975                         cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3976
3977                 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3978
3979         } else if (CHIP_IS_E1H(bp)) {
3980                 /* CAM credit is equaly divided between all active functions
3981                  * on the PORT!.
3982                  */
3983                 if ((func_num > 0)) {
3984                         if (!CHIP_REV_IS_SLOW(bp))
3985                                 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3986                         else
3987                                 cam_sz = BNX2X_CAM_SIZE_EMUL;
3988                         bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3989                 } else {
3990                         /* this should never happen! Block MAC operations. */
3991                         bnx2x_init_credit_pool(p, 0, 0);
3992                 }
3993
3994         } else {
3995
3996                 /*
3997                  * CAM credit is equaly divided between all active functions
3998                  * on the PATH.
3999                  */
4000                 if ((func_num > 0)) {
4001                         if (!CHIP_REV_IS_SLOW(bp))
4002                                 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
4003                         else
4004                                 cam_sz = BNX2X_CAM_SIZE_EMUL;
4005
4006                         /*
4007                          * No need for CAM entries handling for 57712 and
4008                          * newer.
4009                          */
4010                         bnx2x_init_credit_pool(p, -1, cam_sz);
4011                 } else {
4012                         /* this should never happen! Block MAC operations. */
4013                         bnx2x_init_credit_pool(p, 0, 0);
4014                 }
4015
4016         }
4017 }
4018
4019 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4020                                  struct bnx2x_credit_pool_obj *p,
4021                                  u8 func_id,
4022                                  u8 func_num)
4023 {
4024         if (CHIP_IS_E1x(bp)) {
4025                 /*
4026                  * There is no VLAN credit in HW on 57710 and 57711 only
4027                  * MAC / MAC-VLAN can be set
4028                  */
4029                 bnx2x_init_credit_pool(p, 0, -1);
4030         } else {
4031                 /*
4032                  * CAM credit is equaly divided between all active functions
4033                  * on the PATH.
4034                  */
4035                 if (func_num > 0) {
4036                         int credit = MAX_VLAN_CREDIT_E2 / func_num;
4037                         bnx2x_init_credit_pool(p, func_id * credit, credit);
4038                 } else
4039                         /* this should never happen! Block VLAN operations. */
4040                         bnx2x_init_credit_pool(p, 0, 0);
4041         }
4042 }
4043
4044 /****************** RSS Configuration ******************/
4045 /**
4046  * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4047  *
4048  * @bp:         driver handle
4049  * @p:          pointer to rss configuration
4050  *
4051  * Prints it when NETIF_MSG_IFUP debug level is configured.
4052  */
4053 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4054                                         struct bnx2x_config_rss_params *p)
4055 {
4056         int i;
4057
4058         DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4059         DP(BNX2X_MSG_SP, "0x0000: ");
4060         for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4061                 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4062
4063                 /* Print 4 bytes in a line */
4064                 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4065                     (((i + 1) & 0x3) == 0)) {
4066                         DP_CONT(BNX2X_MSG_SP, "\n");
4067                         DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4068                 }
4069         }
4070
4071         DP_CONT(BNX2X_MSG_SP, "\n");
4072 }
4073
4074 /**
4075  * bnx2x_setup_rss - configure RSS
4076  *
4077  * @bp:         device handle
4078  * @p:          rss configuration
4079  *
4080  * Sends an UPDATE ramrod for that matter.
4081  */
static int bnx2x_setup_rss(struct bnx2x *bp,
			   struct bnx2x_config_rss_params *p)
{
	struct bnx2x_rss_config_obj *o = p->rss_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	/* r->rdata is the DMA buffer the FW reads the ramrod data from */
	struct eth_rss_update_ramrod_data *data =
		(struct eth_rss_update_ramrod_data *)(r->rdata);
	u8 rss_mode = 0;
	int rc;

	memset(data, 0, sizeof(*data));

	DP(BNX2X_MSG_SP, "Configuring RSS\n");

	/* Set an echo field: the FW returns it in the completion so the
	 * handler can match it back to this object (CID + SW state).
	 */
	data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
				 (r->state << BNX2X_SWCID_SHIFT));

	/* RSS mode: rss_mode stays 0 if neither flag is set */
	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_DISABLED;
	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_REGULAR;

	data->rss_mode = rss_mode;

	DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);

	/* RSS capabilities: translate each requested BNX2X_RSS_* flag
	 * into its FW capability bit.
	 */
	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;

	/* Hashing mask */
	data->rss_result_mask = p->rss_result_mask;

	/* RSS engine ID */
	data->rss_engine_id = o->engine_id;

	DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);

	/* Indirection table */
	memcpy(data->indirection_table, p->ind_table,
		  T_ETH_INDIRECTION_TABLE_SIZE);

	/* Remember the last configuration so it can be read back later
	 * via bnx2x_get_rss_ind_table().
	 */
	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

	/* Print the indirection table */
	if (netif_msg_ifup(bp))
		bnx2x_debug_print_ind_table(bp, p);

	/* RSS keys: only sent (and marked for update) when explicitly
	 * requested by the caller.
	 */
	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
		memcpy(&data->rss_key[0], &p->rss_key[0],
		       sizeof(data->rss_key));
		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
	}

	/*
	 *  No need for an explicit memory barrier here as long we would
	 *  need to ensure the ordering of writing to the SPQ element
	 *  and updating of the SPQ producer which involves a memory
	 *  read and we will have to put a full memory barrier there
	 *  (inside bnx2x_sp_post()).
	 */

	/* Send a ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
			   U64_HI(r->rdata_mapping),
			   U64_LO(r->rdata_mapping),
			   ETH_CONNECTION_TYPE);

	if (rc < 0)
		return rc;

	/* Positive return value signals the caller that a completion is
	 * now pending for this ramrod.
	 */
	return 1;
}
4180
4181 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4182                              u8 *ind_table)
4183 {
4184         memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4185 }
4186
4187 int bnx2x_config_rss(struct bnx2x *bp,
4188                      struct bnx2x_config_rss_params *p)
4189 {
4190         int rc;
4191         struct bnx2x_rss_config_obj *o = p->rss_obj;
4192         struct bnx2x_raw_obj *r = &o->raw;
4193
4194         /* Do nothing if only driver cleanup was requested */
4195         if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4196                 return 0;
4197
4198         r->set_pending(r);
4199
4200         rc = o->config_rss(bp, p);
4201         if (rc < 0) {
4202                 r->clear_pending(r);
4203                 return rc;
4204         }
4205
4206         if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4207                 rc = r->wait_comp(bp, r);
4208
4209         return rc;
4210 }
4211
4212
4213 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4214                                struct bnx2x_rss_config_obj *rss_obj,
4215                                u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4216                                void *rdata, dma_addr_t rdata_mapping,
4217                                int state, unsigned long *pstate,
4218                                bnx2x_obj_type type)
4219 {
4220         bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4221                            rdata_mapping, state, pstate, type);
4222
4223         rss_obj->engine_id  = engine_id;
4224         rss_obj->config_rss = bnx2x_setup_rss;
4225 }
4226
4227 /********************** Queue state object ***********************************/
4228
4229 /**
4230  * bnx2x_queue_state_change - perform Queue state change transition
4231  *
4232  * @bp:         device handle
4233  * @params:     parameters to perform the transition
4234  *
4235  * returns 0 in case of successfully completed transition, negative error
4236  * code in case of failure, positive (EBUSY) value if there is a completion
4237  * that is still pending (possible only if RAMROD_COMP_WAIT is
4238  * not set in params->ramrod_flags for asynchronous commands).
4239  *
4240  */
int bnx2x_queue_state_change(struct bnx2x *bp,
			     struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	int rc, pending_bit;
	unsigned long *pending = &o->pending;

	/* Check that the requested transition is legal */
	rc = o->check_transition(bp, o, params);
	if (rc) {
		BNX2X_ERR("check transition returned an error. rc %d\n", rc);
		return -EINVAL;
	}

	/* Set "pending" bit: must be done before the ramrod is posted so
	 * the completion handler finds it set.
	 */
	DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
	pending_bit = o->set_pending(o, params);
	DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
		o->complete_cmd(bp, o, pending_bit);
	else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);
		if (rc) {
			/* Posting failed: undo the pending bit and reset
			 * the target state before returning the error.
			 */
			o->next_state = BNX2X_Q_STATE_MAX;
			clear_bit(pending_bit, pending);
			smp_mb__after_clear_bit();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			/* Synchronous mode: block for the completion */
			rc = o->wait_comp(bp, o, pending_bit);
			if (rc)
				return rc;

			return 0;
		}
	}

	/* Asynchronous mode: 1 while the completion is still pending,
	 * 0 if it has already arrived.
	 */
	return !!test_bit(pending_bit, pending);
}
4284
4285
4286 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4287                                    struct bnx2x_queue_state_params *params)
4288 {
4289         enum bnx2x_queue_cmd cmd = params->cmd, bit;
4290
4291         /* ACTIVATE and DEACTIVATE commands are implemented on top of
4292          * UPDATE command.
4293          */
4294         if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4295             (cmd == BNX2X_Q_CMD_DEACTIVATE))
4296                 bit = BNX2X_Q_CMD_UPDATE;
4297         else
4298                 bit = cmd;
4299
4300         set_bit(bit, &obj->pending);
4301         return bit;
4302 }
4303
4304 static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4305                                  struct bnx2x_queue_sp_obj *o,
4306                                  enum bnx2x_queue_cmd cmd)
4307 {
4308         return bnx2x_state_wait(bp, cmd, &o->pending);
4309 }
4310
4311 /**
4312  * bnx2x_queue_comp_cmd - complete the state change command.
4313  *
4314  * @bp:         device handle
4315  * @o:          queue state object the completion belongs to
4316  * @cmd:        command whose completion has arrived
4317  *
4318  * Checks that the arrived completion is expected.
4319  */
static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *o,
				enum bnx2x_queue_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	/* The arrived completion must match a command we actually have
	 * pending; otherwise it is a stale/bogus MC reply.
	 */
	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
			  cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
			  o->state, cur_pending, o->next_state);
		return -EINVAL;
	}

	if (o->next_tx_only >= o->max_cos)
		/* >= because tx only must always be smaller than cos since the
		 * primary connection supports COS 0
		 */
		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
			   o->next_tx_only, o->max_cos);

	DP(BNX2X_MSG_SP,
	   "Completing command %d for queue %d, setting state to %d\n",
	   cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);

	if (o->next_tx_only)  /* print num tx-only if any exist */
		DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
		   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);

	/* Commit the transition that was staged when the command was sent */
	o->state = o->next_state;
	o->num_tx_only = o->next_tx_only;
	o->next_state = BNX2X_Q_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	smp_mb__after_clear_bit();

	return 0;
}
4362
4363 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4364                                 struct bnx2x_queue_state_params *cmd_params,
4365                                 struct client_init_ramrod_data *data)
4366 {
4367         struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4368
4369         /* Rx data */
4370
4371         /* IPv6 TPA supported for E2 and above only */
4372         data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4373                                 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4374 }
4375
/* Fill the 'general' section of a client init ramrod from the queue
 * object and the setup flags/params.
 */
static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *o,
				struct bnx2x_general_setup_params *params,
				struct client_init_general_data *gen_data,
				unsigned long *flags)
{
	gen_data->client_id = o->cl_id;

	if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
		gen_data->statistics_counter_id =
					params->stat_id;
		gen_data->statistics_en_flg = 1;
		/* optionally request the FW to zero the counters */
		gen_data->statistics_zero_flg =
			test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
	} else
		/* special counter ID value that disables statistics */
		gen_data->statistics_counter_id =
					DISABLE_STATISTIC_COUNTER_ID_VALUE;

	gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
	gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
	gen_data->sp_client_id = params->spcl_id;
	gen_data->mtu = cpu_to_le16(params->mtu);
	gen_data->func_id = o->func_id;


	gen_data->cos = params->cos;

	/* traffic classification for per-priority flow control */
	gen_data->traffic_type =
		test_bit(BNX2X_Q_FLG_FCOE, flags) ?
		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;

	DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
	   gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
}
4410
/* Fill the Tx section of a client init ramrod from the Tx queue setup
 * parameters and flags.
 */
static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
				struct bnx2x_txq_setup_params *params,
				struct client_init_tx_data *tx_data,
				unsigned long *flags)
{
	tx_data->enforce_security_flg =
		test_bit(BNX2X_Q_FLG_TX_SEC, flags);
	tx_data->default_vlan =
		cpu_to_le16(params->default_vlan);
	tx_data->default_vlan_flg =
		test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
	tx_data->tx_switching_flg =
		test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
	tx_data->anti_spoofing_flg =
		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
	tx_data->force_default_pri_flg =
		test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);

	/* status block and CQ index used for Tx completions */
	tx_data->tx_status_block_id = params->fw_sb_id;
	tx_data->tx_sb_index_number = params->sb_cq_index;
	tx_data->tss_leading_client_id = params->tss_leading_cl_id;

	/* physical address of the Tx BD ring, split into 32-bit halves */
	tx_data->tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->dscr_map));
	tx_data->tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->dscr_map));

	/* Don't configure any Tx switching mode during queue SETUP */
	tx_data->state = 0;
}
4441
/* Fill the Rx flow-control (pause) thresholds of a client init ramrod.
 * @o is unused here; it is kept for signature symmetry with the other
 * fill helpers.
 */
static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
				struct rxq_pause_params *params,
				struct client_init_rx_data *rx_data)
{
	/* flow control data: low/high watermarks for the RCQ, BD and SGE
	 * rings, converted to the FW's little-endian layout.
	 */
	rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
	rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
	rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
	rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
	rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
	rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
	rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
}
4455
/* Fill the Rx section of a client init ramrod from the Rx queue setup
 * parameters and flags.
 */
static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
				struct bnx2x_rxq_setup_params *params,
				struct client_init_rx_data *rx_data,
				unsigned long *flags)
{
	/* test_bit() yields 0/1, so multiplying selects the capability
	 * bit iff the flag is set.
	 */
	rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
	rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
				CLIENT_INIT_RX_DATA_TPA_MODE;
	rx_data->vmqueue_mode_en_flg = 0;

	rx_data->cache_line_alignment_log_size =
		params->cache_line_log;
	rx_data->enable_dynamic_hc =
		test_bit(BNX2X_Q_FLG_DHC, flags);
	rx_data->max_sges_for_packet = params->max_sges_pkt;
	rx_data->client_qzone_id = params->cl_qzone_id;
	rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);

	/* Always start in DROP_ALL mode */
	rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);

	/* We don't set drop flags */
	rx_data->drop_ip_cs_err_flg = 0;
	rx_data->drop_tcp_cs_err_flg = 0;
	rx_data->drop_ttl0_flg = 0;
	rx_data->drop_udp_cs_err_flg = 0;
	rx_data->inner_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_VLAN, flags);
	rx_data->outer_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_OV, flags);
	rx_data->status_block_id = params->fw_sb_id;
	rx_data->rx_sb_index_number = params->sb_cq_index;
	rx_data->max_tpa_queues = params->max_tpa_queues;
	rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
	rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
	/* physical addresses of the BD, SGE and CQE rings, split into
	 * 32-bit halves for the FW.
	 */
	rx_data->bd_page_base.lo =
		cpu_to_le32(U64_LO(params->dscr_map));
	rx_data->bd_page_base.hi =
		cpu_to_le32(U64_HI(params->dscr_map));
	rx_data->sge_page_base.lo =
		cpu_to_le32(U64_LO(params->sge_map));
	rx_data->sge_page_base.hi =
		cpu_to_le32(U64_HI(params->sge_map));
	rx_data->cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rcq_map));
	rx_data->cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rcq_map));
	rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);

	if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
		rx_data->approx_mcast_engine_id = params->mcast_engine_id;
		rx_data->is_approx_mcast = 1;
	}

	rx_data->rss_engine_id = params->rss_engine_id;

	/* silent vlan removal */
	rx_data->silent_vlan_removal_flg =
		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
	rx_data->silent_vlan_value =
		cpu_to_le16(params->silent_removal_value);
	rx_data->silent_vlan_mask =
		cpu_to_le16(params->silent_removal_mask);

}
4523
4524 /* initialize the general, tx and rx parts of a queue object */
4525 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4526                                 struct bnx2x_queue_state_params *cmd_params,
4527                                 struct client_init_ramrod_data *data)
4528 {
4529         bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4530                                        &cmd_params->params.setup.gen_params,
4531                                        &data->general,
4532                                        &cmd_params->params.setup.flags);
4533
4534         bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4535                                   &cmd_params->params.setup.txq_params,
4536                                   &data->tx,
4537                                   &cmd_params->params.setup.flags);
4538
4539         bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4540                                   &cmd_params->params.setup.rxq_params,
4541                                   &data->rx,
4542                                   &cmd_params->params.setup.flags);
4543
4544         bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4545                                      &cmd_params->params.setup.pause_params,
4546                                      &data->rx);
4547 }
4548
4549 /* initialize the general and tx parts of a tx-only queue object */
4550 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4551                                 struct bnx2x_queue_state_params *cmd_params,
4552                                 struct tx_queue_init_ramrod_data *data)
4553 {
4554         bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4555                                        &cmd_params->params.tx_only.gen_params,
4556                                        &data->general,
4557                                        &cmd_params->params.tx_only.flags);
4558
4559         bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4560                                   &cmd_params->params.tx_only.txq_params,
4561                                   &data->tx,
4562                                   &cmd_params->params.tx_only.flags);
4563
4564         DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
4565                          cmd_params->q_obj->cids[0],
4566                          data->tx.tx_bd_page_base.lo,
4567                          data->tx.tx_bd_page_base.hi);
4568 }
4569
4570 /**
4571  * bnx2x_q_init - init HW/FW queue
4572  *
4573  * @bp:         device handle
4574  * @params:     queue state parameters (the params.init member is used)
4575  *
4576  * HW/FW initial Queue configuration:
4577  *      - HC: Rx and Tx
4578  *      - CDU context validation
4579  *
4580  */
static inline int bnx2x_q_init(struct bnx2x *bp,
			       struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct bnx2x_queue_init_params *init = &params->params.init;
	u16 hc_usec;
	u8 cos;

	/* Tx HC configuration: convert the interrupt rate (per second)
	 * into a microsecond period; 0 rate means 0 usec.
	 */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
	    test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;

		bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
			init->tx.sb_cq_index,
			!test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
			hc_usec);
	}

	/* Rx HC configuration: same conversion as for Tx */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
	    test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;

		bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
			init->rx.sb_cq_index,
			!test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
			hc_usec);
	}

	/* Set CDU context validation values for each CoS connection */
	for (cos = 0; cos < o->max_cos; cos++) {
		DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
				 o->cids[cos], cos);
		DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
		bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
	}

	/* As no ramrod is sent, complete the command immediately  */
	o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);

	/* Flush posted writes and order them vs. later accesses */
	mmiowb();
	smp_mb();

	return 0;
}
4627
4628 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4629                                         struct bnx2x_queue_state_params *params)
4630 {
4631         struct bnx2x_queue_sp_obj *o = params->q_obj;
4632         struct client_init_ramrod_data *rdata =
4633                 (struct client_init_ramrod_data *)o->rdata;
4634         dma_addr_t data_mapping = o->rdata_mapping;
4635         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4636
4637         /* Clear the ramrod data */
4638         memset(rdata, 0, sizeof(*rdata));
4639
4640         /* Fill the ramrod data */
4641         bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4642
4643         /*
4644          *  No need for an explicit memory barrier here as long we would
4645          *  need to ensure the ordering of writing to the SPQ element
4646          *  and updating of the SPQ producer which involves a memory
4647          *  read and we will have to put a full memory barrier there
4648          *  (inside bnx2x_sp_post()).
4649          */
4650
4651         return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4652                              U64_HI(data_mapping),
4653                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4654 }
4655
4656 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4657                                         struct bnx2x_queue_state_params *params)
4658 {
4659         struct bnx2x_queue_sp_obj *o = params->q_obj;
4660         struct client_init_ramrod_data *rdata =
4661                 (struct client_init_ramrod_data *)o->rdata;
4662         dma_addr_t data_mapping = o->rdata_mapping;
4663         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4664
4665         /* Clear the ramrod data */
4666         memset(rdata, 0, sizeof(*rdata));
4667
4668         /* Fill the ramrod data */
4669         bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4670         bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4671
4672         /*
4673          *  No need for an explicit memory barrier here as long we would
4674          *  need to ensure the ordering of writing to the SPQ element
4675          *  and updating of the SPQ producer which involves a memory
4676          *  read and we will have to put a full memory barrier there
4677          *  (inside bnx2x_sp_post()).
4678          */
4679
4680         return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4681                              U64_HI(data_mapping),
4682                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4683 }
4684
4685 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4686                                   struct bnx2x_queue_state_params *params)
4687 {
4688         struct bnx2x_queue_sp_obj *o = params->q_obj;
4689         struct tx_queue_init_ramrod_data *rdata =
4690                 (struct tx_queue_init_ramrod_data *)o->rdata;
4691         dma_addr_t data_mapping = o->rdata_mapping;
4692         int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4693         struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4694                 &params->params.tx_only;
4695         u8 cid_index = tx_only_params->cid_index;
4696
4697
4698         if (cid_index >= o->max_cos) {
4699                 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4700                           o->cl_id, cid_index);
4701                 return -EINVAL;
4702         }
4703
4704         DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4705                          tx_only_params->gen_params.cos,
4706                          tx_only_params->gen_params.spcl_id);
4707
4708         /* Clear the ramrod data */
4709         memset(rdata, 0, sizeof(*rdata));
4710
4711         /* Fill the ramrod data */
4712         bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4713
4714         DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4715                          o->cids[cid_index], rdata->general.client_id,
4716                          rdata->general.sp_client_id, rdata->general.cos);
4717
4718         /*
4719          *  No need for an explicit memory barrier here as long we would
4720          *  need to ensure the ordering of writing to the SPQ element
4721          *  and updating of the SPQ producer which involves a memory
4722          *  read and we will have to put a full memory barrier there
4723          *  (inside bnx2x_sp_post()).
4724          */
4725
4726         return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4727                              U64_HI(data_mapping),
4728                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4729 }
4730
/* Fill a CLIENT_UPDATE ramrod buffer from the update parameters.  Each
 * feature carries an enable flag plus a change flag: the FW only applies
 * a feature whose change flag is set.
 */
static void bnx2x_q_fill_update_data(struct bnx2x *bp,
				     struct bnx2x_queue_sp_obj *obj,
				     struct bnx2x_queue_update_params *params,
				     struct client_update_ramrod_data *data)
{
	/* Client ID of the client to update */
	data->client_id = obj->cl_id;

	/* Function ID of the client to update */
	data->func_id = obj->func_id;

	/* Default VLAN value */
	data->default_vlan = cpu_to_le16(params->def_vlan);

	/* Inner VLAN stripping */
	data->inner_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
	data->inner_vlan_removal_change_flg =
		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
			 &params->update_flags);

	/* Outer VLAN stripping */
	data->outer_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
	data->outer_vlan_removal_change_flg =
		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
			 &params->update_flags);

	/* Drop packets that have source MAC that doesn't belong to this
	 * Queue.
	 */
	data->anti_spoofing_enable_flg =
		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
	data->anti_spoofing_change_flg =
		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);

	/* Activate/Deactivate */
	data->activate_flg =
		test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
	data->activate_change_flg =
		test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);

	/* Enable default VLAN */
	data->default_vlan_enable_flg =
		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
	data->default_vlan_change_flg =
		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
			 &params->update_flags);

	/* silent vlan removal */
	data->silent_vlan_change_flg =
		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			 &params->update_flags);
	data->silent_vlan_removal_flg =
		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
}
4789
4790 static inline int bnx2x_q_send_update(struct bnx2x *bp,
4791                                       struct bnx2x_queue_state_params *params)
4792 {
4793         struct bnx2x_queue_sp_obj *o = params->q_obj;
4794         struct client_update_ramrod_data *rdata =
4795                 (struct client_update_ramrod_data *)o->rdata;
4796         dma_addr_t data_mapping = o->rdata_mapping;
4797         struct bnx2x_queue_update_params *update_params =
4798                 &params->params.update;
4799         u8 cid_index = update_params->cid_index;
4800
4801         if (cid_index >= o->max_cos) {
4802                 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4803                           o->cl_id, cid_index);
4804                 return -EINVAL;
4805         }
4806
4807
4808         /* Clear the ramrod data */
4809         memset(rdata, 0, sizeof(*rdata));
4810
4811         /* Fill the ramrod data */
4812         bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4813
4814         /*
4815          *  No need for an explicit memory barrier here as long we would
4816          *  need to ensure the ordering of writing to the SPQ element
4817          *  and updating of the SPQ producer which involves a memory
4818          *  read and we will have to put a full memory barrier there
4819          *  (inside bnx2x_sp_post()).
4820          */
4821
4822         return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4823                              o->cids[cid_index], U64_HI(data_mapping),
4824                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4825 }
4826
4827 /**
4828  * bnx2x_q_send_deactivate - send DEACTIVATE command
4829  *
4830  * @bp:         device handle
4831  * @params:
4832  *
4833  * implemented using the UPDATE command.
4834  */
4835 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4836                                         struct bnx2x_queue_state_params *params)
4837 {
4838         struct bnx2x_queue_update_params *update = &params->params.update;
4839
4840         memset(update, 0, sizeof(*update));
4841
4842         __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4843
4844         return bnx2x_q_send_update(bp, params);
4845 }
4846
4847 /**
4848  * bnx2x_q_send_activate - send ACTIVATE command
4849  *
4850  * @bp:         device handle
4851  * @params:
4852  *
4853  * implemented using the UPDATE command.
4854  */
4855 static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4856                                         struct bnx2x_queue_state_params *params)
4857 {
4858         struct bnx2x_queue_update_params *update = &params->params.update;
4859
4860         memset(update, 0, sizeof(*update));
4861
4862         __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4863         __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4864
4865         return bnx2x_q_send_update(bp, params);
4866 }
4867
/**
 * bnx2x_q_send_update_tpa - send UPDATE_TPA command (not implemented)
 *
 * @bp:         device handle
 * @params:     queue state parameters (currently unused)
 *
 * TODO: Not implemented yet. Return a proper errno instead of a bare
 * -1 (which aliases -EPERM) so callers see a meaningful error code.
 */
static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
                                        struct bnx2x_queue_state_params *params)
{
        return -EINVAL;
}
4874
4875 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4876                                     struct bnx2x_queue_state_params *params)
4877 {
4878         struct bnx2x_queue_sp_obj *o = params->q_obj;
4879
4880         return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4881                              o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4882                              ETH_CONNECTION_TYPE);
4883 }
4884
4885 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4886                                        struct bnx2x_queue_state_params *params)
4887 {
4888         struct bnx2x_queue_sp_obj *o = params->q_obj;
4889         u8 cid_idx = params->params.cfc_del.cid_index;
4890
4891         if (cid_idx >= o->max_cos) {
4892                 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4893                           o->cl_id, cid_idx);
4894                 return -EINVAL;
4895         }
4896
4897         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4898                              o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4899 }
4900
4901 static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4902                                         struct bnx2x_queue_state_params *params)
4903 {
4904         struct bnx2x_queue_sp_obj *o = params->q_obj;
4905         u8 cid_index = params->params.terminate.cid_index;
4906
4907         if (cid_index >= o->max_cos) {
4908                 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4909                           o->cl_id, cid_index);
4910                 return -EINVAL;
4911         }
4912
4913         return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4914                              o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4915 }
4916
4917 static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4918                                      struct bnx2x_queue_state_params *params)
4919 {
4920         struct bnx2x_queue_sp_obj *o = params->q_obj;
4921
4922         return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4923                              o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4924                              ETH_CONNECTION_TYPE);
4925 }
4926
/**
 * bnx2x_queue_send_cmd_cmn - dispatch a chip-independent queue command
 *
 * @bp:         device handle
 * @params:     queue state parameters; params->cmd selects the sender
 *
 * Routes each queue command to its ramrod-sending helper. SETUP is
 * intentionally absent here: it is chip specific and handled by the
 * e1x/e2 dispatchers that call this function for everything else.
 */
static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
                                        struct bnx2x_queue_state_params *params)
{
        switch (params->cmd) {
        case BNX2X_Q_CMD_INIT:
                return bnx2x_q_init(bp, params);
        case BNX2X_Q_CMD_SETUP_TX_ONLY:
                return bnx2x_q_send_setup_tx_only(bp, params);
        case BNX2X_Q_CMD_DEACTIVATE:
                return bnx2x_q_send_deactivate(bp, params);
        case BNX2X_Q_CMD_ACTIVATE:
                return bnx2x_q_send_activate(bp, params);
        case BNX2X_Q_CMD_UPDATE:
                return bnx2x_q_send_update(bp, params);
        case BNX2X_Q_CMD_UPDATE_TPA:
                return bnx2x_q_send_update_tpa(bp, params);
        case BNX2X_Q_CMD_HALT:
                return bnx2x_q_send_halt(bp, params);
        case BNX2X_Q_CMD_CFC_DEL:
                return bnx2x_q_send_cfc_del(bp, params);
        case BNX2X_Q_CMD_TERMINATE:
                return bnx2x_q_send_terminate(bp, params);
        case BNX2X_Q_CMD_EMPTY:
                return bnx2x_q_send_empty(bp, params);
        default:
                BNX2X_ERR("Unknown command: %d\n", params->cmd);
                return -EINVAL;
        }
}
4956
/* Dispatch a queue command on E1/E1H chips: SETUP uses the e1x-specific
 * ramrod; every other command is chip independent.
 */
static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
                                    struct bnx2x_queue_state_params *params)
{
        switch (params->cmd) {
        case BNX2X_Q_CMD_SETUP:
                return bnx2x_q_send_setup_e1x(bp, params);
        case BNX2X_Q_CMD_INIT:
        case BNX2X_Q_CMD_SETUP_TX_ONLY:
        case BNX2X_Q_CMD_DEACTIVATE:
        case BNX2X_Q_CMD_ACTIVATE:
        case BNX2X_Q_CMD_UPDATE:
        case BNX2X_Q_CMD_UPDATE_TPA:
        case BNX2X_Q_CMD_HALT:
        case BNX2X_Q_CMD_CFC_DEL:
        case BNX2X_Q_CMD_TERMINATE:
        case BNX2X_Q_CMD_EMPTY:
                return bnx2x_queue_send_cmd_cmn(bp, params);
        default:
                BNX2X_ERR("Unknown command: %d\n", params->cmd);
                return -EINVAL;
        }
}
4979
/* Dispatch a queue command on E2 and newer chips: SETUP uses the
 * e2-specific ramrod; every other command is chip independent.
 */
static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
                                   struct bnx2x_queue_state_params *params)
{
        switch (params->cmd) {
        case BNX2X_Q_CMD_SETUP:
                return bnx2x_q_send_setup_e2(bp, params);
        case BNX2X_Q_CMD_INIT:
        case BNX2X_Q_CMD_SETUP_TX_ONLY:
        case BNX2X_Q_CMD_DEACTIVATE:
        case BNX2X_Q_CMD_ACTIVATE:
        case BNX2X_Q_CMD_UPDATE:
        case BNX2X_Q_CMD_UPDATE_TPA:
        case BNX2X_Q_CMD_HALT:
        case BNX2X_Q_CMD_CFC_DEL:
        case BNX2X_Q_CMD_TERMINATE:
        case BNX2X_Q_CMD_EMPTY:
                return bnx2x_queue_send_cmd_cmn(bp, params);
        default:
                BNX2X_ERR("Unknown command: %d\n", params->cmd);
                return -EINVAL;
        }
}
5002
/**
 * bnx2x_queue_chk_transition - check state machine of a regular Queue
 *
 * @bp:         device handle
 * @o:          queue state object whose current state is examined
 * @params:     requested command and its parameters
 *
 * (not Forwarding)
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 *         -EINVAL otherwise.
 */
static int bnx2x_queue_chk_transition(struct bnx2x *bp,
                                      struct bnx2x_queue_sp_obj *o,
                                      struct bnx2x_queue_state_params *params)
{
        enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
        enum bnx2x_queue_cmd cmd = params->cmd;
        struct bnx2x_queue_update_params *update_params =
                 &params->params.update;
        /* next_tx_only tracks the number of attached Tx-only queues the
         * object will have after this transition completes.
         */
        u8 next_tx_only = o->num_tx_only;

        /*
         * Forget all pending for completion commands if a driver only state
         * transition has been requested.
         */
        if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
                o->pending = 0;
                o->next_state = BNX2X_Q_STATE_MAX;
        }

        /*
         * Don't allow a next state transition if we are in the middle of
         * the previous one.
         */
        if (o->pending) {
                BNX2X_ERR("Blocking transition since pending was %lx\n",
                          o->pending);
                return -EBUSY;
        }

        switch (state) {
        case BNX2X_Q_STATE_RESET:
                /* Only INIT is legal from RESET */
                if (cmd == BNX2X_Q_CMD_INIT)
                        next_state = BNX2X_Q_STATE_INITIALIZED;

                break;
        case BNX2X_Q_STATE_INITIALIZED:
                /* SETUP moves to ACTIVE or INACTIVE depending on the
                 * requested "active" flag.
                 */
                if (cmd == BNX2X_Q_CMD_SETUP) {
                        if (test_bit(BNX2X_Q_FLG_ACTIVE,
                                     &params->params.setup.flags))
                                next_state = BNX2X_Q_STATE_ACTIVE;
                        else
                                next_state = BNX2X_Q_STATE_INACTIVE;
                }

                break;
        case BNX2X_Q_STATE_ACTIVE:
                if (cmd == BNX2X_Q_CMD_DEACTIVATE)
                        next_state = BNX2X_Q_STATE_INACTIVE;

                /* EMPTY and UPDATE_TPA don't change the state */
                else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
                         (cmd == BNX2X_Q_CMD_UPDATE_TPA))
                        next_state = BNX2X_Q_STATE_ACTIVE;

                /* Adding the first Tx-only queue enters MULTI_COS */
                else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
                        next_state = BNX2X_Q_STATE_MULTI_COS;
                        next_tx_only = 1;
                }

                else if (cmd == BNX2X_Q_CMD_HALT)
                        next_state = BNX2X_Q_STATE_STOPPED;

                else if (cmd == BNX2X_Q_CMD_UPDATE) {
                        /* If "active" state change is requested, update the
                         *  state accordingly.
                         */
                        if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
                                     &update_params->update_flags) &&
                            !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
                                      &update_params->update_flags))
                                next_state = BNX2X_Q_STATE_INACTIVE;
                        else
                                next_state = BNX2X_Q_STATE_ACTIVE;
                }

                break;
        case BNX2X_Q_STATE_MULTI_COS:
                if (cmd == BNX2X_Q_CMD_TERMINATE)
                        next_state = BNX2X_Q_STATE_MCOS_TERMINATED;

                /* Each additional Tx-only queue keeps us in MULTI_COS */
                else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
                        next_state = BNX2X_Q_STATE_MULTI_COS;
                        next_tx_only = o->num_tx_only + 1;
                }

                /* EMPTY and UPDATE_TPA don't change the state */
                else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
                         (cmd == BNX2X_Q_CMD_UPDATE_TPA))
                        next_state = BNX2X_Q_STATE_MULTI_COS;

                else if (cmd == BNX2X_Q_CMD_UPDATE) {
                        /* If "active" state change is requested, update the
                         *  state accordingly.
                         */
                        if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
                                     &update_params->update_flags) &&
                            !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
                                      &update_params->update_flags))
                                next_state = BNX2X_Q_STATE_INACTIVE;
                        else
                                next_state = BNX2X_Q_STATE_MULTI_COS;
                }

                break;
        case BNX2X_Q_STATE_MCOS_TERMINATED:
                /* CFC_DEL removes one Tx-only queue; fall back to ACTIVE
                 * once the last one is gone.
                 */
                if (cmd == BNX2X_Q_CMD_CFC_DEL) {
                        next_tx_only = o->num_tx_only - 1;
                        if (next_tx_only == 0)
                                next_state = BNX2X_Q_STATE_ACTIVE;
                        else
                                next_state = BNX2X_Q_STATE_MULTI_COS;
                }

                break;
        case BNX2X_Q_STATE_INACTIVE:
                if (cmd == BNX2X_Q_CMD_ACTIVATE)
                        next_state = BNX2X_Q_STATE_ACTIVE;

                /* EMPTY and UPDATE_TPA don't change the state */
                else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
                         (cmd == BNX2X_Q_CMD_UPDATE_TPA))
                        next_state = BNX2X_Q_STATE_INACTIVE;

                else if (cmd == BNX2X_Q_CMD_HALT)
                        next_state = BNX2X_Q_STATE_STOPPED;

                else if (cmd == BNX2X_Q_CMD_UPDATE) {
                        /* If "active" state change is requested, update the
                         * state accordingly.
                         */
                        if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
                                     &update_params->update_flags) &&
                            test_bit(BNX2X_Q_UPDATE_ACTIVATE,
                                     &update_params->update_flags)){
                                if (o->num_tx_only == 0)
                                        next_state = BNX2X_Q_STATE_ACTIVE;
                                else /* tx only queues exist for this queue */
                                        next_state = BNX2X_Q_STATE_MULTI_COS;
                        } else
                                next_state = BNX2X_Q_STATE_INACTIVE;
                }

                break;
        case BNX2X_Q_STATE_STOPPED:
                if (cmd == BNX2X_Q_CMD_TERMINATE)
                        next_state = BNX2X_Q_STATE_TERMINATED;

                break;
        case BNX2X_Q_STATE_TERMINATED:
                if (cmd == BNX2X_Q_CMD_CFC_DEL)
                        next_state = BNX2X_Q_STATE_RESET;

                break;
        default:
                BNX2X_ERR("Illegal state: %d\n", state);
        }

        /* Transition is assured */
        if (next_state != BNX2X_Q_STATE_MAX) {
                DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
                                 state, cmd, next_state);
                o->next_state = next_state;
                o->next_tx_only = next_tx_only;
                return 0;
        }

        DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);

        return -EINVAL;
}
5186
5187 void bnx2x_init_queue_obj(struct bnx2x *bp,
5188                           struct bnx2x_queue_sp_obj *obj,
5189                           u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5190                           void *rdata,
5191                           dma_addr_t rdata_mapping, unsigned long type)
5192 {
5193         memset(obj, 0, sizeof(*obj));
5194
5195         /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5196         BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5197
5198         memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5199         obj->max_cos = cid_cnt;
5200         obj->cl_id = cl_id;
5201         obj->func_id = func_id;
5202         obj->rdata = rdata;
5203         obj->rdata_mapping = rdata_mapping;
5204         obj->type = type;
5205         obj->next_state = BNX2X_Q_STATE_MAX;
5206
5207         if (CHIP_IS_E1x(bp))
5208                 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5209         else
5210                 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5211
5212         obj->check_transition = bnx2x_queue_chk_transition;
5213
5214         obj->complete_cmd = bnx2x_queue_comp_cmd;
5215         obj->wait_comp = bnx2x_queue_wait_comp;
5216         obj->set_pending = bnx2x_queue_set_pending;
5217 }
5218
/* Return a queue object's logical state: collapse the fine-grained state
 * machine into ACTIVE (traffic-capable) vs. STOPPED, or -EINVAL for an
 * unknown state.
 */
int bnx2x_get_q_logical_state(struct bnx2x *bp,
                               struct bnx2x_queue_sp_obj *obj)
{
        switch (obj->state) {
        case BNX2X_Q_STATE_ACTIVE:
        case BNX2X_Q_STATE_MULTI_COS:
                return BNX2X_Q_LOGICAL_STATE_ACTIVE;
        case BNX2X_Q_STATE_RESET:
        case BNX2X_Q_STATE_INITIALIZED:
        case BNX2X_Q_STATE_MCOS_TERMINATED:
        case BNX2X_Q_STATE_INACTIVE:
        case BNX2X_Q_STATE_STOPPED:
        case BNX2X_Q_STATE_TERMINATED:
        case BNX2X_Q_STATE_FLRED:
                return BNX2X_Q_LOGICAL_STATE_STOPPED;
        default:
                return -EINVAL;
        }
}
5239
5240 /********************** Function state object *********************************/
/* Return the function object's current state, or BNX2X_F_STATE_MAX while
 * a transition is still pending completion.
 */
enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
                                           struct bnx2x_func_sp_obj *o)
{
        /* in the middle of transaction - return INVALID state */
        if (o->pending)
                return BNX2X_F_STATE_MAX;

        /*
         * Ensure the order of reading of o->pending and o->state:
         * o->pending should be read first (pairs with the wmb() in
         * bnx2x_func_state_change_comp(), which updates state before
         * clearing pending).
         */
        rmb();

        return o->state;
}
5256
/* Wait until the completion of @cmd is signalled by clearing its bit in
 * the function object's pending mask.
 */
static int bnx2x_func_wait_comp(struct bnx2x *bp,
                                struct bnx2x_func_sp_obj *o,
                                enum bnx2x_func_cmd cmd)
{
        return bnx2x_state_wait(bp, cmd, &o->pending);
}
5263
/**
 * bnx2x_func_state_change_comp - complete the state machine transition
 *
 * @bp:         device handle
 * @o:          function state object being transitioned
 * @cmd:        the command whose completion has arrived
 *
 * Called on state change transition. Completes the state
 * machine transition only - no HW interaction.
 *
 * Returns 0 on success, -EINVAL if @cmd was not actually pending.
 */
static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
                                               struct bnx2x_func_sp_obj *o,
                                               enum bnx2x_func_cmd cmd)
{
        unsigned long cur_pending = o->pending;

        /* Work on a copy first: only commit the cleared bit to o->pending
         * after the state fields are updated (see the wmb() below).
         */
        if (!test_and_clear_bit(cmd, &cur_pending)) {
                BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
                          cmd, BP_FUNC(bp), o->state,
                          cur_pending, o->next_state);
                return -EINVAL;
        }

        DP(BNX2X_MSG_SP,
           "Completing command %d for func %d, setting state to %d\n",
           cmd, BP_FUNC(bp), o->next_state);

        o->state = o->next_state;
        o->next_state = BNX2X_F_STATE_MAX;

        /* It's important that o->state and o->next_state are
         * updated before o->pending (pairs with the rmb() in
         * bnx2x_func_get_state()).
         */
        wmb();

        clear_bit(cmd, &o->pending);
        smp_mb__after_clear_bit();

        return 0;
}
5304
5305 /**
5306  * bnx2x_func_comp_cmd - complete the state change command
5307  *
5308  * @bp:         device handle
5309  * @o:
5310  * @cmd:
5311  *
5312  * Checks that the arrived completion is expected.
5313  */
5314 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5315                                struct bnx2x_func_sp_obj *o,
5316                                enum bnx2x_func_cmd cmd)
5317 {
5318         /* Complete the state machine part first, check if it's a
5319          * legal completion.
5320          */
5321         int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5322         return rc;
5323 }
5324
/**
 * bnx2x_func_chk_transition - perform function state machine transition
 *
 * @bp:         device handle
 * @o:          function state object whose current state is examined
 * @params:     requested command and its parameters
 *
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 *         -EINVAL otherwise.
 */
static int bnx2x_func_chk_transition(struct bnx2x *bp,
                                     struct bnx2x_func_sp_obj *o,
                                     struct bnx2x_func_state_params *params)
{
        enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
        enum bnx2x_func_cmd cmd = params->cmd;

        /*
         * Forget all pending for completion commands if a driver only state
         * transition has been requested.
         */
        if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
                o->pending = 0;
                o->next_state = BNX2X_F_STATE_MAX;
        }

        /*
         * Don't allow a next state transition if we are in the middle of
         * the previous one.
         */
        if (o->pending)
                return -EBUSY;

        switch (state) {
        case BNX2X_F_STATE_RESET:
                /* Only HW_INIT is legal from RESET */
                if (cmd == BNX2X_F_CMD_HW_INIT)
                        next_state = BNX2X_F_STATE_INITIALIZED;

                break;
        case BNX2X_F_STATE_INITIALIZED:
                if (cmd == BNX2X_F_CMD_START)
                        next_state = BNX2X_F_STATE_STARTED;

                else if (cmd == BNX2X_F_CMD_HW_RESET)
                        next_state = BNX2X_F_STATE_RESET;

                break;
        case BNX2X_F_STATE_STARTED:
                if (cmd == BNX2X_F_CMD_STOP)
                        next_state = BNX2X_F_STATE_INITIALIZED;
                /* afex ramrods can be sent only in started mode, and only
                 * if not pending for function_stop ramrod completion
                 * for these events - next state remained STARTED.
                 */
                else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
                         (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
                        next_state = BNX2X_F_STATE_STARTED;

                else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
                         (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
                        next_state = BNX2X_F_STATE_STARTED;

                /* Switch_update ramrod can be sent in either started or
                 * tx_stopped state, and it doesn't change the state.
                 */
                else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
                         (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
                        next_state = BNX2X_F_STATE_STARTED;

                else if (cmd == BNX2X_F_CMD_TX_STOP)
                        next_state = BNX2X_F_STATE_TX_STOPPED;

                break;
        case BNX2X_F_STATE_TX_STOPPED:
                /* Switch_update doesn't change the state (see above) */
                if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
                    (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
                        next_state = BNX2X_F_STATE_TX_STOPPED;

                else if (cmd == BNX2X_F_CMD_TX_START)
                        next_state = BNX2X_F_STATE_STARTED;

                break;
        default:
                BNX2X_ERR("Unknown state: %d\n", state);
        }

        /* Transition is assured */
        if (next_state != BNX2X_F_STATE_MAX) {
                DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
                                 state, cmd, next_state);
                o->next_state = next_state;
                return 0;
        }

        DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
                         state, cmd);

        return -EINVAL;
}
5429
/**
 * bnx2x_func_init_func - performs HW init at function stage
 *
 * @bp:         device handle
 * @drv:        driver-supplied HW init/reset callbacks
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
 * HW blocks.
 */
static inline int bnx2x_func_init_func(struct bnx2x *bp,
                                       const struct bnx2x_func_sp_drv_ops *drv)
{
        return drv->init_hw_func(bp);
}
5445
5446 /**
5447  * bnx2x_func_init_port - performs HW init at port stage
5448  *
5449  * @bp:         device handle
5450  * @drv:
5451  *
5452  * Init HW when the current phase is
5453  * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5454  * FUNCTION-only HW blocks.
5455  *
5456  */
5457 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5458                                        const struct bnx2x_func_sp_drv_ops *drv)
5459 {
5460         int rc = drv->init_hw_port(bp);
5461         if (rc)
5462                 return rc;
5463
5464         return bnx2x_func_init_func(bp, drv);
5465 }
5466
5467 /**
5468  * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5469  *
5470  * @bp:         device handle
5471  * @drv:
5472  *
5473  * Init HW when the current phase is
5474  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5475  * PORT-only and FUNCTION-only HW blocks.
5476  */
5477 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5478                                         const struct bnx2x_func_sp_drv_ops *drv)
5479 {
5480         int rc = drv->init_hw_cmn_chip(bp);
5481         if (rc)
5482                 return rc;
5483
5484         return bnx2x_func_init_port(bp, drv);
5485 }
5486
/**
 * bnx2x_func_init_cmn - performs HW init at common stage
 *
 * @bp:         device handle
 * @drv:        driver-supplied HW init/reset callbacks
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
 * PORT-only and FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
                                      const struct bnx2x_func_sp_drv_ops *drv)
{
        int rc = drv->init_hw_cmn(bp);
        if (rc)
                return rc;

        return bnx2x_func_init_port(bp, drv);
}
5506
/* Perform the HW_INIT command: unzip and load FW, then run the HW init
 * chain matching the load phase granted by the MCP. Completes the command
 * immediately on success since no ramrods are sent.
 */
static int bnx2x_func_hw_init(struct bnx2x *bp,
                              struct bnx2x_func_state_params *params)
{
        u32 load_code = params->params.hw_init.load_phase;
        struct bnx2x_func_sp_obj *o = params->f_obj;
        const struct bnx2x_func_sp_drv_ops *drv = o->drv;
        int rc = 0;

        DP(BNX2X_MSG_SP, "function %d  load_code %x\n",
                         BP_ABS_FUNC(bp), load_code);

        /* Prepare buffers for unzipping the FW */
        rc = drv->gunzip_init(bp);
        if (rc)
                return rc;

        /* Prepare FW */
        rc = drv->init_fw(bp);
        if (rc) {
                BNX2X_ERR("Error loading firmware\n");
                goto init_err;
        }

        /* Handle the beginning of COMMON_XXX phases separately; each case
         * initializes its own stage and cascades down to the FUNCTION one.
         */
        switch (load_code) {
        case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
                rc = bnx2x_func_init_cmn_chip(bp, drv);
                if (rc)
                        goto init_err;

                break;
        case FW_MSG_CODE_DRV_LOAD_COMMON:
                rc = bnx2x_func_init_cmn(bp, drv);
                if (rc)
                        goto init_err;

                break;
        case FW_MSG_CODE_DRV_LOAD_PORT:
                rc = bnx2x_func_init_port(bp, drv);
                if (rc)
                        goto init_err;

                break;
        case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                rc = bnx2x_func_init_func(bp, drv);
                if (rc)
                        goto init_err;

                break;
        default:
                BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
                rc = -EINVAL;
        }

init_err:
        drv->gunzip_end(bp);

        /* In case of success, complete the command immediately: no ramrods
         * have been sent.
         */
        if (!rc)
                o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);

        return rc;
}
5572
/**
 * bnx2x_func_reset_func - reset HW at function stage
 *
 * @bp:         device handle
 * @drv:        driver-supplied HW init/reset callbacks
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
 * FUNCTION-only HW blocks.
 */
static inline void bnx2x_func_reset_func(struct bnx2x *bp,
                                        const struct bnx2x_func_sp_drv_ops *drv)
{
        drv->reset_hw_func(bp);
}
5587
5588 /**
5589  * bnx2x_func_reset_port - reser HW at port stage
5590  *
5591  * @bp:         device handle
5592  * @drv:
5593  *
5594  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5595  * FUNCTION-only and PORT-only HW blocks.
5596  *
5597  *                 !!!IMPORTANT!!!
5598  *
5599  * It's important to call reset_port before reset_func() as the last thing
5600  * reset_func does is pf_disable() thus disabling PGLUE_B, which
5601  * makes impossible any DMAE transactions.
5602  */
5603 static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5604                                         const struct bnx2x_func_sp_drv_ops *drv)
5605 {
5606         drv->reset_hw_port(bp);
5607         bnx2x_func_reset_func(bp, drv);
5608 }
5609
5610 /**
5611  * bnx2x_func_reset_cmn - reser HW at common stage
5612  *
5613  * @bp:         device handle
5614  * @drv:
5615  *
5616  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5617  * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5618  * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5619  */
5620 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5621                                         const struct bnx2x_func_sp_drv_ops *drv)
5622 {
5623         bnx2x_func_reset_port(bp, drv);
5624         drv->reset_hw_cmn(bp);
5625 }
5626
5627
5628 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5629                                       struct bnx2x_func_state_params *params)
5630 {
5631         u32 reset_phase = params->params.hw_reset.reset_phase;
5632         struct bnx2x_func_sp_obj *o = params->f_obj;
5633         const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5634
5635         DP(BNX2X_MSG_SP, "function %d  reset_phase %x\n", BP_ABS_FUNC(bp),
5636                          reset_phase);
5637
5638         switch (reset_phase) {
5639         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5640                 bnx2x_func_reset_cmn(bp, drv);
5641                 break;
5642         case FW_MSG_CODE_DRV_UNLOAD_PORT:
5643                 bnx2x_func_reset_port(bp, drv);
5644                 break;
5645         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5646                 bnx2x_func_reset_func(bp, drv);
5647                 break;
5648         default:
5649                 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5650                            reset_phase);
5651                 break;
5652         }
5653
5654         /* Complete the comand immediatelly: no ramrods have been sent. */
5655         o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5656
5657         return 0;
5658 }
5659
5660 static inline int bnx2x_func_send_start(struct bnx2x *bp,
5661                                         struct bnx2x_func_state_params *params)
5662 {
5663         struct bnx2x_func_sp_obj *o = params->f_obj;
5664         struct function_start_data *rdata =
5665                 (struct function_start_data *)o->rdata;
5666         dma_addr_t data_mapping = o->rdata_mapping;
5667         struct bnx2x_func_start_params *start_params = &params->params.start;
5668
5669         memset(rdata, 0, sizeof(*rdata));
5670
5671         /* Fill the ramrod data with provided parameters */
5672         rdata->function_mode    = (u8)start_params->mf_mode;
5673         rdata->sd_vlan_tag      = cpu_to_le16(start_params->sd_vlan_tag);
5674         rdata->path_id          = BP_PATH(bp);
5675         rdata->network_cos_mode = start_params->network_cos_mode;
5676
5677         /*
5678          *  No need for an explicit memory barrier here as long we would
5679          *  need to ensure the ordering of writing to the SPQ element
5680          *  and updating of the SPQ producer which involves a memory
5681          *  read and we will have to put a full memory barrier there
5682          *  (inside bnx2x_sp_post()).
5683          */
5684
5685         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5686                              U64_HI(data_mapping),
5687                              U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5688 }
5689
5690 static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5691                                         struct bnx2x_func_state_params *params)
5692 {
5693         struct bnx2x_func_sp_obj *o = params->f_obj;
5694         struct function_update_data *rdata =
5695                 (struct function_update_data *)o->rdata;
5696         dma_addr_t data_mapping = o->rdata_mapping;
5697         struct bnx2x_func_switch_update_params *switch_update_params =
5698                 &params->params.switch_update;
5699
5700         memset(rdata, 0, sizeof(*rdata));
5701
5702         /* Fill the ramrod data with provided parameters */
5703         rdata->tx_switch_suspend_change_flg = 1;
5704         rdata->tx_switch_suspend = switch_update_params->suspend;
5705         rdata->echo = SWITCH_UPDATE;
5706
5707         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5708                              U64_HI(data_mapping),
5709                              U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5710 }
5711
5712 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5713                                          struct bnx2x_func_state_params *params)
5714 {
5715         struct bnx2x_func_sp_obj *o = params->f_obj;
5716         struct function_update_data *rdata =
5717                 (struct function_update_data *)o->afex_rdata;
5718         dma_addr_t data_mapping = o->afex_rdata_mapping;
5719         struct bnx2x_func_afex_update_params *afex_update_params =
5720                 &params->params.afex_update;
5721
5722         memset(rdata, 0, sizeof(*rdata));
5723
5724         /* Fill the ramrod data with provided parameters */
5725         rdata->vif_id_change_flg = 1;
5726         rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5727         rdata->afex_default_vlan_change_flg = 1;
5728         rdata->afex_default_vlan =
5729                 cpu_to_le16(afex_update_params->afex_default_vlan);
5730         rdata->allowed_priorities_change_flg = 1;
5731         rdata->allowed_priorities = afex_update_params->allowed_priorities;
5732         rdata->echo = AFEX_UPDATE;
5733
5734         /*  No need for an explicit memory barrier here as long we would
5735          *  need to ensure the ordering of writing to the SPQ element
5736          *  and updating of the SPQ producer which involves a memory
5737          *  read and we will have to put a full memory barrier there
5738          *  (inside bnx2x_sp_post()).
5739          */
5740         DP(BNX2X_MSG_SP,
5741            "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5742            rdata->vif_id,
5743            rdata->afex_default_vlan, rdata->allowed_priorities);
5744
5745         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5746                              U64_HI(data_mapping),
5747                              U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5748 }
5749
/* Post an AFEX_VIF_LISTS ramrod. Unlike the other senders in this file,
 * the payload here is passed by value inside the SPQ element rather than
 * via a DMA address - see the U64_HI/U64_LO of *p_rdata below.
 */
static
inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
					 struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct afex_vif_list_ramrod_data *rdata =
		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
	struct bnx2x_func_afex_viflists_params *afex_vif_params =
		&params->params.afex_viflists;
	/* NOTE(review): the struct is reinterpreted as a single u64 so it
	 * can be embedded in the SPQ element directly; this assumes the
	 * struct is 8 bytes and relies on host byte layout matching what
	 * the firmware expects - verify on big-endian targets.
	 */
	u64 *p_rdata = (u64 *)rdata;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
	rdata->func_bit_map          = afex_vif_params->func_bit_map;
	rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
	rdata->func_to_clear         = afex_vif_params->func_to_clear;

	/* send in echo type of sub command */
	rdata->echo = afex_vif_params->afex_vif_list_command;

	/*  No need for an explicit memory barrier here as long we would
	 *  need to ensure the ordering of writing to the SPQ element
	 *  and updating of the SPQ producer which involves a memory
	 *  read and we will have to put a full memory barrier there
	 *  (inside bnx2x_sp_post()).
	 */

	/* NOTE(review): vif_list_index is __le16 here, so this debug print
	 * shows a byte-swapped value on big-endian hosts.
	 */
	DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
	   rdata->afex_vif_list_command, rdata->vif_list_index,
	   rdata->func_bit_map, rdata->func_to_clear);

	/* this ramrod sends data directly and not through DMA mapping */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
			     U64_HI(*p_rdata), U64_LO(*p_rdata),
			     NONE_CONNECTION_TYPE);
}
5788
/* Post a FUNCTION_STOP ramrod. The command carries no parameters, so a
 * zero data address is posted.
 */
static inline int bnx2x_func_send_stop(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}
5795
/* Post a STOP_TRAFFIC ramrod to halt Tx traffic. The command carries no
 * parameters, so a zero data address is posted.
 */
static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}
5802 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5803                                        struct bnx2x_func_state_params *params)
5804 {
5805         struct bnx2x_func_sp_obj *o = params->f_obj;
5806         struct flow_control_configuration *rdata =
5807                 (struct flow_control_configuration *)o->rdata;
5808         dma_addr_t data_mapping = o->rdata_mapping;
5809         struct bnx2x_func_tx_start_params *tx_start_params =
5810                 &params->params.tx_start;
5811         int i;
5812
5813         memset(rdata, 0, sizeof(*rdata));
5814
5815         rdata->dcb_enabled = tx_start_params->dcb_enabled;
5816         rdata->dcb_version = tx_start_params->dcb_version;
5817         rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5818
5819         for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5820                 rdata->traffic_type_to_priority_cos[i] =
5821                         tx_start_params->traffic_type_to_priority_cos[i];
5822
5823         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5824                              U64_HI(data_mapping),
5825                              U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5826 }
5827
5828 static int bnx2x_func_send_cmd(struct bnx2x *bp,
5829                                struct bnx2x_func_state_params *params)
5830 {
5831         switch (params->cmd) {
5832         case BNX2X_F_CMD_HW_INIT:
5833                 return bnx2x_func_hw_init(bp, params);
5834         case BNX2X_F_CMD_START:
5835                 return bnx2x_func_send_start(bp, params);
5836         case BNX2X_F_CMD_STOP:
5837                 return bnx2x_func_send_stop(bp, params);
5838         case BNX2X_F_CMD_HW_RESET:
5839                 return bnx2x_func_hw_reset(bp, params);
5840         case BNX2X_F_CMD_AFEX_UPDATE:
5841                 return bnx2x_func_send_afex_update(bp, params);
5842         case BNX2X_F_CMD_AFEX_VIFLISTS:
5843                 return bnx2x_func_send_afex_viflists(bp, params);
5844         case BNX2X_F_CMD_TX_STOP:
5845                 return bnx2x_func_send_tx_stop(bp, params);
5846         case BNX2X_F_CMD_TX_START:
5847                 return bnx2x_func_send_tx_start(bp, params);
5848         case BNX2X_F_CMD_SWITCH_UPDATE:
5849                 return bnx2x_func_send_switch_update(bp, params);
5850         default:
5851                 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5852                 return -EINVAL;
5853         }
5854 }
5855
/* Initialize a function state-machine object: attach the ramrod data
 * buffers (regular and AFEX) with their DMA mappings, wire up the
 * state-machine callbacks, and store the driver HW init/reset interface.
 */
void bnx2x_init_func_obj(struct bnx2x *bp,
			 struct bnx2x_func_sp_obj *obj,
			 void *rdata, dma_addr_t rdata_mapping,
			 void *afex_rdata, dma_addr_t afex_rdata_mapping,
			 struct bnx2x_func_sp_drv_ops *drv_iface)
{
	memset(obj, 0, sizeof(*obj));

	/* Serializes state transitions - only one command may be pending */
	mutex_init(&obj->one_pending_mutex);

	/* Ramrod data buffers and their DMA mappings */
	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;
	obj->afex_rdata = afex_rdata;
	obj->afex_rdata_mapping = afex_rdata_mapping;

	/* State-machine callbacks */
	obj->send_cmd = bnx2x_func_send_cmd;
	obj->check_transition = bnx2x_func_chk_transition;
	obj->complete_cmd = bnx2x_func_comp_cmd;
	obj->wait_comp = bnx2x_func_wait_comp;

	obj->drv = drv_iface;
}
5877
5878 /**
5879  * bnx2x_func_state_change - perform Function state change transition
5880  *
5881  * @bp:         device handle
5882  * @params:     parameters to perform the transaction
5883  *
5884  * returns 0 in case of successfully completed transition,
5885  *         negative error code in case of failure, positive
5886  *         (EBUSY) value if there is a completion to that is
5887  *         still pending (possible only if RAMROD_COMP_WAIT is
5888  *         not set in params->ramrod_flags for asynchronous
5889  *         commands).
5890  */
5891 int bnx2x_func_state_change(struct bnx2x *bp,
5892                             struct bnx2x_func_state_params *params)
5893 {
5894         struct bnx2x_func_sp_obj *o = params->f_obj;
5895         int rc, cnt = 300;
5896         enum bnx2x_func_cmd cmd = params->cmd;
5897         unsigned long *pending = &o->pending;
5898
5899         mutex_lock(&o->one_pending_mutex);
5900
5901         /* Check that the requested transition is legal */
5902         rc = o->check_transition(bp, o, params);
5903         if ((rc == -EBUSY) &&
5904             (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
5905                 while ((rc == -EBUSY) && (--cnt > 0)) {
5906                         mutex_unlock(&o->one_pending_mutex);
5907                         msleep(10);
5908                         mutex_lock(&o->one_pending_mutex);
5909                         rc = o->check_transition(bp, o, params);
5910                 }
5911                 if (rc == -EBUSY) {
5912                         mutex_unlock(&o->one_pending_mutex);
5913                         BNX2X_ERR("timeout waiting for previous ramrod completion\n");
5914                         return rc;
5915                 }
5916         } else if (rc) {
5917                 mutex_unlock(&o->one_pending_mutex);
5918                 return rc;
5919         }
5920
5921         /* Set "pending" bit */
5922         set_bit(cmd, pending);
5923
5924         /* Don't send a command if only driver cleanup was requested */
5925         if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5926                 bnx2x_func_state_change_comp(bp, o, cmd);
5927                 mutex_unlock(&o->one_pending_mutex);
5928         } else {
5929                 /* Send a ramrod */
5930                 rc = o->send_cmd(bp, params);
5931
5932                 mutex_unlock(&o->one_pending_mutex);
5933
5934                 if (rc) {
5935                         o->next_state = BNX2X_F_STATE_MAX;
5936                         clear_bit(cmd, pending);
5937                         smp_mb__after_clear_bit();
5938                         return rc;
5939                 }
5940
5941                 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5942                         rc = o->wait_comp(bp, o, cmd);
5943                         if (rc)
5944                                 return rc;
5945
5946                         return 0;
5947                 }
5948         }
5949
5950         return !!test_bit(cmd, pending);
5951 }