Linux 3.9-rc8
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

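/*
 * Under SR-IOV the PF ("master") executes firmware commands on behalf of
 * the VFs ("slaves").  The resource tracker records, per slave, every HCA
 * resource the slave has allocated (QP, CQ, SRQ, MPT, MTT, EQ, counter,
 * XRCD, flow-steering rule, MAC), so that ownership can be enforced on
 * every command and everything can be reclaimed when a slave goes away.
 */
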
#define MLX4_MAC_VALID          (1ull << 63)

struct mac_res {
        struct list_head list;
        u64 mac;
        u8 port;
};

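/*
 * Common header embedded in every tracked resource.  @owner is the slave
 * that allocated the resource; @state/@from_state/@to_state implement the
 * busy-transition protocol used by get_res()/put_res() and the
 * *_res_start_move_to()/res_end_move()/res_abort_move() helpers below.
 */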
struct res_common {
        struct list_head        list;
        struct rb_node          node;
        u64                     res_id;
        int                     owner;
        int                     state;
        int                     from_state;
        int                     to_state;
        int                     removing;
};

enum {
        RES_ANY_BUSY = 1
};

struct res_gid {
        struct list_head        list;
        u8                      gid[16];
        enum mlx4_protocol      prot;
        enum mlx4_steer_type    steer;
};

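/*
 * Most resources follow the same lifecycle: a number is reserved, the ICM
 * backing it is mapped, and finally the resource is handed to hardware.
 * The per-type state enums below encode these steps, with RES_*_BUSY
 * (== RES_ANY_BUSY) marking a resource in the middle of a transition.
 */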
enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

struct res_qp {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *rcq;
        struct res_cq          *scq;
        struct res_srq         *srq;
        struct list_head        mcg_list;
        spinlock_t              mcg_spl;
        int                     local_qpn;
        atomic_t                ref_count;
};

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common       com;
        int                     order;
        atomic_t                ref_count;
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common       com;
        struct res_mtt         *mtt;
        int                     key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common       com;
        struct res_mtt         *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common       com;
        struct res_mtt         *mtt;
        atomic_t                ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *cq;
        atomic_t                ref_count;
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common       com;
        int                     port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common       com;
        int                     port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common       com;
        int                     qpn;
};

static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = container_of(node, struct res_common,
                                                      node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put new node */
        while (*new) {
                struct res_common *this = container_of(*new, struct res_common,
                                                       node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}

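/*
 * The trees above are keyed by res_id and must only be walked while
 * holding mlx4_tlock().  A minimal usage sketch (illustrative only):
 *
 *	spin_lock_irq(mlx4_tlock(dev));
 *	err = res_tracker_insert(root, &res->com);   -EEXIST if id taken
 *	r = res_tracker_lookup(root, res_id);        NULL if absent
 *	spin_unlock_irq(mlx4_tlock(dev));
 */
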
enum qp_transition {
        QP_TRANS_INIT2RTR,
        QP_TRANS_RTR2RTS,
        QP_TRANS_RTS2RTS,
        QP_TRANS_SQERR2RTS,
        QP_TRANS_SQD2SQD,
        QP_TRANS_SQD2RTS
};

/* For debug use only */
static const char *ResourceType(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";
        }
}

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;
        int t;

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY)
                        for (i = 0; i < dev->num_slaves; i++)
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}

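/*
 * The mailbox of a QP modify command carries the optional-parameter mask
 * in its first dword and the QP context right after it; the raw byte
 * offsets used below appear to index that layout (byte 64 holds the
 * schedule-queue field, whose bit 6 encodes the port, and byte 35 the
 * pkey index).  The helper rewrites the slave's virtual pkey index to
 * the physical one.
 */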
static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = ((sched >> 6) & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

        if (MLX4_QP_ST_UD == ts)
                qp_ctx->pri_path.mgid_index = 0x80 | slave;

        if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
                        qp_ctx->pri_path.mgid_index = slave & 0x7F;
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
                        qp_ctx->alt_path.mgid_index = slave & 0x7F;
        }
}

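/*
 * MPT indices as seen in commands carry extra key bits in their high
 * part; masking with num_mpts - 1 (num_mpts is a power of two) recovers
 * the table index that the tracker uses as the resource id.
 */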
static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}

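/*
 * get_res()/put_res() bracket a critical section on one tracked resource:
 * get_res() verifies ownership, saves the current state and marks the
 * resource busy; put_res() restores the saved state.  Typical pattern
 * (sketch, mirroring the use in mpt_free_res() below):
 *
 *	struct res_mpt *mpt;
 *
 *	err = get_res(dev, slave, id, RES_MPT, &mpt);
 *	if (err)
 *		return err;
 *	...use mpt, safe from concurrent transitions...
 *	put_res(dev, slave, id, RES_MPT);
 */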
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                   enum mlx4_resource type,
                   void *res)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENOENT;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r)
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
        ret->qpn = qpn;
        return &ret->com;
}

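/*
 * Constructor dispatch.  The meaning of @extra is per type: the full MPT
 * index (including key bits) for RES_MPT, the allocation order for
 * RES_MTT, and the attached QP number for RES_FS_RULE; other types
 * ignore it.
 */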
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                pr_err("implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id);
                break;
        case RES_XRCD:
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}

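/*
 * Register @count consecutive resource ids starting at @base as owned by
 * @slave.  Tracker entries are allocated outside the lock, then inserted
 * into the type's rb-tree and the slave's list in one locked pass; on
 * any collision the whole range is rolled back, so registration is
 * all-or-nothing.
 */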
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        /* Only entries 0..i-1 made it into the tree and the per-slave
         * list; res_arr is indexed from 0, not from base, so unwind to
         * index 0 and unlink from both structures before freeing.
         */
        for (--i; i >= 0; --i) {
                rb_erase(&res_arr[i]->node, root);
                list_del(&res_arr[i]->list);
        }

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}

static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
            !list_empty(&res->mcg_list)) {
                pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
                       res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
        }

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
                       __func__, __LINE__,
                       mtt_states_str(res->com.state),
                       atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
        if (res->com.state == RES_COUNTER_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_COUNTER_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
        if (res->com.state == RES_XRCD_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_XRCD_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
        if (res->com.state == RES_FS_RULE_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_FS_RULE_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
        if (res->com.state == RES_CQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_CQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
        if (res->com.state == RES_SRQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_SRQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
        switch (type) {
        case RES_QP:
                return remove_qp_ok((struct res_qp *)res);
        case RES_CQ:
                return remove_cq_ok((struct res_cq *)res);
        case RES_SRQ:
                return remove_srq_ok((struct res_srq *)res);
        case RES_MPT:
                return remove_mpt_ok((struct res_mpt *)res);
        case RES_MTT:
                return remove_mtt_ok((struct res_mtt *)res, extra);
        case RES_MAC:
                return -ENOSYS;
        case RES_EQ:
                return remove_eq_ok((struct res_eq *)res);
        case RES_COUNTER:
                return remove_counter_ok((struct res_counter *)res);
        case RES_XRCD:
                return remove_xrcdn_ok((struct res_xrcdn *)res);
        case RES_FS_RULE:
                return remove_fs_rule_ok((struct res_fs_rule *)res);
        default:
                return -EINVAL;
        }
}

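/*
 * Mirror of add_res_range(): first validate under the lock that every id
 * in the range exists, belongs to @slave and is in a removable state, and
 * only then unlink and free all of them, so removal too is
 * all-or-nothing.
 */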
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        u64 i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                if (!r) {
                        err = -ENOENT;
                        goto out;
                }
                if (r->owner != slave) {
                        err = -EPERM;
                        goto out;
                }
                err = remove_ok(r, type, extra);
                if (err)
                        goto out;
        }

        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                rb_erase(&r->node, &tracker->res_tree[type]);
                list_del(&r->list);
                kfree(r);
        }
        err = 0;

out:
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
                                enum res_qp_states state, struct res_qp **qp,
                                int alloc)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_qp *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_QP_BUSY:
                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
                                 __func__, r->com.res_id);
                        err = -EBUSY;
                        break;

                case RES_QP_RESERVED:
                        if (r->com.state == RES_QP_MAPPED && !alloc)
                                break;

                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
                        err = -EINVAL;
                        break;

                case RES_QP_MAPPED:
                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
                            r->com.state == RES_QP_HW)
                                break;
                        else {
                                mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
                                         r->com.res_id);
                                err = -EINVAL;
                        }

                        break;

                case RES_QP_HW:
                        if (r->com.state != RES_QP_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_QP_BUSY;
                        if (qp)
                                *qp = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_mpt_states state, struct res_mpt **mpt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_mpt *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_MPT_BUSY:
                        err = -EINVAL;
                        break;

                case RES_MPT_RESERVED:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;

                case RES_MPT_MAPPED:
                        if (r->com.state != RES_MPT_RESERVED &&
                            r->com.state != RES_MPT_HW)
                                err = -EINVAL;
                        break;

                case RES_MPT_HW:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_MPT_BUSY;
                        if (mpt)
                                *mpt = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_eq_states state, struct res_eq **eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_eq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_EQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_EQ_RESERVED:
                        if (r->com.state != RES_EQ_HW)
                                err = -EINVAL;
                        break;

                case RES_EQ_HW:
                        if (r->com.state != RES_EQ_RESERVED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_EQ_BUSY;
                        if (eq)
                                *eq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
                                enum res_cq_states state, struct res_cq **cq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_cq *r;
        int err;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_CQ_BUSY:
                        err = -EBUSY;
                        break;

                case RES_CQ_ALLOCATED:
                        if (r->com.state != RES_CQ_HW)
                                err = -EINVAL;
                        else if (atomic_read(&r->ref_count))
                                err = -EBUSY;
                        else
                                err = 0;
                        break;

                case RES_CQ_HW:
                        if (r->com.state != RES_CQ_ALLOCATED)
                                err = -EINVAL;
                        else
                                err = 0;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_CQ_BUSY;
                        if (cq)
                                *cq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                 enum res_srq_states state, struct res_srq **srq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_srq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_SRQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_SRQ_ALLOCATED:
                        if (r->com.state != RES_SRQ_HW)
                                err = -EINVAL;
                        else if (atomic_read(&r->ref_count))
                                err = -EBUSY;
                        break;

                case RES_SRQ_HW:
                        if (r->com.state != RES_SRQ_ALLOCATED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_SRQ_BUSY;
                        if (srq)
                                *srq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

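/*
 * The *_res_start_move_to() helpers above begin a state transition by
 * saving from_state/to_state and parking the resource in the BUSY state.
 * The caller then performs the firmware operation and commits with
 * res_end_move() or rolls back with res_abort_move().  Sketch
 * (do_hw_command() is a hypothetical stand-in for the real command):
 *
 *	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
 *	if (err)
 *		return err;
 *	err = do_hw_command(...);
 *	if (err)
 *		res_abort_move(dev, slave, RES_QP, qpn);
 *	else
 *		res_end_move(dev, slave, RES_QP, qpn);
 */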
static void res_abort_move(struct mlx4_dev *dev, int slave,
                           enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
                         enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->to_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
        return mlx4_is_qp_reserved(dev, qpn) &&
                (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

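/*
 * QPs below the firmware region boundary are owned by firmware; their
 * ICM is never mapped or freed from here, which is why the
 * RES_OP_MAP_ICM paths below check fw_reserved() first.
 */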
static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
        return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}

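/*
 * QP allocation is a two-step protocol driven by the op modifier:
 * RES_OP_RESERVE reserves a range (count in the low dword of the input
 * parameter, alignment in the high dword) and returns its base, and
 * RES_OP_MAP_ICM then maps ICM for one QP and moves it to RES_QP_MAPPED.
 */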
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err;
        int count;
        int align;
        int base;
        int qpn;

        switch (op) {
        case RES_OP_RESERVE:
                count = get_param_l(&in_param);
                align = get_param_h(&in_param);
                err = __mlx4_qp_reserve_range(dev, count, align, &base);
                if (err)
                        return err;

                err = add_res_range(dev, slave, base, count, RES_QP, 0);
                if (err) {
                        __mlx4_qp_release_range(dev, base, count);
                        return err;
                }
                set_param_l(out_param, base);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                if (valid_reserved(dev, slave, qpn)) {
                        err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
                        if (err)
                                return err;
                }

                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
                                           NULL, 1);
                if (err)
                        return err;

                if (!fw_reserved(dev, qpn)) {
                        err = __mlx4_qp_alloc_icm(dev, qpn);
                        if (err) {
                                res_abort_move(dev, slave, RES_QP, qpn);
                                return err;
                        }
                }

                res_end_move(dev, slave, RES_QP, qpn);
                break;

        default:
                err = -EINVAL;
                break;
        }
        return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int base;
        int order;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        order = get_param_l(&in_param);
        base = __mlx4_alloc_mtt_range(dev, order);
        if (base == -1)
                return -ENOMEM;

        err = add_res_range(dev, slave, base, 1, RES_MTT, order);
        if (err)
                __mlx4_free_mtt_range(dev, base, order);
        else
                set_param_l(out_param, base);

        return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int index;
        int id;
        struct res_mpt *mpt;

        switch (op) {
        case RES_OP_RESERVE:
                index = __mlx4_mpt_reserve(dev);
                if (index == -1)
                        break;
                id = index & mpt_mask(dev);

                err = add_res_range(dev, slave, id, 1, RES_MPT, index);
                if (err) {
                        __mlx4_mpt_release(dev, index);
                        break;
                }
                set_param_l(out_param, index);
                break;
        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_MAPPED, &mpt);
                if (err)
                        return err;

                err = __mlx4_mpt_alloc_icm(dev, mpt->key);
                if (err) {
                        res_abort_move(dev, slave, RES_MPT, id);
                        return err;
                }

                res_end_move(dev, slave, RES_MPT, id);
                break;
        }
        return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int cqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                err = __mlx4_cq_alloc_icm(dev, &cqn);
                if (err)
                        break;

                err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
                if (err) {
                        __mlx4_cq_free_icm(dev, cqn);
                        break;
                }

                set_param_l(out_param, cqn);
                break;

        default:
                err = -EINVAL;
        }

        return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int srqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                err = __mlx4_srq_alloc_icm(dev, &srqn);
                if (err)
                        break;

                err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
                if (err) {
                        __mlx4_srq_free_icm(dev, srqn);
                        break;
                }

                set_param_l(out_param, srqn);
                break;

        default:
                err = -EINVAL;
        }

        return err;
}

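/*
 * MACs are the one resource kept only on the per-slave list rather than
 * in an rb-tree (note the "implementation missing" case in alloc_tr()),
 * so cleanup walks the list directly in rem_slave_macs().
 */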
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct mac_res *res;

        res = kzalloc(sizeof *res, GFP_KERNEL);
        if (!res)
                return -ENOMEM;
        res->mac = mac;
        res->port = (u8) port;
        list_add_tail(&res->list,
                      &tracker->slave_list[slave].res_list[RES_MAC]);
        return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
                               int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                if (res->mac == mac && res->port == (u8) port) {
                        list_del(&res->list);
                        kfree(res);
                        break;
                }
        }
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                list_del(&res->list);
                __mlx4_unregister_mac(dev, res->port, res->mac);
                kfree(res);
        }
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int port;
        u64 mac;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        port = get_param_l(out_param);
        mac = in_param;

        err = __mlx4_register_mac(dev, port, mac);
        if (err >= 0) {
                set_param_l(out_param, err);
                err = 0;
        }

        if (!err) {
                err = mac_add_to_slave(dev, slave, mac, port);
                if (err)
                        __mlx4_unregister_mac(dev, port, mac);
        }
        return err;
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                          u64 in_param, u64 *out_param)
{
        return 0;
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                             u64 in_param, u64 *out_param)
{
        u32 index;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        err = __mlx4_counter_alloc(dev, &index);
        if (err)
                return err;

        err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
        if (err)
                __mlx4_counter_free(dev, index);
        else
                set_param_l(out_param, index);

        return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                           u64 in_param, u64 *out_param)
{
        u32 xrcdn;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        err = __mlx4_xrcd_alloc(dev, &xrcdn);
        if (err)
                return err;

        err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
        if (err)
                __mlx4_xrcd_free(dev, xrcdn);
        else
                set_param_l(out_param, xrcdn);

        return err;
}

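/*
 * Command wrapper called on the master for a slave's ALLOC_RES command:
 * the input modifier selects the resource type and the op modifier one
 * of the RES_OP_* allocation flavors handled above.
 */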
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int err;
        int alop = vhcr->op_modifier;

        switch (vhcr->in_modifier) {
        case RES_QP:
                err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MTT:
                err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MPT:
                err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_CQ:
                err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_SRQ:
                err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MAC:
                err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_VLAN:
                err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                     vhcr->in_param, &vhcr->out_param);
                break;

        case RES_COUNTER:
                err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                        vhcr->in_param, &vhcr->out_param);
                break;

        case RES_XRCD:
                err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                      vhcr->in_param, &vhcr->out_param);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                       u64 in_param)
{
        int err;
        int count;
        int base;
        int qpn;

        switch (op) {
        case RES_OP_RESERVE:
                base = get_param_l(&in_param) & 0x7fffff;
                count = get_param_h(&in_param);
                err = rem_res_range(dev, slave, base, count, RES_QP, 0);
                if (err)
                        break;
                __mlx4_qp_release_range(dev, base, count);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
                                           NULL, 0);
                if (err)
                        return err;

                if (!fw_reserved(dev, qpn))
                        __mlx4_qp_free_icm(dev, qpn);

                res_end_move(dev, slave, RES_QP, qpn);

                if (valid_reserved(dev, slave, qpn))
                        err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
                break;
        default:
                err = -EINVAL;
                break;
        }
        return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int base;
        int order;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        base = get_param_l(&in_param);
        order = get_param_h(&in_param);
        err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
        if (!err)
                __mlx4_free_mtt_range(dev, base, order);
        return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param)
{
        int err = -EINVAL;
        int index;
        int id;
        struct res_mpt *mpt;

        switch (op) {
        case RES_OP_RESERVE:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = get_res(dev, slave, id, RES_MPT, &mpt);
                if (err)
                        break;
                index = mpt->key;
                put_res(dev, slave, id, RES_MPT);

                err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
                if (err)
                        break;
                __mlx4_mpt_release(dev, index);
                break;
        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_RESERVED, &mpt);
                if (err)
                        return err;

                __mlx4_mpt_free_icm(dev, mpt->key);
                res_end_move(dev, slave, RES_MPT, id);
                return err;
        default:
                err = -EINVAL;
                break;
        }
        return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                       u64 in_param, u64 *out_param)
{
        int cqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                cqn = get_param_l(&in_param);
                err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
                if (err)
                        break;

                __mlx4_cq_free_icm(dev, cqn);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int srqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                srqn = get_param_l(&in_param);
                err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
                if (err)
                        break;

                __mlx4_srq_free_icm(dev, srqn);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int port;
        int err = 0;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                port = get_param_l(out_param);
                mac_del_from_slave(dev, slave, in_param, port);
                __mlx4_unregister_mac(dev, port, in_param);
                break;
        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        return 0;
}

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                            u64 in_param, u64 *out_param)
{
        int index;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        index = get_param_l(&in_param);
        err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
        if (err)
                return err;

        __mlx4_counter_free(dev, index);

        return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                          u64 in_param, u64 *out_param)
{
        int xrcdn;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        xrcdn = get_param_l(&in_param);
        err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
        if (err)
                return err;

        __mlx4_xrcd_free(dev, xrcdn);

        return err;
}

1730 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1731                           struct mlx4_vhcr *vhcr,
1732                           struct mlx4_cmd_mailbox *inbox,
1733                           struct mlx4_cmd_mailbox *outbox,
1734                           struct mlx4_cmd_info *cmd)
1735 {
1736         int err = -EINVAL;
1737         int alop = vhcr->op_modifier;
1738
1739         switch (vhcr->in_modifier) {
1740         case RES_QP:
1741                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1742                                   vhcr->in_param);
1743                 break;
1744
1745         case RES_MTT:
1746                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1747                                    vhcr->in_param, &vhcr->out_param);
1748                 break;
1749
1750         case RES_MPT:
1751                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1752                                    vhcr->in_param);
1753                 break;
1754
1755         case RES_CQ:
1756                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1757                                   vhcr->in_param, &vhcr->out_param);
1758                 break;
1759
1760         case RES_SRQ:
1761                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1762                                    vhcr->in_param, &vhcr->out_param);
1763                 break;
1764
1765         case RES_MAC:
1766                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1767                                    vhcr->in_param, &vhcr->out_param);
1768                 break;
1769
1770         case RES_VLAN:
1771                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1772                                    vhcr->in_param, &vhcr->out_param);
1773                 break;
1774
1775         case RES_COUNTER:
1776                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
1777                                        vhcr->in_param, &vhcr->out_param);
1778                 break;
1779
1780         case RES_XRCD:
1781                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
1782                                      vhcr->in_param, &vhcr->out_param);
1783                 break;
1784         default:
1785                 break;
1786         }
1787         return err;
1788 }
1789
1790 /* ugly but other choices are uglier */
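/* Bit 9 of the MPT flags is MLX4_MPT_FLAG_PHYSICAL: a physical MPT
 * describes one contiguous region and carries no MTT translation that
 * would need range checking.
 */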
1791 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1792 {
1793         return (be32_to_cpu(mpt->flags) >> 9) & 1;
1794 }
1795
1796 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
1797 {
1798         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
1799 }
1800
1801 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1802 {
1803         return be32_to_cpu(mpt->mtt_sz);
1804 }
1805
1806 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
1807 {
1808         return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
1809 }
1810
1811 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
1812 {
1813         return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
1814 }
1815
1816 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
1817 {
1818         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
1819 }
1820
1821 static int mr_is_region(struct mlx4_mpt_entry *mpt)
1822 {
1823         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
1824 }
1825
1826 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
1827 {
1828         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1829 }
1830
1831 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
1832 {
1833         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
1834 }
1835
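/* Number of pages (and hence MTT entries) spanned by a QP's work
 * queues.  Strides are in 16-byte units; the RQ contributes nothing
 * when the QP uses an SRQ, RSS, or XRC.  The byte total, adjusted by
 * the 64-byte-unit page offset, is converted to pages and rounded up
 * to a power of two.
 */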
1836 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1837 {
1838         int page_shift = (qpc->log_page_size & 0x3f) + 12;
1839         int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
1840         int log_sq_stride = qpc->sq_size_stride & 7;
1841         int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
1842         int log_rq_stride = qpc->rq_size_stride & 7;
1843         int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
1844         int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
1845         int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
1846         int sq_size;
1847         int rq_size;
1848         int total_pages;
1849         int total_mem;
1850         int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
1851
1852         sq_size = 1 << (log_sq_size + log_sq_stride + 4);
1853         rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
1854         total_mem = sq_size + rq_size;
1855         total_pages =
1856                 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1857                                    page_shift);
1858
1859         return total_pages;
1860 }
1861
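/* A slave may only reference MTT entries inside ranges it owns:
 * [start, start + size) must fall within the reserved range that
 * starts at mtt->com.res_id and spans 1 << mtt->order entries.
 */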
1862 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1863                            int size, struct res_mtt *mtt)
1864 {
1865         int res_start = mtt->com.res_id;
1866         int res_size = (1 << mtt->order);
1867
1868         if (start < res_start || start + size > res_start + res_size)
1869                 return -EPERM;
1870         return 0;
1871 }
1872
1873 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1874                            struct mlx4_vhcr *vhcr,
1875                            struct mlx4_cmd_mailbox *inbox,
1876                            struct mlx4_cmd_mailbox *outbox,
1877                            struct mlx4_cmd_info *cmd)
1878 {
1879         int err;
1880         int index = vhcr->in_modifier;
1881         struct res_mtt *mtt;
1882         struct res_mpt *mpt;
1883         int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
1884         int phys;
1885         int id;
1886         u32 pd;
1887         int pd_slave;
1888
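        /* Only the low bits of the key index the MPT table; mask off
         * the key's tag bits to get the id tracked for this slave. */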
1889         id = index & mpt_mask(dev);
1890         err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
1891         if (err)
1892                 return err;
1893
1894         /* Disable memory windows for VFs. */
1895         if (!mr_is_region(inbox->buf)) {
1896                 err = -EPERM;
1897                 goto ex_abort;
1898         }
1899
1900         /* Make sure that the PD bits related to the slave id are zeros. */
1901         pd = mr_get_pd(inbox->buf);
1902         pd_slave = (pd >> 17) & 0x7f;
1903         if (pd_slave != 0 && pd_slave != slave) {
1904                 err = -EPERM;
1905                 goto ex_abort;
1906         }
1907
1908         if (mr_is_fmr(inbox->buf)) {
1909                 /* FMR and Bind Enable are forbidden in slave devices. */
1910                 if (mr_is_bind_enabled(inbox->buf)) {
1911                         err = -EPERM;
1912                         goto ex_abort;
1913                 }
1914                 /* FMR and Memory Windows are also forbidden. */
1915                 if (!mr_is_region(inbox->buf)) {
1916                         err = -EPERM;
1917                         goto ex_abort;
1918                 }
1919         }
1920
1921         phys = mr_phys_mpt(inbox->buf);
1922         if (!phys) {
1923                 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1924                 if (err)
1925                         goto ex_abort;
1926
1927                 err = check_mtt_range(dev, slave, mtt_base,
1928                                       mr_get_mtt_size(inbox->buf), mtt);
1929                 if (err)
1930                         goto ex_put;
1931
1932                 mpt->mtt = mtt;
1933         }
1934
1935         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1936         if (err)
1937                 goto ex_put;
1938
1939         if (!phys) {
1940                 atomic_inc(&mtt->ref_count);
1941                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1942         }
1943
1944         res_end_move(dev, slave, RES_MPT, id);
1945         return 0;
1946
1947 ex_put:
1948         if (!phys)
1949                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1950 ex_abort:
1951         res_abort_move(dev, slave, RES_MPT, id);
1952
1953         return err;
1954 }
1955
1956 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1957                            struct mlx4_vhcr *vhcr,
1958                            struct mlx4_cmd_mailbox *inbox,
1959                            struct mlx4_cmd_mailbox *outbox,
1960                            struct mlx4_cmd_info *cmd)
1961 {
1962         int err;
1963         int index = vhcr->in_modifier;
1964         struct res_mpt *mpt;
1965         int id;
1966
1967         id = index & mpt_mask(dev);
1968         err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
1969         if (err)
1970                 return err;
1971
1972         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1973         if (err)
1974                 goto ex_abort;
1975
1976         if (mpt->mtt)
1977                 atomic_dec(&mpt->mtt->ref_count);
1978
1979         res_end_move(dev, slave, RES_MPT, id);
1980         return 0;
1981
1982 ex_abort:
1983         res_abort_move(dev, slave, RES_MPT, id);
1984
1985         return err;
1986 }
1987
1988 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
1989                            struct mlx4_vhcr *vhcr,
1990                            struct mlx4_cmd_mailbox *inbox,
1991                            struct mlx4_cmd_mailbox *outbox,
1992                            struct mlx4_cmd_info *cmd)
1993 {
1994         int err;
1995         int index = vhcr->in_modifier;
1996         struct res_mpt *mpt;
1997         int id;
1998
1999         id = index & mpt_mask(dev);
2000         err = get_res(dev, slave, id, RES_MPT, &mpt);
2001         if (err)
2002                 return err;
2003
2004         if (mpt->com.from_state != RES_MPT_HW) {
2005                 err = -EBUSY;
2006                 goto out;
2007         }
2008
2009         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2010
2011 out:
2012         put_res(dev, slave, id, RES_MPT);
2013         return err;
2014 }
2015
2016 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2017 {
2018         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2019 }
2020
2021 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2022 {
2023         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2024 }
2025
2026 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2027 {
2028         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2029 }
2030
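/* QPs in the proxy/tunnel range used for special-QP paravirtualization
 * get a master-assigned qkey; for any other QP number
 * mlx4_get_parav_qkey() fails and the context is left untouched.
 */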
2031 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2032                                   struct mlx4_qp_context *context)
2033 {
2034         u32 qpn = vhcr->in_modifier & 0xffffff;
2035         u32 qkey = 0;
2036
2037         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2038                 return;
2039
2040         /* adjust qkey in qp context */
2041         context->qkey = cpu_to_be32(qkey);
2042 }
2043
2044 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2045                              struct mlx4_vhcr *vhcr,
2046                              struct mlx4_cmd_mailbox *inbox,
2047                              struct mlx4_cmd_mailbox *outbox,
2048                              struct mlx4_cmd_info *cmd)
2049 {
2050         int err;
2051         int qpn = vhcr->in_modifier & 0x7fffff;
2052         struct res_mtt *mtt;
2053         struct res_qp *qp;
2054         struct mlx4_qp_context *qpc = inbox->buf + 8;
2055         int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2056         int mtt_size = qp_get_mtt_size(qpc);
2057         struct res_cq *rcq;
2058         struct res_cq *scq;
2059         int rcqn = qp_get_rcqn(qpc);
2060         int scqn = qp_get_scqn(qpc);
2061         u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2062         int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2063         struct res_srq *srq;
2064         int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2065
2066         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2067         if (err)
2068                 return err;
2069         qp->local_qpn = local_qpn;
2070
2071         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2072         if (err)
2073                 goto ex_abort;
2074
2075         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2076         if (err)
2077                 goto ex_put_mtt;
2078
2079         err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2080         if (err)
2081                 goto ex_put_mtt;
2082
2083         if (scqn != rcqn) {
2084                 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2085                 if (err)
2086                         goto ex_put_rcq;
2087         } else {
2088                 scq = rcq;
             }
2089
2090         if (use_srq) {
2091                 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2092                 if (err)
2093                         goto ex_put_scq;
2094         }
2095
2096         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2097         update_pkey_index(dev, slave, inbox);
2098         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2099         if (err)
2100                 goto ex_put_srq;
2101         atomic_inc(&mtt->ref_count);
2102         qp->mtt = mtt;
2103         atomic_inc(&rcq->ref_count);
2104         qp->rcq = rcq;
2105         atomic_inc(&scq->ref_count);
2106         qp->scq = scq;
2107
2108         if (scqn != rcqn)
2109                 put_res(dev, slave, scqn, RES_CQ);
2110
2111         if (use_srq) {
2112                 atomic_inc(&srq->ref_count);
2113                 put_res(dev, slave, srqn, RES_SRQ);
2114                 qp->srq = srq;
2115         }
2116         put_res(dev, slave, rcqn, RES_CQ);
2117         put_res(dev, slave, mtt_base, RES_MTT);
2118         res_end_move(dev, slave, RES_QP, qpn);
2119
2120         return 0;
2121
2122 ex_put_srq:
2123         if (use_srq)
2124                 put_res(dev, slave, srqn, RES_SRQ);
2125 ex_put_scq:
2126         if (scqn != rcqn)
2127                 put_res(dev, slave, scqn, RES_CQ);
2128 ex_put_rcq:
2129         put_res(dev, slave, rcqn, RES_CQ);
2130 ex_put_mtt:
2131         put_res(dev, slave, mtt_base, RES_MTT);
2132 ex_abort:
2133         res_abort_move(dev, slave, RES_QP, qpn);
2134
2135         return err;
2136 }
2137
2138 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2139 {
2140         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2141 }
2142
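/* EQ entries are 32 bytes, so an EQ spans 1 << (log_eq_size + 5)
 * bytes; an EQ smaller than a page still consumes one page of MTTs.
 */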
2143 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2144 {
2145         int log_eq_size = eqc->log_eq_size & 0x1f;
2146         int page_shift = (eqc->log_page_size & 0x3f) + 12;
2147
2148         if (log_eq_size + 5 < page_shift)
2149                 return 1;
2150
2151         return 1 << (log_eq_size + 5 - page_shift);
2152 }
2153
2154 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2155 {
2156         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2157 }
2158
2159 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2160 {
2161         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2162         int page_shift = (cqc->log_page_size & 0x3f) + 12;
2163
2164         if (log_cq_size + 5 < page_shift)
2165                 return 1;
2166
2167         return 1 << (log_cq_size + 5 - page_shift);
2168 }
2169
2170 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2171                           struct mlx4_vhcr *vhcr,
2172                           struct mlx4_cmd_mailbox *inbox,
2173                           struct mlx4_cmd_mailbox *outbox,
2174                           struct mlx4_cmd_info *cmd)
2175 {
2176         int err;
2177         int eqn = vhcr->in_modifier;
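        /* EQ numbers are only unique per slave; fold the slave number
         * into the tracker id so entries from different slaves do not
         * collide. */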
2178         int res_id = (slave << 8) | eqn;
2179         struct mlx4_eq_context *eqc = inbox->buf;
2180         int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2181         int mtt_size = eq_get_mtt_size(eqc);
2182         struct res_eq *eq;
2183         struct res_mtt *mtt;
2184
2185         err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2186         if (err)
2187                 return err;
2188         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2189         if (err)
2190                 goto out_add;
2191
2192         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2193         if (err)
2194                 goto out_move;
2195
2196         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2197         if (err)
2198                 goto out_put;
2199
2200         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2201         if (err)
2202                 goto out_put;
2203
2204         atomic_inc(&mtt->ref_count);
2205         eq->mtt = mtt;
2206         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2207         res_end_move(dev, slave, RES_EQ, res_id);
2208         return 0;
2209
2210 out_put:
2211         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2212 out_move:
2213         res_abort_move(dev, slave, RES_EQ, res_id);
2214 out_add:
2215         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2216         return err;
2217 }
2218
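/* Find an MTT range owned by the slave that fully contains
 * [start, start + len), and mark it busy so it cannot be freed while
 * the caller is writing to it.  The caller must release it again with
 * put_res().
 */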
2219 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2220                               int len, struct res_mtt **res)
2221 {
2222         struct mlx4_priv *priv = mlx4_priv(dev);
2223         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2224         struct res_mtt *mtt;
2225         int err = -EINVAL;
2226
2227         spin_lock_irq(mlx4_tlock(dev));
2228         list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2229                             com.list) {
2230                 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2231                         *res = mtt;
2232                         mtt->com.from_state = mtt->com.state;
2233                         mtt->com.state = RES_MTT_BUSY;
2234                         err = 0;
2235                         break;
2236                 }
2237         }
2238         spin_unlock_irq(mlx4_tlock(dev));
2239
2240         return err;
2241 }
2242
2243 static int verify_qp_parameters(struct mlx4_dev *dev,
2244                                 struct mlx4_cmd_mailbox *inbox,
2245                                 enum qp_transition transition, u8 slave)
2246 {
2247         u32                     qp_type;
2248         struct mlx4_qp_context  *qp_ctx;
2249         enum mlx4_qp_optpar     optpar;
2250
2251         qp_ctx  = inbox->buf + 8;
2252         qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2253         optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
2254
2255         switch (qp_type) {
2256         case MLX4_QP_ST_RC:
2257         case MLX4_QP_ST_UC:
2258                 switch (transition) {
2259                 case QP_TRANS_INIT2RTR:
2260                 case QP_TRANS_RTR2RTS:
2261                 case QP_TRANS_RTS2RTS:
2262                 case QP_TRANS_SQD2SQD:
2263                 case QP_TRANS_SQD2RTS:
2264                         if (slave != mlx4_master_func_num(dev)) {
2265                                 /* slaves have only gid index 0 */
2266                                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
2267                                         if (qp_ctx->pri_path.mgid_index)
2268                                                 return -EINVAL;
2269                                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
2270                                         if (qp_ctx->alt_path.mgid_index)
2271                                                 return -EINVAL;
                             }
2272                         break;
2273                 default:
2274                         break;
2275                 }
2276
2277                 break;
2278         default:
2279                 break;
2280         }
2281
2282         return 0;
2283 }
2284
2285 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2286                            struct mlx4_vhcr *vhcr,
2287                            struct mlx4_cmd_mailbox *inbox,
2288                            struct mlx4_cmd_mailbox *outbox,
2289                            struct mlx4_cmd_info *cmd)
2290 {
2291         struct mlx4_mtt mtt;
2292         __be64 *page_list = inbox->buf;
2293         u64 *pg_list = (u64 *)page_list;
2294         int i;
2295         struct res_mtt *rmtt = NULL;
2296         int start = be64_to_cpu(page_list[0]);
2297         int npages = vhcr->in_modifier;
2298         int err;
2299
2300         err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2301         if (err)
2302                 return err;
2303
2304         /* Call the SW implementation of write_mtt:
2305          * - Prepare a dummy mtt struct
2306          * - Translate inbox contents to simple addresses in host endianness */
2307         mtt.offset = 0;  /* TBD this is broken but I don't handle it since
2308                             we don't really use it */
2309         mtt.order = 0;
2310         mtt.page_shift = 0;
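        /* Mailbox layout (see mlx4_write_mtt): quadword 0 is the start
         * index, quadword 1 is reserved, and page addresses begin at
         * entry 2.  Strip the low "present" bit while converting to
         * host order; __mlx4_write_mtt sets it again. */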
2311         for (i = 0; i < npages; ++i)
2312                 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2313
2314         err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2315                                ((u64 *)page_list + 2));
2316
2317         if (rmtt)
2318                 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2319
2320         return err;
2321 }
2322
2323 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2324                           struct mlx4_vhcr *vhcr,
2325                           struct mlx4_cmd_mailbox *inbox,
2326                           struct mlx4_cmd_mailbox *outbox,
2327                           struct mlx4_cmd_info *cmd)
2328 {
2329         int eqn = vhcr->in_modifier;
2330         int res_id = eqn | (slave << 8);
2331         struct res_eq *eq;
2332         int err;
2333
2334         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2335         if (err)
2336                 return err;
2337
2338         err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2339         if (err)
2340                 goto ex_abort;
2341
2342         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2343         if (err)
2344                 goto ex_put;
2345
2346         atomic_dec(&eq->mtt->ref_count);
2347         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2348         res_end_move(dev, slave, RES_EQ, res_id);
2349         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2350
2351         return 0;
2352
2353 ex_put:
2354         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2355 ex_abort:
2356         res_abort_move(dev, slave, RES_EQ, res_id);
2357
2358         return err;
2359 }
2360
2361 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2362 {
2363         struct mlx4_priv *priv = mlx4_priv(dev);
2364         struct mlx4_slave_event_eq_info *event_eq;
2365         struct mlx4_cmd_mailbox *mailbox;
2366         u32 in_modifier = 0;
2367         int err;
2368         int res_id;
2369         struct res_eq *req;
2370
2371         if (!priv->mfunc.master.slave_state)
2372                 return -EINVAL;
2373
2374         event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2375
2376         /* Create the event only if the slave is registered */
2377         if (event_eq->eqn < 0)
2378                 return 0;
2379
2380         mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2381         res_id = (slave << 8) | event_eq->eqn;
2382         err = get_res(dev, slave, res_id, RES_EQ, &req);
2383         if (err)
2384                 goto unlock;
2385
2386         if (req->com.from_state != RES_EQ_HW) {
2387                 err = -EINVAL;
2388                 goto put;
2389         }
2390
2391         mailbox = mlx4_alloc_cmd_mailbox(dev);
2392         if (IS_ERR(mailbox)) {
2393                 err = PTR_ERR(mailbox);
2394                 goto put;
2395         }
2396
2397         if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2398                 ++event_eq->token;
2399                 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2400         }
2401
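        /* Forward only the EQE payload; the tail of the 32-byte EQE
         * holds the ownership bit and is left for the slave's EQ. */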
2402         memcpy(mailbox->buf, (u8 *) eqe, 28);
2403
2404         in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2405
2406         err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2407                        MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2408                        MLX4_CMD_NATIVE);
2409
2410         put_res(dev, slave, res_id, RES_EQ);
2411         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2412         mlx4_free_cmd_mailbox(dev, mailbox);
2413         return err;
2414
2415 put:
2416         put_res(dev, slave, res_id, RES_EQ);
2417
2418 unlock:
2419         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2420         return err;
2421 }
2422
2423 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2424                           struct mlx4_vhcr *vhcr,
2425                           struct mlx4_cmd_mailbox *inbox,
2426                           struct mlx4_cmd_mailbox *outbox,
2427                           struct mlx4_cmd_info *cmd)
2428 {
2429         int eqn = vhcr->in_modifier;
2430         int res_id = eqn | (slave << 8);
2431         struct res_eq *eq;
2432         int err;
2433
2434         err = get_res(dev, slave, res_id, RES_EQ, &eq);
2435         if (err)
2436                 return err;
2437
2438         if (eq->com.from_state != RES_EQ_HW) {
2439                 err = -EINVAL;
2440                 goto ex_put;
2441         }
2442
2443         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2444
2445 ex_put:
2446         put_res(dev, slave, res_id, RES_EQ);
2447         return err;
2448 }
2449
2450 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2451                           struct mlx4_vhcr *vhcr,
2452                           struct mlx4_cmd_mailbox *inbox,
2453                           struct mlx4_cmd_mailbox *outbox,
2454                           struct mlx4_cmd_info *cmd)
2455 {
2456         int err;
2457         int cqn = vhcr->in_modifier;
2458         struct mlx4_cq_context *cqc = inbox->buf;
2459         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2460         struct res_cq *cq;
2461         struct res_mtt *mtt;
2462
2463         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2464         if (err)
2465                 return err;
2466         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2467         if (err)
2468                 goto out_move;
2469         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2470         if (err)
2471                 goto out_put;
2472         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2473         if (err)
2474                 goto out_put;
2475         atomic_inc(&mtt->ref_count);
2476         cq->mtt = mtt;
2477         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2478         res_end_move(dev, slave, RES_CQ, cqn);
2479         return 0;
2480
2481 out_put:
2482         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2483 out_move:
2484         res_abort_move(dev, slave, RES_CQ, cqn);
2485         return err;
2486 }
2487
2488 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2489                           struct mlx4_vhcr *vhcr,
2490                           struct mlx4_cmd_mailbox *inbox,
2491                           struct mlx4_cmd_mailbox *outbox,
2492                           struct mlx4_cmd_info *cmd)
2493 {
2494         int err;
2495         int cqn = vhcr->in_modifier;
2496         struct res_cq *cq;
2497
2498         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2499         if (err)
2500                 return err;
2501         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2502         if (err)
2503                 goto out_move;
2504         atomic_dec(&cq->mtt->ref_count);
2505         res_end_move(dev, slave, RES_CQ, cqn);
2506         return 0;
2507
2508 out_move:
2509         res_abort_move(dev, slave, RES_CQ, cqn);
2510         return err;
2511 }
2512
2513 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2514                           struct mlx4_vhcr *vhcr,
2515                           struct mlx4_cmd_mailbox *inbox,
2516                           struct mlx4_cmd_mailbox *outbox,
2517                           struct mlx4_cmd_info *cmd)
2518 {
2519         int cqn = vhcr->in_modifier;
2520         struct res_cq *cq;
2521         int err;
2522
2523         err = get_res(dev, slave, cqn, RES_CQ, &cq);
2524         if (err)
2525                 return err;
2526
2527         if (cq->com.from_state != RES_CQ_HW) {
2528                 err = -EBUSY;
                     goto ex_put;
             }
2529
2530         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2531 ex_put:
2532         put_res(dev, slave, cqn, RES_CQ);
2533
2534         return err;
2535 }
2536
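/* MODIFY_CQ with op_modifier 0 is a CQ resize: validate the new MTT
 * range, issue the command, then move the tracker reference from the
 * old MTT range to the new one.
 */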
2537 static int handle_resize(struct mlx4_dev *dev, int slave,
2538                          struct mlx4_vhcr *vhcr,
2539                          struct mlx4_cmd_mailbox *inbox,
2540                          struct mlx4_cmd_mailbox *outbox,
2541                          struct mlx4_cmd_info *cmd,
2542                          struct res_cq *cq)
2543 {
2544         int err;
2545         struct res_mtt *orig_mtt;
2546         struct res_mtt *mtt;
2547         struct mlx4_cq_context *cqc = inbox->buf;
2548         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2549
2550         err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2551         if (err)
2552                 return err;
2553
2554         if (orig_mtt != cq->mtt) {
2555                 err = -EINVAL;
2556                 goto ex_put;
2557         }
2558
2559         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2560         if (err)
2561                 goto ex_put;
2562
2563         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2564         if (err)
2565                 goto ex_put1;
2566         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2567         if (err)
2568                 goto ex_put1;
2569         atomic_dec(&orig_mtt->ref_count);
2570         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2571         atomic_inc(&mtt->ref_count);
2572         cq->mtt = mtt;
2573         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2574         return 0;
2575
2576 ex_put1:
2577         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2578 ex_put:
2579         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2580
2581         return err;
2582 }
2584
2585 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2586                            struct mlx4_vhcr *vhcr,
2587                            struct mlx4_cmd_mailbox *inbox,
2588                            struct mlx4_cmd_mailbox *outbox,
2589                            struct mlx4_cmd_info *cmd)
2590 {
2591         int cqn = vhcr->in_modifier;
2592         struct res_cq *cq;
2593         int err;
2594
2595         err = get_res(dev, slave, cqn, RES_CQ, &cq);
2596         if (err)
2597                 return err;
2598
2599         if (cq->com.from_state != RES_CQ_HW) {
2600                 err = -EBUSY;
                     goto ex_put;
             }
2601
2602         if (vhcr->op_modifier == 0) {
2603                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2604                 goto ex_put;
2605         }
2606
2607         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2608 ex_put:
2609         put_res(dev, slave, cqn, RES_CQ);
2610
2611         return err;
2612 }
2613
2614 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2615 {
2616         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2617         int log_rq_stride = srqc->logstride & 7;
2618         int page_shift = (srqc->log_page_size & 0x3f) + 12;
2619
2620         if (log_srq_size + log_rq_stride + 4 < page_shift)
2621                 return 1;
2622
2623         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2624 }
2625
2626 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2627                            struct mlx4_vhcr *vhcr,
2628                            struct mlx4_cmd_mailbox *inbox,
2629                            struct mlx4_cmd_mailbox *outbox,
2630                            struct mlx4_cmd_info *cmd)
2631 {
2632         int err;
2633         int srqn = vhcr->in_modifier;
2634         struct res_mtt *mtt;
2635         struct res_srq *srq;
2636         struct mlx4_srq_context *srqc = inbox->buf;
2637         int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2638
2639         if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2640                 return -EINVAL;
2641
2642         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2643         if (err)
2644                 return err;
2645         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2646         if (err)
2647                 goto ex_abort;
2648         err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2649                               mtt);
2650         if (err)
2651                 goto ex_put_mtt;
2652
2653         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2654         if (err)
2655                 goto ex_put_mtt;
2656
2657         atomic_inc(&mtt->ref_count);
2658         srq->mtt = mtt;
2659         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2660         res_end_move(dev, slave, RES_SRQ, srqn);
2661         return 0;
2662
2663 ex_put_mtt:
2664         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2665 ex_abort:
2666         res_abort_move(dev, slave, RES_SRQ, srqn);
2667
2668         return err;
2669 }
2670
2671 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2672                            struct mlx4_vhcr *vhcr,
2673                            struct mlx4_cmd_mailbox *inbox,
2674                            struct mlx4_cmd_mailbox *outbox,
2675                            struct mlx4_cmd_info *cmd)
2676 {
2677         int err;
2678         int srqn = vhcr->in_modifier;
2679         struct res_srq *srq;
2680
2681         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2682         if (err)
2683                 return err;
2684         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2685         if (err)
2686                 goto ex_abort;
2687         atomic_dec(&srq->mtt->ref_count);
2688         if (srq->cq)
2689                 atomic_dec(&srq->cq->ref_count);
2690         res_end_move(dev, slave, RES_SRQ, srqn);
2691
2692         return 0;
2693
2694 ex_abort:
2695         res_abort_move(dev, slave, RES_SRQ, srqn);
2696
2697         return err;
2698 }
2699
2700 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2701                            struct mlx4_vhcr *vhcr,
2702                            struct mlx4_cmd_mailbox *inbox,
2703                            struct mlx4_cmd_mailbox *outbox,
2704                            struct mlx4_cmd_info *cmd)
2705 {
2706         int err;
2707         int srqn = vhcr->in_modifier;
2708         struct res_srq *srq;
2709
2710         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2711         if (err)
2712                 return err;
2713         if (srq->com.from_state != RES_SRQ_HW) {
2714                 err = -EBUSY;
2715                 goto out;
2716         }
2717         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2718 out:
2719         put_res(dev, slave, srqn, RES_SRQ);
2720         return err;
2721 }
2722
2723 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2724                          struct mlx4_vhcr *vhcr,
2725                          struct mlx4_cmd_mailbox *inbox,
2726                          struct mlx4_cmd_mailbox *outbox,
2727                          struct mlx4_cmd_info *cmd)
2728 {
2729         int err;
2730         int srqn = vhcr->in_modifier;
2731         struct res_srq *srq;
2732
2733         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2734         if (err)
2735                 return err;
2736
2737         if (srq->com.from_state != RES_SRQ_HW) {
2738                 err = -EBUSY;
2739                 goto out;
2740         }
2741
2742         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2743 out:
2744         put_res(dev, slave, srqn, RES_SRQ);
2745         return err;
2746 }
2747
2748 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2749                         struct mlx4_vhcr *vhcr,
2750                         struct mlx4_cmd_mailbox *inbox,
2751                         struct mlx4_cmd_mailbox *outbox,
2752                         struct mlx4_cmd_info *cmd)
2753 {
2754         int err;
2755         int qpn = vhcr->in_modifier & 0x7fffff;
2756         struct res_qp *qp;
2757
2758         err = get_res(dev, slave, qpn, RES_QP, &qp);
2759         if (err)
2760                 return err;
2761         if (qp->com.from_state != RES_QP_HW) {
2762                 err = -EBUSY;
2763                 goto out;
2764         }
2765
2766         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2767 out:
2768         put_res(dev, slave, qpn, RES_QP);
2769         return err;
2770 }
2771
2772 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2773                               struct mlx4_vhcr *vhcr,
2774                               struct mlx4_cmd_mailbox *inbox,
2775                               struct mlx4_cmd_mailbox *outbox,
2776                               struct mlx4_cmd_info *cmd)
2777 {
2778         struct mlx4_qp_context *context = inbox->buf + 8;
2779         adjust_proxy_tun_qkey(dev, vhcr, context);
2780         update_pkey_index(dev, slave, inbox);
2781         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2782 }
2783
2784 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2785                              struct mlx4_vhcr *vhcr,
2786                              struct mlx4_cmd_mailbox *inbox,
2787                              struct mlx4_cmd_mailbox *outbox,
2788                              struct mlx4_cmd_info *cmd)
2789 {
2790         int err;
2791         struct mlx4_qp_context *qpc = inbox->buf + 8;
2792
2793         err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
2794         if (err)
2795                 return err;
2796
2797         update_pkey_index(dev, slave, inbox);
2798         update_gid(dev, inbox, (u8)slave);
2799         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2800
2801         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2802 }
2803
2804 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2805                             struct mlx4_vhcr *vhcr,
2806                             struct mlx4_cmd_mailbox *inbox,
2807                             struct mlx4_cmd_mailbox *outbox,
2808                             struct mlx4_cmd_info *cmd)
2809 {
2810         int err;
2811         struct mlx4_qp_context *context = inbox->buf + 8;
2812
2813         err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
2814         if (err)
2815                 return err;
2816
2817         update_pkey_index(dev, slave, inbox);
2818         update_gid(dev, inbox, (u8)slave);
2819         adjust_proxy_tun_qkey(dev, vhcr, context);
2820         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2821 }
2822
2823 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2824                             struct mlx4_vhcr *vhcr,
2825                             struct mlx4_cmd_mailbox *inbox,
2826                             struct mlx4_cmd_mailbox *outbox,
2827                             struct mlx4_cmd_info *cmd)
2828 {
2829         int err;
2830         struct mlx4_qp_context *context = inbox->buf + 8;
2831
2832         err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
2833         if (err)
2834                 return err;
2835
2836         update_pkey_index(dev, slave, inbox);
2837         update_gid(dev, inbox, (u8)slave);
2838         adjust_proxy_tun_qkey(dev, vhcr, context);
2839         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2840 }
2841
2843 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2844                               struct mlx4_vhcr *vhcr,
2845                               struct mlx4_cmd_mailbox *inbox,
2846                               struct mlx4_cmd_mailbox *outbox,
2847                               struct mlx4_cmd_info *cmd)
2848 {
2849         struct mlx4_qp_context *context = inbox->buf + 8;
2850         adjust_proxy_tun_qkey(dev, vhcr, context);
2851         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2852 }
2853
2854 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
2855                             struct mlx4_vhcr *vhcr,
2856                             struct mlx4_cmd_mailbox *inbox,
2857                             struct mlx4_cmd_mailbox *outbox,
2858                             struct mlx4_cmd_info *cmd)
2859 {
2860         int err;
2861         struct mlx4_qp_context *context = inbox->buf + 8;
2862
2863         err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
2864         if (err)
2865                 return err;
2866
2867         adjust_proxy_tun_qkey(dev, vhcr, context);
2868         update_gid(dev, inbox, (u8)slave);
2869         update_pkey_index(dev, slave, inbox);
2870         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2871 }
2872
2873 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2874                             struct mlx4_vhcr *vhcr,
2875                             struct mlx4_cmd_mailbox *inbox,
2876                             struct mlx4_cmd_mailbox *outbox,
2877                             struct mlx4_cmd_info *cmd)
2878 {
2879         int err;
2880         struct mlx4_qp_context *context = inbox->buf + 8;
2881
2882         err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
2883         if (err)
2884                 return err;
2885
2886         adjust_proxy_tun_qkey(dev, vhcr, context);
2887         update_gid(dev, inbox, (u8)slave);
2888         update_pkey_index(dev, slave, inbox);
2889         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2890 }
2891
2892 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2893                          struct mlx4_vhcr *vhcr,
2894                          struct mlx4_cmd_mailbox *inbox,
2895                          struct mlx4_cmd_mailbox *outbox,
2896                          struct mlx4_cmd_info *cmd)
2897 {
2898         int err;
2899         int qpn = vhcr->in_modifier & 0x7fffff;
2900         struct res_qp *qp;
2901
2902         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2903         if (err)
2904                 return err;
2905         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2906         if (err)
2907                 goto ex_abort;
2908
2909         atomic_dec(&qp->mtt->ref_count);
2910         atomic_dec(&qp->rcq->ref_count);
2911         atomic_dec(&qp->scq->ref_count);
2912         if (qp->srq)
2913                 atomic_dec(&qp->srq->ref_count);
2914         res_end_move(dev, slave, RES_QP, qpn);
2915         return 0;
2916
2917 ex_abort:
2918         res_abort_move(dev, slave, RES_QP, qpn);
2919
2920         return err;
2921 }
2922
2923 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2924                                 struct res_qp *rqp, u8 *gid)
2925 {
2926         struct res_gid *res;
2927
2928         list_for_each_entry(res, &rqp->mcg_list, list) {
2929                 if (!memcmp(res->gid, gid, 16))
2930                         return res;
2931         }
2932         return NULL;
2933 }
2934
2935 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2936                        u8 *gid, enum mlx4_protocol prot,
2937                        enum mlx4_steer_type steer)
2938 {
2939         struct res_gid *res;
2940         int err;
2941
2942         res = kzalloc(sizeof *res, GFP_KERNEL);
2943         if (!res)
2944                 return -ENOMEM;
2945
2946         spin_lock_irq(&rqp->mcg_spl);
2947         if (find_gid(dev, slave, rqp, gid)) {
2948                 kfree(res);
2949                 err = -EEXIST;
2950         } else {
2951                 memcpy(res->gid, gid, 16);
2952                 res->prot = prot;
2953                 res->steer = steer;
2954                 list_add_tail(&res->list, &rqp->mcg_list);
2955                 err = 0;
2956         }
2957         spin_unlock_irq(&rqp->mcg_spl);
2958
2959         return err;
2960 }
2961
2962 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2963                        u8 *gid, enum mlx4_protocol prot,
2964                        enum mlx4_steer_type steer)
2965 {
2966         struct res_gid *res;
2967         int err;
2968
2969         spin_lock_irq(&rqp->mcg_spl);
2970         res = find_gid(dev, slave, rqp, gid);
2971         if (!res || res->prot != prot || res->steer != steer) {
2972                 err = -EINVAL;
2973         } else {
2974                 list_del(&res->list);
2975                 kfree(res);
2976                 err = 0;
2977         }
2978         spin_unlock_irq(&rqp->mcg_spl);
2979
2980         return err;
2981 }
2982
2983 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2984                                struct mlx4_vhcr *vhcr,
2985                                struct mlx4_cmd_mailbox *inbox,
2986                                struct mlx4_cmd_mailbox *outbox,
2987                                struct mlx4_cmd_info *cmd)
2988 {
2989         struct mlx4_qp qp; /* dummy for calling attach/detach */
2990         u8 *gid = inbox->buf;
2991         enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
2992         int err;
2993         int qpn;
2994         struct res_qp *rqp;
2995         int attach = vhcr->op_modifier;
2996         int block_loopback = vhcr->in_modifier >> 31;
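        /* In B0 steering mode the caller encodes the steering type in
         * bit 1 of byte 7 of the GID (MLX4_MC_STEER vs MLX4_UC_STEER). */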
2997         u8 steer_type_mask = 2;
2998         enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
2999
3000         if (dev->caps.steering_mode != MLX4_STEERING_MODE_B0)
3001                 return -EINVAL;
3002
3003         qpn = vhcr->in_modifier & 0xffffff;
3004         err = get_res(dev, slave, qpn, RES_QP, &rqp);
3005         if (err)
3006                 return err;
3007
3008         qp.qpn = qpn;
3009         if (attach) {
3010                 err = add_mcg_res(dev, slave, rqp, gid, prot, type);
3011                 if (err)
3012                         goto ex_put;
3013
3014                 err = mlx4_qp_attach_common(dev, &qp, gid,
3015                                             block_loopback, prot, type);
3016                 if (err)
3017                         goto ex_rem;
3018         } else {
3019                 err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
3020                 if (err)
3021                         goto ex_put;
3022                 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
3023         }
3024
3025         put_res(dev, slave, qpn, RES_QP);
3026         return 0;
3027
3028 ex_rem:
3029         /* ignore error return below, already in error */
3030         (void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
3031 ex_put:
3032         put_res(dev, slave, qpn, RES_QP);
3033
3034         return err;
3035 }
3036
3037 /*
3038  * MAC validation for Flow Steering rules.
3039  * VF can attach rules only with a mac address which is assigned to it.
3040  */
3041 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3042                                    struct list_head *rlist)
3043 {
3044         struct mac_res *res, *tmp;
3045         __be64 be_mac;
3046
3047         /* make sure it isn't a multicast or broadcast MAC */
3048         if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3049             !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3050                 list_for_each_entry_safe(res, tmp, rlist, list) {
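                        /* res->mac holds the address in the low 48
                         * bits; shift by 16 so the big-endian image
                         * lines up with the 6-byte dst_mac field. */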
3051                         be_mac = cpu_to_be64(res->mac << 16);
3052                         if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3053                                 return 0;
3054                 }
3055                 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3056                        eth_header->eth.dst_mac, slave);
3057                 return -EINVAL;
3058         }
3059         return 0;
3060 }
3061
3062 /*
3063  * In case of missing eth header, append eth header with a MAC address
3064  * assigned to the VF.
3065  */
3066 static int add_eth_header(struct mlx4_dev *dev, int slave,
3067                           struct mlx4_cmd_mailbox *inbox,
3068                           struct list_head *rlist, int header_id)
3069 {
3070         struct mac_res *res, *tmp;
3071         u8 port;
3072         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3073         struct mlx4_net_trans_rule_hw_eth *eth_header;
3074         struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3075         struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3076         __be64 be_mac = 0;
3077         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3078
3079         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3080         port = ctrl->port;
3081         eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3082
3083         /* Clear a space in the inbox for eth header */
3084         switch (header_id) {
3085         case MLX4_NET_TRANS_RULE_ID_IPV4:
3086                 ip_header =
3087                         (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3088                 memmove(ip_header, eth_header,
3089                         sizeof(*ip_header) + sizeof(*l4_header));
3090                 break;
3091         case MLX4_NET_TRANS_RULE_ID_TCP:
3092         case MLX4_NET_TRANS_RULE_ID_UDP:
3093                 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3094                             (eth_header + 1);
3095                 memmove(l4_header, eth_header, sizeof(*l4_header));
3096                 break;
3097         default:
3098                 return -EINVAL;
3099         }
3100         list_for_each_entry_safe(res, tmp, rlist, list) {
3101                 if (port == res->port) {
3102                         be_mac = cpu_to_be64(res->mac << 16);
3103                         break;
3104                 }
3105         }
3106         if (!be_mac) {
3107                 pr_err("Failed adding eth header to FS rule; can't find matching MAC for port %d\n",
3108                        port);
3109                 return -EINVAL;
3110         }
3111
3112         memset(eth_header, 0, sizeof(*eth_header));
3113         eth_header->size = sizeof(*eth_header) >> 2;
3114         eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3115         memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3116         memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3117
3118         return 0;
3119 }
3121
3122 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3123                                          struct mlx4_vhcr *vhcr,
3124                                          struct mlx4_cmd_mailbox *inbox,
3125                                          struct mlx4_cmd_mailbox *outbox,
3126                                          struct mlx4_cmd_info *cmd)
3127 {
3129         struct mlx4_priv *priv = mlx4_priv(dev);
3130         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3131         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3132         int err;
3133         int qpn;
3134         struct res_qp *rqp;
3135         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3136         struct _rule_hw  *rule_header;
3137         int header_id;
3138
3139         if (dev->caps.steering_mode !=
3140             MLX4_STEERING_MODE_DEVICE_MANAGED)
3141                 return -EOPNOTSUPP;
3142
3143         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3144         qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3145         err = get_res(dev, slave, qpn, RES_QP, &rqp);
3146         if (err) {
3147                 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3148                 return err;
3149         }
3150         rule_header = (struct _rule_hw *)(ctrl + 1);
3151         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3152
3153         switch (header_id) {
3154         case MLX4_NET_TRANS_RULE_ID_ETH:
3155                 if (validate_eth_header_mac(slave, rule_header, rlist)) {
3156                         err = -EINVAL;
3157                         goto err_put;
3158                 }
3159                 break;
3160         case MLX4_NET_TRANS_RULE_ID_IB:
3161                 break;
3162         case MLX4_NET_TRANS_RULE_ID_IPV4:
3163         case MLX4_NET_TRANS_RULE_ID_TCP:
3164         case MLX4_NET_TRANS_RULE_ID_UDP:
3165                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3166                 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3167                         err = -EINVAL;
3168                         goto err_put;
3169                 }
3170                 vhcr->in_modifier +=
3171                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3172                 break;
3173         default:
3174                 pr_err("Corrupted mailbox.\n");
3175                 err = -EINVAL;
3176                 goto err_put;
3177         }
3178
3179         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3180                            vhcr->in_modifier, 0,
3181                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3182                            MLX4_CMD_NATIVE);
3183         if (err)
3184                 goto err_put;
3185
3186         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3187         if (err) {
3188                 mlx4_err(dev, "Failed to add flow steering resources\n");
3189                 /* detach rule*/
3190                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3191                          MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3192                          MLX4_CMD_NATIVE);
3193                 goto err_put;
3194         }
3195         atomic_inc(&rqp->ref_count);
3196 err_put:
3197         put_res(dev, slave, qpn, RES_QP);
3198         return err;
3199 }
3200
3201 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3202                                          struct mlx4_vhcr *vhcr,
3203                                          struct mlx4_cmd_mailbox *inbox,
3204                                          struct mlx4_cmd_mailbox *outbox,
3205                                          struct mlx4_cmd_info *cmd)
3206 {
3207         int err;
3208         struct res_qp *rqp;
3209         struct res_fs_rule *rrule;
3210
3211         if (dev->caps.steering_mode !=
3212             MLX4_STEERING_MODE_DEVICE_MANAGED)
3213                 return -EOPNOTSUPP;
3214
3215         err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3216         if (err)
3217                 return err;
3218         /* Release the rule from busy state before removal */
3219         put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3220         err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3221         if (err)
3222                 return err;
3223
3224         err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3225         if (err) {
3226                 mlx4_err(dev, "Failed to remove flow steering resources\n");
3227                 goto out;
3228         }
3229
3230         err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3231                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3232                        MLX4_CMD_NATIVE);
3233         if (!err)
3234                 atomic_dec(&rqp->ref_count);
3235 out:
3236         put_res(dev, slave, rrule->qpn, RES_QP);
3237         return err;
3238 }
3239
3240 enum {
3241         BUSY_MAX_RETRIES = 10
3242 };
3243
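/*
 * Wrapper for QUERY_IF_STAT: mark the counter busy so it cannot be freed
 * while the command executes, then release it again.
 */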
3244 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3245                                struct mlx4_vhcr *vhcr,
3246                                struct mlx4_cmd_mailbox *inbox,
3247                                struct mlx4_cmd_mailbox *outbox,
3248                                struct mlx4_cmd_info *cmd)
3249 {
3250         int err;
3251         int index = vhcr->in_modifier & 0xffff;
3252
3253         err = get_res(dev, slave, index, RES_COUNTER, NULL);
3254         if (err)
3255                 return err;
3256
3257         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3258         put_res(dev, slave, index, RES_COUNTER);
3259         return err;
3260 }
3261
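/*
 * Detach @rqp from every multicast group it joined and free the per-QP
 * res_gid bookkeeping.  Used when force-cleaning a slave's QPs.
 */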
3262 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3263 {
3264         struct res_gid *rgid;
3265         struct res_gid *tmp;
3266         struct mlx4_qp qp; /* dummy for calling attach/detach */
3267
3268         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3269                 qp.qpn = rqp->local_qpn;
3270                 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
3271                                              rgid->steer);
3272                 list_del(&rgid->list);
3273                 kfree(rgid);
3274         }
3275 }
3276
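/*
 * Single pass over the slave's resources of @type: idle entries are
 * claimed (marked busy and removing); the return value counts entries
 * that were already busy and could not be claimed.
 */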
3277 static int _move_all_busy(struct mlx4_dev *dev, int slave,
3278                           enum mlx4_resource type, int print)
3279 {
3280         struct mlx4_priv *priv = mlx4_priv(dev);
3281         struct mlx4_resource_tracker *tracker =
3282                 &priv->mfunc.master.res_tracker;
3283         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3284         struct res_common *r;
3285         struct res_common *tmp;
3286         int busy;
3287
3288         busy = 0;
3289         spin_lock_irq(mlx4_tlock(dev));
3290         list_for_each_entry_safe(r, tmp, rlist, list) {
3291                 if (r->owner == slave) {
3292                         if (!r->removing) {
3293                                 if (r->state == RES_ANY_BUSY) {
3294                                         if (print)
3295                                                 mlx4_dbg(dev,
3296                                                          "%s id 0x%llx is busy\n",
3297                                                           ResourceType(type),
3298                                                           r->res_id);
3299                                         ++busy;
3300                                 } else {
3301                                         r->from_state = r->state;
3302                                         r->state = RES_ANY_BUSY;
3303                                         r->removing = 1;
3304                                 }
3305                         }
3306                 }
3307         }
3308         spin_unlock_irq(mlx4_tlock(dev));
3309
3310         return busy;
3311 }
3312
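/*
 * Keep retrying _move_all_busy() for up to five seconds, yielding the CPU
 * between attempts.  A non-zero return means some resources stayed busy;
 * the final pass prints them via mlx4_dbg().
 */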
3313 static int move_all_busy(struct mlx4_dev *dev, int slave,
3314                          enum mlx4_resource type)
3315 {
3316         unsigned long begin;
3317         int busy;
3318
3319         begin = jiffies;
3320         do {
3321                 busy = _move_all_busy(dev, slave, type, 0);
3322                 if (time_after(jiffies, begin + 5 * HZ))
3323                         break;
3324                 if (busy)
3325                         cond_resched();
3326         } while (busy);
3327
3328         if (busy)
3329                 busy = _move_all_busy(dev, slave, type, 1);
3330
3331         return busy;
3332 }
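
/*
 * Force-release every QP still owned by the slave: detach its multicast
 * attachments, walk the state machine back from HW ownership (2RST_QP)
 * through MAPPED (free ICM) to RESERVED, then drop the tracker entry.
 */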
3333 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3334 {
3335         struct mlx4_priv *priv = mlx4_priv(dev);
3336         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3337         struct list_head *qp_list =
3338                 &tracker->slave_list[slave].res_list[RES_QP];
3339         struct res_qp *qp;
3340         struct res_qp *tmp;
3341         int state;
3342         u64 in_param;
3343         int qpn;
3344         int err;
3345
3346         err = move_all_busy(dev, slave, RES_QP);
3347         if (err)
3348                 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
3349                           "for slave %d\n", slave);
3350
3351         spin_lock_irq(mlx4_tlock(dev));
3352         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3353                 spin_unlock_irq(mlx4_tlock(dev));
3354                 if (qp->com.owner == slave) {
3355                         qpn = qp->com.res_id;
3356                         detach_qp(dev, slave, qp);
3357                         state = qp->com.from_state;
3358                         while (state != 0) {
3359                                 switch (state) {
3360                                 case RES_QP_RESERVED:
3361                                         spin_lock_irq(mlx4_tlock(dev));
3362                                         rb_erase(&qp->com.node,
3363                                                  &tracker->res_tree[RES_QP]);
3364                                         list_del(&qp->com.list);
3365                                         spin_unlock_irq(mlx4_tlock(dev));
3366                                         kfree(qp);
3367                                         state = 0;
3368                                         break;
3369                                 case RES_QP_MAPPED:
3370                                         if (!valid_reserved(dev, slave, qpn))
3371                                                 __mlx4_qp_free_icm(dev, qpn);
3372                                         state = RES_QP_RESERVED;
3373                                         break;
3374                                 case RES_QP_HW:
3375                                         in_param = slave;
3376                                         err = mlx4_cmd(dev, in_param,
3377                                                        qp->local_qpn, 2,
3378                                                        MLX4_CMD_2RST_QP,
3379                                                        MLX4_CMD_TIME_CLASS_A,
3380                                                        MLX4_CMD_NATIVE);
3381                                         if (err)
3382                                                 mlx4_dbg(dev, "rem_slave_qps: failed"
3383                                                          " to move slave %d qpn %d to"
3384                                                          " reset\n", slave,
3385                                                          qp->local_qpn);
3386                                         atomic_dec(&qp->rcq->ref_count);
3387                                         atomic_dec(&qp->scq->ref_count);
3388                                         atomic_dec(&qp->mtt->ref_count);
3389                                         if (qp->srq)
3390                                                 atomic_dec(&qp->srq->ref_count);
3391                                         state = RES_QP_MAPPED;
3392                                         break;
3393                                 default:
3394                                         state = 0;
3395                                 }
3396                         }
3397                 }
3398                 spin_lock_irq(mlx4_tlock(dev));
3399         }
3400         spin_unlock_irq(mlx4_tlock(dev));
3401 }
3402
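/*
 * Force-release the slave's SRQs: HW2SW_SRQ reclaims hardware ownership,
 * then the ICM and the tracker entry are freed and MTT/CQ refs dropped.
 */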
3403 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3404 {
3405         struct mlx4_priv *priv = mlx4_priv(dev);
3406         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3407         struct list_head *srq_list =
3408                 &tracker->slave_list[slave].res_list[RES_SRQ];
3409         struct res_srq *srq;
3410         struct res_srq *tmp;
3411         int state;
3412         u64 in_param;
3414         int srqn;
3415         int err;
3416
3417         err = move_all_busy(dev, slave, RES_SRQ);
3418         if (err)
3419                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3420                           "busy for slave %d\n", slave);
3421
3422         spin_lock_irq(mlx4_tlock(dev));
3423         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3424                 spin_unlock_irq(mlx4_tlock(dev));
3425                 if (srq->com.owner == slave) {
3426                         srqn = srq->com.res_id;
3427                         state = srq->com.from_state;
3428                         while (state != 0) {
3429                                 switch (state) {
3430                                 case RES_SRQ_ALLOCATED:
3431                                         __mlx4_srq_free_icm(dev, srqn);
3432                                         spin_lock_irq(mlx4_tlock(dev));
3433                                         rb_erase(&srq->com.node,
3434                                                  &tracker->res_tree[RES_SRQ]);
3435                                         list_del(&srq->com.list);
3436                                         spin_unlock_irq(mlx4_tlock(dev));
3437                                         kfree(srq);
3438                                         state = 0;
3439                                         break;
3440
3441                                 case RES_SRQ_HW:
3442                                         in_param = slave;
3443                                         err = mlx4_cmd(dev, in_param, srqn, 1,
3444                                                        MLX4_CMD_HW2SW_SRQ,
3445                                                        MLX4_CMD_TIME_CLASS_A,
3446                                                        MLX4_CMD_NATIVE);
3447                                         if (err)
3448                                                 mlx4_dbg(dev, "rem_slave_srqs: failed"
3449                                                          " to move slave %d srq %d to"
3450                                                          " SW ownership\n",
3451                                                          slave, srqn);
3452
3453                                         atomic_dec(&srq->mtt->ref_count);
3454                                         if (srq->cq)
3455                                                 atomic_dec(&srq->cq->ref_count);
3456                                         state = RES_SRQ_ALLOCATED;
3457                                         break;
3458
3459                                 default:
3460                                         state = 0;
3461                                 }
3462                         }
3463                 }
3464                 spin_lock_irq(mlx4_tlock(dev));
3465         }
3466         spin_unlock_irq(mlx4_tlock(dev));
3467 }
3468
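/*
 * Force-release the slave's CQs.  A CQ is only reclaimed once its
 * ref_count reaches zero, i.e. no QP or SRQ still points at it.
 */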
3469 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3470 {
3471         struct mlx4_priv *priv = mlx4_priv(dev);
3472         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3473         struct list_head *cq_list =
3474                 &tracker->slave_list[slave].res_list[RES_CQ];
3475         struct res_cq *cq;
3476         struct res_cq *tmp;
3477         int state;
3478         u64 in_param;
3480         int cqn;
3481         int err;
3482
3483         err = move_all_busy(dev, slave, RES_CQ);
3484         if (err)
3485                 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
3486                           "busy for slave %d\n", slave);
3487
3488         spin_lock_irq(mlx4_tlock(dev));
3489         list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3490                 spin_unlock_irq(mlx4_tlock(dev));
3491                 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3492                         cqn = cq->com.res_id;
3493                         state = cq->com.from_state;
3494                         while (state != 0) {
3495                                 switch (state) {
3496                                 case RES_CQ_ALLOCATED:
3497                                         __mlx4_cq_free_icm(dev, cqn);
3498                                         spin_lock_irq(mlx4_tlock(dev));
3499                                         rb_erase(&cq->com.node,
3500                                                  &tracker->res_tree[RES_CQ]);
3501                                         list_del(&cq->com.list);
3502                                         spin_unlock_irq(mlx4_tlock(dev));
3503                                         kfree(cq);
3504                                         state = 0;
3505                                         break;
3506
3507                                 case RES_CQ_HW:
3508                                         in_param = slave;
3509                                         err = mlx4_cmd(dev, in_param, cqn, 1,
3510                                                        MLX4_CMD_HW2SW_CQ,
3511                                                        MLX4_CMD_TIME_CLASS_A,
3512                                                        MLX4_CMD_NATIVE);
3513                                         if (err)
3514                                                 mlx4_dbg(dev, "rem_slave_cqs: failed"
3515                                                          " to move slave %d cq %d to"
3516                                                          " SW ownership\n",
3517                                                          slave, cqn);
3518                                         atomic_dec(&cq->mtt->ref_count);
3519                                         state = RES_CQ_ALLOCATED;
3520                                         break;
3521
3522                                 default:
3523                                         state = 0;
3524                                 }
3525                         }
3526                 }
3527                 spin_lock_irq(mlx4_tlock(dev));
3528         }
3529         spin_unlock_irq(mlx4_tlock(dev));
3530 }
3531
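/*
 * Force-release the slave's memory regions (MPTs): HW2SW_MPT reclaims
 * hardware ownership, then the ICM mapping and the reserved key are freed.
 */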
3532 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3533 {
3534         struct mlx4_priv *priv = mlx4_priv(dev);
3535         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3536         struct list_head *mpt_list =
3537                 &tracker->slave_list[slave].res_list[RES_MPT];
3538         struct res_mpt *mpt;
3539         struct res_mpt *tmp;
3540         int state;
3541         u64 in_param;
3543         int mptn;
3544         int err;
3545
3546         err = move_all_busy(dev, slave, RES_MPT);
3547         if (err)
3548                 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
3549                           "busy for slave %d\n", slave);
3550
3551         spin_lock_irq(mlx4_tlock(dev));
3552         list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3553                 spin_unlock_irq(mlx4_tlock(dev));
3554                 if (mpt->com.owner == slave) {
3555                         mptn = mpt->com.res_id;
3556                         state = mpt->com.from_state;
3557                         while (state != 0) {
3558                                 switch (state) {
3559                                 case RES_MPT_RESERVED:
3560                                         __mlx4_mpt_release(dev, mpt->key);
3561                                         spin_lock_irq(mlx4_tlock(dev));
3562                                         rb_erase(&mpt->com.node,
3563                                                  &tracker->res_tree[RES_MPT]);
3564                                         list_del(&mpt->com.list);
3565                                         spin_unlock_irq(mlx4_tlock(dev));
3566                                         kfree(mpt);
3567                                         state = 0;
3568                                         break;
3569
3570                                 case RES_MPT_MAPPED:
3571                                         __mlx4_mpt_free_icm(dev, mpt->key);
3572                                         state = RES_MPT_RESERVED;
3573                                         break;
3574
3575                                 case RES_MPT_HW:
3576                                         in_param = slave;
3577                                         err = mlx4_cmd(dev, in_param, mptn, 0,
3578                                                      MLX4_CMD_HW2SW_MPT,
3579                                                      MLX4_CMD_TIME_CLASS_A,
3580                                                      MLX4_CMD_NATIVE);
3581                                         if (err)
3582                                                 mlx4_dbg(dev, "rem_slave_mrs: failed"
3583                                                          " to move slave %d mpt %d to"
3584                                                          " SW ownership\n",
3585                                                          slave, mptn);
3586                                         if (mpt->mtt)
3587                                                 atomic_dec(&mpt->mtt->ref_count);
3588                                         state = RES_MPT_MAPPED;
3589                                         break;
3590                                 default:
3591                                         state = 0;
3592                                 }
3593                         }
3594                 }
3595                 spin_lock_irq(mlx4_tlock(dev));
3596         }
3597         spin_unlock_irq(mlx4_tlock(dev));
3598 }
3599
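/*
 * Force-release the slave's MTT ranges.  MTTs carry no hardware state of
 * their own, so freeing the range and the tracker entry is sufficient.
 */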
3600 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3601 {
3602         struct mlx4_priv *priv = mlx4_priv(dev);
3603         struct mlx4_resource_tracker *tracker =
3604                 &priv->mfunc.master.res_tracker;
3605         struct list_head *mtt_list =
3606                 &tracker->slave_list[slave].res_list[RES_MTT];
3607         struct res_mtt *mtt;
3608         struct res_mtt *tmp;
3609         int state;
3611         int base;
3612         int err;
3613
3614         err = move_all_busy(dev, slave, RES_MTT);
3615         if (err)
3616                 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
3617                           "busy for slave %d\n", slave);
3618
3619         spin_lock_irq(mlx4_tlock(dev));
3620         list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3621                 spin_unlock_irq(mlx4_tlock(dev));
3622                 if (mtt->com.owner == slave) {
3623                         base = mtt->com.res_id;
3624                         state = mtt->com.from_state;
3625                         while (state != 0) {
3626                                 switch (state) {
3627                                 case RES_MTT_ALLOCATED:
3628                                         __mlx4_free_mtt_range(dev, base,
3629                                                               mtt->order);
3630                                         spin_lock_irq(mlx4_tlock(dev));
3631                                         rb_erase(&mtt->com.node,
3632                                                  &tracker->res_tree[RES_MTT]);
3633                                         list_del(&mtt->com.list);
3634                                         spin_unlock_irq(mlx4_tlock(dev));
3635                                         kfree(mtt);
3636                                         state = 0;
3637                                         break;
3638
3639                                 default:
3640                                         state = 0;
3641                                 }
3642                         }
3643                 }
3644                 spin_lock_irq(mlx4_tlock(dev));
3645         }
3646         spin_unlock_irq(mlx4_tlock(dev));
3647 }
3648
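/*
 * Force-release the slave's flow steering rules: each rule is detached in
 * firmware and then dropped from the tracker.
 */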
3649 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3650 {
3651         struct mlx4_priv *priv = mlx4_priv(dev);
3652         struct mlx4_resource_tracker *tracker =
3653                 &priv->mfunc.master.res_tracker;
3654         struct list_head *fs_rule_list =
3655                 &tracker->slave_list[slave].res_list[RES_FS_RULE];
3656         struct res_fs_rule *fs_rule;
3657         struct res_fs_rule *tmp;
3658         int state;
3659         u64 base;
3660         int err;
3661
3662         err = move_all_busy(dev, slave, RES_FS_RULE);
3663         if (err)
3664                 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
3665                           slave);
3666
3667         spin_lock_irq(mlx4_tlock(dev));
3668         list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3669                 spin_unlock_irq(mlx4_tlock(dev));
3670                 if (fs_rule->com.owner == slave) {
3671                         base = fs_rule->com.res_id;
3672                         state = fs_rule->com.from_state;
3673                         while (state != 0) {
3674                                 switch (state) {
3675                                 case RES_FS_RULE_ALLOCATED:
3676                                         /* detach rule */
3677                                         err = mlx4_cmd(dev, base, 0, 0,
3678                                                        MLX4_QP_FLOW_STEERING_DETACH,
3679                                                        MLX4_CMD_TIME_CLASS_A,
3680                                                        MLX4_CMD_NATIVE);
3681
3682                                         spin_lock_irq(mlx4_tlock(dev));
3683                                         rb_erase(&fs_rule->com.node,
3684                                                  &tracker->res_tree[RES_FS_RULE]);
3685                                         list_del(&fs_rule->com.list);
3686                                         spin_unlock_irq(mlx4_tlock(dev));
3687                                         kfree(fs_rule);
3688                                         state = 0;
3689                                         break;
3690
3691                                 default:
3692                                         state = 0;
3693                                 }
3694                         }
3695                 }
3696                 spin_lock_irq(mlx4_tlock(dev));
3697         }
3698         spin_unlock_irq(mlx4_tlock(dev));
3699 }
3700
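/*
 * Force-release the slave's EQs: HW2SW_EQ reclaims hardware ownership,
 * the MTT reference is dropped and the tracker entry is freed.
 */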
3701 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3702 {
3703         struct mlx4_priv *priv = mlx4_priv(dev);
3704         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3705         struct list_head *eq_list =
3706                 &tracker->slave_list[slave].res_list[RES_EQ];
3707         struct res_eq *eq;
3708         struct res_eq *tmp;
3709         int err;
3710         int state;
3712         int eqn;
3713         struct mlx4_cmd_mailbox *mailbox;
3714
3715         err = move_all_busy(dev, slave, RES_EQ);
3716         if (err)
3717                 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3718                           "busy for slave %d\n", slave);
3719
3720         spin_lock_irq(mlx4_tlock(dev));
3721         list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3722                 spin_unlock_irq(mlx4_tlock(dev));
3723                 if (eq->com.owner == slave) {
3724                         eqn = eq->com.res_id;
3725                         state = eq->com.from_state;
3726                         while (state != 0) {
3727                                 switch (state) {
3728                                 case RES_EQ_RESERVED:
3729                                         spin_lock_irq(mlx4_tlock(dev));
3730                                         rb_erase(&eq->com.node,
3731                                                  &tracker->res_tree[RES_EQ]);
3732                                         list_del(&eq->com.list);
3733                                         spin_unlock_irq(mlx4_tlock(dev));
3734                                         kfree(eq);
3735                                         state = 0;
3736                                         break;
3737
3738                                 case RES_EQ_HW:
3739                                         mailbox = mlx4_alloc_cmd_mailbox(dev);
3740                                         if (IS_ERR(mailbox)) {
3741                                                 cond_resched();
3742                                                 continue;
3743                                         }
3744                                         err = mlx4_cmd_box(dev, slave, 0,
3745                                                            eqn & 0xff, 0,
3746                                                            MLX4_CMD_HW2SW_EQ,
3747                                                            MLX4_CMD_TIME_CLASS_A,
3748                                                            MLX4_CMD_NATIVE);
3749                                         if (err)
3750                                                 mlx4_dbg(dev, "rem_slave_eqs: failed"
3751                                                          " to move slave %d eq %d to"
3752                                                          " SW ownership\n", slave, eqn);
3753                                         mlx4_free_cmd_mailbox(dev, mailbox);
3754                                         atomic_dec(&eq->mtt->ref_count);
3755                                         state = RES_EQ_RESERVED;
3756                                         break;
3757
3758                                 default:
3759                                         state = 0;
3760                                 }
3761                         }
3762                 }
3763                 spin_lock_irq(mlx4_tlock(dev));
3764         }
3765         spin_unlock_irq(mlx4_tlock(dev));
3766 }
3767
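/*
 * Force-release the slave's counters; they are unlinked from the tracker
 * and freed back to the allocator in one step.
 */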
3768 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3769 {
3770         struct mlx4_priv *priv = mlx4_priv(dev);
3771         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3772         struct list_head *counter_list =
3773                 &tracker->slave_list[slave].res_list[RES_COUNTER];
3774         struct res_counter *counter;
3775         struct res_counter *tmp;
3776         int err;
3777         int index;
3778
3779         err = move_all_busy(dev, slave, RES_COUNTER);
3780         if (err)
3781                 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
3782                           "busy for slave %d\n", slave);
3783
3784         spin_lock_irq(mlx4_tlock(dev));
3785         list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3786                 if (counter->com.owner == slave) {
3787                         index = counter->com.res_id;
3788                         rb_erase(&counter->com.node,
3789                                  &tracker->res_tree[RES_COUNTER]);
3790                         list_del(&counter->com.list);
3791                         kfree(counter);
3792                         __mlx4_counter_free(dev, index);
3793                 }
3794         }
3795         spin_unlock_irq(mlx4_tlock(dev));
3796 }
3797
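/*
 * Force-release the slave's XRC domains, analogous to rem_slave_counters.
 */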
3798 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3799 {
3800         struct mlx4_priv *priv = mlx4_priv(dev);
3801         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3802         struct list_head *xrcdn_list =
3803                 &tracker->slave_list[slave].res_list[RES_XRCD];
3804         struct res_xrcdn *xrcd;
3805         struct res_xrcdn *tmp;
3806         int err;
3807         int xrcdn;
3808
3809         err = move_all_busy(dev, slave, RES_XRCD);
3810         if (err)
3811                 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
3812                           "busy for slave %d\n", slave);
3813
3814         spin_lock_irq(mlx4_tlock(dev));
3815         list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3816                 if (xrcd->com.owner == slave) {
3817                         xrcdn = xrcd->com.res_id;
3818                         rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
3819                         list_del(&xrcd->com.list);
3820                         kfree(xrcd);
3821                         __mlx4_xrcd_free(dev, xrcdn);
3822                 }
3823         }
3824         spin_unlock_irq(mlx4_tlock(dev));
3825 }
3826
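/*
 * Tear down everything a slave still owns, under the slave's tracker
 * mutex.  Ordering matters: flow rules and QPs are removed before the
 * CQs, SRQs, MRs and MTTs they hold references on.
 */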
3827 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3828 {
3829         struct mlx4_priv *priv = mlx4_priv(dev);
3830
3831         mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3832         /* MACs are removed first; VLANs are not tracked here */
3833         rem_slave_macs(dev, slave);
3834         rem_slave_fs_rule(dev, slave);
3835         rem_slave_qps(dev, slave);
3836         rem_slave_srqs(dev, slave);
3837         rem_slave_cqs(dev, slave);
3838         rem_slave_mrs(dev, slave);
3839         rem_slave_eqs(dev, slave);
3840         rem_slave_mtts(dev, slave);
3841         rem_slave_counters(dev, slave);
3842         rem_slave_xrcdns(dev, slave);
3843         mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3844 }