2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
49 #include "mlx4_stats.h"
51 #define MLX4_MAC_VALID (1ull << 63)
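/* Counter partitioning (as used by the quota setup below, a descriptive
 * note rather than hardware-defined values): each port sets aside
 * MLX4_PF_COUNTERS_PER_PORT counters for the PF and guarantees
 * MLX4_VF_COUNTERS_PER_PORT counter to every VF that can be granted one;
 * whatever remains forms a shared free pool.
 */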
52 #define MLX4_PF_COUNTERS_PER_PORT 2
53 #define MLX4_VF_COUNTERS_PER_PORT 1
56 struct list_head list;
64 struct list_head list;
72 struct list_head list;
87 struct list_head list;
89 enum mlx4_protocol prot;
90 enum mlx4_steer_type steer;
95 RES_QP_BUSY = RES_ANY_BUSY,
97 /* QP number was allocated */
100 /* ICM memory for QP context was mapped */
103 /* QP is in hw ownership */
108 struct res_common com;
113 struct list_head mcg_list;
118 /* saved qp params before VST enforcement in order to restore on VGT */
128 enum res_mtt_states {
129 RES_MTT_BUSY = RES_ANY_BUSY,
133 static inline const char *mtt_states_str(enum res_mtt_states state)
136 case RES_MTT_BUSY: return "RES_MTT_BUSY";
137 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
138 default: return "Unknown";
143 struct res_common com;
148 enum res_mpt_states {
149 RES_MPT_BUSY = RES_ANY_BUSY,
156 struct res_common com;
162 RES_EQ_BUSY = RES_ANY_BUSY,
168 struct res_common com;
173 RES_CQ_BUSY = RES_ANY_BUSY,
179 struct res_common com;
184 enum res_srq_states {
185 RES_SRQ_BUSY = RES_ANY_BUSY,
191 struct res_common com;
197 enum res_counter_states {
198 RES_COUNTER_BUSY = RES_ANY_BUSY,
199 RES_COUNTER_ALLOCATED,
203 struct res_common com;
207 enum res_xrcdn_states {
208 RES_XRCD_BUSY = RES_ANY_BUSY,
213 struct res_common com;
217 enum res_fs_rule_states {
218 RES_FS_RULE_BUSY = RES_ANY_BUSY,
219 RES_FS_RULE_ALLOCATED,
223 struct res_common com;
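/* Tracked resources of every type live in a per-type red-black tree
 * (res_tree[type]) keyed by res_id, and are additionally linked into the
 * owning slave's res_list so they can be enumerated per function.
 */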
227 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
229 struct rb_node *node = root->rb_node;
232 struct res_common *res = container_of(node, struct res_common,
235 if (res_id < res->res_id)
236 node = node->rb_left;
237 else if (res_id > res->res_id)
238 node = node->rb_right;
245 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
247 struct rb_node **new = &(root->rb_node), *parent = NULL;
249 /* Figure out where to put new node */
251 struct res_common *this = container_of(*new, struct res_common,
255 if (res->res_id < this->res_id)
256 new = &((*new)->rb_left);
257 else if (res->res_id > this->res_id)
258 new = &((*new)->rb_right);
263 /* Add new node and rebalance tree. */
264 rb_link_node(&res->node, parent, new);
265 rb_insert_color(&res->node, root);
280 static const char *resource_str(enum mlx4_resource rt)
283 case RES_QP: return "RES_QP";
284 case RES_CQ: return "RES_CQ";
285 case RES_SRQ: return "RES_SRQ";
286 case RES_MPT: return "RES_MPT";
287 case RES_MTT: return "RES_MTT";
288 case RES_MAC: return "RES_MAC";
289 case RES_VLAN: return "RES_VLAN";
290 case RES_EQ: return "RES_EQ";
291 case RES_COUNTER: return "RES_COUNTER";
292 case RES_FS_RULE: return "RES_FS_RULE";
293 case RES_XRCD: return "RES_XRCD";
294 default: return "Unknown resource type !!!";
298 static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
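/* Accounting model: every function has a per-resource quota (hard ceiling)
 * and a guaranteed share.  Allocations beyond the guarantee are served from
 * a shared free pool, but only while the pool does not dip below the amount
 * still reserved for the other functions' guarantees.  MAC and VLAN
 * resources are accounted per port (port > 0); all others are global.
 */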
299 static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
300 enum mlx4_resource res_type, int count,
303 struct mlx4_priv *priv = mlx4_priv(dev);
304 struct resource_allocator *res_alloc =
305 &priv->mfunc.master.res_tracker.res_alloc[res_type];
307 int allocated, free, reserved, guaranteed, from_free;
310 if (slave > dev->persist->num_vfs)
313 spin_lock(&res_alloc->alloc_lock);
314 allocated = (port > 0) ?
315 res_alloc->allocated[(port - 1) *
316 (dev->persist->num_vfs + 1) + slave] :
317 res_alloc->allocated[slave];
318 free = (port > 0) ? res_alloc->res_port_free[port - 1] :
320 reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
321 res_alloc->res_reserved;
322 guaranteed = res_alloc->guaranteed[slave];
324 if (allocated + count > res_alloc->quota[slave]) {
325 mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
326 slave, port, resource_str(res_type), count,
327 allocated, res_alloc->quota[slave]);
331 if (allocated + count <= guaranteed) {
335 /* portion may need to be obtained from free area */
336 if (guaranteed - allocated > 0)
337 from_free = count - (guaranteed - allocated);
341 from_rsvd = count - from_free;
343 if (free - from_free >= reserved)
346 mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
347 slave, port, resource_str(res_type), free,
348 from_free, reserved);
352 /* grant the request */
354 res_alloc->allocated[(port - 1) *
355 (dev->persist->num_vfs + 1) + slave] += count;
356 res_alloc->res_port_free[port - 1] -= count;
357 res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
359 res_alloc->allocated[slave] += count;
360 res_alloc->res_free -= count;
361 res_alloc->res_reserved -= from_rsvd;
366 spin_unlock(&res_alloc->alloc_lock);
370 static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
371 enum mlx4_resource res_type, int count,
374 struct mlx4_priv *priv = mlx4_priv(dev);
375 struct resource_allocator *res_alloc =
376 &priv->mfunc.master.res_tracker.res_alloc[res_type];
377 int allocated, guaranteed, from_rsvd;
379 if (slave > dev->persist->num_vfs)
382 spin_lock(&res_alloc->alloc_lock);
384 allocated = (port > 0) ?
385 res_alloc->allocated[(port - 1) *
386 (dev->persist->num_vfs + 1) + slave] :
387 res_alloc->allocated[slave];
388 guaranteed = res_alloc->guaranteed[slave];
390 if (allocated - count >= guaranteed) {
393 /* portion may need to be returned to reserved area */
394 if (allocated - guaranteed > 0)
395 from_rsvd = count - (allocated - guaranteed);
401 res_alloc->allocated[(port - 1) *
402 (dev->persist->num_vfs + 1) + slave] -= count;
403 res_alloc->res_port_free[port - 1] += count;
404 res_alloc->res_port_rsvd[port - 1] += from_rsvd;
406 res_alloc->allocated[slave] -= count;
407 res_alloc->res_free += count;
408 res_alloc->res_reserved += from_rsvd;
411 spin_unlock(&res_alloc->alloc_lock);
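/* Default quota scheme: each function is guaranteed
 *   num_instances / (2 * (num_vfs + 1))
 * instances and may allocate at most num_instances / 2 + guaranteed.
 * For example, num_instances = 1024 with num_vfs = 3 gives a guarantee of
 * 128 and a quota of 640 per function.
 */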
415 static inline void initialize_res_quotas(struct mlx4_dev *dev,
416 struct resource_allocator *res_alloc,
417 enum mlx4_resource res_type,
418 int vf, int num_instances)
420 res_alloc->guaranteed[vf] = num_instances /
421 (2 * (dev->persist->num_vfs + 1));
422 res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
423 if (vf == mlx4_master_func_num(dev)) {
424 res_alloc->res_free = num_instances;
425 if (res_type == RES_MTT) {
426 /* reserved mtts will be taken out of the PF allocation */
427 res_alloc->res_free += dev->caps.reserved_mtts;
428 res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
429 res_alloc->quota[vf] += dev->caps.reserved_mtts;
434 void mlx4_init_quotas(struct mlx4_dev *dev)
436 struct mlx4_priv *priv = mlx4_priv(dev);
439 /* quotas for VFs are initialized in mlx4_slave_cap */
440 if (mlx4_is_slave(dev))
443 if (!mlx4_is_mfunc(dev)) {
444 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
445 mlx4_num_reserved_sqps(dev);
446 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
447 dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
448 dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
449 dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
453 pf = mlx4_master_func_num(dev);
455 priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
457 priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
459 priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
461 priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
463 priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
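/* How many VFs can each be guaranteed MLX4_VF_COUNTERS_PER_PORT counters
 * per port, after setting aside the sink counter and the PF's
 * MLX4_PF_COUNTERS_PER_PORT counters on every port.
 */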
466 static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
468 /* exclude the reserved sink counter */

469 return (dev->caps.max_counters - 1 -
470 (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
474 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
476 struct mlx4_priv *priv = mlx4_priv(dev);
479 int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
481 priv->mfunc.master.res_tracker.slave_list =
482 kzalloc(dev->num_slaves * sizeof(struct slave_list),
484 if (!priv->mfunc.master.res_tracker.slave_list)
487 for (i = 0 ; i < dev->num_slaves; i++) {
488 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
489 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
490 slave_list[i].res_list[t]);
491 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
494 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
496 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
497 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
499 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
500 struct resource_allocator *res_alloc =
501 &priv->mfunc.master.res_tracker.res_alloc[i];
502 res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
503 sizeof(int), GFP_KERNEL);
504 res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
505 sizeof(int), GFP_KERNEL);
506 if (i == RES_MAC || i == RES_VLAN)
507 res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
508 (dev->persist->num_vfs
510 sizeof(int), GFP_KERNEL);
512 res_alloc->allocated = kzalloc((dev->persist->
514 sizeof(int), GFP_KERNEL);
515 /* Exclude the sink counter from the free pool */
516 if (i == RES_COUNTER)
517 res_alloc->res_free = dev->caps.max_counters - 1;
519 if (!res_alloc->quota || !res_alloc->guaranteed ||
520 !res_alloc->allocated)
523 spin_lock_init(&res_alloc->alloc_lock);
524 for (t = 0; t < dev->persist->num_vfs + 1; t++) {
525 struct mlx4_active_ports actv_ports =
526 mlx4_get_active_ports(dev, t);
529 initialize_res_quotas(dev, res_alloc, RES_QP,
530 t, dev->caps.num_qps -
531 dev->caps.reserved_qps -
532 mlx4_num_reserved_sqps(dev));
535 initialize_res_quotas(dev, res_alloc, RES_CQ,
536 t, dev->caps.num_cqs -
537 dev->caps.reserved_cqs);
540 initialize_res_quotas(dev, res_alloc, RES_SRQ,
541 t, dev->caps.num_srqs -
542 dev->caps.reserved_srqs);
545 initialize_res_quotas(dev, res_alloc, RES_MPT,
546 t, dev->caps.num_mpts -
547 dev->caps.reserved_mrws);
550 initialize_res_quotas(dev, res_alloc, RES_MTT,
551 t, dev->caps.num_mtts -
552 dev->caps.reserved_mtts);
555 if (t == mlx4_master_func_num(dev)) {
556 int max_vfs_pport = 0;
557 /* Calculate the max number of VFs active on any single port */
559 for (j = 0; j < dev->caps.num_ports;
561 struct mlx4_slaves_pport slaves_pport =
562 mlx4_phys_to_slaves_pport(dev, j + 1);
563 unsigned current_slaves =
564 bitmap_weight(slaves_pport.slaves,
565 dev->caps.num_ports) - 1;
566 if (max_vfs_pport < current_slaves)
570 res_alloc->quota[t] =
573 res_alloc->guaranteed[t] = 2;
574 for (j = 0; j < MLX4_MAX_PORTS; j++)
575 res_alloc->res_port_free[j] =
578 res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
579 res_alloc->guaranteed[t] = 2;
583 if (t == mlx4_master_func_num(dev)) {
584 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
585 res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
586 for (j = 0; j < MLX4_MAX_PORTS; j++)
587 res_alloc->res_port_free[j] =
590 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
591 res_alloc->guaranteed[t] = 0;
595 res_alloc->quota[t] = dev->caps.max_counters;
596 if (t == mlx4_master_func_num(dev))
597 res_alloc->guaranteed[t] =
598 MLX4_PF_COUNTERS_PER_PORT *
600 else if (t <= max_vfs_guarantee_counter)
601 res_alloc->guaranteed[t] =
602 MLX4_VF_COUNTERS_PER_PORT *
605 res_alloc->guaranteed[t] = 0;
606 res_alloc->res_free -= res_alloc->guaranteed[t];
611 if (i == RES_MAC || i == RES_VLAN) {
612 for (j = 0; j < dev->caps.num_ports; j++)
613 if (test_bit(j, actv_ports.ports))
614 res_alloc->res_port_rsvd[j] +=
615 res_alloc->guaranteed[t];
617 res_alloc->res_reserved += res_alloc->guaranteed[t];
621 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
625 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
626 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
627 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
628 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
629 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
630 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
631 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
636 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
637 enum mlx4_res_tracker_free_type type)
639 struct mlx4_priv *priv = mlx4_priv(dev);
642 if (priv->mfunc.master.res_tracker.slave_list) {
643 if (type != RES_TR_FREE_STRUCTS_ONLY) {
644 for (i = 0; i < dev->num_slaves; i++) {
645 if (type == RES_TR_FREE_ALL ||
646 dev->caps.function != i)
647 mlx4_delete_all_resources_for_slave(dev, i);
649 /* free master's vlans */
650 i = dev->caps.function;
651 mlx4_reset_roce_gids(dev, i);
652 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
653 rem_slave_vlans(dev, i);
654 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
657 if (type != RES_TR_FREE_SLAVES_ONLY) {
658 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
659 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
660 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
661 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
662 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
663 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
664 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
666 kfree(priv->mfunc.master.res_tracker.slave_list);
667 priv->mfunc.master.res_tracker.slave_list = NULL;
672 static void update_pkey_index(struct mlx4_dev *dev, int slave,
673 struct mlx4_cmd_mailbox *inbox)
675 u8 sched = *(u8 *)(inbox->buf + 64);
676 u8 orig_index = *(u8 *)(inbox->buf + 35);
678 struct mlx4_priv *priv = mlx4_priv(dev);
681 port = (sched >> 6 & 1) + 1;
683 new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
684 *(u8 *)(inbox->buf + 35) = new_index;
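/* Rewrite the GID index a guest wrote into the QP context so that it lands
 * in the GID range owned by that slave: on Ethernet ports the index is
 * offset by the slave's base GID index, on IB ports it is replaced by the
 * slave id.  This is applied to the primary path (UD/RC/UC/XRC) and, when
 * the corresponding optpar bit is set, to the alternate path as well.
 */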
687 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
690 struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
691 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
692 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
695 if (MLX4_QP_ST_UD == ts) {
696 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
697 if (mlx4_is_eth(dev, port))
698 qp_ctx->pri_path.mgid_index =
699 mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
701 qp_ctx->pri_path.mgid_index = slave | 0x80;
703 } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
704 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
705 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
706 if (mlx4_is_eth(dev, port)) {
707 qp_ctx->pri_path.mgid_index +=
708 mlx4_get_base_gid_ix(dev, slave, port);
709 qp_ctx->pri_path.mgid_index &= 0x7f;
711 qp_ctx->pri_path.mgid_index = slave & 0x7F;
714 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
715 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
716 if (mlx4_is_eth(dev, port)) {
717 qp_ctx->alt_path.mgid_index +=
718 mlx4_get_base_gid_ix(dev, slave, port);
719 qp_ctx->alt_path.mgid_index &= 0x7f;
721 qp_ctx->alt_path.mgid_index = slave & 0x7F;
727 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
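/* Enforce the PF-administered vport state on a guest's QP transition:
 * bind the QP to a counter for its port and, when a VST VLAN is configured,
 * force the VLAN id, QoS priority and (optionally) the spoof-checked source
 * MAC into the QP context, overriding whatever the guest supplied.
 */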
730 static int update_vport_qp_param(struct mlx4_dev *dev,
731 struct mlx4_cmd_mailbox *inbox,
734 struct mlx4_qp_context *qpc = inbox->buf + 8;
735 struct mlx4_vport_oper_state *vp_oper;
736 struct mlx4_priv *priv;
740 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
741 priv = mlx4_priv(dev);
742 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
743 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
745 err = handle_counter(dev, qpc, slave, port);
749 if (MLX4_VGT != vp_oper->state.default_vlan) {
750 /* the reserved QPs (special, proxy, tunnel)
751 * do not operate over vlans
753 if (mlx4_is_qp_reserved(dev, qpn))
756 /* force VLAN stripping by clearing VSD; an MLX QP here means Raw Ethernet */
757 if (qp_type == MLX4_QP_ST_UD ||
758 (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
759 if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
760 *(__be32 *)inbox->buf =
761 cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
762 MLX4_QP_OPTPAR_VLAN_STRIPPING);
763 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
765 struct mlx4_update_qp_params params = {.flags = 0};
767 err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, ¶ms);
773 if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
774 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
775 qpc->pri_path.vlan_control =
776 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
777 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
778 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
779 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
780 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
781 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
782 } else if (0 != vp_oper->state.default_vlan) {
783 qpc->pri_path.vlan_control =
784 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
785 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
786 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
787 } else { /* priority tagged */
788 qpc->pri_path.vlan_control =
789 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
790 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
793 qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
794 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
795 qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
796 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
797 qpc->pri_path.sched_queue &= 0xC7;
798 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
799 qpc->qos_vport = vp_oper->state.qos_vport;
801 if (vp_oper->state.spoofchk) {
802 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
803 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
809 static int mpt_mask(struct mlx4_dev *dev)
811 return dev->caps.num_mpts - 1;
814 static void *find_res(struct mlx4_dev *dev, u64 res_id,
815 enum mlx4_resource type)
817 struct mlx4_priv *priv = mlx4_priv(dev);
819 return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
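/* get_res()/put_res() implement a simple busy protocol: get_res() verifies
 * ownership, saves the current state and marks the resource RES_ANY_BUSY so
 * no other command can move it; put_res() restores the saved state.  Both
 * run under the tracker lock (mlx4_tlock).
 */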
823 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
824 enum mlx4_resource type,
827 struct res_common *r;
830 spin_lock_irq(mlx4_tlock(dev));
831 r = find_res(dev, res_id, type);
837 if (r->state == RES_ANY_BUSY) {
842 if (r->owner != slave) {
847 r->from_state = r->state;
848 r->state = RES_ANY_BUSY;
851 *((struct res_common **)res) = r;
854 spin_unlock_irq(mlx4_tlock(dev));
858 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
859 enum mlx4_resource type,
860 u64 res_id, int *slave)
863 struct res_common *r;
869 spin_lock(mlx4_tlock(dev));
871 r = find_res(dev, id, type);
876 spin_unlock(mlx4_tlock(dev));
881 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
882 enum mlx4_resource type)
884 struct res_common *r;
886 spin_lock_irq(mlx4_tlock(dev));
887 r = find_res(dev, res_id, type);
889 r->state = r->from_state;
890 spin_unlock_irq(mlx4_tlock(dev));
893 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
894 u64 in_param, u64 *out_param, int port);
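/* Counter handling for guest QP contexts: if the guest supplied a real
 * counter index, just record the port it is now used on; otherwise reuse a
 * counter the slave already owns on that port, or allocate a fresh one,
 * leaving the sink counter index in place when none is available.
 */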
896 static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
899 struct res_common *r;
900 struct res_counter *counter;
903 if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
906 spin_lock_irq(mlx4_tlock(dev));
907 r = find_res(dev, counter_index, RES_COUNTER);
908 if (!r || r->owner != slave)
910 counter = container_of(r, struct res_counter, com);
912 counter->port = port;
914 spin_unlock_irq(mlx4_tlock(dev));
918 static int handle_unexisting_counter(struct mlx4_dev *dev,
919 struct mlx4_qp_context *qpc, u8 slave,
922 struct mlx4_priv *priv = mlx4_priv(dev);
923 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
924 struct res_common *tmp;
925 struct res_counter *counter;
926 u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
929 spin_lock_irq(mlx4_tlock(dev));
930 list_for_each_entry(tmp,
931 &tracker->slave_list[slave].res_list[RES_COUNTER],
933 counter = container_of(tmp, struct res_counter, com);
934 if (port == counter->port) {
935 qpc->pri_path.counter_index = counter->com.res_id;
936 spin_unlock_irq(mlx4_tlock(dev));
940 spin_unlock_irq(mlx4_tlock(dev));
942 /* No existing counter, need to allocate a new counter */
943 err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
945 if (err == -ENOENT) {
947 } else if (err && err != -ENOSPC) {
948 mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
949 __func__, slave, err);
951 qpc->pri_path.counter_index = counter_idx;
952 mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
953 __func__, slave, qpc->pri_path.counter_index);
960 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
963 if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
964 return handle_existing_counter(dev, slave, port,
965 qpc->pri_path.counter_index);
967 return handle_unexisting_counter(dev, qpc, slave, port);
970 static struct res_common *alloc_qp_tr(int id)
974 ret = kzalloc(sizeof *ret, GFP_KERNEL);
978 ret->com.res_id = id;
979 ret->com.state = RES_QP_RESERVED;
981 INIT_LIST_HEAD(&ret->mcg_list);
982 spin_lock_init(&ret->mcg_spl);
983 atomic_set(&ret->ref_count, 0);
988 static struct res_common *alloc_mtt_tr(int id, int order)
992 ret = kzalloc(sizeof *ret, GFP_KERNEL);
996 ret->com.res_id = id;
998 ret->com.state = RES_MTT_ALLOCATED;
999 atomic_set(&ret->ref_count, 0);
1004 static struct res_common *alloc_mpt_tr(int id, int key)
1006 struct res_mpt *ret;
1008 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1012 ret->com.res_id = id;
1013 ret->com.state = RES_MPT_RESERVED;
1019 static struct res_common *alloc_eq_tr(int id)
1023 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1027 ret->com.res_id = id;
1028 ret->com.state = RES_EQ_RESERVED;
1033 static struct res_common *alloc_cq_tr(int id)
1037 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1041 ret->com.res_id = id;
1042 ret->com.state = RES_CQ_ALLOCATED;
1043 atomic_set(&ret->ref_count, 0);
1048 static struct res_common *alloc_srq_tr(int id)
1050 struct res_srq *ret;
1052 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1056 ret->com.res_id = id;
1057 ret->com.state = RES_SRQ_ALLOCATED;
1058 atomic_set(&ret->ref_count, 0);
1063 static struct res_common *alloc_counter_tr(int id, int port)
1065 struct res_counter *ret;
1067 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1071 ret->com.res_id = id;
1072 ret->com.state = RES_COUNTER_ALLOCATED;
1078 static struct res_common *alloc_xrcdn_tr(int id)
1080 struct res_xrcdn *ret;
1082 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1086 ret->com.res_id = id;
1087 ret->com.state = RES_XRCD_ALLOCATED;
1092 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
1094 struct res_fs_rule *ret;
1096 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1100 ret->com.res_id = id;
1101 ret->com.state = RES_FS_RULE_ALLOCATED;
1106 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
1109 struct res_common *ret;
1113 ret = alloc_qp_tr(id);
1116 ret = alloc_mpt_tr(id, extra);
1119 ret = alloc_mtt_tr(id, extra);
1122 ret = alloc_eq_tr(id);
1125 ret = alloc_cq_tr(id);
1128 ret = alloc_srq_tr(id);
1131 pr_err("implementation missing\n");
1134 ret = alloc_counter_tr(id, extra);
1137 ret = alloc_xrcdn_tr(id);
1140 ret = alloc_fs_rule_tr(id, extra);
1151 int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
1152 struct mlx4_counter *data)
1154 struct mlx4_priv *priv = mlx4_priv(dev);
1155 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1156 struct res_common *tmp;
1157 struct res_counter *counter;
1161 memset(data, 0, sizeof(*data));
1163 counters_arr = kmalloc_array(dev->caps.max_counters,
1164 sizeof(*counters_arr), GFP_KERNEL);
1168 spin_lock_irq(mlx4_tlock(dev));
1169 list_for_each_entry(tmp,
1170 &tracker->slave_list[slave].res_list[RES_COUNTER],
1172 counter = container_of(tmp, struct res_counter, com);
1173 if (counter->port == port) {
1174 counters_arr[i] = (int)tmp->res_id;
1178 spin_unlock_irq(mlx4_tlock(dev));
1179 counters_arr[i] = -1;
1183 while (counters_arr[i] != -1) {
1184 err = mlx4_get_counter_stats(dev, counters_arr[i], data,
1187 memset(data, 0, sizeof(*data));
1194 kfree(counters_arr);
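/* Create tracker entries for the id range [base, base + count) on behalf of
 * a slave: allocate one res_* object per id, insert each into the per-type
 * red-black tree and the slave's resource list, and undo everything if any
 * id already exists in the tree.
 */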
1198 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1199 enum mlx4_resource type, int extra)
1203 struct mlx4_priv *priv = mlx4_priv(dev);
1204 struct res_common **res_arr;
1205 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1206 struct rb_root *root = &tracker->res_tree[type];
1208 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
1212 for (i = 0; i < count; ++i) {
1213 res_arr[i] = alloc_tr(base + i, type, slave, extra);
1215 for (--i; i >= 0; --i)
1223 spin_lock_irq(mlx4_tlock(dev));
1224 for (i = 0; i < count; ++i) {
1225 if (find_res(dev, base + i, type)) {
1229 err = res_tracker_insert(root, res_arr[i]);
1232 list_add_tail(&res_arr[i]->list,
1233 &tracker->slave_list[slave].res_list[type]);
1235 spin_unlock_irq(mlx4_tlock(dev));
1241 for (--i; i >= base; --i)
1242 rb_erase(&res_arr[i]->node, root);
1244 spin_unlock_irq(mlx4_tlock(dev));
1246 for (i = 0; i < count; ++i)
1254 static int remove_qp_ok(struct res_qp *res)
1256 if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1257 !list_empty(&res->mcg_list)) {
1258 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1259 res->com.state, atomic_read(&res->ref_count));
1261 } else if (res->com.state != RES_QP_RESERVED) {
1268 static int remove_mtt_ok(struct res_mtt *res, int order)
1270 if (res->com.state == RES_MTT_BUSY ||
1271 atomic_read(&res->ref_count)) {
1272 pr_devel("%s-%d: state %s, ref_count %d\n",
1274 mtt_states_str(res->com.state),
1275 atomic_read(&res->ref_count));
1277 } else if (res->com.state != RES_MTT_ALLOCATED)
1279 else if (res->order != order)
1285 static int remove_mpt_ok(struct res_mpt *res)
1287 if (res->com.state == RES_MPT_BUSY)
1289 else if (res->com.state != RES_MPT_RESERVED)
1295 static int remove_eq_ok(struct res_eq *res)
1297 if (res->com.state == RES_MPT_BUSY)
1299 else if (res->com.state != RES_MPT_RESERVED)
1305 static int remove_counter_ok(struct res_counter *res)
1307 if (res->com.state == RES_COUNTER_BUSY)
1309 else if (res->com.state != RES_COUNTER_ALLOCATED)
1315 static int remove_xrcdn_ok(struct res_xrcdn *res)
1317 if (res->com.state == RES_XRCD_BUSY)
1319 else if (res->com.state != RES_XRCD_ALLOCATED)
1325 static int remove_fs_rule_ok(struct res_fs_rule *res)
1327 if (res->com.state == RES_FS_RULE_BUSY)
1329 else if (res->com.state != RES_FS_RULE_ALLOCATED)
1335 static int remove_cq_ok(struct res_cq *res)
1337 if (res->com.state == RES_CQ_BUSY)
1339 else if (res->com.state != RES_CQ_ALLOCATED)
1345 static int remove_srq_ok(struct res_srq *res)
1347 if (res->com.state == RES_SRQ_BUSY)
1349 else if (res->com.state != RES_SRQ_ALLOCATED)
1355 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1359 return remove_qp_ok((struct res_qp *)res);
1361 return remove_cq_ok((struct res_cq *)res);
1363 return remove_srq_ok((struct res_srq *)res);
1365 return remove_mpt_ok((struct res_mpt *)res);
1367 return remove_mtt_ok((struct res_mtt *)res, extra);
1371 return remove_eq_ok((struct res_eq *)res);
1373 return remove_counter_ok((struct res_counter *)res);
1375 return remove_xrcdn_ok((struct res_xrcdn *)res);
1377 return remove_fs_rule_ok((struct res_fs_rule *)res);
1383 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1384 enum mlx4_resource type, int extra)
1388 struct mlx4_priv *priv = mlx4_priv(dev);
1389 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1390 struct res_common *r;
1392 spin_lock_irq(mlx4_tlock(dev));
1393 for (i = base; i < base + count; ++i) {
1394 r = res_tracker_lookup(&tracker->res_tree[type], i);
1399 if (r->owner != slave) {
1403 err = remove_ok(r, type, extra);
1408 for (i = base; i < base + count; ++i) {
1409 r = res_tracker_lookup(&tracker->res_tree[type], i);
1410 rb_erase(&r->node, &tracker->res_tree[type]);
1417 spin_unlock_irq(mlx4_tlock(dev));
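/* Resource state machine: the *_res_start_move_to() helpers validate that a
 * transition is legal, record from_state/to_state and mark the resource
 * busy; the caller then issues the firmware command and finishes with
 * res_end_move() on success or res_abort_move() on failure.
 */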
1422 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1423 enum res_qp_states state, struct res_qp **qp,
1426 struct mlx4_priv *priv = mlx4_priv(dev);
1427 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1431 spin_lock_irq(mlx4_tlock(dev));
1432 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1435 else if (r->com.owner != slave)
1440 mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1441 __func__, r->com.res_id);
1445 case RES_QP_RESERVED:
1446 if (r->com.state == RES_QP_MAPPED && !alloc)
1449 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1454 if ((r->com.state == RES_QP_RESERVED && alloc) ||
1455 r->com.state == RES_QP_HW)
1458 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1466 if (r->com.state != RES_QP_MAPPED)
1474 r->com.from_state = r->com.state;
1475 r->com.to_state = state;
1476 r->com.state = RES_QP_BUSY;
1482 spin_unlock_irq(mlx4_tlock(dev));
1487 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1488 enum res_mpt_states state, struct res_mpt **mpt)
1490 struct mlx4_priv *priv = mlx4_priv(dev);
1491 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1495 spin_lock_irq(mlx4_tlock(dev));
1496 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1499 else if (r->com.owner != slave)
1507 case RES_MPT_RESERVED:
1508 if (r->com.state != RES_MPT_MAPPED)
1512 case RES_MPT_MAPPED:
1513 if (r->com.state != RES_MPT_RESERVED &&
1514 r->com.state != RES_MPT_HW)
1519 if (r->com.state != RES_MPT_MAPPED)
1527 r->com.from_state = r->com.state;
1528 r->com.to_state = state;
1529 r->com.state = RES_MPT_BUSY;
1535 spin_unlock_irq(mlx4_tlock(dev));
1540 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1541 enum res_eq_states state, struct res_eq **eq)
1543 struct mlx4_priv *priv = mlx4_priv(dev);
1544 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1548 spin_lock_irq(mlx4_tlock(dev));
1549 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1552 else if (r->com.owner != slave)
1560 case RES_EQ_RESERVED:
1561 if (r->com.state != RES_EQ_HW)
1566 if (r->com.state != RES_EQ_RESERVED)
1575 r->com.from_state = r->com.state;
1576 r->com.to_state = state;
1577 r->com.state = RES_EQ_BUSY;
1583 spin_unlock_irq(mlx4_tlock(dev));
1588 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1589 enum res_cq_states state, struct res_cq **cq)
1591 struct mlx4_priv *priv = mlx4_priv(dev);
1592 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1596 spin_lock_irq(mlx4_tlock(dev));
1597 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1600 } else if (r->com.owner != slave) {
1602 } else if (state == RES_CQ_ALLOCATED) {
1603 if (r->com.state != RES_CQ_HW)
1605 else if (atomic_read(&r->ref_count))
1609 } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1616 r->com.from_state = r->com.state;
1617 r->com.to_state = state;
1618 r->com.state = RES_CQ_BUSY;
1623 spin_unlock_irq(mlx4_tlock(dev));
1628 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1629 enum res_srq_states state, struct res_srq **srq)
1631 struct mlx4_priv *priv = mlx4_priv(dev);
1632 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1636 spin_lock_irq(mlx4_tlock(dev));
1637 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1640 } else if (r->com.owner != slave) {
1642 } else if (state == RES_SRQ_ALLOCATED) {
1643 if (r->com.state != RES_SRQ_HW)
1645 else if (atomic_read(&r->ref_count))
1647 } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1652 r->com.from_state = r->com.state;
1653 r->com.to_state = state;
1654 r->com.state = RES_SRQ_BUSY;
1659 spin_unlock_irq(mlx4_tlock(dev));
1664 static void res_abort_move(struct mlx4_dev *dev, int slave,
1665 enum mlx4_resource type, int id)
1667 struct mlx4_priv *priv = mlx4_priv(dev);
1668 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1669 struct res_common *r;
1671 spin_lock_irq(mlx4_tlock(dev));
1672 r = res_tracker_lookup(&tracker->res_tree[type], id);
1673 if (r && (r->owner == slave))
1674 r->state = r->from_state;
1675 spin_unlock_irq(mlx4_tlock(dev));
1678 static void res_end_move(struct mlx4_dev *dev, int slave,
1679 enum mlx4_resource type, int id)
1681 struct mlx4_priv *priv = mlx4_priv(dev);
1682 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1683 struct res_common *r;
1685 spin_lock_irq(mlx4_tlock(dev));
1686 r = res_tracker_lookup(&tracker->res_tree[type], id);
1687 if (r && (r->owner == slave))
1688 r->state = r->to_state;
1689 spin_unlock_irq(mlx4_tlock(dev));
1692 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1694 return mlx4_is_qp_reserved(dev, qpn) &&
1695 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1698 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1700 return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
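/* ALLOC_RES for QPs is a two stage operation: RES_OP_RESERVE charges the
 * slave's quota and reserves a QP number range with the requested alignment,
 * while RES_OP_MAP_ICM maps the ICM backing for one QP and moves its tracker
 * entry to RES_QP_MAPPED (firmware-reserved QPs skip the ICM allocation).
 */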
1703 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1704 u64 in_param, u64 *out_param)
1714 case RES_OP_RESERVE:
1715 count = get_param_l(&in_param) & 0xffffff;
1716 /* Turn off all unsupported QP allocation flags that the
1717 * slave tries to set.
1719 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1720 align = get_param_h(&in_param);
1721 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1725 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1727 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1731 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1733 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1734 __mlx4_qp_release_range(dev, base, count);
1737 set_param_l(out_param, base);
1739 case RES_OP_MAP_ICM:
1740 qpn = get_param_l(&in_param) & 0x7fffff;
1741 if (valid_reserved(dev, slave, qpn)) {
1742 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1747 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1752 if (!fw_reserved(dev, qpn)) {
1753 err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1755 res_abort_move(dev, slave, RES_QP, qpn);
1760 res_end_move(dev, slave, RES_QP, qpn);
1770 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1771 u64 in_param, u64 *out_param)
1777 if (op != RES_OP_RESERVE_AND_MAP)
1780 order = get_param_l(&in_param);
1782 err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1786 base = __mlx4_alloc_mtt_range(dev, order);
1788 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1792 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1794 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1795 __mlx4_free_mtt_range(dev, base, order);
1797 set_param_l(out_param, base);
1803 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1804 u64 in_param, u64 *out_param)
1809 struct res_mpt *mpt;
1812 case RES_OP_RESERVE:
1813 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1817 index = __mlx4_mpt_reserve(dev);
1819 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1822 id = index & mpt_mask(dev);
1824 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1826 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1827 __mlx4_mpt_release(dev, index);
1830 set_param_l(out_param, index);
1832 case RES_OP_MAP_ICM:
1833 index = get_param_l(&in_param);
1834 id = index & mpt_mask(dev);
1835 err = mr_res_start_move_to(dev, slave, id,
1836 RES_MPT_MAPPED, &mpt);
1840 err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1842 res_abort_move(dev, slave, RES_MPT, id);
1846 res_end_move(dev, slave, RES_MPT, id);
1852 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1853 u64 in_param, u64 *out_param)
1859 case RES_OP_RESERVE_AND_MAP:
1860 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1864 err = __mlx4_cq_alloc_icm(dev, &cqn);
1866 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1870 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1872 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1873 __mlx4_cq_free_icm(dev, cqn);
1877 set_param_l(out_param, cqn);
1887 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1888 u64 in_param, u64 *out_param)
1894 case RES_OP_RESERVE_AND_MAP:
1895 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1899 err = __mlx4_srq_alloc_icm(dev, &srqn);
1901 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1905 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1907 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1908 __mlx4_srq_free_icm(dev, srqn);
1912 set_param_l(out_param, srqn);
1922 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1923 u8 smac_index, u64 *mac)
1925 struct mlx4_priv *priv = mlx4_priv(dev);
1926 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1927 struct list_head *mac_list =
1928 &tracker->slave_list[slave].res_list[RES_MAC];
1929 struct mac_res *res, *tmp;
1931 list_for_each_entry_safe(res, tmp, mac_list, list) {
1932 if (res->smac_index == smac_index && res->port == (u8) port) {
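/* Per-slave MAC bookkeeping: each registered (mac, port) pair is reference
 * counted on the slave's RES_MAC list and charged against its per-port MAC
 * quota; rem_slave_macs() drops every remaining reference when the slave
 * goes away.
 */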
1940 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1942 struct mlx4_priv *priv = mlx4_priv(dev);
1943 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1944 struct list_head *mac_list =
1945 &tracker->slave_list[slave].res_list[RES_MAC];
1946 struct mac_res *res, *tmp;
1948 list_for_each_entry_safe(res, tmp, mac_list, list) {
1949 if (res->mac == mac && res->port == (u8) port) {
1950 /* MAC found; update ref count */
1956 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1958 res = kzalloc(sizeof *res, GFP_KERNEL);
1960 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1964 res->port = (u8) port;
1965 res->smac_index = smac_index;
1967 list_add_tail(&res->list,
1968 &tracker->slave_list[slave].res_list[RES_MAC]);
1972 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1975 struct mlx4_priv *priv = mlx4_priv(dev);
1976 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1977 struct list_head *mac_list =
1978 &tracker->slave_list[slave].res_list[RES_MAC];
1979 struct mac_res *res, *tmp;
1981 list_for_each_entry_safe(res, tmp, mac_list, list) {
1982 if (res->mac == mac && res->port == (u8) port) {
1983 if (!--res->ref_count) {
1984 list_del(&res->list);
1985 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1993 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1995 struct mlx4_priv *priv = mlx4_priv(dev);
1996 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1997 struct list_head *mac_list =
1998 &tracker->slave_list[slave].res_list[RES_MAC];
1999 struct mac_res *res, *tmp;
2002 list_for_each_entry_safe(res, tmp, mac_list, list) {
2003 list_del(&res->list);
2004 /* dereference the MAC the number of times the slave referenced it */
2005 for (i = 0; i < res->ref_count; i++)
2006 __mlx4_unregister_mac(dev, res->port, res->mac);
2007 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
2012 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2013 u64 in_param, u64 *out_param, int in_port)
2020 if (op != RES_OP_RESERVE_AND_MAP)
2023 port = !in_port ? get_param_l(out_param) : in_port;
2024 port = mlx4_slave_convert_port(
2031 err = __mlx4_register_mac(dev, port, mac);
2034 set_param_l(out_param, err);
2039 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
2041 __mlx4_unregister_mac(dev, port, mac);
2046 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2047 int port, int vlan_index)
2049 struct mlx4_priv *priv = mlx4_priv(dev);
2050 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2051 struct list_head *vlan_list =
2052 &tracker->slave_list[slave].res_list[RES_VLAN];
2053 struct vlan_res *res, *tmp;
2055 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2056 if (res->vlan == vlan && res->port == (u8) port) {
2057 /* VLAN found; update ref count */
2063 if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2065 res = kzalloc(sizeof(*res), GFP_KERNEL);
2067 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
2071 res->port = (u8) port;
2072 res->vlan_index = vlan_index;
2074 list_add_tail(&res->list,
2075 &tracker->slave_list[slave].res_list[RES_VLAN]);
2080 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2083 struct mlx4_priv *priv = mlx4_priv(dev);
2084 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2085 struct list_head *vlan_list =
2086 &tracker->slave_list[slave].res_list[RES_VLAN];
2087 struct vlan_res *res, *tmp;
2089 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2090 if (res->vlan == vlan && res->port == (u8) port) {
2091 if (!--res->ref_count) {
2092 list_del(&res->list);
2093 mlx4_release_resource(dev, slave, RES_VLAN,
2102 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2104 struct mlx4_priv *priv = mlx4_priv(dev);
2105 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2106 struct list_head *vlan_list =
2107 &tracker->slave_list[slave].res_list[RES_VLAN];
2108 struct vlan_res *res, *tmp;
2111 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2112 list_del(&res->list);
2113 /* dereference the VLAN the number of times the slave referenced it */
2114 for (i = 0; i < res->ref_count; i++)
2115 __mlx4_unregister_vlan(dev, res->port, res->vlan);
2116 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
2121 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2122 u64 in_param, u64 *out_param, int in_port)
2124 struct mlx4_priv *priv = mlx4_priv(dev);
2125 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2131 port = !in_port ? get_param_l(out_param) : in_port;
2133 if (!port || op != RES_OP_RESERVE_AND_MAP)
2136 port = mlx4_slave_convert_port(
2141 /* upstream kernels made VLAN reg/unreg a no-op for the old API; keep that behaviour */
2142 if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2143 slave_state[slave].old_vlan_api = true;
2147 vlan = (u16) in_param;
2149 err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2151 set_param_l(out_param, (u32) vlan_index);
2152 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2154 __mlx4_unregister_vlan(dev, port, vlan);
2159 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2160 u64 in_param, u64 *out_param, int port)
2165 if (op != RES_OP_RESERVE)
2168 err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2172 err = __mlx4_counter_alloc(dev, &index);
2174 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2178 err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
2180 __mlx4_counter_free(dev, index);
2181 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2183 set_param_l(out_param, index);
2189 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2190 u64 in_param, u64 *out_param)
2195 if (op != RES_OP_RESERVE)
2198 err = __mlx4_xrcd_alloc(dev, &xrcdn);
2202 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2204 __mlx4_xrcd_free(dev, xrcdn);
2206 set_param_l(out_param, xrcdn);
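/* Dispatcher for the ALLOC_RES command from guests: the low byte of
 * vhcr->in_modifier selects the resource type, the next byte carries the
 * port for MAC/VLAN requests, and vhcr->op_modifier selects the operation
 * (reserve, reserve-and-map or map-ICM).
 */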
2211 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2212 struct mlx4_vhcr *vhcr,
2213 struct mlx4_cmd_mailbox *inbox,
2214 struct mlx4_cmd_mailbox *outbox,
2215 struct mlx4_cmd_info *cmd)
2218 int alop = vhcr->op_modifier;
2220 switch (vhcr->in_modifier & 0xFF) {
2222 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2223 vhcr->in_param, &vhcr->out_param);
2227 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2228 vhcr->in_param, &vhcr->out_param);
2232 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2233 vhcr->in_param, &vhcr->out_param);
2237 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2238 vhcr->in_param, &vhcr->out_param);
2242 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2243 vhcr->in_param, &vhcr->out_param);
2247 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2248 vhcr->in_param, &vhcr->out_param,
2249 (vhcr->in_modifier >> 8) & 0xFF);
2253 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2254 vhcr->in_param, &vhcr->out_param,
2255 (vhcr->in_modifier >> 8) & 0xFF);
2259 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2260 vhcr->in_param, &vhcr->out_param, 0);
2264 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2265 vhcr->in_param, &vhcr->out_param);
2276 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2285 case RES_OP_RESERVE:
2286 base = get_param_l(&in_param) & 0x7fffff;
2287 count = get_param_h(&in_param);
2288 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2291 mlx4_release_resource(dev, slave, RES_QP, count, 0);
2292 __mlx4_qp_release_range(dev, base, count);
2294 case RES_OP_MAP_ICM:
2295 qpn = get_param_l(&in_param) & 0x7fffff;
2296 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2301 if (!fw_reserved(dev, qpn))
2302 __mlx4_qp_free_icm(dev, qpn);
2304 res_end_move(dev, slave, RES_QP, qpn);
2306 if (valid_reserved(dev, slave, qpn))
2307 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2316 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2317 u64 in_param, u64 *out_param)
2323 if (op != RES_OP_RESERVE_AND_MAP)
2326 base = get_param_l(&in_param);
2327 order = get_param_h(&in_param);
2328 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2330 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2331 __mlx4_free_mtt_range(dev, base, order);
2336 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2342 struct res_mpt *mpt;
2345 case RES_OP_RESERVE:
2346 index = get_param_l(&in_param);
2347 id = index & mpt_mask(dev);
2348 err = get_res(dev, slave, id, RES_MPT, &mpt);
2352 put_res(dev, slave, id, RES_MPT);
2354 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2357 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2358 __mlx4_mpt_release(dev, index);
2360 case RES_OP_MAP_ICM:
2361 index = get_param_l(&in_param);
2362 id = index & mpt_mask(dev);
2363 err = mr_res_start_move_to(dev, slave, id,
2364 RES_MPT_RESERVED, &mpt);
2368 __mlx4_mpt_free_icm(dev, mpt->key);
2369 res_end_move(dev, slave, RES_MPT, id);
2379 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2380 u64 in_param, u64 *out_param)
2386 case RES_OP_RESERVE_AND_MAP:
2387 cqn = get_param_l(&in_param);
2388 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2392 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2393 __mlx4_cq_free_icm(dev, cqn);
2404 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2405 u64 in_param, u64 *out_param)
2411 case RES_OP_RESERVE_AND_MAP:
2412 srqn = get_param_l(&in_param);
2413 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2417 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2418 __mlx4_srq_free_icm(dev, srqn);
2429 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2430 u64 in_param, u64 *out_param, int in_port)
2436 case RES_OP_RESERVE_AND_MAP:
2437 port = !in_port ? get_param_l(out_param) : in_port;
2438 port = mlx4_slave_convert_port(
2443 mac_del_from_slave(dev, slave, in_param, port);
2444 __mlx4_unregister_mac(dev, port, in_param);
2455 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2456 u64 in_param, u64 *out_param, int port)
2458 struct mlx4_priv *priv = mlx4_priv(dev);
2459 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2462 port = mlx4_slave_convert_port(
2468 case RES_OP_RESERVE_AND_MAP:
2469 if (slave_state[slave].old_vlan_api)
2473 vlan_del_from_slave(dev, slave, in_param, port);
2474 __mlx4_unregister_vlan(dev, port, in_param);
2484 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2485 u64 in_param, u64 *out_param)
2490 if (op != RES_OP_RESERVE)
2493 index = get_param_l(&in_param);
2494 if (index == MLX4_SINK_COUNTER_INDEX(dev))
2497 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2501 __mlx4_counter_free(dev, index);
2502 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2507 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2508 u64 in_param, u64 *out_param)
2513 if (op != RES_OP_RESERVE)
2516 xrcdn = get_param_l(&in_param);
2517 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2521 __mlx4_xrcd_free(dev, xrcdn);
2526 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2527 struct mlx4_vhcr *vhcr,
2528 struct mlx4_cmd_mailbox *inbox,
2529 struct mlx4_cmd_mailbox *outbox,
2530 struct mlx4_cmd_info *cmd)
2533 int alop = vhcr->op_modifier;
2535 switch (vhcr->in_modifier & 0xFF) {
2537 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2542 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2543 vhcr->in_param, &vhcr->out_param);
2547 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2552 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2553 vhcr->in_param, &vhcr->out_param);
2557 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2558 vhcr->in_param, &vhcr->out_param);
2562 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2563 vhcr->in_param, &vhcr->out_param,
2564 (vhcr->in_modifier >> 8) & 0xFF);
2568 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2569 vhcr->in_param, &vhcr->out_param,
2570 (vhcr->in_modifier >> 8) & 0xFF);
2574 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2575 vhcr->in_param, &vhcr->out_param);
2579 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2580 vhcr->in_param, &vhcr->out_param);
2588 /* ugly but other choices are uglier */
2589 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2591 return (be32_to_cpu(mpt->flags) >> 9) & 1;
2594 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2596 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2599 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2601 return be32_to_cpu(mpt->mtt_sz);
2604 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2606 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2609 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2611 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2614 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2616 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2619 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2621 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2624 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2626 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2629 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2631 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
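/* MTT entries needed by a QP: work queue memory is the send queue,
 * 1 << (log_sq_size + log_sq_stride + 4) bytes, plus the receive queue
 * unless the QP uses an SRQ/RSS/XRC, rounded up to a power-of-two number of
 * pages of 1 << page_shift bytes.  E.g. 256 SQ WQEs of 64 bytes, no RQ,
 * 4KB pages and zero page offset come to 16KB, i.e. 4 MTT entries.
 */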
2634 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2636 int page_shift = (qpc->log_page_size & 0x3f) + 12;
2637 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2638 int log_sq_stride = qpc->sq_size_stride & 7;
2639 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2640 int log_rq_stride = qpc->rq_size_stride & 7;
2641 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2642 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2643 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2644 int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2649 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2651 sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2652 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2653 total_mem = sq_size + rq_size;
2655 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2661 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2662 int size, struct res_mtt *mtt)
2664 int res_start = mtt->com.res_id;
2665 int res_size = (1 << mtt->order);
2667 if (start < res_start || start + size > res_start + res_size)
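/* SW2HW_MPT from a guest: reject memory windows, FMRs and bind-enabled
 * entries, verify the function bits embedded in the PD and the MTT range,
 * then hand the entry to firmware and pin the MTTs it references.
 */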
2672 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2673 struct mlx4_vhcr *vhcr,
2674 struct mlx4_cmd_mailbox *inbox,
2675 struct mlx4_cmd_mailbox *outbox,
2676 struct mlx4_cmd_info *cmd)
2679 int index = vhcr->in_modifier;
2680 struct res_mtt *mtt;
2681 struct res_mpt *mpt;
2682 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2688 id = index & mpt_mask(dev);
2689 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2693 /* Disable memory windows for VFs. */
2694 if (!mr_is_region(inbox->buf)) {
2699 /* Make sure that the PD bits related to the slave id are zeros. */
2700 pd = mr_get_pd(inbox->buf);
2701 pd_slave = (pd >> 17) & 0x7f;
2702 if (pd_slave != 0 && --pd_slave != slave) {
2707 if (mr_is_fmr(inbox->buf)) {
2708 /* FMR and Bind Enable are forbidden in slave devices. */
2709 if (mr_is_bind_enabled(inbox->buf)) {
2713 /* FMR and Memory Windows are also forbidden. */
2714 if (!mr_is_region(inbox->buf)) {
2720 phys = mr_phys_mpt(inbox->buf);
2722 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2726 err = check_mtt_range(dev, slave, mtt_base,
2727 mr_get_mtt_size(inbox->buf), mtt);
2734 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2739 atomic_inc(&mtt->ref_count);
2740 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2743 res_end_move(dev, slave, RES_MPT, id);
2748 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2750 res_abort_move(dev, slave, RES_MPT, id);
2755 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2756 struct mlx4_vhcr *vhcr,
2757 struct mlx4_cmd_mailbox *inbox,
2758 struct mlx4_cmd_mailbox *outbox,
2759 struct mlx4_cmd_info *cmd)
2762 int index = vhcr->in_modifier;
2763 struct res_mpt *mpt;
2766 id = index & mpt_mask(dev);
2767 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2771 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2776 atomic_dec(&mpt->mtt->ref_count);
2778 res_end_move(dev, slave, RES_MPT, id);
2782 res_abort_move(dev, slave, RES_MPT, id);
2787 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2788 struct mlx4_vhcr *vhcr,
2789 struct mlx4_cmd_mailbox *inbox,
2790 struct mlx4_cmd_mailbox *outbox,
2791 struct mlx4_cmd_info *cmd)
2794 int index = vhcr->in_modifier;
2795 struct res_mpt *mpt;
2798 id = index & mpt_mask(dev);
2799 err = get_res(dev, slave, id, RES_MPT, &mpt);
2803 if (mpt->com.from_state == RES_MPT_MAPPED) {
2804 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2805 * that, the VF must read the MPT. But since the MPT entry memory is not
2806 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2807 * entry contents. To guarantee that the MPT cannot be changed, the driver
2808 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2809 * ownership following the change. The change here allows the VF to
2810 * perform QUERY_MPT also when the entry is in SW ownership.
2812 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2813 &mlx4_priv(dev)->mr_table.dmpt_table,
2816 if (NULL == mpt_entry || NULL == outbox->buf) {
2821 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2824 } else if (mpt->com.from_state == RES_MPT_HW) {
2825 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2833 put_res(dev, slave, id, RES_MPT);
2837 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2839 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2842 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2844 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2847 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2849 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2852 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2853 struct mlx4_qp_context *context)
2855 u32 qpn = vhcr->in_modifier & 0xffffff;
2858 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2861 /* adjust qkey in qp context */
2862 context->qkey = cpu_to_be32(qkey);
2865 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2866 struct mlx4_qp_context *qpc,
2867 struct mlx4_cmd_mailbox *inbox);
2869 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2870 struct mlx4_vhcr *vhcr,
2871 struct mlx4_cmd_mailbox *inbox,
2872 struct mlx4_cmd_mailbox *outbox,
2873 struct mlx4_cmd_info *cmd)
2876 int qpn = vhcr->in_modifier & 0x7fffff;
2877 struct res_mtt *mtt;
2879 struct mlx4_qp_context *qpc = inbox->buf + 8;
2880 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2881 int mtt_size = qp_get_mtt_size(qpc);
2884 int rcqn = qp_get_rcqn(qpc);
2885 int scqn = qp_get_scqn(qpc);
2886 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2887 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2888 struct res_srq *srq;
2889 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2891 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2895 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2898 qp->local_qpn = local_qpn;
2899 qp->sched_queue = 0;
2901 qp->vlan_control = 0;
2903 qp->pri_path_fl = 0;
2906 qp->qpc_flags = be32_to_cpu(qpc->flags);
2908 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2912 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2916 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2921 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2928 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2933 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2934 update_pkey_index(dev, slave, inbox);
2935 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2938 atomic_inc(&mtt->ref_count);
2940 atomic_inc(&rcq->ref_count);
2942 atomic_inc(&scq->ref_count);
2946 put_res(dev, slave, scqn, RES_CQ);
2949 atomic_inc(&srq->ref_count);
2950 put_res(dev, slave, srqn, RES_SRQ);
2953 put_res(dev, slave, rcqn, RES_CQ);
2954 put_res(dev, slave, mtt_base, RES_MTT);
2955 res_end_move(dev, slave, RES_QP, qpn);
2961 put_res(dev, slave, srqn, RES_SRQ);
2964 put_res(dev, slave, scqn, RES_CQ);
2966 put_res(dev, slave, rcqn, RES_CQ);
2968 put_res(dev, slave, mtt_base, RES_MTT);
2970 res_abort_move(dev, slave, RES_QP, qpn);
2975 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2977 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2980 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2982 int log_eq_size = eqc->log_eq_size & 0x1f;
2983 int page_shift = (eqc->log_page_size & 0x3f) + 12;
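/* A note on the math below: EQEs are assumed to be 32 bytes here (hence
 * the +5), so the queue occupies 2^(log_eq_size + 5) bytes.  Dividing by
 * the page size (2^page_shift) gives the number of pages, i.e. the number
 * of MTT entries the EQ needs.  For example, 1024 EQEs (log_eq_size = 10)
 * on 4K pages (page_shift = 12) need 1 << (10 + 5 - 12) = 8 MTT entries.
 */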
2985 if (log_eq_size + 5 < page_shift)
2988 return 1 << (log_eq_size + 5 - page_shift);
2991 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2993 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2996 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2998 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2999 int page_shift = (cqc->log_page_size & 0x3f) + 12;
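/* Same computation as eq_get_mtt_size(), with CQEs assumed to be 32 bytes:
 * total CQ bytes divided by the page size gives the MTT entry count.
 */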
3001 if (log_cq_size + 5 < page_shift)
3004 return 1 << (log_cq_size + 5 - page_shift);
3007 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3008 struct mlx4_vhcr *vhcr,
3009 struct mlx4_cmd_mailbox *inbox,
3010 struct mlx4_cmd_mailbox *outbox,
3011 struct mlx4_cmd_info *cmd)
3014 int eqn = vhcr->in_modifier;
3015 int res_id = (slave << 10) | eqn;
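/* EQ numbers are only unique per function, so the tracker keys EQs as
 * (slave << 10) | eqn: the slave id in the high bits and the 10-bit EQ
 * number in the low bits.  The HW2SW/QUERY EQ wrappers and mlx4_GEN_EQE()
 * below use the same encoding.
 */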
3016 struct mlx4_eq_context *eqc = inbox->buf;
3017 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3018 int mtt_size = eq_get_mtt_size(eqc);
3020 struct res_mtt *mtt;
3022 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3025 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3029 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3033 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3037 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3041 atomic_inc(&mtt->ref_count);
3043 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3044 res_end_move(dev, slave, RES_EQ, res_id);
3048 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3050 res_abort_move(dev, slave, RES_EQ, res_id);
3052 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3056 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3057 struct mlx4_vhcr *vhcr,
3058 struct mlx4_cmd_mailbox *inbox,
3059 struct mlx4_cmd_mailbox *outbox,
3060 struct mlx4_cmd_info *cmd)
3063 u8 get = vhcr->op_modifier;
3068 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3073 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3074 int len, struct res_mtt **res)
3076 struct mlx4_priv *priv = mlx4_priv(dev);
3077 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3078 struct res_mtt *mtt;
3081 spin_lock_irq(mlx4_tlock(dev));
3082 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3084 if (!check_mtt_range(dev, slave, start, len, mtt)) {
3086 mtt->com.from_state = mtt->com.state;
3087 mtt->com.state = RES_MTT_BUSY;
3092 spin_unlock_irq(mlx4_tlock(dev));
3097 static int verify_qp_parameters(struct mlx4_dev *dev,
3098 struct mlx4_vhcr *vhcr,
3099 struct mlx4_cmd_mailbox *inbox,
3100 enum qp_transition transition, u8 slave)
3104 struct mlx4_qp_context *qp_ctx;
3105 enum mlx4_qp_optpar optpar;
3109 qp_ctx = inbox->buf + 8;
3110 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3111 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
3113 if (slave != mlx4_master_func_num(dev)) {
3114 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
3115 /* setting QP rate-limit is disallowed for VFs */
3116 if (qp_ctx->rate_limit_params)
3122 case MLX4_QP_ST_XRC:
3124 switch (transition) {
3125 case QP_TRANS_INIT2RTR:
3126 case QP_TRANS_RTR2RTS:
3127 case QP_TRANS_RTS2RTS:
3128 case QP_TRANS_SQD2SQD:
3129 case QP_TRANS_SQD2RTS:
3130 if (slave != mlx4_master_func_num(dev))
3131 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3132 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3133 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3134 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3137 if (qp_ctx->pri_path.mgid_index >= num_gids)
3140 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3141 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3142 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3143 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3146 if (qp_ctx->alt_path.mgid_index >= num_gids)
3155 case MLX4_QP_ST_MLX:
3156 qpn = vhcr->in_modifier & 0x7fffff;
3157 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3158 if (transition == QP_TRANS_INIT2RTR &&
3159 slave != mlx4_master_func_num(dev) &&
3160 mlx4_is_qp_reserved(dev, qpn) &&
3161 !mlx4_vf_smi_enabled(dev, slave, port)) {
3162 /* only enabled VFs may create MLX proxy QPs */
3163 mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3164 __func__, slave, port);
3176 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3177 struct mlx4_vhcr *vhcr,
3178 struct mlx4_cmd_mailbox *inbox,
3179 struct mlx4_cmd_mailbox *outbox,
3180 struct mlx4_cmd_info *cmd)
3182 struct mlx4_mtt mtt;
3183 __be64 *page_list = inbox->buf;
3184 u64 *pg_list = (u64 *)page_list;
3186 struct res_mtt *rmtt = NULL;
3187 int start = be64_to_cpu(page_list[0]);
3188 int npages = vhcr->in_modifier;
3191 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3195 /* Call the SW implementation of write_mtt:
3196 * - Prepare a dummy mtt struct
3197 * - Translate inbox contents to simple addresses in host endianness */
3198 mtt.offset = 0; /* TBD: the MTT offset is not handled here since
3199 it is not actually used */
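/* Mailbox layout, as consumed below: entry 0 of the __be64 array is the
 * first MTT index to write, entry 1 is not used here, and entries
 * 2..npages+1 hold the page addresses.  Bit 0 of each address is cleared
 * since __mlx4_write_mtt() sets the MTT "present" flag itself when it
 * writes the entries.
 */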
3202 for (i = 0; i < npages; ++i)
3203 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3205 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3206 ((u64 *)page_list + 2));
3209 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3214 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3215 struct mlx4_vhcr *vhcr,
3216 struct mlx4_cmd_mailbox *inbox,
3217 struct mlx4_cmd_mailbox *outbox,
3218 struct mlx4_cmd_info *cmd)
3220 int eqn = vhcr->in_modifier;
3221 int res_id = eqn | (slave << 10);
3225 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3229 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3233 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3237 atomic_dec(&eq->mtt->ref_count);
3238 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3239 res_end_move(dev, slave, RES_EQ, res_id);
3240 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3245 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3247 res_abort_move(dev, slave, RES_EQ, res_id);
3252 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3254 struct mlx4_priv *priv = mlx4_priv(dev);
3255 struct mlx4_slave_event_eq_info *event_eq;
3256 struct mlx4_cmd_mailbox *mailbox;
3257 u32 in_modifier = 0;
3262 if (!priv->mfunc.master.slave_state)
3265 /* check that the slave is valid, is not the PF, and is active */
3266 if (slave < 0 || slave > dev->persist->num_vfs ||
3267 slave == dev->caps.function ||
3268 !priv->mfunc.master.slave_state[slave].active)
3271 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3273 /* Create the event only if the slave is registered */
3274 if (event_eq->eqn < 0)
3277 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3278 res_id = (slave << 10) | event_eq->eqn;
3279 err = get_res(dev, slave, res_id, RES_EQ, &req);
3283 if (req->com.from_state != RES_EQ_HW) {
3288 mailbox = mlx4_alloc_cmd_mailbox(dev);
3289 if (IS_ERR(mailbox)) {
3290 err = PTR_ERR(mailbox);
3294 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3296 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3299 memcpy(mailbox->buf, (u8 *) eqe, 28);
3301 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
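/* GEN_EQE takes the target slave in the low byte of the in_modifier and
 * the destination EQ number in bits 16..25.  Only the first 28 bytes of
 * the 32-byte EQE are forwarded above, presumably because the final dword
 * holds the ownership bits that the destination EQ manages itself.
 */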
3303 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3304 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3307 put_res(dev, slave, res_id, RES_EQ);
3308 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3309 mlx4_free_cmd_mailbox(dev, mailbox);
3313 put_res(dev, slave, res_id, RES_EQ);
3316 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3320 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3321 struct mlx4_vhcr *vhcr,
3322 struct mlx4_cmd_mailbox *inbox,
3323 struct mlx4_cmd_mailbox *outbox,
3324 struct mlx4_cmd_info *cmd)
3326 int eqn = vhcr->in_modifier;
3327 int res_id = eqn | (slave << 10);
3331 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3335 if (eq->com.from_state != RES_EQ_HW) {
3340 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3343 put_res(dev, slave, res_id, RES_EQ);
3347 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3348 struct mlx4_vhcr *vhcr,
3349 struct mlx4_cmd_mailbox *inbox,
3350 struct mlx4_cmd_mailbox *outbox,
3351 struct mlx4_cmd_info *cmd)
3354 int cqn = vhcr->in_modifier;
3355 struct mlx4_cq_context *cqc = inbox->buf;
3356 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3357 struct res_cq *cq = NULL;
3358 struct res_mtt *mtt;
3360 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3363 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3366 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3369 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3372 atomic_inc(&mtt->ref_count);
3374 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3375 res_end_move(dev, slave, RES_CQ, cqn);
3379 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3381 res_abort_move(dev, slave, RES_CQ, cqn);
3385 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3386 struct mlx4_vhcr *vhcr,
3387 struct mlx4_cmd_mailbox *inbox,
3388 struct mlx4_cmd_mailbox *outbox,
3389 struct mlx4_cmd_info *cmd)
3392 int cqn = vhcr->in_modifier;
3393 struct res_cq *cq = NULL;
3395 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3398 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3401 atomic_dec(&cq->mtt->ref_count);
3402 res_end_move(dev, slave, RES_CQ, cqn);
3406 res_abort_move(dev, slave, RES_CQ, cqn);
3410 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3411 struct mlx4_vhcr *vhcr,
3412 struct mlx4_cmd_mailbox *inbox,
3413 struct mlx4_cmd_mailbox *outbox,
3414 struct mlx4_cmd_info *cmd)
3416 int cqn = vhcr->in_modifier;
3420 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3424 if (cq->com.from_state != RES_CQ_HW)
3427 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3429 put_res(dev, slave, cqn, RES_CQ);
3434 static int handle_resize(struct mlx4_dev *dev, int slave,
3435 struct mlx4_vhcr *vhcr,
3436 struct mlx4_cmd_mailbox *inbox,
3437 struct mlx4_cmd_mailbox *outbox,
3438 struct mlx4_cmd_info *cmd,
3442 struct res_mtt *orig_mtt;
3443 struct res_mtt *mtt;
3444 struct mlx4_cq_context *cqc = inbox->buf;
3445 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3447 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3451 if (orig_mtt != cq->mtt) {
3456 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3460 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3463 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3466 atomic_dec(&orig_mtt->ref_count);
3467 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3468 atomic_inc(&mtt->ref_count);
3470 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3474 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3476 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3482 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3483 struct mlx4_vhcr *vhcr,
3484 struct mlx4_cmd_mailbox *inbox,
3485 struct mlx4_cmd_mailbox *outbox,
3486 struct mlx4_cmd_info *cmd)
3488 int cqn = vhcr->in_modifier;
3492 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3496 if (cq->com.from_state != RES_CQ_HW)
3499 if (vhcr->op_modifier == 0) {
3500 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3504 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3506 put_res(dev, slave, cqn, RES_CQ);
3511 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3513 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3514 int log_rq_stride = srqc->logstride & 7;
3515 int page_shift = (srqc->log_page_size & 0x3f) + 12;
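/* The receive WQE stride is 2^(log_rq_stride + 4) bytes (16-byte units),
 * so the SRQ buffer spans 2^(log_srq_size + log_rq_stride + 4) bytes;
 * dividing by the page size gives the MTT entry count, as in
 * eq_get_mtt_size()/cq_get_mtt_size() above.
 */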
3517 if (log_srq_size + log_rq_stride + 4 < page_shift)
3520 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3523 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3524 struct mlx4_vhcr *vhcr,
3525 struct mlx4_cmd_mailbox *inbox,
3526 struct mlx4_cmd_mailbox *outbox,
3527 struct mlx4_cmd_info *cmd)
3530 int srqn = vhcr->in_modifier;
3531 struct res_mtt *mtt;
3532 struct res_srq *srq = NULL;
3533 struct mlx4_srq_context *srqc = inbox->buf;
3534 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3536 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3539 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3542 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3545 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3550 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3554 atomic_inc(&mtt->ref_count);
3556 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3557 res_end_move(dev, slave, RES_SRQ, srqn);
3561 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3563 res_abort_move(dev, slave, RES_SRQ, srqn);
3568 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3569 struct mlx4_vhcr *vhcr,
3570 struct mlx4_cmd_mailbox *inbox,
3571 struct mlx4_cmd_mailbox *outbox,
3572 struct mlx4_cmd_info *cmd)
3575 int srqn = vhcr->in_modifier;
3576 struct res_srq *srq = NULL;
3578 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3581 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3584 atomic_dec(&srq->mtt->ref_count);
3586 atomic_dec(&srq->cq->ref_count);
3587 res_end_move(dev, slave, RES_SRQ, srqn);
3592 res_abort_move(dev, slave, RES_SRQ, srqn);
3597 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3598 struct mlx4_vhcr *vhcr,
3599 struct mlx4_cmd_mailbox *inbox,
3600 struct mlx4_cmd_mailbox *outbox,
3601 struct mlx4_cmd_info *cmd)
3604 int srqn = vhcr->in_modifier;
3605 struct res_srq *srq;
3607 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3610 if (srq->com.from_state != RES_SRQ_HW) {
3614 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3616 put_res(dev, slave, srqn, RES_SRQ);
3620 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3621 struct mlx4_vhcr *vhcr,
3622 struct mlx4_cmd_mailbox *inbox,
3623 struct mlx4_cmd_mailbox *outbox,
3624 struct mlx4_cmd_info *cmd)
3627 int srqn = vhcr->in_modifier;
3628 struct res_srq *srq;
3630 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3634 if (srq->com.from_state != RES_SRQ_HW) {
3639 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3641 put_res(dev, slave, srqn, RES_SRQ);
3645 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3646 struct mlx4_vhcr *vhcr,
3647 struct mlx4_cmd_mailbox *inbox,
3648 struct mlx4_cmd_mailbox *outbox,
3649 struct mlx4_cmd_info *cmd)
3652 int qpn = vhcr->in_modifier & 0x7fffff;
3655 err = get_res(dev, slave, qpn, RES_QP, &qp);
3658 if (qp->com.from_state != RES_QP_HW) {
3663 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3665 put_res(dev, slave, qpn, RES_QP);
3669 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3670 struct mlx4_vhcr *vhcr,
3671 struct mlx4_cmd_mailbox *inbox,
3672 struct mlx4_cmd_mailbox *outbox,
3673 struct mlx4_cmd_info *cmd)
3675 struct mlx4_qp_context *context = inbox->buf + 8;
3676 adjust_proxy_tun_qkey(dev, vhcr, context);
3677 update_pkey_index(dev, slave, inbox);
3678 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3681 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3682 struct mlx4_qp_context *qpc,
3683 struct mlx4_cmd_mailbox *inbox)
3685 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3687 int port = mlx4_slave_convert_port(
3688 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
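/* Bit 6 of sched_queue selects the physical port (0-based).  A VF's
 * logical port may map to a different physical port, so the port is
 * converted above and the bit is rewritten below.
 */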
3693 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3696 if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3697 qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3698 qpc->pri_path.sched_queue = pri_sched_queue;
3701 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3702 port = mlx4_slave_convert_port(
3703 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3707 qpc->alt_path.sched_queue =
3708 (qpc->alt_path.sched_queue & ~(1 << 6)) |
3714 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3715 struct mlx4_qp_context *qpc,
3716 struct mlx4_cmd_mailbox *inbox)
3720 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3721 u8 sched = *(u8 *)(inbox->buf + 64);
3724 port = (sched >> 6 & 1) + 1;
3725 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3726 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3727 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3733 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3734 struct mlx4_vhcr *vhcr,
3735 struct mlx4_cmd_mailbox *inbox,
3736 struct mlx4_cmd_mailbox *outbox,
3737 struct mlx4_cmd_info *cmd)
3740 struct mlx4_qp_context *qpc = inbox->buf + 8;
3741 int qpn = vhcr->in_modifier & 0x7fffff;
3743 u8 orig_sched_queue;
3744 __be32 orig_param3 = qpc->param3;
3745 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3746 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3747 u8 orig_pri_path_fl = qpc->pri_path.fl;
3748 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3749 u8 orig_feup = qpc->pri_path.feup;
3751 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3754 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3758 if (roce_verify_mac(dev, slave, qpc, inbox))
3761 update_pkey_index(dev, slave, inbox);
3762 update_gid(dev, inbox, (u8)slave);
3763 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3764 orig_sched_queue = qpc->pri_path.sched_queue;
3765 err = update_vport_qp_param(dev, inbox, slave, qpn);
3769 err = get_res(dev, slave, qpn, RES_QP, &qp);
3772 if (qp->com.from_state != RES_QP_HW) {
3777 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3779 /* if no error, save sched queue value passed in by VF. This is
3780 * essentially the QOS value provided by the VF. This will be useful
3781 * if we allow dynamic changes from VST back to VGT
3784 qp->sched_queue = orig_sched_queue;
3785 qp->param3 = orig_param3;
3786 qp->vlan_control = orig_vlan_control;
3787 qp->fvl_rx = orig_fvl_rx;
3788 qp->pri_path_fl = orig_pri_path_fl;
3789 qp->vlan_index = orig_vlan_index;
3790 qp->feup = orig_feup;
3792 put_res(dev, slave, qpn, RES_QP);
3796 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3797 struct mlx4_vhcr *vhcr,
3798 struct mlx4_cmd_mailbox *inbox,
3799 struct mlx4_cmd_mailbox *outbox,
3800 struct mlx4_cmd_info *cmd)
3803 struct mlx4_qp_context *context = inbox->buf + 8;
3805 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3808 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3812 update_pkey_index(dev, slave, inbox);
3813 update_gid(dev, inbox, (u8)slave);
3814 adjust_proxy_tun_qkey(dev, vhcr, context);
3815 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3818 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3819 struct mlx4_vhcr *vhcr,
3820 struct mlx4_cmd_mailbox *inbox,
3821 struct mlx4_cmd_mailbox *outbox,
3822 struct mlx4_cmd_info *cmd)
3825 struct mlx4_qp_context *context = inbox->buf + 8;
3827 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3830 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3834 update_pkey_index(dev, slave, inbox);
3835 update_gid(dev, inbox, (u8)slave);
3836 adjust_proxy_tun_qkey(dev, vhcr, context);
3837 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3841 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3842 struct mlx4_vhcr *vhcr,
3843 struct mlx4_cmd_mailbox *inbox,
3844 struct mlx4_cmd_mailbox *outbox,
3845 struct mlx4_cmd_info *cmd)
3847 struct mlx4_qp_context *context = inbox->buf + 8;
3848 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3851 adjust_proxy_tun_qkey(dev, vhcr, context);
3852 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3855 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3856 struct mlx4_vhcr *vhcr,
3857 struct mlx4_cmd_mailbox *inbox,
3858 struct mlx4_cmd_mailbox *outbox,
3859 struct mlx4_cmd_info *cmd)
3862 struct mlx4_qp_context *context = inbox->buf + 8;
3864 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3867 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3871 adjust_proxy_tun_qkey(dev, vhcr, context);
3872 update_gid(dev, inbox, (u8)slave);
3873 update_pkey_index(dev, slave, inbox);
3874 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3877 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3878 struct mlx4_vhcr *vhcr,
3879 struct mlx4_cmd_mailbox *inbox,
3880 struct mlx4_cmd_mailbox *outbox,
3881 struct mlx4_cmd_info *cmd)
3884 struct mlx4_qp_context *context = inbox->buf + 8;
3886 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3889 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3893 adjust_proxy_tun_qkey(dev, vhcr, context);
3894 update_gid(dev, inbox, (u8)slave);
3895 update_pkey_index(dev, slave, inbox);
3896 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3899 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3900 struct mlx4_vhcr *vhcr,
3901 struct mlx4_cmd_mailbox *inbox,
3902 struct mlx4_cmd_mailbox *outbox,
3903 struct mlx4_cmd_info *cmd)
3906 int qpn = vhcr->in_modifier & 0x7fffff;
3909 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3912 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3916 atomic_dec(&qp->mtt->ref_count);
3917 atomic_dec(&qp->rcq->ref_count);
3918 atomic_dec(&qp->scq->ref_count);
3920 atomic_dec(&qp->srq->ref_count);
3921 res_end_move(dev, slave, RES_QP, qpn);
3925 res_abort_move(dev, slave, RES_QP, qpn);
3930 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3931 struct res_qp *rqp, u8 *gid)
3933 struct res_gid *res;
3935 list_for_each_entry(res, &rqp->mcg_list, list) {
3936 if (!memcmp(res->gid, gid, 16))
3942 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3943 u8 *gid, enum mlx4_protocol prot,
3944 enum mlx4_steer_type steer, u64 reg_id)
3946 struct res_gid *res;
3949 res = kzalloc(sizeof *res, GFP_KERNEL);
3953 spin_lock_irq(&rqp->mcg_spl);
3954 if (find_gid(dev, slave, rqp, gid)) {
3958 memcpy(res->gid, gid, 16);
3961 res->reg_id = reg_id;
3962 list_add_tail(&res->list, &rqp->mcg_list);
3965 spin_unlock_irq(&rqp->mcg_spl);
3970 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3971 u8 *gid, enum mlx4_protocol prot,
3972 enum mlx4_steer_type steer, u64 *reg_id)
3974 struct res_gid *res;
3977 spin_lock_irq(&rqp->mcg_spl);
3978 res = find_gid(dev, slave, rqp, gid);
3979 if (!res || res->prot != prot || res->steer != steer)
3982 *reg_id = res->reg_id;
3983 list_del(&res->list);
3987 spin_unlock_irq(&rqp->mcg_spl);
3992 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3993 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3994 enum mlx4_steer_type type, u64 *reg_id)
3996 switch (dev->caps.steering_mode) {
3997 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3998 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4001 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
4002 block_loopback, prot,
4005 case MLX4_STEERING_MODE_B0:
4006 if (prot == MLX4_PROT_ETH) {
4007 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4012 return mlx4_qp_attach_common(dev, qp, gid,
4013 block_loopback, prot, type);
4019 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4020 u8 gid[16], enum mlx4_protocol prot,
4021 enum mlx4_steer_type type, u64 reg_id)
4023 switch (dev->caps.steering_mode) {
4024 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4025 return mlx4_flow_detach(dev, reg_id);
4026 case MLX4_STEERING_MODE_B0:
4027 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4033 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4034 u8 *gid, enum mlx4_protocol prot)
4038 if (prot != MLX4_PROT_ETH)
4041 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4042 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4043 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4052 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4053 struct mlx4_vhcr *vhcr,
4054 struct mlx4_cmd_mailbox *inbox,
4055 struct mlx4_cmd_mailbox *outbox,
4056 struct mlx4_cmd_info *cmd)
4058 struct mlx4_qp qp; /* dummy for calling attach/detach */
4059 u8 *gid = inbox->buf;
4060 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
4065 int attach = vhcr->op_modifier;
4066 int block_loopback = vhcr->in_modifier >> 31;
4067 u8 steer_type_mask = 2;
4068 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
4070 qpn = vhcr->in_modifier & 0xffffff;
4071 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4077 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
4080 pr_err("Failed to attach rule to qp 0x%x\n", qpn);
4083 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4087 err = mlx4_adjust_port(dev, slave, gid, prot);
4091 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4095 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4097 pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
4100 put_res(dev, slave, qpn, RES_QP);
4104 qp_detach(dev, &qp, gid, prot, type, reg_id);
4106 put_res(dev, slave, qpn, RES_QP);
4111 * MAC validation for Flow Steering rules.
4112 * A VF can attach rules only with a MAC address that is assigned to it.
4114 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4115 struct list_head *rlist)
4117 struct mac_res *res, *tmp;
4120 /* make sure it isn't a multicast or broadcast MAC */
4121 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4122 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4123 list_for_each_entry_safe(res, tmp, rlist, list) {
4124 be_mac = cpu_to_be64(res->mac << 16);
4125 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4128 pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
4129 eth_header->eth.dst_mac, slave);
4135 static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
4136 struct _rule_hw *eth_header)
4138 if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
4139 is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4140 struct mlx4_net_trans_rule_hw_eth *eth =
4141 (struct mlx4_net_trans_rule_hw_eth *)eth_header;
4142 struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
4143 bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
4144 next_rule->rsvd == 0;
4147 ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
4152 * In case of missing eth header, append eth header with a MAC address
4153 * assigned to the VF.
4155 static int add_eth_header(struct mlx4_dev *dev, int slave,
4156 struct mlx4_cmd_mailbox *inbox,
4157 struct list_head *rlist, int header_id)
4159 struct mac_res *res, *tmp;
4161 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4162 struct mlx4_net_trans_rule_hw_eth *eth_header;
4163 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4164 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4166 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4168 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4170 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4172 /* Clear a space in the inbox for eth header */
4173 switch (header_id) {
4174 case MLX4_NET_TRANS_RULE_ID_IPV4:
4176 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4177 memmove(ip_header, eth_header,
4178 sizeof(*ip_header) + sizeof(*l4_header));
4180 case MLX4_NET_TRANS_RULE_ID_TCP:
4181 case MLX4_NET_TRANS_RULE_ID_UDP:
4182 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4184 memmove(l4_header, eth_header, sizeof(*l4_header));
4189 list_for_each_entry_safe(res, tmp, rlist, list) {
4190 if (port == res->port) {
4191 be_mac = cpu_to_be64(res->mac << 16);
4196 pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
4201 memset(eth_header, 0, sizeof(*eth_header));
4202 eth_header->size = sizeof(*eth_header) >> 2;
4203 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4204 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4205 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4211 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
4212 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4213 struct mlx4_vhcr *vhcr,
4214 struct mlx4_cmd_mailbox *inbox,
4215 struct mlx4_cmd_mailbox *outbox,
4216 struct mlx4_cmd_info *cmd_info)
4219 u32 qpn = vhcr->in_modifier & 0xffffff;
4223 u64 pri_addr_path_mask;
4224 struct mlx4_update_qp_context *cmd;
4227 cmd = (struct mlx4_update_qp_context *)inbox->buf;
4229 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4230 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4231 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4234 /* Just change the smac for the QP */
4235 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4237 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4241 port = (rqp->sched_queue >> 6 & 1) + 1;
4243 if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4244 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4245 err = mac_find_smac_ix_in_slave(dev, slave, port,
4249 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4255 err = mlx4_cmd(dev, inbox->dma,
4256 vhcr->in_modifier, 0,
4257 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4260 mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n", qpn);
4265 put_res(dev, slave, qpn, RES_QP);
4269 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4270 struct mlx4_vhcr *vhcr,
4271 struct mlx4_cmd_mailbox *inbox,
4272 struct mlx4_cmd_mailbox *outbox,
4273 struct mlx4_cmd_info *cmd)
4276 struct mlx4_priv *priv = mlx4_priv(dev);
4277 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4278 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4282 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4283 struct _rule_hw *rule_header;
4286 if (dev->caps.steering_mode !=
4287 MLX4_STEERING_MODE_DEVICE_MANAGED)
4290 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4291 ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
4292 if (ctrl->port <= 0)
4294 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4295 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4297 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4300 rule_header = (struct _rule_hw *)(ctrl + 1);
4301 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4303 if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4304 handle_eth_header_mcast_prio(ctrl, rule_header);
4306 if (slave == dev->caps.function)
4309 switch (header_id) {
4310 case MLX4_NET_TRANS_RULE_ID_ETH:
4311 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4316 case MLX4_NET_TRANS_RULE_ID_IB:
4318 case MLX4_NET_TRANS_RULE_ID_IPV4:
4319 case MLX4_NET_TRANS_RULE_ID_TCP:
4320 case MLX4_NET_TRANS_RULE_ID_UDP:
4321 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4322 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4326 vhcr->in_modifier +=
4327 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4330 pr_err("Corrupted mailbox\n");
4336 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4337 vhcr->in_modifier, 0,
4338 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4343 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4345 mlx4_err(dev, "Failed to add flow steering resources\n");
4347 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4348 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4352 atomic_inc(&rqp->ref_count);
4354 put_res(dev, slave, qpn, RES_QP);
4358 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4359 struct mlx4_vhcr *vhcr,
4360 struct mlx4_cmd_mailbox *inbox,
4361 struct mlx4_cmd_mailbox *outbox,
4362 struct mlx4_cmd_info *cmd)
4366 struct res_fs_rule *rrule;
4368 if (dev->caps.steering_mode !=
4369 MLX4_STEERING_MODE_DEVICE_MANAGED)
4372 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4375 /* Release the rule from busy state before removal */
4376 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4377 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4381 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4383 mlx4_err(dev, "Failed to remove flow steering resources\n");
4387 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4388 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4391 atomic_dec(&rqp->ref_count);
4393 put_res(dev, slave, rrule->qpn, RES_QP);
4398 BUSY_MAX_RETRIES = 10
4401 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4402 struct mlx4_vhcr *vhcr,
4403 struct mlx4_cmd_mailbox *inbox,
4404 struct mlx4_cmd_mailbox *outbox,
4405 struct mlx4_cmd_info *cmd)
4408 int index = vhcr->in_modifier & 0xffff;
4410 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4414 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4415 put_res(dev, slave, index, RES_COUNTER);
4419 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4421 struct res_gid *rgid;
4422 struct res_gid *tmp;
4423 struct mlx4_qp qp; /* dummy for calling attach/detach */
4425 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4426 switch (dev->caps.steering_mode) {
4427 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4428 mlx4_flow_detach(dev, rgid->reg_id);
4430 case MLX4_STEERING_MODE_B0:
4431 qp.qpn = rqp->local_qpn;
4432 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4433 rgid->prot, rgid->steer);
4436 list_del(&rgid->list);
4441 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4442 enum mlx4_resource type, int print)
4444 struct mlx4_priv *priv = mlx4_priv(dev);
4445 struct mlx4_resource_tracker *tracker =
4446 &priv->mfunc.master.res_tracker;
4447 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4448 struct res_common *r;
4449 struct res_common *tmp;
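/* Walk every resource of this type owned by the slave and flip it to
 * RES_ANY_BUSY so that no command wrapper can grab it while cleanup runs.
 * The return value counts resources that were already busy (still held by
 * an in-flight command); the caller retries until the count drops to zero
 * or it gives up after a timeout.
 */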
4453 spin_lock_irq(mlx4_tlock(dev));
4454 list_for_each_entry_safe(r, tmp, rlist, list) {
4455 if (r->owner == slave) {
4457 if (r->state == RES_ANY_BUSY) {
4460 "%s id 0x%llx is busy\n",
4465 r->from_state = r->state;
4466 r->state = RES_ANY_BUSY;
4472 spin_unlock_irq(mlx4_tlock(dev));
4477 static int move_all_busy(struct mlx4_dev *dev, int slave,
4478 enum mlx4_resource type)
4480 unsigned long begin;
4485 busy = _move_all_busy(dev, slave, type, 0);
4486 if (time_after(jiffies, begin + 5 * HZ))
4493 busy = _move_all_busy(dev, slave, type, 1);
4497 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4499 struct mlx4_priv *priv = mlx4_priv(dev);
4500 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4501 struct list_head *qp_list =
4502 &tracker->slave_list[slave].res_list[RES_QP];
4510 err = move_all_busy(dev, slave, RES_QP);
4512 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4515 spin_lock_irq(mlx4_tlock(dev));
4516 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4517 spin_unlock_irq(mlx4_tlock(dev));
4518 if (qp->com.owner == slave) {
4519 qpn = qp->com.res_id;
4520 detach_qp(dev, slave, qp);
4521 state = qp->com.from_state;
4522 while (state != 0) {
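/* Unwind the QP state machine one step per iteration: RES_QP_HW is moved
 * back to RES_QP_MAPPED (2RST command plus dropping MTT/CQ/SRQ references),
 * RES_QP_MAPPED to RES_QP_RESERVED (free the ICM), and RES_QP_RESERVED is
 * released entirely, at which point state reaches 0 and the loop ends.
 * The other rem_slave_*() helpers below follow the same pattern.
 */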
4524 case RES_QP_RESERVED:
4525 spin_lock_irq(mlx4_tlock(dev));
4526 rb_erase(&qp->com.node,
4527 &tracker->res_tree[RES_QP]);
4528 list_del(&qp->com.list);
4529 spin_unlock_irq(mlx4_tlock(dev));
4530 if (!valid_reserved(dev, slave, qpn)) {
4531 __mlx4_qp_release_range(dev, qpn, 1);
4532 mlx4_release_resource(dev, slave,
4539 if (!valid_reserved(dev, slave, qpn))
4540 __mlx4_qp_free_icm(dev, qpn);
4541 state = RES_QP_RESERVED;
4545 err = mlx4_cmd(dev, in_param,
4548 MLX4_CMD_TIME_CLASS_A,
4551 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4552 slave, qp->local_qpn);
4553 atomic_dec(&qp->rcq->ref_count);
4554 atomic_dec(&qp->scq->ref_count);
4555 atomic_dec(&qp->mtt->ref_count);
4557 atomic_dec(&qp->srq->ref_count);
4558 state = RES_QP_MAPPED;
4565 spin_lock_irq(mlx4_tlock(dev));
4567 spin_unlock_irq(mlx4_tlock(dev));
4570 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4572 struct mlx4_priv *priv = mlx4_priv(dev);
4573 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4574 struct list_head *srq_list =
4575 &tracker->slave_list[slave].res_list[RES_SRQ];
4576 struct res_srq *srq;
4577 struct res_srq *tmp;
4584 err = move_all_busy(dev, slave, RES_SRQ);
4586 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4589 spin_lock_irq(mlx4_tlock(dev));
4590 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4591 spin_unlock_irq(mlx4_tlock(dev));
4592 if (srq->com.owner == slave) {
4593 srqn = srq->com.res_id;
4594 state = srq->com.from_state;
4595 while (state != 0) {
4597 case RES_SRQ_ALLOCATED:
4598 __mlx4_srq_free_icm(dev, srqn);
4599 spin_lock_irq(mlx4_tlock(dev));
4600 rb_erase(&srq->com.node,
4601 &tracker->res_tree[RES_SRQ]);
4602 list_del(&srq->com.list);
4603 spin_unlock_irq(mlx4_tlock(dev));
4604 mlx4_release_resource(dev, slave,
4612 err = mlx4_cmd(dev, in_param, srqn, 1,
4614 MLX4_CMD_TIME_CLASS_A,
4617 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4620 atomic_dec(&srq->mtt->ref_count);
4622 atomic_dec(&srq->cq->ref_count);
4623 state = RES_SRQ_ALLOCATED;
4631 spin_lock_irq(mlx4_tlock(dev));
4633 spin_unlock_irq(mlx4_tlock(dev));
4636 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4638 struct mlx4_priv *priv = mlx4_priv(dev);
4639 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4640 struct list_head *cq_list =
4641 &tracker->slave_list[slave].res_list[RES_CQ];
4650 err = move_all_busy(dev, slave, RES_CQ);
4652 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4655 spin_lock_irq(mlx4_tlock(dev));
4656 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4657 spin_unlock_irq(mlx4_tlock(dev));
4658 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4659 cqn = cq->com.res_id;
4660 state = cq->com.from_state;
4661 while (state != 0) {
4663 case RES_CQ_ALLOCATED:
4664 __mlx4_cq_free_icm(dev, cqn);
4665 spin_lock_irq(mlx4_tlock(dev));
4666 rb_erase(&cq->com.node,
4667 &tracker->res_tree[RES_CQ]);
4668 list_del(&cq->com.list);
4669 spin_unlock_irq(mlx4_tlock(dev));
4670 mlx4_release_resource(dev, slave,
4678 err = mlx4_cmd(dev, in_param, cqn, 1,
4680 MLX4_CMD_TIME_CLASS_A,
4683 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4685 atomic_dec(&cq->mtt->ref_count);
4686 state = RES_CQ_ALLOCATED;
4694 spin_lock_irq(mlx4_tlock(dev));
4696 spin_unlock_irq(mlx4_tlock(dev));
4699 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4701 struct mlx4_priv *priv = mlx4_priv(dev);
4702 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4703 struct list_head *mpt_list =
4704 &tracker->slave_list[slave].res_list[RES_MPT];
4705 struct res_mpt *mpt;
4706 struct res_mpt *tmp;
4713 err = move_all_busy(dev, slave, RES_MPT);
4715 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4718 spin_lock_irq(mlx4_tlock(dev));
4719 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4720 spin_unlock_irq(mlx4_tlock(dev));
4721 if (mpt->com.owner == slave) {
4722 mptn = mpt->com.res_id;
4723 state = mpt->com.from_state;
4724 while (state != 0) {
4726 case RES_MPT_RESERVED:
4727 __mlx4_mpt_release(dev, mpt->key);
4728 spin_lock_irq(mlx4_tlock(dev));
4729 rb_erase(&mpt->com.node,
4730 &tracker->res_tree[RES_MPT]);
4731 list_del(&mpt->com.list);
4732 spin_unlock_irq(mlx4_tlock(dev));
4733 mlx4_release_resource(dev, slave,
4739 case RES_MPT_MAPPED:
4740 __mlx4_mpt_free_icm(dev, mpt->key);
4741 state = RES_MPT_RESERVED;
4746 err = mlx4_cmd(dev, in_param, mptn, 0,
4748 MLX4_CMD_TIME_CLASS_A,
4751 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4754 atomic_dec(&mpt->mtt->ref_count);
4755 state = RES_MPT_MAPPED;
4762 spin_lock_irq(mlx4_tlock(dev));
4764 spin_unlock_irq(mlx4_tlock(dev));
4767 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4769 struct mlx4_priv *priv = mlx4_priv(dev);
4770 struct mlx4_resource_tracker *tracker =
4771 &priv->mfunc.master.res_tracker;
4772 struct list_head *mtt_list =
4773 &tracker->slave_list[slave].res_list[RES_MTT];
4774 struct res_mtt *mtt;
4775 struct res_mtt *tmp;
4781 err = move_all_busy(dev, slave, RES_MTT);
4783 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4786 spin_lock_irq(mlx4_tlock(dev));
4787 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4788 spin_unlock_irq(mlx4_tlock(dev));
4789 if (mtt->com.owner == slave) {
4790 base = mtt->com.res_id;
4791 state = mtt->com.from_state;
4792 while (state != 0) {
4794 case RES_MTT_ALLOCATED:
4795 __mlx4_free_mtt_range(dev, base,
4797 spin_lock_irq(mlx4_tlock(dev));
4798 rb_erase(&mtt->com.node,
4799 &tracker->res_tree[RES_MTT]);
4800 list_del(&mtt->com.list);
4801 spin_unlock_irq(mlx4_tlock(dev));
4802 mlx4_release_resource(dev, slave, RES_MTT,
4803 1 << mtt->order, 0);
4813 spin_lock_irq(mlx4_tlock(dev));
4815 spin_unlock_irq(mlx4_tlock(dev));
4818 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4820 struct mlx4_priv *priv = mlx4_priv(dev);
4821 struct mlx4_resource_tracker *tracker =
4822 &priv->mfunc.master.res_tracker;
4823 struct list_head *fs_rule_list =
4824 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4825 struct res_fs_rule *fs_rule;
4826 struct res_fs_rule *tmp;
4831 err = move_all_busy(dev, slave, RES_FS_RULE);
4833 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4836 spin_lock_irq(mlx4_tlock(dev));
4837 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4838 spin_unlock_irq(mlx4_tlock(dev));
4839 if (fs_rule->com.owner == slave) {
4840 base = fs_rule->com.res_id;
4841 state = fs_rule->com.from_state;
4842 while (state != 0) {
4844 case RES_FS_RULE_ALLOCATED:
4846 err = mlx4_cmd(dev, base, 0, 0,
4847 MLX4_QP_FLOW_STEERING_DETACH,
4848 MLX4_CMD_TIME_CLASS_A,
4851 spin_lock_irq(mlx4_tlock(dev));
4852 rb_erase(&fs_rule->com.node,
4853 &tracker->res_tree[RES_FS_RULE]);
4854 list_del(&fs_rule->com.list);
4855 spin_unlock_irq(mlx4_tlock(dev));
4865 spin_lock_irq(mlx4_tlock(dev));
4867 spin_unlock_irq(mlx4_tlock(dev));
4870 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4872 struct mlx4_priv *priv = mlx4_priv(dev);
4873 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4874 struct list_head *eq_list =
4875 &tracker->slave_list[slave].res_list[RES_EQ];
4883 err = move_all_busy(dev, slave, RES_EQ);
4885 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4888 spin_lock_irq(mlx4_tlock(dev));
4889 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4890 spin_unlock_irq(mlx4_tlock(dev));
4891 if (eq->com.owner == slave) {
4892 eqn = eq->com.res_id;
4893 state = eq->com.from_state;
4894 while (state != 0) {
4896 case RES_EQ_RESERVED:
4897 spin_lock_irq(mlx4_tlock(dev));
4898 rb_erase(&eq->com.node,
4899 &tracker->res_tree[RES_EQ]);
4900 list_del(&eq->com.list);
4901 spin_unlock_irq(mlx4_tlock(dev));
4907 err = mlx4_cmd(dev, slave, eqn & 0x3ff,
4908 1, MLX4_CMD_HW2SW_EQ,
4909 MLX4_CMD_TIME_CLASS_A,
4912 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
4913 slave, eqn & 0x3ff);
4914 atomic_dec(&eq->mtt->ref_count);
4915 state = RES_EQ_RESERVED;
4923 spin_lock_irq(mlx4_tlock(dev));
4925 spin_unlock_irq(mlx4_tlock(dev));
4928 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4930 struct mlx4_priv *priv = mlx4_priv(dev);
4931 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4932 struct list_head *counter_list =
4933 &tracker->slave_list[slave].res_list[RES_COUNTER];
4934 struct res_counter *counter;
4935 struct res_counter *tmp;
4939 err = move_all_busy(dev, slave, RES_COUNTER);
4941 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4944 spin_lock_irq(mlx4_tlock(dev));
4945 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4946 if (counter->com.owner == slave) {
4947 index = counter->com.res_id;
4948 rb_erase(&counter->com.node,
4949 &tracker->res_tree[RES_COUNTER]);
4950 list_del(&counter->com.list);
4952 __mlx4_counter_free(dev, index);
4953 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4956 spin_unlock_irq(mlx4_tlock(dev));
4959 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4961 struct mlx4_priv *priv = mlx4_priv(dev);
4962 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4963 struct list_head *xrcdn_list =
4964 &tracker->slave_list[slave].res_list[RES_XRCD];
4965 struct res_xrcdn *xrcd;
4966 struct res_xrcdn *tmp;
4970 err = move_all_busy(dev, slave, RES_XRCD);
4972 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4975 spin_lock_irq(mlx4_tlock(dev));
4976 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4977 if (xrcd->com.owner == slave) {
4978 xrcdn = xrcd->com.res_id;
4979 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4980 list_del(&xrcd->com.list);
4982 __mlx4_xrcd_free(dev, xrcdn);
4985 spin_unlock_irq(mlx4_tlock(dev));
4988 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4990 struct mlx4_priv *priv = mlx4_priv(dev);
4991 mlx4_reset_roce_gids(dev, slave);
4992 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4993 rem_slave_vlans(dev, slave);
4994 rem_slave_macs(dev, slave);
4995 rem_slave_fs_rule(dev, slave);
4996 rem_slave_qps(dev, slave);
4997 rem_slave_srqs(dev, slave);
4998 rem_slave_cqs(dev, slave);
4999 rem_slave_mrs(dev, slave);
5000 rem_slave_eqs(dev, slave);
5001 rem_slave_mtts(dev, slave);
5002 rem_slave_counters(dev, slave);
5003 rem_slave_xrcdns(dev, slave);
5004 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5007 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5009 struct mlx4_vf_immed_vlan_work *work =
5010 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5011 struct mlx4_cmd_mailbox *mailbox;
5012 struct mlx4_update_qp_context *upd_context;
5013 struct mlx4_dev *dev = &work->priv->dev;
5014 struct mlx4_resource_tracker *tracker =
5015 &work->priv->mfunc.master.res_tracker;
5016 struct list_head *qp_list =
5017 &tracker->slave_list[work->slave].res_list[RES_QP];
5020 u64 qp_path_mask_vlan_ctrl =
5021 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
5022 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
5023 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
5024 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
5025 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
5026 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
5028 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
5029 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
5030 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
5031 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
5032 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
5033 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
5034 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
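/* Two update masks are used below: qp_path_mask covers the VLAN index,
 * the force-VLAN/feup bits and the sched_queue field that are always
 * rewritten, while qp_path_mask_vlan_ctrl adds the vlan_control filtering
 * bits, which are only applied to non-RC QPs (see the MLX4_QP_ST_RC check
 * in the loop).
 */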
5037 int port, errors = 0;
5040 if (mlx4_is_slave(dev)) {
5041 mlx4_warn(dev, "Trying to update QP on slave %d\n",
5046 mailbox = mlx4_alloc_cmd_mailbox(dev);
5047 if (IS_ERR(mailbox))
5049 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
5050 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5051 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5052 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
5053 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5054 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
5055 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5056 else if (!work->vlan_id)
5057 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5058 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5060 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5061 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5062 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5064 upd_context = mailbox->buf;
5065 upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
5067 spin_lock_irq(mlx4_tlock(dev));
5068 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
5069 spin_unlock_irq(mlx4_tlock(dev));
5070 if (qp->com.owner == work->slave) {
5071 if (qp->com.from_state != RES_QP_HW ||
5072 !qp->sched_queue || /* no INIT2RTR trans yet */
5073 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
5074 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
5075 spin_lock_irq(mlx4_tlock(dev));
5078 port = (qp->sched_queue >> 6 & 1) + 1;
5079 if (port != work->port) {
5080 spin_lock_irq(mlx4_tlock(dev));
5083 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
5084 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
5086 upd_context->primary_addr_path_mask =
5087 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
5088 if (work->vlan_id == MLX4_VGT) {
5089 upd_context->qp_context.param3 = qp->param3;
5090 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
5091 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
5092 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
5093 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
5094 upd_context->qp_context.pri_path.feup = qp->feup;
5095 upd_context->qp_context.pri_path.sched_queue =
5098 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
5099 upd_context->qp_context.pri_path.vlan_control = vlan_control;
5100 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
5101 upd_context->qp_context.pri_path.fvl_rx =
5102 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
5103 upd_context->qp_context.pri_path.fl =
5104 qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
5105 upd_context->qp_context.pri_path.feup =
5106 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
5107 upd_context->qp_context.pri_path.sched_queue =
5108 qp->sched_queue & 0xC7;
5109 upd_context->qp_context.pri_path.sched_queue |=
5110 ((work->qos & 0x7) << 3);
5111 upd_context->qp_mask |=
5113 MLX4_UPD_QP_MASK_QOS_VPP);
5114 upd_context->qp_context.qos_vport =
5118 err = mlx4_cmd(dev, mailbox->dma,
5119 qp->local_qpn & 0xffffff,
5120 0, MLX4_CMD_UPDATE_QP,
5121 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
5123 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
5124 work->slave, port, qp->local_qpn, err);
5128 spin_lock_irq(mlx4_tlock(dev));
5130 spin_unlock_irq(mlx4_tlock(dev));
5131 mlx4_free_cmd_mailbox(dev, mailbox);
5134 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5135 errors, work->slave, work->port);
5137 /* unregister the previous vlan_id if needed, provided there were
5138 * no errors while updating the QPs
5140 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5141 NO_INDX != work->orig_vlan_ix)
5142 __mlx4_unregister_vlan(&work->priv->dev, work->port,
5143 work->orig_vlan_id);