/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/delay.h>

#include "mlx4_ib.h"

#define MAX_VFS			80
#define MAX_PEND_REQS_PER_FUNC	4
#define MAD_TIMEOUT_MS		2000

#define mcg_warn(fmt, arg...)	pr_warn("MCG WARNING: " fmt, ##arg)
#define mcg_error(fmt, arg...)	pr_err(fmt, ##arg)
#define mcg_warn_group(group, format, arg...) \
	pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
	(group)->name, (group)->demux->port, ## arg)

#define mcg_error_group(group, format, arg...) \
	pr_err(" %16s: " format, (group)->name, ## arg)

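/* mgid0 is the all-zero MGID.  A join to MGID 0 asks the SA to allocate
 * an MGID for the group; such groups are parked on ctx->mcg_mgid0_list
 * until the SA's response supplies the real MGID (see
 * search_relocate_mgid0_group()).
 */
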
static union ib_gid mgid0;

static struct workqueue_struct *clean_wq;

enum mcast_state {
	MCAST_NOT_MEMBER = 0,
	MCAST_MEMBER,
};

enum mcast_group_state {
	MCAST_IDLE,
	MCAST_JOIN_SENT,
	MCAST_LEAVE_SENT,
	MCAST_RESP_READY
};

struct mcast_member {
	enum mcast_state state;
	u8 join_state;
	int num_pend_reqs;
	struct list_head pending;
};

struct ib_sa_mcmember_data {
	union ib_gid	mgid;
	union ib_gid	port_gid;
	__be32		qkey;
	__be16		mlid;
	u8		mtusel_mtu;
	u8		tclass;
	__be16		pkey;
	u8		ratesel_rate;
	u8		lifetmsel_lifetm;
	__be32		sl_flowlabel_hoplimit;
	u8		scope_join_state;
	u8		proxy_join;
	u8		reserved[2];
};

struct mcast_group {
	struct ib_sa_mcmember_data rec;
	struct rb_node		node;
	struct list_head	mgid0_list;
	struct mlx4_ib_demux_ctx *demux;
	struct mcast_member	func[MAX_VFS];
	struct mutex		lock;
	struct work_struct	work;
	struct list_head	pending_list;
	int			members[3];
	enum mcast_group_state	state;
	enum mcast_group_state	prev_state;
	struct ib_sa_mad	response_sa_mad;
	__be64			last_req_tid;

	char			name[33]; /* MGID string */
	struct device_attribute	dentry;

	/* refcount is the reference count for the following:
	 * 1. Each queued request
	 * 2. Each invocation of the worker thread
	 * 3. Membership of the port at the SA
	 */
	atomic_t		refcount;

	/* delayed work to clean pending SM request */
	struct delayed_work	timeout_work;
	struct list_head	cleanup_list;
};

struct mcast_req {
	int			func;
	struct ib_sa_mad	sa_mad;
	struct list_head	group_list;
	struct list_head	func_list;
	struct mcast_group	*group;
	int			clean;
};

#define safe_atomic_dec(ref) \
	do {\
		if (atomic_dec_and_test(ref)) \
			mcg_warn_group(group, "did not expect to reach zero\n"); \
	} while (0)

static const char *get_state_string(enum mcast_group_state state)
{
	switch (state) {
	case MCAST_IDLE:
		return "MCAST_IDLE";
	case MCAST_JOIN_SENT:
		return "MCAST_JOIN_SENT";
	case MCAST_LEAVE_SENT:
		return "MCAST_LEAVE_SENT";
	case MCAST_RESP_READY:
		return "MCAST_RESP_READY";
	}
	return "Invalid State";
}

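/*
 * Multicast groups are kept in an rb-tree keyed by MGID (raw byte
 * compare).  As inferred from the call sites below, callers are expected
 * to hold ctx->mcg_table_lock around mcast_find()/mcast_insert().
 */
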
static struct mcast_group *mcast_find(struct mlx4_ib_demux_ctx *ctx,
				      union ib_gid *mgid)
{
	struct rb_node *node = ctx->mcg_table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct mcast_group *mcast_insert(struct mlx4_ib_demux_ctx *ctx,
					struct mcast_group *group)
{
	struct rb_node **link = &ctx->mcg_table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &ctx->mcg_table);
	return NULL;
}

static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_ah_attr ah_attr;
	unsigned long flags;

	spin_lock_irqsave(&dev->sm_lock, flags);
	if (!dev->sm_ah[ctx->port - 1]) {
		/* port is not yet Active, sm_ah not ready */
		spin_unlock_irqrestore(&dev->sm_lock, flags);
		return -EAGAIN;
	}
	mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
	spin_unlock_irqrestore(&dev->sm_lock, flags);
	return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev),
				    ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY,
				    &ah_attr, NULL, mad);
}

static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_mad_agent *agent = dev->send_agent[ctx->port - 1][1];
	struct ib_wc wc;
	struct ib_ah_attr ah_attr;

	/* Our agent might not yet be registered when mads start to arrive */
	if (!agent)
		return -EAGAIN;

	ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);

	if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index))
		return -EINVAL;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = ctx->port;
	wc.slid = ah_attr.dlid;  /* opensm lid */
	wc.src_qp = 1;
	return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad);
}

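/*
 * Joins from VFs are proxied to the real SA: the request's port GID is
 * rewritten to the PF's GID (slave 0) and the TID is replaced with one
 * of our own, so the SA's response can be matched back to this group
 * and, via the TID's low byte, to the requesting function (see
 * search_relocate_mgid0_group()).
 */
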
static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_mad_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	/* we rely on a mad request as arrived from a VF */
	memcpy(&mad, sa_mad, sizeof mad);

	/* fix port GID to be the real one (slave 0) */
	sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0];

	/* assign our own TID */
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				   msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}

static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_SA_METHOD_DELETE;
	mad.mad_hdr.status = cpu_to_be16(0);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = 0x0;
	mad.sa_hdr.attr_offset = cpu_to_be16(7); /* MCMemberRecord size, in 8-byte units */
	mad.sa_hdr.comp_mask = IB_SA_MCMEMBER_REC_MGID |
		IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_JOIN_STATE;

	*sa_data = group->rec;
	sa_data->scope_join_state = join_state;

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	if (ret)
		group->state = MCAST_IDLE;

	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				   msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}

static int send_reply_to_slave(int slave, struct mcast_group *group,
			       struct ib_sa_mad *req_sa_mad, u16 status)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	struct ib_sa_mcmember_data *req_sa_data = (struct ib_sa_mcmember_data *)&req_sa_mad->data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	mad.mad_hdr.status = cpu_to_be16(status);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = req_sa_mad->mad_hdr.tid;
	*(u8 *)&mad.mad_hdr.tid = 0; /* resetting tid to 0 */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = req_sa_mad->sa_hdr.sm_key;
	mad.sa_hdr.attr_offset = cpu_to_be16(7); /* MCMemberRecord size, in 8-byte units */
	mad.sa_hdr.comp_mask = 0; /* ignored on responses, see IBTA spec */

	*sa_data = group->rec;

	/* reconstruct VF's requested join_state and port_gid */
	sa_data->scope_join_state &= 0xf0;
	sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f);
	memcpy(&sa_data->port_gid, &req_sa_data->port_gid, sizeof req_sa_data->port_gid);

	ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad);
	return ret;
}

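/*
 * check_selector() evaluates a 2-bit IBTA selector (bits 7:6 of the
 * field) against its 6-bit value (bits 5:0).  It returns 0 when the
 * group's value (src) satisfies the request (dst), non-zero otherwise.
 * Example: for MTU with selector IB_SA_GT, the request is satisfied
 * only if the group's MTU encoding is strictly greater than the
 * requested one (error when src <= dst).
 */
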
static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 src_value, u8 dst_value)
{
	int err;
	u8 selector = dst_value >> 6;
	dst_value &= 0x3f;
	src_value &= 0x3f;

	if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
		return 0;

	switch (selector) {
	case IB_SA_GT:
		err = (src_value <= dst_value);
		break;
	case IB_SA_LT:
		err = (src_value >= dst_value);
		break;
	case IB_SA_EQ:
		err = (src_value != dst_value);
		break;
	default:
		err = 0;
		break;
	}

	return err;
}

static u16 cmp_rec(struct ib_sa_mcmember_data *src,
		   struct ib_sa_mcmember_data *dst, ib_sa_comp_mask comp_mask)
{
	/* src is group record, dst is request record */
	/* MGID must already match */
	/* Port_GID we always replace to our Port_GID, so it is a match */

#define MAD_STATUS_REQ_INVALID 0x0200
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU,
			   src->mtusel_mtu, dst->mtusel_mtu))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->tclass != dst->tclass)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE,
			   src->ratesel_rate, dst->ratesel_rate))
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   src->lifetmsel_lifetm, dst->lifetmsel_lifetm))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0xf0000000) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0xf0000000))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x0fffff00) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x0fffff00))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x000000ff) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x000000ff))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE &&
	    (src->scope_join_state & 0xf0) !=
	    (dst->scope_join_state & 0xf0))
		return MAD_STATUS_REQ_INVALID;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}

/* release group, return 1 if this was last release and group is destroyed
 * timeout work is canceled synchronously */
static int release_group(struct mcast_group *group, int from_timeout_handler)
{
	struct mlx4_ib_demux_ctx *ctx = group->demux;
	int nzgroup;

	mutex_lock(&ctx->mcg_table_lock);
	mutex_lock(&group->lock);
	if (atomic_dec_and_test(&group->refcount)) {
		if (!from_timeout_handler) {
			if (group->state != MCAST_IDLE &&
			    !cancel_delayed_work(&group->timeout_work)) {
				atomic_inc(&group->refcount);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return 0;
			}
		}

		nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0);
		if (nzgroup)
			del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
		if (!list_empty(&group->pending_list))
			mcg_warn_group(group, "releasing a group with non empty pending list\n");
		if (nzgroup)
			rb_erase(&group->node, &ctx->mcg_table);
		list_del_init(&group->mgid0_list);
		mutex_unlock(&group->lock);
		mutex_unlock(&ctx->mcg_table_lock);
		kfree(group);
		return 1;
	} else {
		mutex_unlock(&group->lock);
		mutex_unlock(&ctx->mcg_table_lock);
	}
	return 0;
}

static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < 3; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}

static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (!group->members[i])
			leave_state |= (1 << i);

	return leave_state & (group->rec.scope_join_state & 7);
}

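/*
 * JoinState bit layout (IBTA MCMemberRecord): bit 0 = full member,
 * bit 1 = non-member, bit 2 = send-only non-member.  members[i] counts
 * the VFs holding bit i, so the port must leave exactly those states
 * whose count dropped to zero and which the group record still carries.
 * Example: if only one VF was a full member (members[0] == 1) and it
 * leaves, get_leave_state() returns 0x1.
 */
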
static int join_group(struct mcast_group *group, int slave, u8 join_mask)
{
	int ret = 0;
	u8 join_state;

	/* remove bits that slave is already member of, and adjust */
	join_state = join_mask & (~group->func[slave].join_state);
	adjust_membership(group, join_state, 1);
	group->func[slave].join_state |= join_state;
	if (group->func[slave].state != MCAST_MEMBER && join_state) {
		group->func[slave].state = MCAST_MEMBER;
		ret = 1;
	}
	return ret;
}

static int leave_group(struct mcast_group *group, int slave, u8 leave_state)
{
	int ret = 0;

	adjust_membership(group, leave_state, -1);
	group->func[slave].join_state &= ~leave_state;
	if (!group->func[slave].join_state) {
		group->func[slave].state = MCAST_NOT_MEMBER;
		ret = 1;
	}
	return ret;
}

static int check_leave(struct mcast_group *group, int slave, u8 leave_mask)
{
	if (group->func[slave].state != MCAST_MEMBER)
		return MAD_STATUS_REQ_INVALID;

	/* make sure we're not deleting unset bits */
	if (~group->func[slave].join_state & leave_mask)
		return MAD_STATUS_REQ_INVALID;

	if (!leave_mask)
		return MAD_STATUS_REQ_INVALID;

	return 0;
}

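/*
 * If the SA does not answer a join/leave within MAD_TIMEOUT_MS, the
 * delayed work below fires: it drops the stalled request, rolls the
 * group back to MCAST_IDLE and re-queues the work handler so the next
 * pending request can proceed.
 */
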
static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mcast_group *group;
	struct mcast_req *req = NULL;

	group = container_of(delay, typeof(*group), timeout_work);

	mutex_lock(&group->lock);
	if (group->state == MCAST_JOIN_SENT) {
		if (!list_empty(&group->pending_list)) {
			req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
			list_del(&req->group_list);
			list_del(&req->func_list);
			--group->func[req->func].num_pend_reqs;
			mutex_unlock(&group->lock);
			kfree(req);
			if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) {
				if (release_group(group, 1))
					return;
			} else {
				kfree(group);
				return;
			}
			mutex_lock(&group->lock);
		} else
			mcg_warn_group(group, "DRIVER BUG\n");
	} else if (group->state == MCAST_LEAVE_SENT) {
		if (group->rec.scope_join_state & 7)
			group->rec.scope_join_state &= 0xf8;
		group->state = MCAST_IDLE;
		mutex_unlock(&group->lock);
		if (release_group(group, 1))
			return;
		mutex_lock(&group->lock);
	} else
		mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state));
	group->state = MCAST_IDLE;
	atomic_inc(&group->refcount);
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);

	mutex_unlock(&group->lock);
}

static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
			    struct mcast_req *req)
{
	u16 status;

	if (req->clean)
		leave_mask = group->func[req->func].join_state;

	status = check_leave(group, req->func, leave_mask);
	if (!status)
		leave_group(group, req->func, leave_mask);

	if (!req->clean)
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
	--group->func[req->func].num_pend_reqs;
	list_del(&req->group_list);
	list_del(&req->func_list);
	kfree(req);
	return 1;
}

static int handle_join_req(struct mcast_group *group, u8 join_mask,
			   struct mcast_req *req)
{
	u8 group_join_state = group->rec.scope_join_state & 7;
	int ref = 0;
	u16 status;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;

	if (join_mask == (group_join_state & join_mask)) {
		/* port's membership need not change */
		status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask);
		if (!status)
			join_group(group, req->func, join_mask);

		--group->func[req->func].num_pend_reqs;
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
		list_del(&req->group_list);
		list_del(&req->func_list);
		kfree(req);
		++ref;
	} else {
		/* port's membership needs to be updated */
		group->prev_state = group->state;
		if (send_join_to_wire(group, &req->sa_mad)) {
			--group->func[req->func].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			ref = 1;
			group->state = group->prev_state;
		} else
			group->state = MCAST_JOIN_SENT;
	}

	return ref;
}

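/*
 * The work handler below runs in three phases, all under group->lock:
 * 1. consume a pending SA response (MCAST_RESP_READY) and fold it into
 *    the group record; 2. drain queued join/leave requests while the
 *    group is idle; 3. if no one is left in some join state, send a
 *    leave for those states to the SA.  rc counts how many group
 *    references must be dropped before returning.
 */
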
static void mlx4_ib_mcg_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_req *req = NULL;
	struct ib_sa_mcmember_data *sa_data;
	u8 req_join_state;
	int rc = 1; /* release_count - this is for the scheduled work */
	u16 status;
	u8 method;

	group = container_of(work, typeof(*group), work);

	mutex_lock(&group->lock);

	/* First, let's see if a response from SM is waiting regarding this group.
	 * If so, we need to update the group's REC. If this is a bad response, we
	 * may need to send a bad response to a VF waiting for it. If VF is waiting
	 * and this is a good response, the VF will be answered later in this func. */
	if (group->state == MCAST_RESP_READY) {
		/* cancels mlx4_ib_mcg_timeout_handler */
		cancel_delayed_work(&group->timeout_work);
		status = be16_to_cpu(group->response_sa_mad.mad_hdr.status);
		method = group->response_sa_mad.mad_hdr.method;
		if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) {
			mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n",
				be64_to_cpu(group->response_sa_mad.mad_hdr.tid),
				be64_to_cpu(group->last_req_tid));
			group->state = group->prev_state;
			goto process_requests;
		}
		if (status) {
			if (!list_empty(&group->pending_list))
				req = list_first_entry(&group->pending_list,
						struct mcast_req, group_list);
			if (method == IB_MGMT_METHOD_GET_RESP) {
				if (req) {
					send_reply_to_slave(req->func, group, &req->sa_mad, status);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					++rc;
				} else
					mcg_warn_group(group, "no request for failed join\n");
			} else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing)
				++rc;
		} else {
			u8 resp_join_state;
			u8 cur_join_state;

			resp_join_state = ((struct ib_sa_mcmember_data *)
						group->response_sa_mad.data)->scope_join_state & 7;
			cur_join_state = group->rec.scope_join_state & 7;

			if (method == IB_MGMT_METHOD_GET_RESP) {
				/* successful join */
				if (!cur_join_state && resp_join_state)
					--rc;
			} else if (!resp_join_state)
				++rc;

			memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec);
		}
		group->state = MCAST_IDLE;
	}

process_requests:
	/* We should now go over pending join/leave requests, as long as we are idle. */
	while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) {
		req = list_first_entry(&group->pending_list, struct mcast_req,
				       group_list);
		sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
		req_join_state = sa_data->scope_join_state & 0x7;

		/* For a leave request, we will immediately answer the VF, and
		 * update our internal counters. The actual leave will be sent
		 * to SM later, if at all needed. We dequeue the request now. */
		if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE)
			rc += handle_leave_req(group, req_join_state, req);
		else
			rc += handle_join_req(group, req_join_state, req);
	}

	/* Handle leaves */
	if (group->state == MCAST_IDLE) {
		req_join_state = get_leave_state(group);
		if (req_join_state) {
			group->rec.scope_join_state &= ~req_join_state;
			group->prev_state = group->state;
			if (send_leave_to_wire(group, req_join_state)) {
				group->state = group->prev_state;
				++rc;
			} else
				group->state = MCAST_LEAVE_SENT;
		}
	}

	if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE)
		goto process_requests;
	mutex_unlock(&group->lock);

	while (rc--)
		release_group(group, 0);
}

static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx *ctx,
						       __be64 tid,
						       union ib_gid *new_mgid)
{
	struct mcast_group *group = NULL, *cur_group;
	struct mcast_req *req;
	struct list_head *pos;
	struct list_head *n;

	mutex_lock(&ctx->mcg_table_lock);
	list_for_each_safe(pos, n, &ctx->mcg_mgid0_list) {
		group = list_entry(pos, struct mcast_group, mgid0_list);
		mutex_lock(&group->lock);
		if (group->last_req_tid == tid) {
			if (memcmp(new_mgid, &mgid0, sizeof mgid0)) {
				group->rec.mgid = *new_mgid;
				sprintf(group->name, "%016llx%016llx",
						be64_to_cpu(group->rec.mgid.global.subnet_prefix),
						be64_to_cpu(group->rec.mgid.global.interface_id));
				list_del_init(&group->mgid0_list);
				cur_group = mcast_insert(ctx, group);
				if (cur_group) {
					/* A race between our code and SM. Silently cleaning the new one */
					req = list_first_entry(&group->pending_list,
							       struct mcast_req, group_list);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					mutex_unlock(&group->lock);
					mutex_unlock(&ctx->mcg_table_lock);
					release_group(group, 0);
					return NULL;
				}

				atomic_inc(&group->refcount);
				add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return group;
			} else {
				struct mcast_req *tmp1, *tmp2;

				list_del(&group->mgid0_list);
				if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE)
					cancel_delayed_work_sync(&group->timeout_work);

				list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) {
					list_del(&tmp1->group_list);
					kfree(tmp1);
				}
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				kfree(group);
				return NULL;
			}
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);

	return NULL;
}

static ssize_t sysfs_show_group(struct device *dev,
				struct device_attribute *attr, char *buf);

static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
					 union ib_gid *mgid, int create,
					 gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	int is_mgid0;
	int i;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		group = mcast_find(ctx, mgid);
		if (group)
			goto found;
	}

	if (!create)
		return ERR_PTR(-ENOENT);

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->demux = ctx;
	group->rec.mgid = *mgid;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->mgid0_list);
	for (i = 0; i < MAX_VFS; ++i)
		INIT_LIST_HEAD(&group->func[i].pending);
	INIT_WORK(&group->work, mlx4_ib_mcg_work_handler);
	INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler);
	mutex_init(&group->lock);
	sprintf(group->name, "%016llx%016llx",
			be64_to_cpu(group->rec.mgid.global.subnet_prefix),
			be64_to_cpu(group->rec.mgid.global.interface_id));
	sysfs_attr_init(&group->dentry.attr);
	group->dentry.show = sysfs_show_group;
	group->dentry.store = NULL;
	group->dentry.attr.name = group->name;
	group->dentry.attr.mode = 0400;
	group->state = MCAST_IDLE;

	if (is_mgid0) {
		list_add(&group->mgid0_list, &ctx->mcg_mgid0_list);
		goto found;
	}

	cur_group = mcast_insert(ctx, group);
	if (cur_group) {
		mcg_warn("group just showed up %s - confused\n", cur_group->name);
		kfree(group);
		return ERR_PTR(-EINVAL);
	}

	add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);

found:
	atomic_inc(&group->refcount);
	return group;
}

static void queue_req(struct mcast_req *req)
{
	struct mcast_group *group = req->group;

	atomic_inc(&group->refcount); /* for the request */
	atomic_inc(&group->refcount); /* for scheduling the work */
	list_add_tail(&req->group_list, &group->pending_list);
	list_add_tail(&req->func_list, &group->func[req->func].pending);
	/* calls mlx4_ib_mcg_work_handler */
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);
}

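/*
 * MAD demux/multiplex entry points.  Return 1 when the MAD was consumed
 * here, 0 when it should pass through (tunnelled to/from the guest
 * unchanged), or a negative errno on failure.
 */
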
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
			      struct ib_sa_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;

	switch (mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) {
				__be64 tid = mad->mad_hdr.tid;
				*(u8 *)(&tid) = (u8)slave; /* in group we kept the modified TID */
				group = search_relocate_mgid0_group(ctx, tid, &rec->mgid);
			} else
				group = NULL;
		}

		if (!group)
			return 1; /* consumed */

		mutex_lock(&group->lock);
		group->response_sa_mad = *mad;
		group->prev_state = group->state;
		group->state = MCAST_RESP_READY;
		/* calls mlx4_ib_mcg_work_handler */
		atomic_inc(&group->refcount);
		if (!queue_work(ctx->mcg_wq, &group->work))
			safe_atomic_dec(&group->refcount);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_MGMT_METHOD_SET:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE:
		return 0; /* not consumed, pass-through to guest over tunnel */
	default:
		mcg_warn("In demux, port %d: unexpected MCMember method: 0x%x, dropping\n",
			port, mad->mad_hdr.method);
		return 1; /* consumed */
	}
}

int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
				  int slave, struct ib_sa_mad *sa_mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)sa_mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;
	struct mcast_req *req;
	int may_create = 0;

	if (ctx->flushing)
		return -EAGAIN;

	switch (sa_mad->mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
		may_create = 1;
		/* fall through */
	case IB_SA_METHOD_DELETE:
		req = kzalloc(sizeof *req, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		req->func = slave;
		req->sa_mad = *sa_mad;

		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			kfree(req);
			return PTR_ERR(group);
		}

		mutex_lock(&group->lock);
		if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) {
			mutex_unlock(&group->lock);
			mcg_warn_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n",
				       port, slave, MAX_PEND_REQS_PER_FUNC);
			release_group(group, 0);
			kfree(req);
			return -ENOMEM;
		}
		++group->func[slave].num_pend_reqs;
		req->group = group;
		queue_req(req);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_SA_METHOD_GET_TABLE:
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		return 0; /* not consumed, pass-through */
	default:
		mcg_warn("In multiplex, port %d, func %d: unexpected MCMember method: 0x%x, dropping\n",
			port, slave, sa_mad->mad_hdr.method);
		return 1; /* consumed */
	}
}

static ssize_t sysfs_show_group(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct mcast_group *group =
		container_of(attr, struct mcast_group, dentry);
	struct mcast_req *req = NULL;
	char pending_str[40];
	char state_str[40];
	ssize_t len = 0;
	int f;

	if (group->state == MCAST_IDLE)
		sprintf(state_str, "%s", get_state_string(group->state));
	else
		sprintf(state_str, "%s(TID=0x%llx)",
				get_state_string(group->state),
				be64_to_cpu(group->last_req_tid));
	if (list_empty(&group->pending_list)) {
		sprintf(pending_str, "No");
	} else {
		req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
		sprintf(pending_str, "Yes(TID=0x%llx)",
				be64_to_cpu(req->sa_mad.mad_hdr.tid));
	}
	len += sprintf(buf + len, "%1d [%02d,%02d,%02d] %4d %4s %5s ",
			group->rec.scope_join_state & 0xf,
			group->members[2], group->members[1], group->members[0],
			atomic_read(&group->refcount),
			pending_str,
			state_str);
	for (f = 0; f < MAX_VFS; ++f)
		if (group->func[f].state == MCAST_MEMBER)
			len += sprintf(buf + len, "%d[%1x] ",
					f, group->func[f].join_state);

	len += sprintf(buf + len, "\t\t(%4hx %4x %2x %2x %2x %2x %2x "
			"%4x %4x %2x %2x)\n",
			be16_to_cpu(group->rec.pkey),
			be32_to_cpu(group->rec.qkey),
			(group->rec.mtusel_mtu & 0xc0) >> 6,
			group->rec.mtusel_mtu & 0x3f,
			group->rec.tclass,
			(group->rec.ratesel_rate & 0xc0) >> 6,
			group->rec.ratesel_rate & 0x3f,
			(be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0xf0000000) >> 28,
			(be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x0fffff00) >> 8,
			be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x000000ff,
			group->rec.proxy_join);

	return len;
}

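/*
 * Each sysfs line shows: scope_join_state, the [send-only, non-member,
 * full-member] counters, the group refcount, whether a request is
 * pending, the state machine state, per-VF membership as vf[join_state],
 * and finally the raw record fields (pkey, qkey, mtu, tclass, rate,
 * sl/flow label/hop limit, proxy_join) in parentheses.
 */
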
int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
{
	char name[20];

	atomic_set(&ctx->tid, 0);
	sprintf(name, "mlx4_ib_mcg%d", ctx->port);
	ctx->mcg_wq = create_singlethread_workqueue(name);
	if (!ctx->mcg_wq)
		return -ENOMEM;

	mutex_init(&ctx->mcg_table_lock);
	ctx->mcg_table = RB_ROOT;
	INIT_LIST_HEAD(&ctx->mcg_mgid0_list);
	ctx->flushing = 0;

	return 0;
}

static void force_clean_group(struct mcast_group *group)
{
	struct mcast_req *req, *tmp;

	list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) {
		list_del(&req->group_list);
		kfree(req);
	}
	del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr);
	rb_erase(&group->node, &group->demux->mcg_table);
	kfree(group);
}

static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	int i;
	struct rb_node *p;
	struct mcast_group *group;
	unsigned long end;
	int count;

	for (i = 0; i < MAX_VFS; ++i)
		clean_vf_mcast(ctx, i);

	end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000);
	do {
		count = 0;
		mutex_lock(&ctx->mcg_table_lock);
		for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p))
			++count;
		mutex_unlock(&ctx->mcg_table_lock);
		if (!count)
			break;

		msleep(1);
	} while (time_after(end, jiffies));

	flush_workqueue(ctx->mcg_wq);
	if (destroy_wq)
		destroy_workqueue(ctx->mcg_wq);

	mutex_lock(&ctx->mcg_table_lock);
	while ((p = rb_first(&ctx->mcg_table)) != NULL) {
		group = rb_entry(p, struct mcast_group, node);
		if (atomic_read(&group->refcount))
			mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n",
				       atomic_read(&group->refcount), group);

		force_clean_group(group);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}

struct clean_work {
	struct work_struct work;
	struct mlx4_ib_demux_ctx *ctx;
	int destroy_wq;
};

static void mcg_clean_task(struct work_struct *work)
{
	struct clean_work *cw = container_of(work, struct clean_work, work);

	_mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq);
	cw->ctx->flushing = 0;
	kfree(cw);
}

void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	struct clean_work *work;

	/* prevent handling any more MADs */
	mutex_lock(&ctx->mcg_table_lock);
	ctx->flushing = 1;
	mutex_unlock(&ctx->mcg_table_lock);

	if (destroy_wq) {
		_mlx4_ib_mcg_port_cleanup(ctx, destroy_wq);
		ctx->flushing = 0;
		return;
	}

	work = kmalloc(sizeof *work, GFP_KERNEL);
	if (!work) {
		ctx->flushing = 0;
		mcg_warn("failed allocating work for cleanup\n");
		return;
	}

	work->ctx = ctx;
	work->destroy_wq = destroy_wq;
	INIT_WORK(&work->work, mcg_clean_task);
	queue_work(clean_wq, &work->work);
}

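/*
 * Cleanup-generated leaves: when a VF goes away we synthesize a DELETE
 * MAD on its behalf (build_leave_mad()/push_deleting_req() below).  Such
 * requests are marked req->clean, so handle_leave_req() drops all of the
 * VF's join states and sends no reply back to the departed VF.
 */
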
static void build_leave_mad(struct mcast_req *req)
{
	struct ib_sa_mad *mad = &req->sa_mad;

	mad->mad_hdr.method = IB_SA_METHOD_DELETE;
}

static void clear_pending_reqs(struct mcast_group *group, int vf)
{
	struct mcast_req *req, *tmp, *group_first = NULL;
	int clear;
	int pend = 0;

	if (!list_empty(&group->pending_list))
		group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list);

	list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) {
		clear = 1;
		if (group_first == req &&
		    (group->state == MCAST_JOIN_SENT ||
		     group->state == MCAST_LEAVE_SENT)) {
			clear = cancel_delayed_work(&group->timeout_work);
			pend = !clear;
			group->state = MCAST_IDLE;
		}
		if (clear) {
			--group->func[vf].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			atomic_dec(&group->refcount);
		}
	}

	if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) {
		mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n",
			       list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs);
	}
}

static int push_deleting_req(struct mcast_group *group, int slave)
{
	struct mcast_req *req;
	struct mcast_req *pend_req;

	if (!group->func[slave].join_state)
		return 0;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req) {
		mcg_warn_group(group, "failed allocation - may leave stale groups\n");
		return -ENOMEM;
	}

	if (!list_empty(&group->func[slave].pending)) {
		pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list);
		if (pend_req->clean) {
			kfree(req);
			return 0;
		}
	}

	req->clean = 1;
	req->func = slave;
	req->group = group;
	++group->func[slave].num_pend_reqs;
	build_leave_mad(req);
	queue_req(req);
	return 0;
}

void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave)
{
	struct mcast_group *group;
	struct rb_node *p;

	mutex_lock(&ctx->mcg_table_lock);
	for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) {
		group = rb_entry(p, struct mcast_group, node);
		mutex_lock(&group->lock);
		if (atomic_read(&group->refcount)) {
			/* clear pending requests of this VF */
			clear_pending_reqs(group, slave);
			push_deleting_req(group, slave);
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}

int mlx4_ib_mcg_init(void)
{
	clean_wq = create_singlethread_workqueue("mlx4_ib_mcg");
	if (!clean_wq)
		return -ENOMEM;

	return 0;
}

void mlx4_ib_mcg_destroy(void)
{
	destroy_workqueue(clean_wq);
}