2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/dma-mapping.h>
37 #include <linux/slab.h>
38 #include <linux/module.h>
39 #include <rdma/ib_cache.h>
46 MODULE_LICENSE("Dual BSD/GPL");
47 MODULE_DESCRIPTION("kernel IB MAD API");
48 MODULE_AUTHOR("Hal Rosenstock");
49 MODULE_AUTHOR("Sean Hefty");
51 static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
52 static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
54 module_param_named(send_queue_size, mad_sendq_size, int, 0444);
55 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
56 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
57 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
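/*
 * Usage note (illustrative, not part of the original source): both queue
 * sizes are read-only module parameters (mode 0444), so they can only be
 * changed at load time, e.g. on the module command line (the "ib_mad"
 * module name is assumed here from context):
 *
 *	modprobe ib_mad send_queue_size=256 recv_queue_size=1024
 */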
59 static struct kmem_cache *ib_mad_cache;
61 static struct list_head ib_mad_port_list;
62 static u32 ib_mad_client_id = 0;
65 static DEFINE_SPINLOCK(ib_mad_port_list_lock);
67 /* Forward declarations */
68 static int method_in_use(struct ib_mad_mgmt_method_table **method,
69 struct ib_mad_reg_req *mad_reg_req);
70 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
71 static struct ib_mad_agent_private *find_mad_agent(
72 struct ib_mad_port_private *port_priv,
74 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
75 struct ib_mad_private *mad);
76 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
77 static void timeout_sends(struct work_struct *work);
78 static void local_completions(struct work_struct *work);
79 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
80 struct ib_mad_agent_private *agent_priv,
82 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
83 struct ib_mad_agent_private *agent_priv);
86 * Returns an ib_mad_port_private structure or NULL for a device/port
87 * Assumes ib_mad_port_list_lock is being held
89 static inline struct ib_mad_port_private *
90 __ib_get_mad_port(struct ib_device *device, int port_num)
92 struct ib_mad_port_private *entry;
94 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
95 if (entry->device == device && entry->port_num == port_num)
102 * Wrapper function to return an ib_mad_port_private structure or NULL
105 static inline struct ib_mad_port_private *
106 ib_get_mad_port(struct ib_device *device, int port_num)
108 struct ib_mad_port_private *entry;
111 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
112 entry = __ib_get_mad_port(device, port_num);
113 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
118 static inline u8 convert_mgmt_class(u8 mgmt_class)
120 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
121 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
125 static int get_spl_qp_index(enum ib_qp_type qp_type)
138 static int vendor_class_index(u8 mgmt_class)
140 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
143 static int is_vendor_class(u8 mgmt_class)
145 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
146 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
151 static int is_vendor_oui(char *oui)
153 if (oui[0] || oui[1] || oui[2])
158 static int is_vendor_method_in_use(
159 struct ib_mad_mgmt_vendor_class *vendor_class,
160 struct ib_mad_reg_req *mad_reg_req)
162 struct ib_mad_mgmt_method_table *method;
165 for (i = 0; i < MAX_MGMT_OUI; i++) {
166 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
167 method = vendor_class->method_table[i];
169 if (method_in_use(&method, mad_reg_req))
179 int ib_response_mad(struct ib_mad *mad)
181 return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
182 (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
183 ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
184 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
186 EXPORT_SYMBOL(ib_response_mad);
189 * ib_register_mad_agent - Register to send/receive MADs
191 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
193 enum ib_qp_type qp_type,
194 struct ib_mad_reg_req *mad_reg_req,
196 ib_mad_send_handler send_handler,
197 ib_mad_recv_handler recv_handler,
200 struct ib_mad_port_private *port_priv;
201 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
202 struct ib_mad_agent_private *mad_agent_priv;
203 struct ib_mad_reg_req *reg_req = NULL;
204 struct ib_mad_mgmt_class_table *class;
205 struct ib_mad_mgmt_vendor_class_table *vendor;
206 struct ib_mad_mgmt_vendor_class *vendor_class;
207 struct ib_mad_mgmt_method_table *method;
210 u8 mgmt_class, vclass;
212 /* Validate parameters */
213 qpn = get_spl_qp_index(qp_type);
217 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
220 /* Validate MAD registration request if supplied */
222 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
226 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
228 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
229 * one in this range currently allowed
231 if (mad_reg_req->mgmt_class !=
232 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
234 } else if (mad_reg_req->mgmt_class == 0) {
236 * Class 0 is reserved in IBA and is used for
237 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
240 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
242 * If class is in "new" vendor range,
243 * ensure supplied OUI is not zero
245 if (!is_vendor_oui(mad_reg_req->oui))
248 /* Make sure class supplied is consistent with RMPP */
249 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
253 /* Make sure class supplied is consistent with QP type */
254 if (qp_type == IB_QPT_SMI) {
255 if ((mad_reg_req->mgmt_class !=
256 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
257 (mad_reg_req->mgmt_class !=
258 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
261 if ((mad_reg_req->mgmt_class ==
262 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
263 (mad_reg_req->mgmt_class ==
264 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
268 /* No registration request supplied */
273 /* Validate device and port */
274 port_priv = ib_get_mad_port(device, port_num);
276 ret = ERR_PTR(-ENODEV);
280 /* Verify the QP requested is supported. For example, Ethernet devices
281 * will not have QP0 */
282 if (!port_priv->qp_info[qpn].qp) {
283 ret = ERR_PTR(-EPROTONOSUPPORT);
287 /* Allocate structures */
288 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
289 if (!mad_agent_priv) {
290 ret = ERR_PTR(-ENOMEM);
294 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
295 IB_ACCESS_LOCAL_WRITE);
296 if (IS_ERR(mad_agent_priv->agent.mr)) {
297 ret = ERR_PTR(-ENOMEM);
302 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
304 ret = ERR_PTR(-ENOMEM);
309 /* Now, fill in the various structures */
310 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
311 mad_agent_priv->reg_req = reg_req;
312 mad_agent_priv->agent.rmpp_version = rmpp_version;
313 mad_agent_priv->agent.device = device;
314 mad_agent_priv->agent.recv_handler = recv_handler;
315 mad_agent_priv->agent.send_handler = send_handler;
316 mad_agent_priv->agent.context = context;
317 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
318 mad_agent_priv->agent.port_num = port_num;
319 spin_lock_init(&mad_agent_priv->lock);
320 INIT_LIST_HEAD(&mad_agent_priv->send_list);
321 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
322 INIT_LIST_HEAD(&mad_agent_priv->done_list);
323 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
324 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
325 INIT_LIST_HEAD(&mad_agent_priv->local_list);
326 INIT_WORK(&mad_agent_priv->local_work, local_completions);
327 atomic_set(&mad_agent_priv->refcount, 1);
328 init_completion(&mad_agent_priv->comp);
330 spin_lock_irqsave(&port_priv->reg_lock, flags);
331 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
334 * Make sure MAD registration (if supplied)
335 * is non-overlapping with any existing ones
338 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
339 if (!is_vendor_class(mgmt_class)) {
340 class = port_priv->version[mad_reg_req->
341 mgmt_class_version].class;
343 method = class->method_table[mgmt_class];
345 if (method_in_use(&method,
350 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
353 /* "New" vendor class range */
354 vendor = port_priv->version[mad_reg_req->
355 mgmt_class_version].vendor;
357 vclass = vendor_class_index(mgmt_class);
358 vendor_class = vendor->vendor_class[vclass];
360 if (is_vendor_method_in_use(
366 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
374 /* Add mad agent into port's agent list */
375 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
376 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
378 return &mad_agent_priv->agent;
381 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
384 ib_dereg_mr(mad_agent_priv->agent.mr);
386 kfree(mad_agent_priv);
390 EXPORT_SYMBOL(ib_register_mad_agent);
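/*
 * Illustrative caller-side sketch (assumed example, not part of this file):
 * registering a GSI agent for SubnAdm (SA) class Get MADs.  The handler
 * names and surrounding variables are placeholders.
 *
 *	struct ib_mad_reg_req reg_req = {
 *		.mgmt_class		= IB_MGMT_CLASS_SUBN_ADM,
 *		.mgmt_class_version	= 2,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *				      &reg_req, 0, my_send_handler,
 *				      my_recv_handler, my_context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */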
392 static inline int is_snooping_sends(int mad_snoop_flags)
394 return (mad_snoop_flags &
395 (/*IB_MAD_SNOOP_POSTED_SENDS |
396 IB_MAD_SNOOP_RMPP_SENDS |*/
397 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
398 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
401 static inline int is_snooping_recvs(int mad_snoop_flags)
403 return (mad_snoop_flags &
404 (IB_MAD_SNOOP_RECVS /*|
405 IB_MAD_SNOOP_RMPP_RECVS*/));
408 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
409 struct ib_mad_snoop_private *mad_snoop_priv)
411 struct ib_mad_snoop_private **new_snoop_table;
415 spin_lock_irqsave(&qp_info->snoop_lock, flags);
416 /* Check for empty slot in array. */
417 for (i = 0; i < qp_info->snoop_table_size; i++)
418 if (!qp_info->snoop_table[i])
421 if (i == qp_info->snoop_table_size) {
423 new_snoop_table = krealloc(qp_info->snoop_table,
424 sizeof mad_snoop_priv *
425 (qp_info->snoop_table_size + 1),
427 if (!new_snoop_table) {
432 qp_info->snoop_table = new_snoop_table;
433 qp_info->snoop_table_size++;
435 qp_info->snoop_table[i] = mad_snoop_priv;
436 atomic_inc(&qp_info->snoop_count);
438 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
442 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
444 enum ib_qp_type qp_type,
446 ib_mad_snoop_handler snoop_handler,
447 ib_mad_recv_handler recv_handler,
450 struct ib_mad_port_private *port_priv;
451 struct ib_mad_agent *ret;
452 struct ib_mad_snoop_private *mad_snoop_priv;
455 /* Validate parameters */
456 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
457 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
458 ret = ERR_PTR(-EINVAL);
461 qpn = get_spl_qp_index(qp_type);
463 ret = ERR_PTR(-EINVAL);
466 port_priv = ib_get_mad_port(device, port_num);
468 ret = ERR_PTR(-ENODEV);
471 /* Allocate structures */
472 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
473 if (!mad_snoop_priv) {
474 ret = ERR_PTR(-ENOMEM);
478 /* Now, fill in the various structures */
479 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
480 mad_snoop_priv->agent.device = device;
481 mad_snoop_priv->agent.recv_handler = recv_handler;
482 mad_snoop_priv->agent.snoop_handler = snoop_handler;
483 mad_snoop_priv->agent.context = context;
484 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
485 mad_snoop_priv->agent.port_num = port_num;
486 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
487 init_completion(&mad_snoop_priv->comp);
488 mad_snoop_priv->snoop_index = register_snoop_agent(
489 &port_priv->qp_info[qpn],
491 if (mad_snoop_priv->snoop_index < 0) {
492 ret = ERR_PTR(mad_snoop_priv->snoop_index);
496 atomic_set(&mad_snoop_priv->refcount, 1);
497 return &mad_snoop_priv->agent;
500 kfree(mad_snoop_priv);
504 EXPORT_SYMBOL(ib_register_mad_snoop);
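/*
 * Illustrative sketch (assumed example): snooping send completions and
 * received MADs on a port's GSI QP.  my_snoop_handler, my_recv_handler
 * and my_context are placeholders supplied by the snooping client.
 *
 *	struct ib_mad_agent *snoop_agent;
 *
 *	snoop_agent = ib_register_mad_snoop(device, port_num, IB_QPT_GSI,
 *					    IB_MAD_SNOOP_SEND_COMPLETIONS |
 *					    IB_MAD_SNOOP_RECVS,
 *					    my_snoop_handler,
 *					    my_recv_handler, my_context);
 *	if (IS_ERR(snoop_agent))
 *		return PTR_ERR(snoop_agent);
 */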
506 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
508 if (atomic_dec_and_test(&mad_agent_priv->refcount))
509 complete(&mad_agent_priv->comp);
512 static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
514 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
515 complete(&mad_snoop_priv->comp);
518 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
520 struct ib_mad_port_private *port_priv;
523 /* Note that we could still be handling received MADs */
526 * Canceling all sends results in dropping received response
527 * MADs, preventing us from queuing additional work
529 cancel_mads(mad_agent_priv);
530 port_priv = mad_agent_priv->qp_info->port_priv;
531 cancel_delayed_work(&mad_agent_priv->timed_work);
533 spin_lock_irqsave(&port_priv->reg_lock, flags);
534 remove_mad_reg_req(mad_agent_priv);
535 list_del(&mad_agent_priv->agent_list);
536 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
538 flush_workqueue(port_priv->wq);
539 ib_cancel_rmpp_recvs(mad_agent_priv);
541 deref_mad_agent(mad_agent_priv);
542 wait_for_completion(&mad_agent_priv->comp);
544 kfree(mad_agent_priv->reg_req);
545 ib_dereg_mr(mad_agent_priv->agent.mr);
546 kfree(mad_agent_priv);
549 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
551 struct ib_mad_qp_info *qp_info;
554 qp_info = mad_snoop_priv->qp_info;
555 spin_lock_irqsave(&qp_info->snoop_lock, flags);
556 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
557 atomic_dec(&qp_info->snoop_count);
558 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
560 deref_snoop_agent(mad_snoop_priv);
561 wait_for_completion(&mad_snoop_priv->comp);
563 kfree(mad_snoop_priv);
567 * ib_unregister_mad_agent - Unregisters a client from using MAD services
569 int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
571 struct ib_mad_agent_private *mad_agent_priv;
572 struct ib_mad_snoop_private *mad_snoop_priv;
574 /* If the TID is zero, the agent can only snoop. */
575 if (mad_agent->hi_tid) {
576 mad_agent_priv = container_of(mad_agent,
577 struct ib_mad_agent_private,
579 unregister_mad_agent(mad_agent_priv);
581 mad_snoop_priv = container_of(mad_agent,
582 struct ib_mad_snoop_private,
584 unregister_mad_snoop(mad_snoop_priv);
588 EXPORT_SYMBOL(ib_unregister_mad_agent);
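/*
 * Both regular and snoop agents are released through
 * ib_unregister_mad_agent() above: agents created by
 * ib_register_mad_agent() carry a non-zero hi_tid, anything else is
 * treated as a snoop-only agent.
 */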
590 static void dequeue_mad(struct ib_mad_list_head *mad_list)
592 struct ib_mad_queue *mad_queue;
595 BUG_ON(!mad_list->mad_queue);
596 mad_queue = mad_list->mad_queue;
597 spin_lock_irqsave(&mad_queue->lock, flags);
598 list_del(&mad_list->list);
600 spin_unlock_irqrestore(&mad_queue->lock, flags);
603 static void snoop_send(struct ib_mad_qp_info *qp_info,
604 struct ib_mad_send_buf *send_buf,
605 struct ib_mad_send_wc *mad_send_wc,
608 struct ib_mad_snoop_private *mad_snoop_priv;
612 spin_lock_irqsave(&qp_info->snoop_lock, flags);
613 for (i = 0; i < qp_info->snoop_table_size; i++) {
614 mad_snoop_priv = qp_info->snoop_table[i];
615 if (!mad_snoop_priv ||
616 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
619 atomic_inc(&mad_snoop_priv->refcount);
620 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
621 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
622 send_buf, mad_send_wc);
623 deref_snoop_agent(mad_snoop_priv);
624 spin_lock_irqsave(&qp_info->snoop_lock, flags);
626 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
629 static void snoop_recv(struct ib_mad_qp_info *qp_info,
630 struct ib_mad_recv_wc *mad_recv_wc,
633 struct ib_mad_snoop_private *mad_snoop_priv;
637 spin_lock_irqsave(&qp_info->snoop_lock, flags);
638 for (i = 0; i < qp_info->snoop_table_size; i++) {
639 mad_snoop_priv = qp_info->snoop_table[i];
640 if (!mad_snoop_priv ||
641 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
644 atomic_inc(&mad_snoop_priv->refcount);
645 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
646 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
648 deref_snoop_agent(mad_snoop_priv);
649 spin_lock_irqsave(&qp_info->snoop_lock, flags);
651 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
654 static void build_smp_wc(struct ib_qp *qp,
655 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
658 memset(wc, 0, sizeof *wc);
660 wc->status = IB_WC_SUCCESS;
661 wc->opcode = IB_WC_RECV;
662 wc->pkey_index = pkey_index;
663 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
668 wc->dlid_path_bits = 0;
669 wc->port_num = port_num;
673 * Return 0 if SMP is to be sent
674 * Return 1 if SMP was consumed locally (whether or not solicited)
675 * Return < 0 if error
677 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
678 struct ib_mad_send_wr_private *mad_send_wr)
681 struct ib_smp *smp = mad_send_wr->send_buf.mad;
683 struct ib_mad_local_private *local;
684 struct ib_mad_private *mad_priv;
685 struct ib_mad_port_private *port_priv;
686 struct ib_mad_agent_private *recv_mad_agent = NULL;
687 struct ib_device *device = mad_agent_priv->agent.device;
690 struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
692 if (device->node_type == RDMA_NODE_IB_SWITCH &&
693 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
694 port_num = send_wr->wr.ud.port_num;
696 port_num = mad_agent_priv->agent.port_num;
699 * Directed route handling starts if the initial LID routed part of
700 * a request or the ending LID routed part of a response is empty.
701 * If we are at the start of the LID routed part, don't update the
702 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
704 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
706 smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
709 printk(KERN_ERR PFX "Invalid directed route\n");
713 /* Check to post send on QP or process locally */
714 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
715 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
718 local = kmalloc(sizeof *local, GFP_ATOMIC);
721 printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
724 local->mad_priv = NULL;
725 local->recv_mad_agent = NULL;
726 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
729 printk(KERN_ERR PFX "No memory for local response MAD\n");
734 build_smp_wc(mad_agent_priv->agent.qp,
735 send_wr->wr_id, be16_to_cpu(smp->dr_slid),
736 send_wr->wr.ud.pkey_index,
737 send_wr->wr.ud.port_num, &mad_wc);
739 /* No GRH for DR SMP */
740 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
741 (struct ib_mad *)smp,
742 (struct ib_mad *)&mad_priv->mad);
745 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
746 if (ib_response_mad(&mad_priv->mad.mad) &&
747 mad_agent_priv->agent.recv_handler) {
748 local->mad_priv = mad_priv;
749 local->recv_mad_agent = mad_agent_priv;
751 * Reference MAD agent until receive
752 * side of local completion handled
754 atomic_inc(&mad_agent_priv->refcount);
756 kmem_cache_free(ib_mad_cache, mad_priv);
758 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
759 kmem_cache_free(ib_mad_cache, mad_priv);
761 case IB_MAD_RESULT_SUCCESS:
762 /* Treat like an incoming receive MAD */
763 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
764 mad_agent_priv->agent.port_num);
766 memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
767 recv_mad_agent = find_mad_agent(port_priv,
770 if (!port_priv || !recv_mad_agent) {
772 * No receiving agent so drop packet and
773 * generate send completion.
775 kmem_cache_free(ib_mad_cache, mad_priv);
778 local->mad_priv = mad_priv;
779 local->recv_mad_agent = recv_mad_agent;
782 kmem_cache_free(ib_mad_cache, mad_priv);
788 local->mad_send_wr = mad_send_wr;
789 /* Reference MAD agent until send side of local completion handled */
790 atomic_inc(&mad_agent_priv->refcount);
791 /* Queue local completion to local list */
792 spin_lock_irqsave(&mad_agent_priv->lock, flags);
793 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
794 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
795 queue_work(mad_agent_priv->qp_info->port_priv->wq,
796 &mad_agent_priv->local_work);
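/*
 * Worked example for get_pad_size() below (illustrative): with an SA
 * header, hdr_len is IB_MGMT_SA_HDR (56), so seg_size is
 * sizeof(struct ib_mad) - 56 = 200.  For data_len = 300,
 * pad = 200 - (300 % 200) = 100, rounding the payload up to two full
 * 200-byte RMPP segments; a data_len that is already a multiple of the
 * segment size needs no padding.
 */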
802 static int get_pad_size(int hdr_len, int data_len)
806 seg_size = sizeof(struct ib_mad) - hdr_len;
807 if (data_len && seg_size) {
808 pad = seg_size - data_len % seg_size;
809 return pad == seg_size ? 0 : pad;
814 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
816 struct ib_rmpp_segment *s, *t;
818 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
824 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
827 struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
828 struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
829 struct ib_rmpp_segment *seg = NULL;
830 int left, seg_size, pad;
832 send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
833 seg_size = send_buf->seg_size;
836 /* Allocate data segments. */
837 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
838 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
840 printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
841 "alloc failed for len %zd, gfp %#x\n",
842 sizeof (*seg) + seg_size, gfp_mask);
843 free_send_rmpp_list(send_wr);
846 seg->num = ++send_buf->seg_count;
847 list_add_tail(&seg->list, &send_wr->rmpp_list);
850 /* Zero any padding */
852 memset(seg->data + seg_size - pad, 0, pad);
854 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
856 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
857 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
859 send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
860 struct ib_rmpp_segment, list);
861 send_wr->last_ack_seg = send_wr->cur_seg;
865 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
866 u32 remote_qpn, u16 pkey_index,
868 int hdr_len, int data_len,
871 struct ib_mad_agent_private *mad_agent_priv;
872 struct ib_mad_send_wr_private *mad_send_wr;
873 int pad, message_size, ret, size;
876 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
878 pad = get_pad_size(hdr_len, data_len);
879 message_size = hdr_len + data_len + pad;
881 if ((!mad_agent->rmpp_version &&
882 (rmpp_active || message_size > sizeof(struct ib_mad))) ||
883 (!rmpp_active && message_size > sizeof(struct ib_mad)))
884 return ERR_PTR(-EINVAL);
886 size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
887 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
889 return ERR_PTR(-ENOMEM);
891 mad_send_wr = buf + size;
892 INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
893 mad_send_wr->send_buf.mad = buf;
894 mad_send_wr->send_buf.hdr_len = hdr_len;
895 mad_send_wr->send_buf.data_len = data_len;
896 mad_send_wr->pad = pad;
898 mad_send_wr->mad_agent_priv = mad_agent_priv;
899 mad_send_wr->sg_list[0].length = hdr_len;
900 mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
901 mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
902 mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
904 mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
905 mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
906 mad_send_wr->send_wr.num_sge = 2;
907 mad_send_wr->send_wr.opcode = IB_WR_SEND;
908 mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
909 mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
910 mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
911 mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
914 ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
921 mad_send_wr->send_buf.mad_agent = mad_agent;
922 atomic_inc(&mad_agent_priv->refcount);
923 return &mad_send_wr->send_buf;
925 EXPORT_SYMBOL(ib_create_send_mad);
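/*
 * Illustrative sketch (assumed example): allocating a single-packet SA
 * request on an agent obtained from ib_register_mad_agent().  The agent,
 * pkey_index and ah variables are placeholders.
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, 1, pkey_index, 0,
 *				 IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
 *				 GFP_KERNEL);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *
 *	msg->ah = ah;
 *	msg->timeout_ms = 1000;
 *	msg->retries = 3;
 *	... fill in msg->mad (MAD header plus payload) ...
 */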
927 int ib_get_mad_data_offset(u8 mgmt_class)
929 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
930 return IB_MGMT_SA_HDR;
931 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
932 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
933 (mgmt_class == IB_MGMT_CLASS_BIS))
934 return IB_MGMT_DEVICE_HDR;
935 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
936 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
937 return IB_MGMT_VENDOR_HDR;
939 return IB_MGMT_MAD_HDR;
941 EXPORT_SYMBOL(ib_get_mad_data_offset);
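/*
 * Example (illustrative): a hypothetical caller locating the class
 * payload within a received MAD using the offsets returned above.
 *
 *	void *data = (u8 *)mad + ib_get_mad_data_offset(mad->mad_hdr.mgmt_class);
 */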
943 int ib_is_mad_class_rmpp(u8 mgmt_class)
945 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
946 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
947 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
948 (mgmt_class == IB_MGMT_CLASS_BIS) ||
949 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
950 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
954 EXPORT_SYMBOL(ib_is_mad_class_rmpp);
956 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
958 struct ib_mad_send_wr_private *mad_send_wr;
959 struct list_head *list;
961 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
963 list = &mad_send_wr->cur_seg->list;
965 if (mad_send_wr->cur_seg->num < seg_num) {
966 list_for_each_entry(mad_send_wr->cur_seg, list, list)
967 if (mad_send_wr->cur_seg->num == seg_num)
969 } else if (mad_send_wr->cur_seg->num > seg_num) {
970 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
971 if (mad_send_wr->cur_seg->num == seg_num)
974 return mad_send_wr->cur_seg->data;
976 EXPORT_SYMBOL(ib_get_rmpp_segment);
978 static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
980 if (mad_send_wr->send_buf.seg_count)
981 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
982 mad_send_wr->seg_num);
984 return mad_send_wr->send_buf.mad +
985 mad_send_wr->send_buf.hdr_len;
988 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
990 struct ib_mad_agent_private *mad_agent_priv;
991 struct ib_mad_send_wr_private *mad_send_wr;
993 mad_agent_priv = container_of(send_buf->mad_agent,
994 struct ib_mad_agent_private, agent);
995 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
998 free_send_rmpp_list(mad_send_wr);
999 kfree(send_buf->mad);
1000 deref_mad_agent(mad_agent_priv);
1002 EXPORT_SYMBOL(ib_free_send_mad);
1004 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1006 struct ib_mad_qp_info *qp_info;
1007 struct list_head *list;
1008 struct ib_send_wr *bad_send_wr;
1009 struct ib_mad_agent *mad_agent;
1011 unsigned long flags;
1014 /* Set WR ID to find mad_send_wr upon completion */
1015 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1016 mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
1017 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1019 mad_agent = mad_send_wr->send_buf.mad_agent;
1020 sge = mad_send_wr->sg_list;
1021 sge[0].addr = ib_dma_map_single(mad_agent->device,
1022 mad_send_wr->send_buf.mad,
1025 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1028 mad_send_wr->header_mapping = sge[0].addr;
1030 sge[1].addr = ib_dma_map_single(mad_agent->device,
1031 ib_get_payload(mad_send_wr),
1034 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1035 ib_dma_unmap_single(mad_agent->device,
1036 mad_send_wr->header_mapping,
1037 sge[0].length, DMA_TO_DEVICE);
1040 mad_send_wr->payload_mapping = sge[1].addr;
1042 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1043 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1044 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
1046 list = &qp_info->send_queue.list;
1049 list = &qp_info->overflow_list;
1053 qp_info->send_queue.count++;
1054 list_add_tail(&mad_send_wr->mad_list.list, list);
1056 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1058 ib_dma_unmap_single(mad_agent->device,
1059 mad_send_wr->header_mapping,
1060 sge[0].length, DMA_TO_DEVICE);
1061 ib_dma_unmap_single(mad_agent->device,
1062 mad_send_wr->payload_mapping,
1063 sge[1].length, DMA_TO_DEVICE);
1069 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1070 * with the registered client
1072 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1073 struct ib_mad_send_buf **bad_send_buf)
1075 struct ib_mad_agent_private *mad_agent_priv;
1076 struct ib_mad_send_buf *next_send_buf;
1077 struct ib_mad_send_wr_private *mad_send_wr;
1078 unsigned long flags;
1081 /* Walk list of send WRs and post each on send list */
1082 for (; send_buf; send_buf = next_send_buf) {
1084 mad_send_wr = container_of(send_buf,
1085 struct ib_mad_send_wr_private,
1087 mad_agent_priv = mad_send_wr->mad_agent_priv;
1089 if (!send_buf->mad_agent->send_handler ||
1090 (send_buf->timeout_ms &&
1091 !send_buf->mad_agent->recv_handler)) {
1096 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1097 if (mad_agent_priv->agent.rmpp_version) {
1104 * Save pointer to next work request to post in case the
1105 * current one completes, and the user modifies the work
1106 * request associated with the completion
1108 next_send_buf = send_buf->next;
1109 mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
1111 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1112 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1113 ret = handle_outgoing_dr_smp(mad_agent_priv,
1115 if (ret < 0) /* error */
1117 else if (ret == 1) /* locally consumed */
1121 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1122 /* Timeout will be updated after send completes */
1123 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1124 mad_send_wr->max_retries = send_buf->retries;
1125 mad_send_wr->retries_left = send_buf->retries;
1126 send_buf->retries = 0;
1127 /* Reference for work request to QP + response */
1128 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1129 mad_send_wr->status = IB_WC_SUCCESS;
1131 /* Reference MAD agent until send completes */
1132 atomic_inc(&mad_agent_priv->refcount);
1133 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1134 list_add_tail(&mad_send_wr->agent_list,
1135 &mad_agent_priv->send_list);
1136 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1138 if (mad_agent_priv->agent.rmpp_version) {
1139 ret = ib_send_rmpp_mad(mad_send_wr);
1140 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1141 ret = ib_send_mad(mad_send_wr);
1143 ret = ib_send_mad(mad_send_wr);
1145 /* Fail send request */
1146 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1147 list_del(&mad_send_wr->agent_list);
1148 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1149 atomic_dec(&mad_agent_priv->refcount);
1156 *bad_send_buf = send_buf;
1159 EXPORT_SYMBOL(ib_post_send_mad);
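/*
 * Illustrative sketch (assumed example), continuing the
 * ib_create_send_mad() sketch above: posting the message and releasing
 * it from the client's send handler once the work completion arrives.
 *
 *	ret = ib_post_send_mad(msg, NULL);
 *	if (ret) {
 *		ib_free_send_mad(msg);
 *		return ret;
 *	}
 *
 *	static void my_send_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_send_wc *send_wc)
 *	{
 *		... inspect send_wc->status ...
 *		ib_free_send_mad(send_wc->send_buf);
 *	}
 */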
1162 * ib_free_recv_mad - Returns data buffers used to receive
1163 * a MAD back to the access layer
1165 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1167 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1168 struct ib_mad_private_header *mad_priv_hdr;
1169 struct ib_mad_private *priv;
1170 struct list_head free_list;
1172 INIT_LIST_HEAD(&free_list);
1173 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1175 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1177 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1179 mad_priv_hdr = container_of(mad_recv_wc,
1180 struct ib_mad_private_header,
1182 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1184 kmem_cache_free(ib_mad_cache, priv);
1187 EXPORT_SYMBOL(ib_free_recv_mad);
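/*
 * Illustrative sketch (assumed example): a minimal receive handler.
 * The client must hand the receive buffers back with ib_free_recv_mad()
 * once it is finished with them.
 *
 *	static void my_recv_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_recv_wc *mad_recv_wc)
 *	{
 *		struct ib_mad *mad = mad_recv_wc->recv_buf.mad;
 *
 *		... process mad (mad_recv_wc->mad_len bytes in total) ...
 *		ib_free_recv_mad(mad_recv_wc);
 *	}
 */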
1189 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1191 ib_mad_send_handler send_handler,
1192 ib_mad_recv_handler recv_handler,
1195 return ERR_PTR(-EINVAL); /* XXX: for now */
1197 EXPORT_SYMBOL(ib_redirect_mad_qp);
1199 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1202 printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
1205 EXPORT_SYMBOL(ib_process_mad_wc);
1207 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1208 struct ib_mad_reg_req *mad_reg_req)
1212 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1213 if ((*method)->agent[i]) {
1214 printk(KERN_ERR PFX "Method %d already in use\n", i);
1221 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1223 /* Allocate management method table */
1224 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1226 printk(KERN_ERR PFX "No memory for "
1227 "ib_mad_mgmt_method_table\n");
1235 * Check to see if there are any methods still in use
1237 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1241 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1242 if (method->agent[i])
1248 * Check to see if there are any method tables for this class still in use
1250 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1254 for (i = 0; i < MAX_MGMT_CLASS; i++)
1255 if (class->method_table[i])
1260 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1264 for (i = 0; i < MAX_MGMT_OUI; i++)
1265 if (vendor_class->method_table[i])
1270 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1275 for (i = 0; i < MAX_MGMT_OUI; i++)
1276 /* Is there a matching OUI for this vendor class? */
1277 if (!memcmp(vendor_class->oui[i], oui, 3))
1283 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1287 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1288 if (vendor->vendor_class[i])
1294 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1295 struct ib_mad_agent_private *agent)
1299 /* Remove any methods for this mad agent */
1300 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1301 if (method->agent[i] == agent) {
1302 method->agent[i] = NULL;
1307 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1308 struct ib_mad_agent_private *agent_priv,
1311 struct ib_mad_port_private *port_priv;
1312 struct ib_mad_mgmt_class_table **class;
1313 struct ib_mad_mgmt_method_table **method;
1316 port_priv = agent_priv->qp_info->port_priv;
1317 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1319 /* Allocate management class table for "new" class version */
1320 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1322 printk(KERN_ERR PFX "No memory for "
1323 "ib_mad_mgmt_class_table\n");
1328 /* Allocate method table for this management class */
1329 method = &(*class)->method_table[mgmt_class];
1330 if ((ret = allocate_method_table(method)))
1333 method = &(*class)->method_table[mgmt_class];
1335 /* Allocate method table for this management class */
1336 if ((ret = allocate_method_table(method)))
1341 /* Now, make sure methods are not already in use */
1342 if (method_in_use(method, mad_reg_req))
1345 /* Finally, add in methods being registered */
1346 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1347 (*method)->agent[i] = agent_priv;
1352 /* Remove any methods for this mad agent */
1353 remove_methods_mad_agent(*method, agent_priv);
1354 /* Now, check to see if there are any methods in use */
1355 if (!check_method_table(*method)) {
1356 /* If not, release management method table */
1369 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1370 struct ib_mad_agent_private *agent_priv)
1372 struct ib_mad_port_private *port_priv;
1373 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1374 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1375 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1376 struct ib_mad_mgmt_method_table **method;
1377 int i, ret = -ENOMEM;
1380 /* "New" vendor (with OUI) class */
1381 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1382 port_priv = agent_priv->qp_info->port_priv;
1383 vendor_table = &port_priv->version[
1384 mad_reg_req->mgmt_class_version].vendor;
1385 if (!*vendor_table) {
1386 /* Allocate mgmt vendor class table for "new" class version */
1387 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1389 printk(KERN_ERR PFX "No memory for "
1390 "ib_mad_mgmt_vendor_class_table\n");
1394 *vendor_table = vendor;
1396 if (!(*vendor_table)->vendor_class[vclass]) {
1397 /* Allocate table for this management vendor class */
1398 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1399 if (!vendor_class) {
1400 printk(KERN_ERR PFX "No memory for "
1401 "ib_mad_mgmt_vendor_class\n");
1405 (*vendor_table)->vendor_class[vclass] = vendor_class;
1407 for (i = 0; i < MAX_MGMT_OUI; i++) {
1408 /* Is there a matching OUI for this vendor class? */
1409 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1410 mad_reg_req->oui, 3)) {
1411 method = &(*vendor_table)->vendor_class[
1412 vclass]->method_table[i];
1417 for (i = 0; i < MAX_MGMT_OUI; i++) {
1418 /* Is an OUI slot available? */
1419 if (!is_vendor_oui((*vendor_table)->vendor_class[
1421 method = &(*vendor_table)->vendor_class[
1422 vclass]->method_table[i];
1424 /* Allocate method table for this OUI */
1425 if ((ret = allocate_method_table(method)))
1427 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1428 mad_reg_req->oui, 3);
1432 printk(KERN_ERR PFX "All OUI slots in use\n");
1436 /* Now, make sure methods are not already in use */
1437 if (method_in_use(method, mad_reg_req))
1440 /* Finally, add in methods being registered */
1441 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1442 (*method)->agent[i] = agent_priv;
1447 /* Remove any methods for this mad agent */
1448 remove_methods_mad_agent(*method, agent_priv);
1449 /* Now, check to see if there are any methods in use */
1450 if (!check_method_table(*method)) {
1451 /* If not, release management method table */
1458 (*vendor_table)->vendor_class[vclass] = NULL;
1459 kfree(vendor_class);
1463 *vendor_table = NULL;
1470 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1472 struct ib_mad_port_private *port_priv;
1473 struct ib_mad_mgmt_class_table *class;
1474 struct ib_mad_mgmt_method_table *method;
1475 struct ib_mad_mgmt_vendor_class_table *vendor;
1476 struct ib_mad_mgmt_vendor_class *vendor_class;
1481 * Was a MAD registration request supplied
1482 * with the original registration?
1484 if (!agent_priv->reg_req) {
1488 port_priv = agent_priv->qp_info->port_priv;
1489 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1490 class = port_priv->version[
1491 agent_priv->reg_req->mgmt_class_version].class;
1495 method = class->method_table[mgmt_class];
1497 /* Remove any methods for this mad agent */
1498 remove_methods_mad_agent(method, agent_priv);
1499 /* Now, check to see if there are any methods still in use */
1500 if (!check_method_table(method)) {
1501 /* If not, release management method table */
1503 class->method_table[mgmt_class] = NULL;
1504 /* Any management classes left ? */
1505 if (!check_class_table(class)) {
1506 /* If not, release management class table */
1509 agent_priv->reg_req->
1510 mgmt_class_version].class = NULL;
1516 if (!is_vendor_class(mgmt_class))
1519 /* normalize mgmt_class to vendor range 2 */
1520 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1521 vendor = port_priv->version[
1522 agent_priv->reg_req->mgmt_class_version].vendor;
1527 vendor_class = vendor->vendor_class[mgmt_class];
1529 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1532 method = vendor_class->method_table[index];
1534 /* Remove any methods for this mad agent */
1535 remove_methods_mad_agent(method, agent_priv);
1537 * Now, check to see if there are
1538 * any methods still in use
1540 if (!check_method_table(method)) {
1541 /* If not, release management method table */
1543 vendor_class->method_table[index] = NULL;
1544 memset(vendor_class->oui[index], 0, 3);
1545 /* Any OUIs left ? */
1546 if (!check_vendor_class(vendor_class)) {
1547 /* If not, release vendor class table */
1548 kfree(vendor_class);
1549 vendor->vendor_class[mgmt_class] = NULL;
1550 /* Any other vendor classes left ? */
1551 if (!check_vendor_table(vendor)) {
1554 agent_priv->reg_req->
1555 mgmt_class_version].
1567 static struct ib_mad_agent_private *
1568 find_mad_agent(struct ib_mad_port_private *port_priv,
1571 struct ib_mad_agent_private *mad_agent = NULL;
1572 unsigned long flags;
1574 spin_lock_irqsave(&port_priv->reg_lock, flags);
1575 if (ib_response_mad(mad)) {
1577 struct ib_mad_agent_private *entry;
1580 * Routing is based on high 32 bits of transaction ID
1583 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
1584 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1585 if (entry->agent.hi_tid == hi_tid) {
1591 struct ib_mad_mgmt_class_table *class;
1592 struct ib_mad_mgmt_method_table *method;
1593 struct ib_mad_mgmt_vendor_class_table *vendor;
1594 struct ib_mad_mgmt_vendor_class *vendor_class;
1595 struct ib_vendor_mad *vendor_mad;
1599 * Routing is based on version, class, and method
1600 * For "newer" vendor MADs, also based on OUI
1602 if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1604 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1605 class = port_priv->version[
1606 mad->mad_hdr.class_version].class;
1609 if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >=
1610 IB_MGMT_MAX_METHODS)
1612 method = class->method_table[convert_mgmt_class(
1613 mad->mad_hdr.mgmt_class)];
1615 mad_agent = method->agent[mad->mad_hdr.method &
1616 ~IB_MGMT_METHOD_RESP];
1618 vendor = port_priv->version[
1619 mad->mad_hdr.class_version].vendor;
1622 vendor_class = vendor->vendor_class[vendor_class_index(
1623 mad->mad_hdr.mgmt_class)];
1626 /* Find matching OUI */
1627 vendor_mad = (struct ib_vendor_mad *)mad;
1628 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1631 method = vendor_class->method_table[index];
1633 mad_agent = method->agent[mad->mad_hdr.method &
1634 ~IB_MGMT_METHOD_RESP];
1640 if (mad_agent->agent.recv_handler)
1641 atomic_inc(&mad_agent->refcount);
1643 printk(KERN_NOTICE PFX "No receive handler for client "
1645 &mad_agent->agent, port_priv->port_num);
1650 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1655 static int validate_mad(struct ib_mad *mad, u32 qp_num)
1659 /* Make sure MAD base version is understood */
1660 if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1661 printk(KERN_ERR PFX "MAD received with unsupported base "
1662 "version %d\n", mad->mad_hdr.base_version);
1666 /* Filter SMI packets sent to other than QP0 */
1667 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1668 (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1672 /* Filter GSI packets sent to QP0 */
1681 static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1682 struct ib_mad_hdr *mad_hdr)
1684 struct ib_rmpp_mad *rmpp_mad;
1686 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1687 return !mad_agent_priv->agent.rmpp_version ||
1688 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1689 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1690 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1693 static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
1694 struct ib_mad_recv_wc *rwc)
1696 return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
1697 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1700 static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
1701 struct ib_mad_send_wr_private *wr,
1702 struct ib_mad_recv_wc *rwc )
1704 struct ib_ah_attr attr;
1705 u8 send_resp, rcv_resp;
1707 struct ib_device *device = mad_agent_priv->agent.device;
1708 u8 port_num = mad_agent_priv->agent.port_num;
1711 send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
1712 rcv_resp = ib_response_mad(rwc->recv_buf.mad);
1714 if (send_resp == rcv_resp)
1715 /* both requests, or both responses. GIDs different */
1718 if (ib_query_ah(wr->send_buf.ah, &attr))
1719 /* Assume not equal, to avoid false positives. */
1722 if (!!(attr.ah_flags & IB_AH_GRH) !=
1723 !!(rwc->wc->wc_flags & IB_WC_GRH))
1724 /* one has GID, other does not. Assume different */
1727 if (!send_resp && rcv_resp) {
1728 /* is request/response. */
1729 if (!(attr.ah_flags & IB_AH_GRH)) {
1730 if (ib_get_cached_lmc(device, port_num, &lmc))
1732 return (!lmc || !((attr.src_path_bits ^
1733 rwc->wc->dlid_path_bits) &
1736 if (ib_get_cached_gid(device, port_num,
1737 attr.grh.sgid_index, &sgid))
1739 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1744 if (!(attr.ah_flags & IB_AH_GRH))
1745 return attr.dlid == rwc->wc->slid;
1747 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1751 static inline int is_direct(u8 class)
1753 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1756 struct ib_mad_send_wr_private*
1757 ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
1758 struct ib_mad_recv_wc *wc)
1760 struct ib_mad_send_wr_private *wr;
1763 mad = (struct ib_mad *)wc->recv_buf.mad;
1765 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1766 if ((wr->tid == mad->mad_hdr.tid) &&
1767 rcv_has_same_class(wr, wc) &&
1769 * Don't check GID for direct routed MADs.
1770 * These might have permissive LIDs.
1772 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1773 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1774 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1778 * It's possible to receive the response before we've
1779 * been notified that the send has completed
1781 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1782 if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1783 wr->tid == mad->mad_hdr.tid &&
1785 rcv_has_same_class(wr, wc) &&
1787 * Don't check GID for direct routed MADs.
1788 * These might have permissive LIDs.
1790 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1791 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1792 /* Verify request has not been canceled */
1793 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1798 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1800 mad_send_wr->timeout = 0;
1801 if (mad_send_wr->refcount == 1)
1802 list_move_tail(&mad_send_wr->agent_list,
1803 &mad_send_wr->mad_agent_priv->done_list);
1806 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1807 struct ib_mad_recv_wc *mad_recv_wc)
1809 struct ib_mad_send_wr_private *mad_send_wr;
1810 struct ib_mad_send_wc mad_send_wc;
1811 unsigned long flags;
1813 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1814 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1815 if (mad_agent_priv->agent.rmpp_version) {
1816 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1819 deref_mad_agent(mad_agent_priv);
1824 /* Complete corresponding request */
1825 if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
1826 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1827 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1829 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1830 ib_free_recv_mad(mad_recv_wc);
1831 deref_mad_agent(mad_agent_priv);
1834 ib_mark_mad_done(mad_send_wr);
1835 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1837 /* Defined behavior is to complete response before request */
1838 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1839 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1841 atomic_dec(&mad_agent_priv->refcount);
1843 mad_send_wc.status = IB_WC_SUCCESS;
1844 mad_send_wc.vendor_err = 0;
1845 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1846 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1848 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1850 deref_mad_agent(mad_agent_priv);
1854 static bool generate_unmatched_resp(struct ib_mad_private *recv,
1855 struct ib_mad_private *response)
1857 if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET ||
1858 recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) {
1859 memcpy(response, recv, sizeof *response);
1860 response->header.recv_wc.wc = &response->header.wc;
1861 response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1862 response->header.recv_wc.recv_buf.grh = &response->grh;
1863 response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
1864 response->mad.mad.mad_hdr.status =
1865 cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
1866 if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
1867 response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION;
1874 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1877 struct ib_mad_qp_info *qp_info;
1878 struct ib_mad_private_header *mad_priv_hdr;
1879 struct ib_mad_private *recv, *response = NULL;
1880 struct ib_mad_list_head *mad_list;
1881 struct ib_mad_agent_private *mad_agent;
1883 int ret = IB_MAD_RESULT_SUCCESS;
1885 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1886 qp_info = mad_list->mad_queue->qp_info;
1887 dequeue_mad(mad_list);
1889 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1891 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1892 ib_dma_unmap_single(port_priv->device,
1893 recv->header.mapping,
1894 sizeof(struct ib_mad_private) -
1895 sizeof(struct ib_mad_private_header),
1898 /* Setup MAD receive work completion from "normal" work completion */
1899 recv->header.wc = *wc;
1900 recv->header.recv_wc.wc = &recv->header.wc;
1901 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1902 recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1903 recv->header.recv_wc.recv_buf.grh = &recv->grh;
1905 if (atomic_read(&qp_info->snoop_count))
1906 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1909 if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1912 response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1914 printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
1915 "for response buffer\n");
1919 if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
1920 port_num = wc->port_num;
1922 port_num = port_priv->port_num;
1924 if (recv->mad.mad.mad_hdr.mgmt_class ==
1925 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1926 enum smi_forward_action retsmi;
1928 if (smi_handle_dr_smp_recv(&recv->mad.smp,
1929 port_priv->device->node_type,
1931 port_priv->device->phys_port_cnt) ==
1935 retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
1936 if (retsmi == IB_SMI_LOCAL)
1939 if (retsmi == IB_SMI_SEND) { /* don't forward */
1940 if (smi_handle_dr_smp_send(&recv->mad.smp,
1941 port_priv->device->node_type,
1942 port_num) == IB_SMI_DISCARD)
1945 if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
1947 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
1948 /* forward case for switches */
1949 memcpy(response, recv, sizeof(*response));
1950 response->header.recv_wc.wc = &response->header.wc;
1951 response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1952 response->header.recv_wc.recv_buf.grh = &response->grh;
1954 agent_send_response(&response->mad.mad,
1957 smi_get_fwd_port(&recv->mad.smp),
1958 qp_info->qp->qp_num);
1965 /* Give driver "right of first refusal" on incoming MAD */
1966 if (port_priv->device->process_mad) {
1967 ret = port_priv->device->process_mad(port_priv->device, 0,
1968 port_priv->port_num,
1971 &response->mad.mad);
1972 if (ret & IB_MAD_RESULT_SUCCESS) {
1973 if (ret & IB_MAD_RESULT_CONSUMED)
1975 if (ret & IB_MAD_RESULT_REPLY) {
1976 agent_send_response(&response->mad.mad,
1980 qp_info->qp->qp_num);
1986 mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1988 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1990 * recv is freed up in error cases in ib_mad_complete_recv
1991 * or via recv_handler in ib_mad_complete_recv()
1994 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
1995 generate_unmatched_resp(recv, response)) {
1996 agent_send_response(&response->mad.mad, &recv->grh, wc,
1997 port_priv->device, port_num, qp_info->qp->qp_num);
2001 /* Post another receive request for this QP */
2003 ib_mad_post_receive_mads(qp_info, response);
2005 kmem_cache_free(ib_mad_cache, recv);
2007 ib_mad_post_receive_mads(qp_info, recv);
2010 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2012 struct ib_mad_send_wr_private *mad_send_wr;
2013 unsigned long delay;
2015 if (list_empty(&mad_agent_priv->wait_list)) {
2016 cancel_delayed_work(&mad_agent_priv->timed_work);
2018 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2019 struct ib_mad_send_wr_private,
2022 if (time_after(mad_agent_priv->timeout,
2023 mad_send_wr->timeout)) {
2024 mad_agent_priv->timeout = mad_send_wr->timeout;
2025 delay = mad_send_wr->timeout - jiffies;
2026 if ((long)delay <= 0)
2028 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2029 &mad_agent_priv->timed_work, delay);
2034 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2036 struct ib_mad_agent_private *mad_agent_priv;
2037 struct ib_mad_send_wr_private *temp_mad_send_wr;
2038 struct list_head *list_item;
2039 unsigned long delay;
2041 mad_agent_priv = mad_send_wr->mad_agent_priv;
2042 list_del(&mad_send_wr->agent_list);
2044 delay = mad_send_wr->timeout;
2045 mad_send_wr->timeout += jiffies;
2048 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2049 temp_mad_send_wr = list_entry(list_item,
2050 struct ib_mad_send_wr_private,
2052 if (time_after(mad_send_wr->timeout,
2053 temp_mad_send_wr->timeout))
2058 list_item = &mad_agent_priv->wait_list;
2059 list_add(&mad_send_wr->agent_list, list_item);
2061 /* Reschedule a work item if we have a shorter timeout */
2062 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2063 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2064 &mad_agent_priv->timed_work, delay);
2067 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2070 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2071 wait_for_response(mad_send_wr);
2075 * Process a send work completion
2077 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2078 struct ib_mad_send_wc *mad_send_wc)
2080 struct ib_mad_agent_private *mad_agent_priv;
2081 unsigned long flags;
2084 mad_agent_priv = mad_send_wr->mad_agent_priv;
2085 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2086 if (mad_agent_priv->agent.rmpp_version) {
2087 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2088 if (ret == IB_RMPP_RESULT_CONSUMED)
2091 ret = IB_RMPP_RESULT_UNHANDLED;
2093 if (mad_send_wc->status != IB_WC_SUCCESS &&
2094 mad_send_wr->status == IB_WC_SUCCESS) {
2095 mad_send_wr->status = mad_send_wc->status;
2096 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2099 if (--mad_send_wr->refcount > 0) {
2100 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2101 mad_send_wr->status == IB_WC_SUCCESS) {
2102 wait_for_response(mad_send_wr);
2107 /* Remove send from MAD agent and notify client of completion */
2108 list_del(&mad_send_wr->agent_list);
2109 adjust_timeout(mad_agent_priv);
2110 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2112 if (mad_send_wr->status != IB_WC_SUCCESS )
2113 mad_send_wc->status = mad_send_wr->status;
2114 if (ret == IB_RMPP_RESULT_INTERNAL)
2115 ib_rmpp_send_handler(mad_send_wc);
2117 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2120 /* Release reference on agent taken when sending */
2121 deref_mad_agent(mad_agent_priv);
2124 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2127 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2130 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2131 struct ib_mad_list_head *mad_list;
2132 struct ib_mad_qp_info *qp_info;
2133 struct ib_mad_queue *send_queue;
2134 struct ib_send_wr *bad_send_wr;
2135 struct ib_mad_send_wc mad_send_wc;
2136 unsigned long flags;
2139 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2140 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2142 send_queue = mad_list->mad_queue;
2143 qp_info = send_queue->qp_info;
2146 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2147 mad_send_wr->header_mapping,
2148 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2149 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2150 mad_send_wr->payload_mapping,
2151 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2152 queued_send_wr = NULL;
2153 spin_lock_irqsave(&send_queue->lock, flags);
2154 list_del(&mad_list->list);
2156 /* Move queued send to the send queue */
2157 if (send_queue->count-- > send_queue->max_active) {
2158 mad_list = container_of(qp_info->overflow_list.next,
2159 struct ib_mad_list_head, list);
2160 queued_send_wr = container_of(mad_list,
2161 struct ib_mad_send_wr_private,
2163 list_move_tail(&mad_list->list, &send_queue->list);
2165 spin_unlock_irqrestore(&send_queue->lock, flags);
2167 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2168 mad_send_wc.status = wc->status;
2169 mad_send_wc.vendor_err = wc->vendor_err;
2170 if (atomic_read(&qp_info->snoop_count))
2171 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2172 IB_MAD_SNOOP_SEND_COMPLETIONS);
2173 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2175 if (queued_send_wr) {
2176 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
2177 &bad_send_wr);
2178 if (ret) {
2179 printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
2180 mad_send_wr = queued_send_wr;
2181 wc->status = IB_WC_LOC_QP_OP_ERR;
2182 goto retry;
2183 }
2184 }
2187 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2189 struct ib_mad_send_wr_private *mad_send_wr;
2190 struct ib_mad_list_head *mad_list;
2191 unsigned long flags;
2193 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2194 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2195 mad_send_wr = container_of(mad_list,
2196 struct ib_mad_send_wr_private,
2197 mad_list);
2198 mad_send_wr->retry = 1;
2200 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2203 static void mad_error_handler(struct ib_mad_port_private *port_priv,
2204 struct ib_wc *wc)
2206 struct ib_mad_list_head *mad_list;
2207 struct ib_mad_qp_info *qp_info;
2208 struct ib_mad_send_wr_private *mad_send_wr;
2209 int ret;
2211 /* Determine if failure was a send or receive */
2212 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2213 qp_info = mad_list->mad_queue->qp_info;
2214 if (mad_list->mad_queue == &qp_info->recv_queue)
2215 /*
2216 * Receive errors indicate that the QP has entered the error
2217 * state - error handling/shutdown code will cleanup
2218 */
2219 return;
2221 /*
2222 * Send errors will transition the QP to SQE - move
2223 * QP to RTS and repost flushed work requests
2224 */
2225 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2227 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2228 if (mad_send_wr->retry) {
2230 struct ib_send_wr *bad_send_wr;
2232 mad_send_wr->retry = 0;
2233 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2234 &bad_send_wr);
2235 if (ret)
2236 ib_mad_send_done_handler(port_priv, wc);
2237 } else
2238 ib_mad_send_done_handler(port_priv, wc);
2240 struct ib_qp_attr *attr;
2242 /* Transition QP to RTS and fail offending send */
2243 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2244 if (attr) {
2245 attr->qp_state = IB_QPS_RTS;
2246 attr->cur_qp_state = IB_QPS_SQE;
2247 ret = ib_modify_qp(qp_info->qp, attr,
2248 IB_QP_STATE | IB_QP_CUR_STATE);
2249 kfree(attr);
2250 if (ret)
2251 printk(KERN_ERR PFX "mad_error_handler - "
2252 "ib_modify_qp to RTS : %d\n", ret);
2253 else
2254 mark_sends_for_retry(qp_info);
2255 }
2256 ib_mad_send_done_handler(port_priv, wc);
2261 * IB MAD completion callback
2263 static void ib_mad_completion_handler(struct work_struct *work)
2265 struct ib_mad_port_private *port_priv;
2266 struct ib_wc wc;
2268 port_priv = container_of(work, struct ib_mad_port_private, work);
2269 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2271 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2272 if (wc.status == IB_WC_SUCCESS) {
2273 switch (wc.opcode) {
2274 case IB_WC_SEND:
2275 ib_mad_send_done_handler(port_priv, &wc);
2276 break;
2277 case IB_WC_RECV:
2278 ib_mad_recv_done_handler(port_priv, &wc);
2279 break;
2283 }
2284 } else
2285 mad_error_handler(port_priv, &wc);
2286 }
2287 }
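/*
 * Editor's illustration (not part of the original source): the handler above
 * re-arms CQ notification and then drains with ib_poll_cq().  A common
 * variant of the same pattern closes the notify/poll race explicitly with
 * IB_CQ_REPORT_MISSED_EVENTS; the sketch below is generic and hypothetical
 * (example_drain_cq/example_handle_wc are not part of this driver).
 */
static void example_drain_cq(struct ib_cq *cq,
			     void (*example_handle_wc)(struct ib_wc *wc))
{
	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) == 1)
			example_handle_wc(&wc);
		/*
		 * Re-arm; a positive return means completions may have
		 * arrived after the last poll, so poll again.
		 */
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
}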
2289 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2291 unsigned long flags;
2292 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2293 struct ib_mad_send_wc mad_send_wc;
2294 struct list_head cancel_list;
2296 INIT_LIST_HEAD(&cancel_list);
2298 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2299 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2300 &mad_agent_priv->send_list, agent_list) {
2301 if (mad_send_wr->status == IB_WC_SUCCESS) {
2302 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2303 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2307 /* Empty wait list to prevent receives from finding a request */
2308 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2309 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2311 /* Report all cancelled requests */
2312 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2313 mad_send_wc.vendor_err = 0;
2315 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2316 &cancel_list, agent_list) {
2317 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2318 list_del(&mad_send_wr->agent_list);
2319 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2320 &mad_send_wc);
2321 atomic_dec(&mad_agent_priv->refcount);
2325 static struct ib_mad_send_wr_private*
2326 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2327 struct ib_mad_send_buf *send_buf)
2329 struct ib_mad_send_wr_private *mad_send_wr;
2331 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2332 agent_list) {
2333 if (&mad_send_wr->send_buf == send_buf)
2334 return mad_send_wr;
2335 }
2337 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2338 agent_list) {
2339 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
2340 &mad_send_wr->send_buf == send_buf)
2341 return mad_send_wr;
2342 }
2343 return NULL;
2346 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2347 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2349 struct ib_mad_agent_private *mad_agent_priv;
2350 struct ib_mad_send_wr_private *mad_send_wr;
2351 unsigned long flags;
2352 int active;
2354 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2356 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2357 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2358 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2359 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2360 return -EINVAL;
2361 }
2363 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2364 if (!timeout_ms) {
2365 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2366 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2367 }
2369 mad_send_wr->send_buf.timeout_ms = timeout_ms;
2370 if (active)
2371 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2372 else
2373 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2375 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2376 return 0;
2377 }
2378 EXPORT_SYMBOL(ib_modify_mad);
2380 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2381 struct ib_mad_send_buf *send_buf)
2383 ib_modify_mad(mad_agent, send_buf, 0);
2385 EXPORT_SYMBOL(ib_cancel_mad);
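/*
 * Editor's illustration (not part of the original source): how a MAD client
 * might use the two entry points above.  "agent" and "send_buf" are assumed
 * to come from earlier ib_register_mad_agent()/ib_create_send_mad() calls;
 * the function name and the 5000 ms value are hypothetical.
 */
static void example_adjust_outstanding_mad(struct ib_mad_agent *agent,
					   struct ib_mad_send_buf *send_buf,
					   int still_interested)
{
	if (still_interested) {
		/* Extend the response timeout to another 5 seconds */
		if (ib_modify_mad(agent, send_buf, 5000))
			pr_debug("request already completed or not found\n");
	} else {
		/*
		 * Shorthand for ib_modify_mad(agent, send_buf, 0); the send
		 * still completes through send_handler with IB_WC_WR_FLUSH_ERR.
		 */
		ib_cancel_mad(agent, send_buf);
	}
}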
2387 static void local_completions(struct work_struct *work)
2389 struct ib_mad_agent_private *mad_agent_priv;
2390 struct ib_mad_local_private *local;
2391 struct ib_mad_agent_private *recv_mad_agent;
2392 unsigned long flags;
2393 int free_mad;
2394 struct ib_wc wc;
2395 struct ib_mad_send_wc mad_send_wc;
2397 mad_agent_priv =
2398 container_of(work, struct ib_mad_agent_private, local_work);
2400 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2401 while (!list_empty(&mad_agent_priv->local_list)) {
2402 local = list_entry(mad_agent_priv->local_list.next,
2403 struct ib_mad_local_private,
2405 list_del(&local->completion_list);
2406 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2408 if (local->mad_priv) {
2409 recv_mad_agent = local->recv_mad_agent;
2410 if (!recv_mad_agent) {
2411 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
2413 goto local_send_completion;
2416 /*
2417 * Defined behavior is to complete response
2418 * before request
2419 */
2420 build_smp_wc(recv_mad_agent->agent.qp,
2421 (unsigned long) local->mad_send_wr,
2422 be16_to_cpu(IB_LID_PERMISSIVE),
2423 0, recv_mad_agent->agent.port_num, &wc);
2425 local->mad_priv->header.recv_wc.wc = &wc;
2426 local->mad_priv->header.recv_wc.mad_len =
2427 sizeof(struct ib_mad);
2428 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2429 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2430 &local->mad_priv->header.recv_wc.rmpp_list);
2431 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2432 local->mad_priv->header.recv_wc.recv_buf.mad =
2433 &local->mad_priv->mad.mad;
2434 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2435 snoop_recv(recv_mad_agent->qp_info,
2436 &local->mad_priv->header.recv_wc,
2437 IB_MAD_SNOOP_RECVS);
2438 recv_mad_agent->agent.recv_handler(
2439 &recv_mad_agent->agent,
2440 &local->mad_priv->header.recv_wc);
2441 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2442 atomic_dec(&recv_mad_agent->refcount);
2443 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2446 local_send_completion:
2448 mad_send_wc.status = IB_WC_SUCCESS;
2449 mad_send_wc.vendor_err = 0;
2450 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2451 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2452 snoop_send(mad_agent_priv->qp_info,
2453 &local->mad_send_wr->send_buf,
2454 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2455 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2456 &mad_send_wc);
2458 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2459 atomic_dec(&mad_agent_priv->refcount);
2460 if (free_mad)
2461 kmem_cache_free(ib_mad_cache, local->mad_priv);
2462 kfree(local);
2463 }
2464 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2467 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2468 {
2469 int ret;
2471 if (!mad_send_wr->retries_left)
2472 return -ETIMEDOUT;
2474 mad_send_wr->retries_left--;
2475 mad_send_wr->send_buf.retries++;
2477 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2479 if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
2480 ret = ib_retry_rmpp(mad_send_wr);
2481 switch (ret) {
2482 case IB_RMPP_RESULT_UNHANDLED:
2483 ret = ib_send_mad(mad_send_wr);
2484 break;
2485 case IB_RMPP_RESULT_CONSUMED:
2486 ret = 0;
2487 break;
2491 }
2492 } else
2493 ret = ib_send_mad(mad_send_wr);
2495 if (!ret) {
2496 mad_send_wr->refcount++;
2497 list_add_tail(&mad_send_wr->agent_list,
2498 &mad_send_wr->mad_agent_priv->send_list);
2499 }
2500 return ret;
2503 static void timeout_sends(struct work_struct *work)
2505 struct ib_mad_agent_private *mad_agent_priv;
2506 struct ib_mad_send_wr_private *mad_send_wr;
2507 struct ib_mad_send_wc mad_send_wc;
2508 unsigned long flags, delay;
2510 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2511 timed_work.work);
2512 mad_send_wc.vendor_err = 0;
2514 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2515 while (!list_empty(&mad_agent_priv->wait_list)) {
2516 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2517 struct ib_mad_send_wr_private,
2520 if (time_after(mad_send_wr->timeout, jiffies)) {
2521 delay = mad_send_wr->timeout - jiffies;
2522 if ((long)delay <= 0)
2523 delay = 1;
2524 queue_delayed_work(mad_agent_priv->qp_info->
2525 port_priv->wq,
2526 &mad_agent_priv->timed_work, delay);
2527 break;
2528 }
2530 list_del(&mad_send_wr->agent_list);
2531 if (mad_send_wr->status == IB_WC_SUCCESS &&
2532 !retry_send(mad_send_wr))
2533 continue;
2535 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2537 if (mad_send_wr->status == IB_WC_SUCCESS)
2538 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2539 else
2540 mad_send_wc.status = mad_send_wr->status;
2541 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2542 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2543 &mad_send_wc);
2545 atomic_dec(&mad_agent_priv->refcount);
2546 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2548 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2549 }
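/*
 * Editor's illustration (not part of the original source): the timeout and
 * retry machinery above is driven by two fields the client sets before
 * posting.  The values below are arbitrary examples, not recommendations.
 */
static int example_post_request(struct ib_mad_send_buf *send_buf)
{
	send_buf->timeout_ms = 2000;	/* deadline armed by wait_for_response() */
	send_buf->retries = 2;		/* budget consumed by retry_send() */

	/* On final expiry the send_handler sees IB_WC_RESP_TIMEOUT_ERR */
	return ib_post_send_mad(send_buf, NULL);
}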
2551 static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
2553 struct ib_mad_port_private *port_priv = cq->cq_context;
2554 unsigned long flags;
2556 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2557 if (!list_empty(&port_priv->port_list))
2558 queue_work(port_priv->wq, &port_priv->work);
2559 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2563 * Allocate receive MADs and post receive WRs for them
2565 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2566 struct ib_mad_private *mad)
2568 unsigned long flags;
2569 int post, ret;
2570 struct ib_mad_private *mad_priv;
2571 struct ib_sge sg_list;
2572 struct ib_recv_wr recv_wr, *bad_recv_wr;
2573 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2575 /* Initialize common scatter list fields */
2576 sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2577 sg_list.lkey = (*qp_info->port_priv->mr).lkey;
2579 /* Initialize common receive WR fields */
2580 recv_wr.next = NULL;
2581 recv_wr.sg_list = &sg_list;
2582 recv_wr.num_sge = 1;
2585 /* Allocate and map receive buffer */
2590 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
2592 printk(KERN_ERR PFX "No memory for receive buffer\n");
2597 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2598 &mad_priv->grh,
2599 sizeof *mad_priv -
2600 sizeof mad_priv->header,
2601 DMA_FROM_DEVICE);
2602 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2603 sg_list.addr))) {
2607 mad_priv->header.mapping = sg_list.addr;
2608 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2609 mad_priv->header.mad_list.mad_queue = recv_queue;
2611 /* Post receive WR */
2612 spin_lock_irqsave(&recv_queue->lock, flags);
2613 post = (++recv_queue->count < recv_queue->max_active);
2614 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2615 spin_unlock_irqrestore(&recv_queue->lock, flags);
2616 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2617 if (ret) {
2618 spin_lock_irqsave(&recv_queue->lock, flags);
2619 list_del(&mad_priv->header.mad_list.list);
2620 recv_queue->count--;
2621 spin_unlock_irqrestore(&recv_queue->lock, flags);
2622 ib_dma_unmap_single(qp_info->port_priv->device,
2623 mad_priv->header.mapping,
2624 sizeof *mad_priv -
2625 sizeof mad_priv->header,
2626 DMA_FROM_DEVICE);
2627 kmem_cache_free(ib_mad_cache, mad_priv);
2628 printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
2637 * Return all the posted receive MADs
2639 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2641 struct ib_mad_private_header *mad_priv_hdr;
2642 struct ib_mad_private *recv;
2643 struct ib_mad_list_head *mad_list;
2648 while (!list_empty(&qp_info->recv_queue.list)) {
2650 mad_list = list_entry(qp_info->recv_queue.list.next,
2651 struct ib_mad_list_head, list);
2652 mad_priv_hdr = container_of(mad_list,
2653 struct ib_mad_private_header,
2654 mad_list);
2655 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2656 header);
2658 /* Remove from posted receive MAD list */
2659 list_del(&mad_list->list);
2661 ib_dma_unmap_single(qp_info->port_priv->device,
2662 recv->header.mapping,
2663 sizeof(struct ib_mad_private) -
2664 sizeof(struct ib_mad_private_header),
2665 DMA_FROM_DEVICE);
2666 kmem_cache_free(ib_mad_cache, recv);
2669 qp_info->recv_queue.count = 0;
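/*
 * Editor's illustration (not part of the original source): the send, receive
 * and cleanup paths above all climb from the wr_id back to the owning
 * structure with container_of().  The same chain, spelled out for a receive
 * completion; example_recv_from_wc() is hypothetical.
 */
static struct ib_mad_private *example_recv_from_wc(struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list;
	struct ib_mad_private_header *mad_priv_hdr;

	/* wr_id was set to &mad_priv->header.mad_list when the WR was posted */
	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	return container_of(mad_priv_hdr, struct ib_mad_private, header);
}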
2675 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2678 struct ib_qp_attr *attr;
2682 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2684 printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
2688 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
2689 IB_DEFAULT_PKEY_FULL, &pkey_index);
2690 if (ret)
2691 pkey_index = 0;
2693 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2694 qp = port_priv->qp_info[i].qp;
2699 * PKey index for QP1 is irrelevant but
2700 * one is needed for the Reset to Init transition
2702 attr->qp_state = IB_QPS_INIT;
2703 attr->pkey_index = pkey_index;
2704 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2705 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2706 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2707 if (ret) {
2708 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2709 "INIT: %d\n", i, ret);
2713 attr->qp_state = IB_QPS_RTR;
2714 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2715 if (ret) {
2716 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2717 "RTR: %d\n", i, ret);
2721 attr->qp_state = IB_QPS_RTS;
2722 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2723 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2724 if (ret) {
2725 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2726 "RTS: %d\n", i, ret);
2731 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2732 if (ret) {
2733 printk(KERN_ERR PFX "Failed to request completion "
2734 "notification: %d\n", ret);
2738 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2739 if (!port_priv->qp_info[i].qp)
2742 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2743 if (ret) {
2744 printk(KERN_ERR PFX "Couldn't post receive WRs\n");
2753 static void qp_event_handler(struct ib_event *event, void *qp_context)
2755 struct ib_mad_qp_info *qp_info = qp_context;
2757 /* It's worse than that! He's dead, Jim! */
2758 printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
2759 event->event, qp_info->qp->qp_num);
2762 static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2763 struct ib_mad_queue *mad_queue)
2765 mad_queue->qp_info = qp_info;
2766 mad_queue->count = 0;
2767 spin_lock_init(&mad_queue->lock);
2768 INIT_LIST_HEAD(&mad_queue->list);
2771 static void init_mad_qp(struct ib_mad_port_private *port_priv,
2772 struct ib_mad_qp_info *qp_info)
2774 qp_info->port_priv = port_priv;
2775 init_mad_queue(qp_info, &qp_info->send_queue);
2776 init_mad_queue(qp_info, &qp_info->recv_queue);
2777 INIT_LIST_HEAD(&qp_info->overflow_list);
2778 spin_lock_init(&qp_info->snoop_lock);
2779 qp_info->snoop_table = NULL;
2780 qp_info->snoop_table_size = 0;
2781 atomic_set(&qp_info->snoop_count, 0);
2784 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2785 enum ib_qp_type qp_type)
2787 struct ib_qp_init_attr qp_init_attr;
2788 int ret;
2790 memset(&qp_init_attr, 0, sizeof qp_init_attr);
2791 qp_init_attr.send_cq = qp_info->port_priv->cq;
2792 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2793 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2794 qp_init_attr.cap.max_send_wr = mad_sendq_size;
2795 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
2796 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2797 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2798 qp_init_attr.qp_type = qp_type;
2799 qp_init_attr.port_num = qp_info->port_priv->port_num;
2800 qp_init_attr.qp_context = qp_info;
2801 qp_init_attr.event_handler = qp_event_handler;
2802 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2803 if (IS_ERR(qp_info->qp)) {
2804 printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
2805 get_spl_qp_index(qp_type));
2806 ret = PTR_ERR(qp_info->qp);
2809 /* Use minimum queue sizes unless the CQ is resized */
2810 qp_info->send_queue.max_active = mad_sendq_size;
2811 qp_info->recv_queue.max_active = mad_recvq_size;
2812 return 0;
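/*
 * Editor's illustration (not part of the original source): once the per-port
 * QPs above exist, kernel clients attach to them with
 * ib_register_mad_agent(), using the signature this version of the MAD stack
 * exports.  The class, version and methods below are hypothetical example
 * values; example_send_handler is the sketch shown after
 * ib_mad_complete_send_wr() earlier in this file.
 */
static void example_recv_handler(struct ib_mad_agent *agent,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	/* ...consume the response..., then hand the buffers back */
	ib_free_recv_mad(mad_recv_wc);
}

static struct ib_mad_agent *example_register_agent(struct ib_device *device,
						   u8 port_num)
{
	struct ib_mad_reg_req reg_req;

	memset(&reg_req, 0, sizeof reg_req);
	reg_req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
	reg_req.mgmt_class_version = 1;
	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
	set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

	return ib_register_mad_agent(device, port_num, IB_QPT_GSI,
				     &reg_req, 0,	/* no RMPP */
				     example_send_handler,
				     example_recv_handler,
				     NULL);
}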
2818 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2823 ib_destroy_qp(qp_info->qp);
2824 kfree(qp_info->snoop_table);
2829 * Create the QP, PD, MR, and CQ if needed
2831 static int ib_mad_port_open(struct ib_device *device,
2832 int port_num)
2834 int ret, cq_size;
2835 struct ib_mad_port_private *port_priv;
2836 unsigned long flags;
2837 char name[sizeof "ib_mad123"];
2838 int has_smi;
2840 /* Create new device info */
2841 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
2843 printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
2847 port_priv->device = device;
2848 port_priv->port_num = port_num;
2849 spin_lock_init(&port_priv->reg_lock);
2850 INIT_LIST_HEAD(&port_priv->agent_list);
2851 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2852 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2854 cq_size = mad_sendq_size + mad_recvq_size;
2855 has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
2856 if (has_smi)
2857 cq_size *= 2;
2859 port_priv->cq = ib_create_cq(port_priv->device,
2860 ib_mad_thread_completion_handler,
2861 NULL, port_priv, cq_size, 0);
2862 if (IS_ERR(port_priv->cq)) {
2863 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
2864 ret = PTR_ERR(port_priv->cq);
2868 port_priv->pd = ib_alloc_pd(device);
2869 if (IS_ERR(port_priv->pd)) {
2870 printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
2871 ret = PTR_ERR(port_priv->pd);
2875 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2876 if (IS_ERR(port_priv->mr)) {
2877 printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
2878 ret = PTR_ERR(port_priv->mr);
2883 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2887 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2891 snprintf(name, sizeof name, "ib_mad%d", port_num);
2892 port_priv->wq = create_singlethread_workqueue(name);
2893 if (!port_priv->wq) {
2894 ret = -ENOMEM;
2895 goto error8;
2896 }
2897 INIT_WORK(&port_priv->work, ib_mad_completion_handler);
2899 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2900 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2901 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2903 ret = ib_mad_port_start(port_priv);
2905 printk(KERN_ERR PFX "Couldn't start port\n");
2912 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2913 list_del_init(&port_priv->port_list);
2914 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2916 destroy_workqueue(port_priv->wq);
2917 error8:
2918 destroy_mad_qp(&port_priv->qp_info[1]);
2920 destroy_mad_qp(&port_priv->qp_info[0]);
2922 ib_dereg_mr(port_priv->mr);
2924 ib_dealloc_pd(port_priv->pd);
2926 ib_destroy_cq(port_priv->cq);
2927 cleanup_recv_queue(&port_priv->qp_info[1]);
2928 cleanup_recv_queue(&port_priv->qp_info[0]);
2937 * If there are no classes using the port, free the port
2938 * resources (CQ, MR, PD, QP) and remove the port's info structure
2940 static int ib_mad_port_close(struct ib_device *device, int port_num)
2942 struct ib_mad_port_private *port_priv;
2943 unsigned long flags;
2945 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2946 port_priv = __ib_get_mad_port(device, port_num);
2947 if (port_priv == NULL) {
2948 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2949 printk(KERN_ERR PFX "Port %d not found\n", port_num);
2952 list_del_init(&port_priv->port_list);
2953 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2955 destroy_workqueue(port_priv->wq);
2956 destroy_mad_qp(&port_priv->qp_info[1]);
2957 destroy_mad_qp(&port_priv->qp_info[0]);
2958 ib_dereg_mr(port_priv->mr);
2959 ib_dealloc_pd(port_priv->pd);
2960 ib_destroy_cq(port_priv->cq);
2961 cleanup_recv_queue(&port_priv->qp_info[1]);
2962 cleanup_recv_queue(&port_priv->qp_info[0]);
2963 /* XXX: Handle deallocation of MAD registration tables */
2970 static void ib_mad_init_device(struct ib_device *device)
2974 if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
2975 return;
2977 if (device->node_type == RDMA_NODE_IB_SWITCH) {
2978 start = 0;
2979 end = 0;
2980 } else {
2981 start = 1;
2982 end = device->phys_port_cnt;
2983 }
2985 for (i = start; i <= end; i++) {
2986 if (ib_mad_port_open(device, i)) {
2987 printk(KERN_ERR PFX "Couldn't open %s port %d\n",
2988 device->name, i);
2989 goto error;
2990 }
2991 if (ib_agent_port_open(device, i)) {
2992 printk(KERN_ERR PFX "Couldn't open %s port %d "
2993 "for agents\n",
2994 device->name, i);
2995 goto error_agent;
2996 }
2997 }
2998 return;
3000 error_agent:
3001 if (ib_mad_port_close(device, i))
3002 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
3003 device->name, i);
3005 error:
3006 i--;
3008 while (i >= start) {
3009 if (ib_agent_port_close(device, i))
3010 printk(KERN_ERR PFX "Couldn't close %s port %d "
3011 "for agents\n",
3012 device->name, i);
3013 if (ib_mad_port_close(device, i))
3014 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
3015 device->name, i);
3016 i--;
3020 static void ib_mad_remove_device(struct ib_device *device)
3022 int i, num_ports, cur_port;
3024 if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
3025 return;
3027 if (device->node_type == RDMA_NODE_IB_SWITCH) {
3028 num_ports = 1;
3029 cur_port = 0;
3030 } else {
3031 num_ports = device->phys_port_cnt;
3032 cur_port = 1;
3033 }
3034 for (i = 0; i < num_ports; i++, cur_port++) {
3035 if (ib_agent_port_close(device, cur_port))
3036 printk(KERN_ERR PFX "Couldn't close %s port %d "
3037 "for agents\n",
3038 device->name, cur_port);
3039 if (ib_mad_port_close(device, cur_port))
3040 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
3041 device->name, cur_port);
3045 static struct ib_client mad_client = {
3046 .name = "mad",
3047 .add = ib_mad_init_device,
3048 .remove = ib_mad_remove_device
3049 };
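/*
 * Editor's illustration (not part of the original source): the same
 * ib_client hookup used by mad_client above, as another kernel module
 * might spell it.  All "example_" names are hypothetical.
 */
static void example_add_one(struct ib_device *device)
{
	pr_info("example client: %s added\n", device->name);
}

static void example_remove_one(struct ib_device *device)
{
	pr_info("example client: %s removed\n", device->name);
}

static struct ib_client example_client = {
	.name   = "example",
	.add    = example_add_one,
	.remove = example_remove_one
};

/*
 * A consumer would call ib_register_client(&example_client) from its module
 * init and ib_unregister_client(&example_client) from its exit handler, just
 * as ib_mad_init_module()/ib_mad_cleanup_module() do below.
 */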
3051 static int __init ib_mad_init_module(void)
3052 {
3053 int ret;
3055 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3056 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3058 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3059 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3061 ib_mad_cache = kmem_cache_create("ib_mad",
3062 sizeof(struct ib_mad_private),
3063 0,
3064 SLAB_HWCACHE_ALIGN,
3065 NULL);
3066 if (!ib_mad_cache) {
3067 printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
3068 ret = -ENOMEM;
3069 goto error1;
3070 }
3072 INIT_LIST_HEAD(&ib_mad_port_list);
3074 if (ib_register_client(&mad_client)) {
3075 printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
3076 ret = -EINVAL;
3077 goto error2;
3078 }
3080 return 0;
3082 error2:
3083 kmem_cache_destroy(ib_mad_cache);
3084 error1:
3085 return ret;
3088 static void __exit ib_mad_cleanup_module(void)
3090 ib_unregister_client(&mad_client);
3091 kmem_cache_destroy(ib_mad_cache);
3094 module_init(ib_mad_init_module);
3095 module_exit(ib_mad_cleanup_module);