/*
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad.c 1389 2004-12-27 22:56:47Z roland $
 */
#include <linux/dma-mapping.h>

#include "mad_priv.h"
#include "smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");
kmem_cache_t *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static spinlock_t ib_mad_port_list_lock;
/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					struct ib_mad *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
				    struct ib_mad_send_wc *mad_send_wc);
static void timeout_sends(void *data);
static void cancel_sends(void *data);
static void local_completions(void *data);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is being held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}
static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}
static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type) {
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}
static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}
static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}
/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1)
		goto error1;

	if (rmpp_version)
		goto error1;	/* XXX: until RMPP implemented */
	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
			goto error1;
		if (!recv_handler)
			goto error1;
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				goto error1;
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui))
				goto error1;
		}
		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
	}
	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kmalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}
	memset(mad_agent_priv, 0, sizeof *mad_agent_priv);

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);
		goto error2;
	}

	if (mad_reg_req) {
		reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
		/* Make a copy of the MAD registration request */
		memcpy(reg_req, mad_reg_req, sizeof *reg_req);
	}
	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions,
		  mad_agent_priv);
	INIT_LIST_HEAD(&mad_agent_priv->canceled_list);
	INIT_WORK(&mad_agent_priv->canceled_work, cancel_sends, mad_agent_priv);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_waitqueue_head(&mad_agent_priv->wait);

	return &mad_agent_priv->agent;
error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
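/*
 * Usage sketch (illustrative only, not part of the original file): a
 * client registering to send and receive Performance Management MADs
 * on QP1.  The handler and context names are hypothetical.
 *
 *	static void my_send_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_send_wc *mad_send_wc);
 *	static void my_recv_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_recv_wc *mad_recv_wc);
 *
 *	struct ib_mad_reg_req req;
 *	struct ib_mad_agent *agent;
 *
 *	memset(&req, 0, sizeof req);
 *	req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
 *	req.mgmt_class_version = 1;
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req,
 *				      0, my_send_handler, my_recv_handler,
 *				      my_context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *
 * Teardown is via ib_unregister_mad_agent(agent).
 */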
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table by one pointer slot. */
		new_snoop_table = kmalloc(sizeof *new_snoop_table *
					  (qp_info->snoop_table_size + 1),
					  GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}
		if (qp_info->snoop_table) {
			memcpy(new_snoop_table, qp_info->snoop_table,
			       sizeof *new_snoop_table *
			       qp_info->snoop_table_size);
			kfree(qp_info->snoop_table);
		}
		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}
struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kmalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	memset(mad_snoop_priv, 0, sizeof *mad_snoop_priv);
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_waitqueue_head(&mad_snoop_priv->wait);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);
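/*
 * Usage sketch (illustrative only): snooping completed sends and all
 * receives on the SMI QP.  Handler names are hypothetical.  A snoop
 * agent is created with hi_tid == 0 and is torn down through the same
 * ib_unregister_mad_agent() call as a regular agent.
 *
 *	agent = ib_register_mad_snoop(device, port_num, IB_QPT_SMI,
 *				      IB_MAD_SNOOP_SEND_COMPLETIONS |
 *				      IB_MAD_SNOOP_RECVS,
 *				      my_snoop_handler, my_recv_handler,
 *				      my_context);
 */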
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);

	atomic_dec(&mad_agent_priv->refcount);
	wait_event(mad_agent_priv->wait,
		   !atomic_read(&mad_agent_priv->refcount));

	if (mad_agent_priv->reg_req)
		kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}
static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	atomic_dec(&mad_snoop_priv->refcount);
	wait_event(mad_snoop_priv->wait,
		   !atomic_read(&mad_snoop_priv->refcount));

	kfree(mad_snoop_priv);
}
/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
static inline int response_mad(struct ib_mad *mad)
{
	/* Trap represses are responses although response bit is reset */
	return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		(mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
}

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}
static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_send_wr *send_wr,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_wr, mad_send_wc);
		if (atomic_dec_and_test(&mad_snoop_priv->refcount))
			wake_up(&mad_snoop_priv->wait);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		if (atomic_dec_and_test(&mad_snoop_priv->refcount))
			wake_up(&mad_snoop_priv->wait);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp_num = IB_QP0;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}
/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_smp *smp,
				  struct ib_send_wr *send_wr)
{
	int ret;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	struct ib_wc mad_wc;

	if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
		ret = -EINVAL;
		printk(KERN_ERR PFX "Invalid directed route\n");
		goto out;
	}
	/* Check to post send on QP or process locally */
	ret = smi_check_local_dr_smp(smp, device, port_num);
	if (!ret || !device->process_mad)
		goto out;

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(send_wr->wr_id, smp->dr_slid, send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	switch (ret) {
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (response_mad(&mad_priv->mad.mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			mad_priv->mad.mad.mad_hdr.tid =
				((struct ib_mad *)smp)->mad_hdr.tid;
			recv_mad_agent = find_mad_agent(port_priv,
							&mad_priv->mad.mad);
		}
		if (!port_priv || !recv_mad_agent) {
			kmem_cache_free(ib_mad_cache, mad_priv);
			kfree(local);
			ret = 0;
			goto out;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kmem_cache_free(ib_mad_cache, mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->send_wr = *send_wr;
	local->send_wr.sg_list = local->sg_list;
	memcpy(local->sg_list, send_wr->sg_list,
	       sizeof *send_wr->sg_list * send_wr->num_sge);
	local->send_wr.next = NULL;
	local->tid = send_wr->wr.ud.mad_hdr->tid;
	local->wr_id = send_wr->wr_id;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}
static int get_buf_length(int hdr_len, int data_len)
{
	int seg_size, pad;

	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		if (pad == seg_size)
			pad = 0;
	} else
		pad = 0;
	return hdr_len + data_len + pad;
}
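/*
 * Worked example (illustrative): a MAD is 256 bytes, so with hdr_len 56
 * and data_len 100, seg_size = 256 - 56 = 200 and pad = 200 - (100 % 200)
 * = 100, giving a buffer length of 56 + 100 + 100 = 256.  When data_len
 * is an exact multiple of seg_size, the pad collapses to 0.
 */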
struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    struct ib_ah *ah,
					    int hdr_len, int data_len,
					    unsigned int __nocast gfp_mask)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *send_buf;
	int buf_size;
	void *buf;

	mad_agent_priv = container_of(mad_agent,
				      struct ib_mad_agent_private, agent);
	buf_size = get_buf_length(hdr_len, data_len);

	buf = kmalloc(sizeof *send_buf + buf_size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	send_buf = buf + buf_size;
	memset(send_buf, 0, sizeof *send_buf);
	send_buf->mad = buf;

	send_buf->sge.addr = dma_map_single(mad_agent->device->dma_device,
					    buf, buf_size, DMA_TO_DEVICE);
	pci_unmap_addr_set(send_buf, mapping, send_buf->sge.addr);
	send_buf->sge.length = buf_size;
	send_buf->sge.lkey = mad_agent->mr->lkey;

	send_buf->send_wr.wr_id = (unsigned long) send_buf;
	send_buf->send_wr.sg_list = &send_buf->sge;
	send_buf->send_wr.num_sge = 1;
	send_buf->send_wr.opcode = IB_WR_SEND;
	send_buf->send_wr.send_flags = IB_SEND_SIGNALED;
	send_buf->send_wr.wr.ud.ah = ah;
	send_buf->send_wr.wr.ud.mad_hdr = &send_buf->mad->mad_hdr;
	send_buf->send_wr.wr.ud.remote_qpn = remote_qpn;
	send_buf->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	send_buf->send_wr.wr.ud.pkey_index = pkey_index;
	send_buf->mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
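/*
 * Usage sketch (illustrative only): allocating, filling, and posting a
 * send buffer.  The agent, address handle "ah", and length values are
 * assumed to exist already; error handling is abbreviated.
 *
 *	struct ib_mad_send_buf *msg;
 *	struct ib_send_wr *bad_wr;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, ah,
 *				 hdr_len, data_len, GFP_KERNEL);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	(fill in msg->mad, including the MAD header and TID)
 *	if (ib_post_send_mad(agent, &msg->send_wr, &bad_wr))
 *		ib_free_send_mad(msg);
 */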
void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);

	dma_unmap_single(send_buf->mad_agent->device->dma_device,
			 pci_unmap_addr(send_buf, mapping),
			 send_buf->sge.length, DMA_TO_DEVICE);
	kfree(send_buf->mad);

	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		wake_up(&mad_agent_priv->wait);
}
EXPORT_SYMBOL(ib_free_send_mad);
static int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count++ < qp_info->send_queue.max_active) {
		list_add_tail(&mad_send_wr->mad_list.list,
			      &qp_info->send_queue.list);
		spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
		ret = ib_post_send(mad_send_wr->mad_agent_priv->agent.qp,
				   &mad_send_wr->send_wr, &bad_send_wr);
		if (ret) {
			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
			dequeue_mad(&mad_send_wr->mad_list);
		}
	} else {
		list_add_tail(&mad_send_wr->mad_list.list,
			      &qp_info->overflow_list);
		spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
		ret = 0;
	}
	return ret;
}
/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_agent *mad_agent,
		     struct ib_send_wr *send_wr,
		     struct ib_send_wr **bad_send_wr)
{
	int ret = -EINVAL;
	struct ib_mad_agent_private *mad_agent_priv;

	/* Validate supplied parameters */
	if (!bad_send_wr)
		goto error1;

	if (!mad_agent || !send_wr)
		goto error2;

	if (!mad_agent->send_handler)
		goto error2;

	mad_agent_priv = container_of(mad_agent,
				      struct ib_mad_agent_private,
				      agent);

	/* Walk list of send WRs and post each on send list */
	while (send_wr) {
		unsigned long			flags;
		struct ib_send_wr		*next_send_wr;
		struct ib_mad_send_wr_private	*mad_send_wr;
		struct ib_smp			*smp;

		/* Validate more parameters */
		if (send_wr->num_sge > IB_MAD_SEND_REQ_MAX_SG)
			goto error2;

		if (send_wr->wr.ud.timeout_ms && !mad_agent->recv_handler)
			goto error2;

		if (!send_wr->wr.ud.mad_hdr) {
			printk(KERN_ERR PFX "MAD header must be supplied "
			       "in WR %p\n", send_wr);
			goto error2;
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_wr = (struct ib_send_wr *)send_wr->next;

		smp = (struct ib_smp *)send_wr->wr.ud.mad_hdr;
		if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv, smp,
						     send_wr);
			if (ret < 0)		/* error */
				goto error2;
			else if (ret == 1)	/* locally consumed */
				goto next;
		}

		/* Allocate MAD send WR tracking structure */
		mad_send_wr = kmalloc(sizeof *mad_send_wr, GFP_ATOMIC);
		if (!mad_send_wr) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_send_wr_private\n");
			ret = -ENOMEM;
			goto error2;
		}

		mad_send_wr->send_wr = *send_wr;
		mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
		memcpy(mad_send_wr->sg_list, send_wr->sg_list,
		       sizeof *send_wr->sg_list * send_wr->num_sge);
		mad_send_wr->wr_id = mad_send_wr->send_wr.wr_id;
		mad_send_wr->send_wr.next = NULL;
		mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid;
		mad_send_wr->mad_agent_priv = mad_agent_priv;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_wr->wr.
							ud.timeout_ms);
		mad_send_wr->retry = 0;
		/* One reference for each work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		ret = ib_send_mad(mad_send_wr);
		if (ret) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error2;
		}
next:
		send_wr = next_send_wr;
	}
	return 0;

error2:
	*bad_send_wr = send_wr;
error1:
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
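/*
 * Illustrative send handler (hypothetical): the client's wr_id is
 * restored before the completion is delivered, so it can carry a
 * pointer to per-request state.
 *
 *	static void my_send_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_send_wc *mad_send_wc)
 *	{
 *		struct my_request *req;
 *
 *		req = (struct my_request *)(unsigned long)mad_send_wc->wr_id;
 *		if (mad_send_wc->status != IB_WC_SUCCESS)
 *			my_handle_error(req, mad_send_wc->status);
 *		my_complete(req);
 *	}
 *
 * my_request, my_handle_error, and my_complete are hypothetical names.
 */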
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *entry;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;

	mad_priv_hdr = container_of(mad_recv_wc,
				    struct ib_mad_private_header,
				    recv_wc);
	priv = container_of(mad_priv_hdr, struct ib_mad_private, header);

	/*
	 * Walk receive buffer list associated with this WC
	 * No need to remove them from list of receive buffers
	 */
	list_for_each_entry(entry, &mad_recv_wc->recv_buf.list, list) {
		/* Free previous receive buffer */
		kmem_cache_free(ib_mad_cache, priv);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
	}

	/* Free last buffer */
	kmem_cache_free(ib_mad_cache, priv);
}
EXPORT_SYMBOL(ib_free_recv_mad);
void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc,
			  struct ib_mad_recv_buf *buf)
{
	printk(KERN_ERR PFX "ib_coalesce_recv_mad() not implemented yet\n");
}
EXPORT_SYMBOL(ib_coalesce_recv_mad);

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		if ((*method)->agent[i]) {
			printk(KERN_ERR PFX "Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kmalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		printk(KERN_ERR PFX "No memory for "
		       "ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}
	/* Clear management method table */
	memset(*method, 0, sizeof **method);

	return 0;
}
/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kmalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_class_table\n");
			ret = -ENOMEM;
			goto error1;
		}
		/* Clear management class table */
		memset(*class, 0, sizeof(**class));
		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for (i = find_first_bit(mad_reg_req->method_mask,
				IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		(*method)->agent[i] = agent_priv;
	}
	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kmalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class_table\n");
			goto error1;
		}
		/* Clear management vendor class table */
		memset(vendor, 0, sizeof(*vendor));
		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kmalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class\n");
			goto error2;
		}
		memset(vendor_class, 0, sizeof(*vendor_class));
		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			BUG_ON(!*method);
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			BUG_ON(*method);
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
				goto error3;
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	printk(KERN_ERR PFX "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for (i = find_first_bit(mad_reg_req->method_mask,
				IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		(*method)->agent[i] = agent_priv;
	}
	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}
static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req) {
		goto out;
	}

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}
static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       struct ib_mad *mad)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (response_mad(mad)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD
		 */
		hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list,
				    agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
			class = port_priv->version[
					mad->mad_hdr.class_version].class;
			if (!class)
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad->mad_hdr.mgmt_class)];
			if (method)
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad->mad_hdr.class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad->mad_hdr.mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (struct ib_vendor_mad *)mad;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method)
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			printk(KERN_NOTICE PFX "No receive handler for client "
			       "%p on port %d\n",
			       &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}
static int validate_mad(struct ib_mad *mad, u32 qp_num)
{
	int valid = 0;

	/* Make sure MAD base version is understood */
	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
		printk(KERN_ERR PFX "MAD received with unsupported base "
		       "version %d\n", mad->mad_hdr.base_version);
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}
static struct ib_mad_send_wr_private*
find_send_req(struct ib_mad_agent_private *mad_agent_priv,
	      u64 tid)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (mad_send_wr->tid == tid)
			return mad_send_wr;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (mad_send_wr->tid == tid && mad_send_wr->timeout) {
			/* Verify request has not been canceled */
			return (mad_send_wr->status == IB_WC_SUCCESS) ?
				mad_send_wr : NULL;
		}
	}
	return NULL;
}
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	u64 tid;

	INIT_LIST_HEAD(&mad_recv_wc->recv_buf.list);
	/* Complete corresponding request */
	if (response_mad(mad_recv_wc->recv_buf.mad)) {
		tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid;
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = find_send_req(mad_agent_priv, tid);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			ib_free_recv_mad(mad_recv_wc);
			if (atomic_dec_and_test(&mad_agent_priv->refcount))
				wake_up(&mad_agent_priv->wait);
			return;
		}
		/* Timeout = 0 means that we won't wait for a response */
		mad_send_wr->timeout = 0;
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		/* Defined behavior is to complete response before request */
		mad_recv_wc->wc->wr_id = mad_send_wr->wr_id;
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		atomic_dec(&mad_agent_priv->refcount);

		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.wr_id = mad_send_wr->wr_id;
		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		if (atomic_dec_and_test(&mad_agent_priv->refcount))
			wake_up(&mad_agent_priv->wait);
	}
}
static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_agent_private *mad_agent;

	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
	if (!response)
		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
		       "for response buffer\n");

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	dma_unmap_single(port_priv->device->dma_device,
			 pci_unmap_addr(&recv->header, mapping),
			 sizeof(struct ib_mad_private) -
			 sizeof(struct ib_mad_private_header),
			 DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;
	recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
	recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
		goto out;

	if (recv->mad.mad.mad_hdr.mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (!smi_handle_dr_smp_recv(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num,
					    port_priv->device->phys_port_cnt))
			goto out;
		if (!smi_check_forward_dr_smp(&recv->mad.smp))
			goto local;
		if (!smi_handle_dr_smp_send(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num))
			goto out;
		if (!smi_check_local_dr_smp(&recv->mad.smp,
					    port_priv->device,
					    port_priv->port_num))
			goto out;
	}

local:
	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		int ret;

		if (!response) {
			printk(KERN_ERR PFX "No memory for response MAD\n");
			/*
			 * Is it better to assume that
			 * it wouldn't be processed ?
			 */
			goto out;
		}

		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     &recv->mad.mad,
						     &response->mad.mad);
		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				/* Send response */
				if (!agent_send(response, &recv->grh, wc,
						port_priv->device,
						port_priv->port_num))
					response = NULL;
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		if (recv)
			kmem_cache_free(ib_mad_cache, recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			cancel_delayed_work(&mad_agent_priv->timed_work);
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
		}
	}
}
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
		temp_mad_send_wr = list_entry(list_item,
					      struct ib_mad_send_wr_private,
					      agent_list);
		if (time_after(mad_send_wr->timeout,
			       temp_mad_send_wr->timeout))
			break;
	}
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				   &mad_agent_priv->timed_work, delay);
	}
}
/*
 * Process a send work completion
 */
static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
				    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
					   mad_send_wc);

	/* Release reference on agent taken when sending */
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		wake_up(&mad_agent_priv->wait);

	kfree(mad_send_wr);
}
static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_del(&mad_list->list);
		list_add_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	/* Restore client wr_id in WC and complete send */
	wc->wr_id = mad_send_wr->wr_id;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_wr,
			   (struct ib_mad_send_wc *)wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, (struct ib_mad_send_wc *)wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
				   &bad_send_wr);
		if (ret) {
			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}
static void mad_error_handler(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/* Determine if failure was a send or receive */
	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	if (mad_list->mad_queue == &qp_info->recv_queue)
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
					   &bad_send_wr);
			if (ret)
				ib_mad_send_done_handler(port_priv, wc);
		} else
			ib_mad_send_done_handler(port_priv, wc);
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				printk(KERN_ERR PFX "mad_error_handler - "
				       "ib_modify_qp to RTS : %d\n", ret);
			else
				mark_sends_for_retry(qp_info);
		}
		ib_mad_send_done_handler(port_priv, wc);
	}
}
/*
 * IB MAD completion callback
 */
static void ib_mad_completion_handler(void *data)
{
	struct ib_mad_port_private *port_priv;
	struct ib_wc wc;

	port_priv = (struct ib_mad_port_private *)data;
	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_mad_send_done_handler(port_priv, &wc);
				break;
			case IB_WC_RECV:
				ib_mad_recv_done_handler(port_priv, &wc);
				break;
			default:
				BUG_ON(1);
				break;
			}
		} else
			mad_error_handler(port_priv, &wc);
	}
}
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.wr_id = mad_send_wr->wr_id;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		list_del(&mad_send_wr->agent_list);
		kfree(mad_send_wr);
		atomic_dec(&mad_agent_priv->refcount);
	}
}
static struct ib_mad_send_wr_private*
find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv,
		   u64 wr_id)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (mad_send_wr->wr_id == wr_id)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (mad_send_wr->wr_id == wr_id)
			return mad_send_wr;
	}
	return NULL;
}
static void cancel_sends(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	mad_agent_priv = data;

	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->canceled_list)) {
		mad_send_wr = list_entry(mad_agent_priv->canceled_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		list_del(&mad_send_wr->agent_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		mad_send_wc.wr_id = mad_send_wr->wr_id;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		kfree(mad_send_wr);
		if (atomic_dec_and_test(&mad_agent_priv->refcount))
			wake_up(&mad_agent_priv->wait);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   u64 wr_id)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_by_wr_id(mad_agent_priv, wr_id);
	if (!mad_send_wr) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		goto out;
	}

	if (mad_send_wr->status == IB_WC_SUCCESS)
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);

	if (mad_send_wr->refcount != 0) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		goto out;
	}

	list_del(&mad_send_wr->agent_list);
	list_add_tail(&mad_send_wr->agent_list, &mad_agent_priv->canceled_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->canceled_work);
out:
	return;
}
EXPORT_SYMBOL(ib_cancel_mad);
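/*
 * Usage sketch (illustrative): canceling an outstanding request by the
 * wr_id it was posted with.  The canceled send still completes through
 * the client's send handler, with status IB_WC_WR_FLUSH_ERR.
 *
 *	ib_cancel_mad(agent, wr_id);
 */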
static void local_completions(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;

	mad_agent_priv = (struct ib_mad_agent_private *)data;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		if (local->mad_priv) {
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
				kmem_cache_free(ib_mad_cache, local->mad_priv);
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(local->wr_id, IB_LID_PERMISSIVE,
				     0 /* pkey index */,
				     recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;
			local->mad_priv->header.recv_wc.mad_len =
						sizeof(struct ib_mad);
			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.recv_buf.list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						&local->mad_priv->mad.mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.wr_id = local->wr_id;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info, &local->send_wr,
				   &mad_send_wc,
				   IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_del(&local->completion_list);
		atomic_dec(&mad_agent_priv->refcount);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static void timeout_sends(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = (struct ib_mad_agent_private *)data;

	mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		mad_send_wc.wr_id = mad_send_wr->wr_id;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		kfree(mad_send_wr);
		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
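/*
 * CQ completion callback. This runs in interrupt context, so defer
 * all completion processing to the port's single-threaded workqueue.
 */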
static void ib_mad_thread_completion_handler(struct ib_cq *cq)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;

	queue_work(port_priv->wq, &port_priv->work);
}
/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
	sg_list.lkey = (*qp_info->port_priv->mr).lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			/* Consume the buffer handed in by the caller first */
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
			if (!mad_priv) {
				printk(KERN_ERR PFX "No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.addr = dma_map_single(qp_info->port_priv->
						device->dma_device,
					      &mad_priv->grh,
					      sizeof *mad_priv -
						sizeof mad_priv->header,
					      DMA_FROM_DEVICE);
		pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			dma_unmap_single(qp_info->port_priv->device->dma_device,
					 pci_unmap_addr(&mad_priv->header,
							mapping),
					 sizeof *mad_priv -
						sizeof mad_priv->header,
					 DMA_FROM_DEVICE);
			kmem_cache_free(ib_mad_cache, mad_priv);
			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}
/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	while (!list_empty(&qp_info->recv_queue.list)) {
		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		dma_unmap_single(qp_info->port_priv->device->dma_device,
				 pci_unmap_addr(&recv->header, mapping),
				 sizeof(struct ib_mad_private) -
				 sizeof(struct ib_mad_private_header),
				 DMA_FROM_DEVICE);
		kmem_cache_free(ib_mad_cache, recv);
	}

	qp_info->recv_queue.count = 0;
}
/*
 * Start the port: transition both MAD QPs through INIT/RTR/RTS and
 * prime the receive queues.
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		/* PKey index for QP1 is irrelevant but one is needed
		 * for the Reset to Init transition */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = 0;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
				   IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to INIT: %d\n",
			       i, ret);
			goto out;
		}
		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to RTR: %d\n",
			       i, ret);
			goto out;
		}
		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to RTS: %d\n",
			       i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		printk(KERN_ERR PFX "Failed to request completion notification: %d\n",
		       ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}
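/*
 * Handle asynchronous events on a MAD QP. Anything that arrives here
 * (e.g. a catastrophic error) is fatal for the QP; there is no
 * recovery path, so just log it.
 */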
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
	       event->event, qp_info->qp->qp_num);
}
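/*
 * Set up the bookkeeping for one MAD work queue (the send or receive
 * side of a QP): owner back-pointer, counter, lock, and list head.
 */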
static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}
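/*
 * Create one of the special QPs (SMI or GSI) for a port, sized to the
 * MAD layer's default send/receive queue depths.
 */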
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
	qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
		       get_spl_qp_index(qp_type));
		return PTR_ERR(qp_info->qp);
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
	qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
	return 0;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);	/* kfree(NULL) is a no-op */
}
/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];

	/* First, check if port already open at MAD layer */
	port_priv = ib_get_mad_port(device, port_num);
	if (port_priv) {
		printk(KERN_DEBUG PFX "%s port %d already open\n",
		       device->name, port_num);
		return 0;
	}

	/* Create new device info */
	port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}
	memset(port_priv, 0, sizeof *port_priv);
	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	/* One CQ is shared by the send and receive queues of both QPs */
	cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
	port_priv->cq = ib_create_cq(port_priv->device,
				     (ib_comp_handler)
					ib_mad_thread_completion_handler,
				     NULL, port_priv, cq_size);
	if (IS_ERR(port_priv->cq)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
		ret = PTR_ERR(port_priv->mr);
		goto error5;
	}

	ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
	if (ret)
		goto error6;
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		printk(KERN_ERR PFX "Couldn't start port\n");
		goto error9;
	}

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
	return 0;

error9:
	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dereg_mr(port_priv->mr);
error5:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);
	return ret;
}
/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		printk(KERN_ERR PFX "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	/* Stop processing completions. */
	flush_workqueue(port_priv->wq);
	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dereg_mr(port_priv->mr);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}
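/*
 * ib_client add callback: bring MAD services up on every port of a
 * newly registered device. A switch is managed entirely through port
 * 0; all other node types use ports 1 through phys_port_cnt.
 */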
static void ib_mad_init_device(struct ib_device *device)
{
	int ret, num_ports, cur_port, i, ret2;

	if (device->node_type == IB_NODE_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		ret = ib_mad_port_open(device, cur_port);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't open %s port %d\n",
			       device->name, cur_port);
			goto error_device_open;
		}
		ret = ib_agent_port_open(device, cur_port);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't open %s port %d for agents\n",
			       device->name, cur_port);
			goto error_device_open;
		}
	}
	return;

error_device_open:
	/* Unwind the ports opened so far */
	while (i > 0) {
		cur_port--;
		ret2 = ib_agent_port_close(device, cur_port);
		if (ret2)
			printk(KERN_ERR PFX "Couldn't close %s port %d for agents\n",
			       device->name, cur_port);
		ret2 = ib_mad_port_close(device, cur_port);
		if (ret2)
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
		i--;
	}
}
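/*
 * ib_client remove callback: tear down the agent and MAD services on
 * every port, mirroring ib_mad_init_device().
 */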
static void ib_mad_remove_device(struct ib_device *device)
{
	int i, num_ports, cur_port, ret2;

	if (device->node_type == IB_NODE_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		ret2 = ib_agent_port_close(device, cur_port);
		if (ret2)
			printk(KERN_ERR PFX "Couldn't close %s port %d for agents\n",
			       device->name, cur_port);
		ret2 = ib_mad_port_close(device, cur_port);
		if (ret2)
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
	}
}
static struct ib_client mad_client = {
	.name   = "mad",
	.add    = ib_mad_init_device,
	.remove = ib_mad_remove_device
};
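/*
 * Module init: create the slab cache used for MAD receive and local
 * send buffers, then register with the IB core so that each existing
 * and future device gets its ports opened via ib_mad_init_device().
 */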
static int __init ib_mad_init_module(void)
{
	int ret;

	spin_lock_init(&ib_mad_port_list_lock);
	spin_lock_init(&ib_agent_port_list_lock);

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
					 0, SLAB_HWCACHE_ALIGN,
					 NULL, NULL);
	if (!ib_mad_cache) {
		printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
		ret = -ENOMEM;
		goto error1;
	}

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
		ret = -EINVAL;
		goto error2;
	}

	return 0;

error2:
	kmem_cache_destroy(ib_mad_cache);
error1:
	return ret;
}
static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);

	if (kmem_cache_destroy(ib_mad_cache))
		printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n");
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);