/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

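/*
 * Tuning note: with the table registered under "net/rdma_ucm" (see
 * ucma_init() below), the limit can be adjusted from userspace, e.g.:
 *
 *	sysctl -w net.rdma_ucm.max_backlog=2048
 */
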
struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
	struct workqueue_struct	*close_wq;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
	/* mark that the device is in the process of destroying the internal
	 * HW resources, protected by the global mut
	 */
	int			closing;
	/* sync between removal event and id destroy, protected by file mut */
	int			destroying;
	struct work_struct	close_work;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
	struct work_struct	close_work;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

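/*
 * Locking note: the global "mut" guards both IDRs and the per-context
 * "closing" flag, while each ucma_file's "mut" guards that file's ctx_list
 * and event_list. Context lifetime is tracked by "ref" together with the
 * "comp" completion that the destroy paths wait on.
 */
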
static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		atomic_inc(&ctx->ref);
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

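/*
 * Usage note: command handlers bracket their work with ucma_get_ctx() and
 * ucma_put_ctx(). Destroy paths drop their own reference and then block in
 * wait_for_completion(&ctx->comp) until the final ucma_put_ctx() fires the
 * completion, guaranteeing that no handler still holds the context.
 */
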
static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

	/* Once all inflight tasks are finished, we close all underlying
	 * resources. The context is still alive until its creator explicitly
	 * destroys it.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	mutex_lock(&mut);
	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (ctx->id < 0)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mutex_lock(&mut);
	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (mc->id < 0)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

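/*
 * Note on the two allocators above: idr_alloc() with start == 0 and end == 0
 * hands out the lowest available id with no upper bound, and a negative
 * return value signals failure, which is why both check for "< 0".
 */
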
static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	if (ctx->destroying)
		return;

	/* The context owns this cm_id only if it still points at it; only
	 * then can the context be queued to be closed. Otherwise the cm_id
	 * is an inflight one sitting on the context's event list, waiting to
	 * be detached and reattached to its new context as part of
	 * ucma_get_event, and it is handled separately below.
	 */
	if (ctx->cm_id == cm_id) {
		mutex_lock(&mut);
		ctx->closing = 1;
		mutex_unlock(&mut);
		queue_work(ctx->file->close_wq, &ctx->close_work);
		return;
	}

	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
		if (con_req_eve->cm_id == cm_id &&
		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			list_del(&con_req_eve->list);
			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
			event_found = 1;
			break;
		}
	}
	if (!event_found)
		printk(KERN_ERR "ucma_removal_event_handler: warning: connect request event wasn't found\n");
}

static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid || ctx->cm_id != cm_id) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later. However, we do need
		 * to release the underlying HW resources in case of a device
		 * removal event.
		 */
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ucma_removal_event_handler(cm_id);

		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		ucma_removal_event_handler(cm_id);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}

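/*
 * Descriptive note: the rdma_cm core destroys a cm_id whose event handler
 * returns non-zero. That is why a failed allocation above returns 1 only for
 * RDMA_CM_EVENT_CONNECT_REQUEST (a cm_id userspace has never seen, so it is
 * safe to drop) and 0 otherwise, keeping ids userspace still references alive.
 */
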
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

/*
 * ucma_free_ctx() is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to clean up the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the user space software.
 * These might include pending connect requests which we have not completed
 * processing.  We cannot call rdma_destroy_id() while holding the lock of the
 * context (file->mut), as it might cause a deadlock. We therefore extract all
 * relevant events from the context's pending event list while holding the
 * mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->file->mut);
	ctx->destroying = 1;
	mutex_unlock(&ctx->file->mut);

	flush_workqueue(ctx->file->close_wq);
	/* At this point it's guaranteed that there is no inflight
	 * closing task */
	mutex_lock(&mut);
	if (!ctx->closing) {
		mutex_unlock(&mut);
		ucma_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
		rdma_destroy_id(ctx->cm_id);
	} else {
		mutex_unlock(&mut);
	}

	resp.events_reported = ucma_free_ctx(ctx);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct sockaddr *addr;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	addr = (struct sockaddr *) &cmd.addr;
	if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr,
				cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct sockaddr *src, *dst;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	src = (struct sockaddr *) &cmd.src_addr;
	dst = (struct sockaddr *) &cmd.dst_addr;
	if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) ||
	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		ib_sa_pack_path(&ctx->cm_id->route.path_rec[i],
				&resp->path_data[i].path_rec);
	}

	if (copy_to_user(response, resp,
			 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}

static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = (void __user *)(unsigned long) cmd.response;
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

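/* Descriptive note: a cmd.backlog of zero, a negative value, or anything at
 * or above the max_backlog sysctl limit all fall back to max_backlog itself,
 * so userspace can never exceed the configured ceiling. */
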
static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = rdma_accept(ctx->cm_id, &conn_param);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct ib_sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));
	sa_path.vlan_id = 0xffff;

	ib_sa_unpack_path(path_data->path_rec, &sa_path);
	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (cmd->reserved || !cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}

	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long) cmd->response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
	join_cmd.reserved = 0;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	return ucma_process_join(file, &cmd, out_len);
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else if (!atomic_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);
	else
		idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutexes based on pointer comparison to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

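/*
 * Descriptive note: locking the lower-addressed file first gives every caller
 * the same total order on the two mutexes, so concurrent migrations between
 * the same pair of files cannot deadlock; mutex_lock_nested() merely tells
 * lockdep that taking two locks of the same class here is intentional.
 */
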
static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID] 	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
};

static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

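/*
 * Userspace usage sketch (illustration only, assuming the ABI structs from
 * <rdma/rdma_user_cm.h>): each command arrives as a struct rdma_ucm_cmd_hdr
 * followed by its payload, written to the rdma_cm device node:
 *
 *	struct {
 *		struct rdma_ucm_cmd_hdr hdr;
 *		struct rdma_ucm_create_id cmd;
 *	} msg;
 *
 *	int fd = open("/dev/infiniband/rdma_cm", O_RDWR);
 *	msg.hdr.cmd = RDMA_USER_CM_CMD_CREATE_ID;
 *	msg.hdr.in  = sizeof(msg.cmd);
 *	msg.hdr.out = sizeof(struct rdma_ucm_create_id_resp);
 *	... fill msg.cmd (uid, ps, qp_type, response pointer) ...
 *	write(fd, &msg, sizeof(msg));
 */
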
static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->close_wq = create_singlethread_workqueue("ucma_close_id");
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		flush_workqueue(file->close_wq);
		/* Once the ctx is marked as destroying and the workqueue has
		 * been flushed, we are safe from any inflight handlers that
		 * might queue another closing task.
		 */
		mutex_lock(&mut);
		if (!ctx->closing) {
			mutex_unlock(&mut);
			/* rdma_destroy_id ensures that no event handlers are
			 * inflight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			mutex_unlock(&mut);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner 	 = THIS_MODULE,
	.open 	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll    = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);