/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/nospec.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");
static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
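
/* Registered under "net/rdma_ucm" in ucma_init() below, so this knob is
 * visible as /proc/sys/net/rdma_ucm/max_backlog.
 */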
struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
	struct workqueue_struct	*close_wq;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
	/* marks that the device is in the process of destroying its
	 * internal HW resources, protected by the global mut
	 */
	int			closing;
	/* sync between removal event and id destroy, protected by file mut */
	int			destroying;
	struct work_struct	close_work;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
	struct work_struct	close_work;
};
static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

static const struct file_operations ucma_fops;
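
/*
 * Locking overview (descriptive note): the global "mut" serializes access
 * to the two idrs and guards the ctx->closing flag, while each ucma_file's
 * "mut" protects that file's ctx_list and event_list and the
 * ctx->destroying flag. When both files' mutexes are needed (see
 * ucma_migrate_id below), they are taken in pointer order, with "mut"
 * nested inside them.
 */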
static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file || !ctx->cm_id)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}
static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx)) {
		if (ctx->closing)
			ctx = ERR_PTR(-EIO);
		else
			atomic_inc(&ctx->ref);
	}
	mutex_unlock(&mut);
	return ctx;
}
static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}
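
/*
 * Note: ctx->ref is not a classic kref; the final put signals ctx->comp
 * rather than freeing anything. A destroyer drops its own reference and
 * then blocks in wait_for_completion() until every concurrent user has
 * called ucma_put_ctx(), after which rdma_destroy_id() is safe.
 */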
static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}
static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

	/* once all inflight tasks are finished, we close all underlying
	 * resources. The context is still alive till its explicit destroying
	 * by its creator.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);
}
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	mutex_lock(&mut);
	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (ctx->id < 0)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}
static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mutex_lock(&mut);
	mc->id = idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (mc->id < 0)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}
static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}
static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}
static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}
/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	if (ctx->destroying)
		return;

	/* Only when the context points at this cm_id does it own it and can
	 * it be queued for closing; otherwise the cm_id is an inflight one,
	 * sitting on the context's event list waiting to be detached and
	 * reattached to its new context in ucma_get_event(), and is handled
	 * separately below.
	 */
	if (ctx->cm_id == cm_id) {
		mutex_lock(&mut);
		ctx->closing = 1;
		mutex_unlock(&mut);
		queue_work(ctx->file->close_wq, &ctx->close_work);
		return;
	}

	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
		if (con_req_eve->cm_id == cm_id &&
		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			list_del(&con_req_eve->list);
			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
			event_found = 1;
			break;
		}
	}
	if (!event_found)
		printk(KERN_ERR "ucma_removal_event_handler: warning: connect request event wasn't found\n");
}
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid || ctx->cm_id != cm_id) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later. However, we do need
		 * to release the underlying HW resources in case of a device
		 * removal event.
		 */
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ucma_removal_event_handler(cm_id);

		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		ucma_removal_event_handler(cm_id);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}
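
/*
 * Note on the allocation-failure path above: a non-zero return from an
 * rdma_cm event handler tells the CM core to destroy the id the event was
 * delivered on, so returning 1 for a CONNECT_REQUEST that could not be
 * queued makes the core clean up the new child id that userspace will
 * never retrieve.
 */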
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}
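
/*
 * Illustrative userspace sketch (not part of this file) of consuming an
 * event through the ABI above, assuming <rdma/rdma_user_cm.h>:
 *
 *	struct rdma_ucm_event_resp resp;
 *	struct {
 *		struct rdma_ucm_cmd_hdr hdr;
 *		struct rdma_ucm_get_event cmd;
 *	} msg = {
 *		.hdr = { .cmd = RDMA_USER_CM_CMD_GET_EVENT,
 *			 .in  = sizeof(msg.cmd), .out = sizeof(resp) },
 *		.cmd = { .response = (uintptr_t) &resp },
 *	};
 *
 * The caller poll()s the fd for POLLIN, then write()s &msg; on return the
 * completed event has been copied into "resp".
 */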
static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}
static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct rdma_cm_id *cm_id;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	cm_id = rdma_create_id(current->nsproxy->net_ns,
			       ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}

	ctx->cm_id = cm_id;
	return 0;

err2:
	rdma_destroy_id(cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	mutex_lock(&file->mut);
	list_del(&ctx->list);
	mutex_unlock(&file->mut);
	kfree(ctx);
	return ret;
}
static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}
static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}
/*
 * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to clean up the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the user space software.
 * These might include pending connect requests which we have not completed
 * processing.  We cannot call rdma_destroy_id while holding the lock of the
 * context (file->mut), as it might cause a deadlock. We therefore extract all
 * relevant events from the context pending events list while holding the
 * mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->file->mut);
	ctx->destroying = 1;
	mutex_unlock(&ctx->file->mut);

	flush_workqueue(ctx->file->close_wq);
	/* At this point it's guaranteed that there is no inflight
	 * closing task */
	mutex_lock(&mut);
	if (!ctx->closing) {
		mutex_unlock(&mut);
		ucma_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
		rdma_destroy_id(ctx->cm_id);
	} else {
		mutex_unlock(&mut);
	}

	resp.events_reported = ucma_free_ctx(ctx);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}
static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_in6(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved || !cmd.addr_size ||
	    cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
	    !rdma_addr_size_in6(&cmd.dst_addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved ||
	    (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}
static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}
static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}
static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}
static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}
static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}
static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		ib_sa_pack_path(&ctx->cm_id->route.path_rec[i],
				&resp->path_data[i].path_rec);
	}

	if (copy_to_user(response, resp,
			 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}
static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}
static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = (void __user *)(unsigned long) cmd.response;
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	ucma_put_ctx(ctx);
	return ret;
}
static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}
static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = rdma_accept(ctx->cm_id, &conn_param);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.qp_state > IB_QPS_ERR)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (!ctx->cm_id->device) {
		ret = -EINVAL;
		goto out;
	}

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}
static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}
static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct ib_sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	if (!ctx->cm_id->device)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));

	ib_sa_unpack_path(path_data->path_rec, &sa_path);
	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}
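
/*
 * Note: rather than reporting the installed path back to userspace
 * directly, ucma_set_ib_path() signals success by synthesizing a
 * RDMA_CM_EVENT_ROUTE_RESOLVED event through the normal event path, so
 * callers observe the same sequence as an ordinary rdma_resolve_route().
 */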
static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}
static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->cm_id->device)
		ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);

	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (cmd->reserved || (cmd->addr_size != rdma_addr_size(addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}

	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long) cmd->response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_lock(&mut);
	idr_replace(&multicast_idr, mc, mc->id);
	mutex_unlock(&mut);

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}
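
/*
 * Note on the two-phase publish above: ucma_alloc_multicast() reserves the
 * id with a NULL pointer, and the mc only becomes visible to lookups
 * (ucma_leave_multicast) via idr_replace() after the join succeeded and
 * the response reached userspace, closing the window in which a concurrent
 * leave could act on a half-initialized mc.
 */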
static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
	if (!join_cmd.addr_size)
		return -EINVAL;

	join_cmd.reserved = 0;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}
static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	return ucma_process_join(file, &cmd, out_len);
}
static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else if (!atomic_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);
	else
		idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}
static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutexes based on pointer comparison to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}
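
/*
 * Example of the ordering rule above: for files A < B, both a migration
 * from A to B and one from B to A take A->mut before B->mut, so the
 * classic ABBA interleaving (one task holding A waiting for B while
 * another holds B waiting for A) cannot occur.
 */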
static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;
	if (f.file->f_op != &ucma_fops) {
		ret = -EINVAL;
		goto file_put;
	}

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID] 	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID] 	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
};
static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
		return -EACCES;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;
	hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucma_cmd_table));

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}
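
/*
 * ABI framing note: each write() carries one rdma_ucm_cmd_hdr followed by
 * hdr.in bytes of command payload; hdr.out gives the size of the userspace
 * buffer that the command's "response" pointer refers to, which handlers
 * validate against their resp structure before copy_to_user(). See the
 * sketch after ucma_get_event() above for a worked example.
 */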
static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}
/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->close_wq = create_singlethread_workqueue("ucma_close_id");
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}
static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		flush_workqueue(file->close_wq);
		/* Once the ctx is marked as destroying and the workqueue has
		 * been flushed, we are safe from any inflight handlers that
		 * might queue another closing task.
		 */
		mutex_lock(&mut);
		if (!ctx->closing) {
			mutex_unlock(&mut);
			ucma_put_ctx(ctx);
			wait_for_completion(&ctx->comp);
			/* rdma_destroy_id ensures that no event handlers are
			 * inflight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			mutex_unlock(&mut);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}
static const struct file_operations ucma_fops = {
	.owner 	 = THIS_MODULE,
	.open 	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll    = ucma_poll,
	.llseek	 = no_llseek,
};
static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};
static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}
static void __exit ucma_cleanup(void)
{
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);