2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * $Id: uverbs_cmd.c 2708 2005-06-24 17:27:21Z roland $
36 #include <asm/uaccess.h>
/*
 * INIT_UDATA() - fill an ib_udata with the userspace input/output buffer
 * pointers and lengths used to pass driver-private data through a verb.
 * NOTE(review): the do { } while (0) wrapper lines are not visible in
 * this extraction; callers pass raw user-supplied addresses here.
 */
40 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
42 (udata)->inbuf = (void __user *) (ibuf); \
43 (udata)->outbuf = (void __user *) (obuf); \
44 (udata)->inlen = (ilen); \
45 (udata)->outlen = (olen); \
/*
 * QUERY_PARAMS verb: report per-device parameters to userspace.
 * Currently only the number of completion event channels (num_comp)
 * is returned, copied to the user buffer named by cmd.response.
 * NOTE(review): the if-bodies (error returns) are missing from this
 * extraction; presumably -ENOSPC / -EFAULT — verify against mainline.
 */
48 ssize_t ib_uverbs_query_params(struct ib_uverbs_file *file,
49 const char __user *buf,
50 int in_len, int out_len)
52 struct ib_uverbs_query_params cmd;
53 struct ib_uverbs_query_params_resp resp;
55 if (out_len < sizeof resp)
58 if (copy_from_user(&cmd, buf, sizeof cmd))
61 memset(&resp, 0, sizeof resp);
63 resp.num_cq_events = file->device->num_comp;
/* cmd.response is a user-supplied 64-bit address; cast via unsigned long */
65 if (copy_to_user((void __user *) (unsigned long) cmd.response, &resp, sizeof resp))
/*
 * GET_CONTEXT verb: allocate a per-process device context via the
 * driver's alloc_ucontext(), initialize its per-object lists and lock,
 * and return the async event fd plus one fd per completion channel
 * to userspace.  On user-copy failure the context is torn down again
 * via dealloc_ucontext().
 */
71 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
72 const char __user *buf,
73 int in_len, int out_len)
75 struct ib_uverbs_get_context cmd;
76 struct ib_uverbs_get_context_resp resp;
77 struct ib_udata udata;
78 struct ib_device *ibdev = file->device->ib_dev;
82 if (out_len < sizeof resp)
85 if (copy_from_user(&cmd, buf, sizeof cmd))
/* driver-private in/out data follows cmd and resp in the user buffers */
88 INIT_UDATA(&udata, buf + sizeof cmd,
89 (unsigned long) cmd.response + sizeof resp,
90 in_len - sizeof cmd, out_len - sizeof resp);
92 file->ucontext = ibdev->alloc_ucontext(ibdev, &udata);
93 if (IS_ERR(file->ucontext)) {
94 ret = PTR_ERR(file->ucontext);
95 file->ucontext = NULL;
/* lists of uobjects owned by this context, torn down at file close */
99 file->ucontext->device = ibdev;
100 INIT_LIST_HEAD(&file->ucontext->pd_list);
101 INIT_LIST_HEAD(&file->ucontext->mr_list);
102 INIT_LIST_HEAD(&file->ucontext->mw_list);
103 INIT_LIST_HEAD(&file->ucontext->cq_list);
104 INIT_LIST_HEAD(&file->ucontext->qp_list);
105 INIT_LIST_HEAD(&file->ucontext->srq_list);
106 INIT_LIST_HEAD(&file->ucontext->ah_list);
107 spin_lock_init(&file->ucontext->lock);
109 resp.async_fd = file->async_file.fd;
/* copy each completion channel's fd into the user's cq_fd_tab array */
110 for (i = 0; i < file->device->num_comp; ++i)
111 if (copy_to_user((void __user *) (unsigned long) cmd.cq_fd_tab +
113 &file->comp_file[i].fd, sizeof (__u32)))
116 if (copy_to_user((void __user *) (unsigned long) cmd.response,
/* error path: undo the ucontext allocation */
123 ibdev->dealloc_ucontext(file->ucontext);
124 file->ucontext = NULL;
/*
 * QUERY_DEVICE verb: call ib_query_device() and translate the kernel
 * ib_device_attr field-by-field into the fixed-layout userspace
 * response structure, plus phys_port_cnt taken from the device itself.
 */
129 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
130 const char __user *buf,
131 int in_len, int out_len)
133 struct ib_uverbs_query_device cmd;
134 struct ib_uverbs_query_device_resp resp;
135 struct ib_device_attr attr;
138 if (out_len < sizeof resp)
141 if (copy_from_user(&cmd, buf, sizeof cmd))
144 ret = ib_query_device(file->device->ib_dev, &attr);
/* zero first so padding/reserved bytes don't leak kernel stack data */
148 memset(&resp, 0, sizeof resp);
150 resp.fw_ver = attr.fw_ver;
151 resp.node_guid = attr.node_guid;
152 resp.sys_image_guid = attr.sys_image_guid;
153 resp.max_mr_size = attr.max_mr_size;
154 resp.page_size_cap = attr.page_size_cap;
155 resp.vendor_id = attr.vendor_id;
156 resp.vendor_part_id = attr.vendor_part_id;
157 resp.hw_ver = attr.hw_ver;
158 resp.max_qp = attr.max_qp;
159 resp.max_qp_wr = attr.max_qp_wr;
160 resp.device_cap_flags = attr.device_cap_flags;
161 resp.max_sge = attr.max_sge;
162 resp.max_sge_rd = attr.max_sge_rd;
163 resp.max_cq = attr.max_cq;
164 resp.max_cqe = attr.max_cqe;
165 resp.max_mr = attr.max_mr;
166 resp.max_pd = attr.max_pd;
167 resp.max_qp_rd_atom = attr.max_qp_rd_atom;
168 resp.max_ee_rd_atom = attr.max_ee_rd_atom;
169 resp.max_res_rd_atom = attr.max_res_rd_atom;
170 resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
171 resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
172 resp.atomic_cap = attr.atomic_cap;
173 resp.max_ee = attr.max_ee;
174 resp.max_rdd = attr.max_rdd;
175 resp.max_mw = attr.max_mw;
176 resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
177 resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
178 resp.max_mcast_grp = attr.max_mcast_grp;
179 resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
180 resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
181 resp.max_ah = attr.max_ah;
182 resp.max_fmr = attr.max_fmr;
183 resp.max_map_per_fmr = attr.max_map_per_fmr;
184 resp.max_srq = attr.max_srq;
185 resp.max_srq_wr = attr.max_srq_wr;
186 resp.max_srq_sge = attr.max_srq_sge;
187 resp.max_pkeys = attr.max_pkeys;
188 resp.local_ca_ack_delay = attr.local_ca_ack_delay;
189 resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;
191 if (copy_to_user((void __user *) (unsigned long) cmd.response,
/*
 * QUERY_PORT verb: ib_query_port() on cmd.port_num and translate the
 * ib_port_attr fields into the userspace response structure.
 */
198 ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
199 const char __user *buf,
200 int in_len, int out_len)
202 struct ib_uverbs_query_port cmd;
203 struct ib_uverbs_query_port_resp resp;
204 struct ib_port_attr attr;
207 if (out_len < sizeof resp)
210 if (copy_from_user(&cmd, buf, sizeof cmd))
213 ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
/* zero first so padding/reserved bytes don't leak kernel stack data */
217 memset(&resp, 0, sizeof resp);
219 resp.state = attr.state;
220 resp.max_mtu = attr.max_mtu;
221 resp.active_mtu = attr.active_mtu;
222 resp.gid_tbl_len = attr.gid_tbl_len;
223 resp.port_cap_flags = attr.port_cap_flags;
224 resp.max_msg_sz = attr.max_msg_sz;
225 resp.bad_pkey_cntr = attr.bad_pkey_cntr;
226 resp.qkey_viol_cntr = attr.qkey_viol_cntr;
227 resp.pkey_tbl_len = attr.pkey_tbl_len;
229 resp.sm_lid = attr.sm_lid;
231 resp.max_vl_num = attr.max_vl_num;
232 resp.sm_sl = attr.sm_sl;
233 resp.subnet_timeout = attr.subnet_timeout;
234 resp.init_type_reply = attr.init_type_reply;
235 resp.active_width = attr.active_width;
236 resp.active_speed = attr.active_speed;
237 resp.phys_state = attr.phys_state;
239 if (copy_to_user((void __user *) (unsigned long) cmd.response,
/*
 * QUERY_GID verb: look up one GID table entry (cmd.port_num,
 * cmd.index) and copy the 16-byte GID back to userspace.
 */
246 ssize_t ib_uverbs_query_gid(struct ib_uverbs_file *file,
247 const char __user *buf,
248 int in_len, int out_len)
250 struct ib_uverbs_query_gid cmd;
251 struct ib_uverbs_query_gid_resp resp;
254 if (out_len < sizeof resp)
257 if (copy_from_user(&cmd, buf, sizeof cmd))
260 memset(&resp, 0, sizeof resp);
/* resp.gid is a raw byte array; the core fills it as a union ib_gid */
262 ret = ib_query_gid(file->device->ib_dev, cmd.port_num, cmd.index,
263 (union ib_gid *) resp.gid);
267 if (copy_to_user((void __user *) (unsigned long) cmd.response,
/*
 * QUERY_PKEY verb: look up one P_Key table entry (cmd.port_num,
 * cmd.index) and return it to userspace.
 */
274 ssize_t ib_uverbs_query_pkey(struct ib_uverbs_file *file,
275 const char __user *buf,
276 int in_len, int out_len)
278 struct ib_uverbs_query_pkey cmd;
279 struct ib_uverbs_query_pkey_resp resp;
282 if (out_len < sizeof resp)
285 if (copy_from_user(&cmd, buf, sizeof cmd))
288 memset(&resp, 0, sizeof resp);
/* destination argument (&resp.pkey) is on the missing original line */
290 ret = ib_query_pkey(file->device->ib_dev, cmd.port_num, cmd.index,
295 if (copy_to_user((void __user *) (unsigned long) cmd.response,
/*
 * ALLOC_PD verb: allocate a protection domain via the driver's
 * alloc_pd(), wrap it in a uobject, assign it an idr handle under
 * ib_uverbs_idr_mutex, link it onto the context's pd_list, and return
 * the handle.  The tail of the function (after the copy_to_user) is
 * the unwind path: unlink, remove from the idr, and free.
 */
302 ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
303 const char __user *buf,
304 int in_len, int out_len)
306 struct ib_uverbs_alloc_pd cmd;
307 struct ib_uverbs_alloc_pd_resp resp;
308 struct ib_udata udata;
309 struct ib_uobject *uobj;
313 if (out_len < sizeof resp)
316 if (copy_from_user(&cmd, buf, sizeof cmd))
319 INIT_UDATA(&udata, buf + sizeof cmd,
320 (unsigned long) cmd.response + sizeof resp,
321 in_len - sizeof cmd, out_len - sizeof resp);
323 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
327 uobj->context = file->ucontext;
329 pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
330 file->ucontext, &udata);
336 pd->device = file->device->ib_dev;
338 atomic_set(&pd->usecnt, 0);
/* reserve idr memory outside the mutex, then take the handle inside it */
341 if (!idr_pre_get(&ib_uverbs_pd_idr, GFP_KERNEL)) {
346 down(&ib_uverbs_idr_mutex);
347 ret = idr_get_new(&ib_uverbs_pd_idr, pd, &uobj->id);
348 up(&ib_uverbs_idr_mutex);
355 spin_lock_irq(&file->ucontext->lock);
356 list_add_tail(&uobj->list, &file->ucontext->pd_list);
357 spin_unlock_irq(&file->ucontext->lock);
359 memset(&resp, 0, sizeof resp);
360 resp.pd_handle = uobj->id;
362 if (copy_to_user((void __user *) (unsigned long) cmd.response,
363 &resp, sizeof resp)) {
/* unwind: remove from the context list and release the idr handle */
371 spin_lock_irq(&file->ucontext->lock);
372 list_del(&uobj->list);
373 spin_unlock_irq(&file->ucontext->lock);
375 down(&ib_uverbs_idr_mutex);
376 idr_remove(&ib_uverbs_pd_idr, uobj->id);
377 up(&ib_uverbs_idr_mutex);
/*
 * DEALLOC_PD verb: look up the PD by handle (validating it belongs to
 * this context), destroy it, and remove the handle and list linkage.
 * All lookup/teardown happens under ib_uverbs_idr_mutex.
 * NOTE(review): the line assigning uobj (from pd->uobject) is missing
 * from this extraction but is required before the list_del below.
 */
387 ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
388 const char __user *buf,
389 int in_len, int out_len)
391 struct ib_uverbs_dealloc_pd cmd;
393 struct ib_uobject *uobj;
396 if (copy_from_user(&cmd, buf, sizeof cmd))
399 down(&ib_uverbs_idr_mutex);
/* ownership check: handle must belong to the caller's ucontext */
401 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
402 if (!pd || pd->uobject->context != file->ucontext)
407 ret = ib_dealloc_pd(pd);
411 idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle);
413 spin_lock_irq(&file->ucontext->lock);
414 list_del(&uobj->list);
415 spin_unlock_irq(&file->ucontext->lock);
420 up(&ib_uverbs_idr_mutex);
/* write-style verbs return in_len on success */
422 return ret ? ret : in_len;
/*
 * REG_MR verb: pin the user memory region with ib_umem_get(), register
 * it on the PD via the driver's reg_user_mr(), hand back lkey/rkey and
 * an mr handle.  The start address and the HCA virtual address must
 * share the same offset within a page.  Unwind path at the tail
 * releases the list linkage and the pinned umem.
 */
425 ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
426 const char __user *buf, int in_len,
429 struct ib_uverbs_reg_mr cmd;
430 struct ib_uverbs_reg_mr_resp resp;
431 struct ib_udata udata;
432 struct ib_umem_object *obj;
437 if (out_len < sizeof resp)
440 if (copy_from_user(&cmd, buf, sizeof cmd))
443 INIT_UDATA(&udata, buf + sizeof cmd,
444 (unsigned long) cmd.response + sizeof resp,
445 in_len - sizeof cmd, out_len - sizeof resp);
/* user VA and HCA VA must have identical page offsets */
447 if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
450 obj = kmalloc(sizeof *obj, GFP_KERNEL);
454 obj->uobject.context = file->ucontext;
457 * We ask for writable memory if any access flags other than
458 * "remote read" are set. "Local write" and "remote write"
459 * obviously require write access. "Remote atomic" can do
460 * things like fetch and add, which will modify memory, and
461 * "MW bind" can change permissions by binding a window.
463 ret = ib_umem_get(file->device->ib_dev, &obj->umem,
464 (void *) (unsigned long) cmd.start, cmd.length,
465 !!(cmd.access_flags & ~IB_ACCESS_REMOTE_READ))
469 obj->umem.virt_base = cmd.hca_va;
471 down(&ib_uverbs_idr_mutex);
/* validate the PD handle and that the driver supports userspace MRs */
473 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
474 if (!pd || pd->uobject->context != file->ucontext) {
479 if (!pd->device->reg_user_mr) {
484 mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata);
490 mr->device = pd->device;
492 mr->uobject = &obj->uobject;
493 atomic_inc(&pd->usecnt);
494 atomic_set(&mr->usecnt, 0);
496 memset(&resp, 0, sizeof resp);
497 resp.lkey = mr->lkey;
498 resp.rkey = mr->rkey;
501 if (!idr_pre_get(&ib_uverbs_mr_idr, GFP_KERNEL)) {
506 ret = idr_get_new(&ib_uverbs_mr_idr, mr, &obj->uobject.id);
513 resp.mr_handle = obj->uobject.id;
515 spin_lock_irq(&file->ucontext->lock);
516 list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);
517 spin_unlock_irq(&file->ucontext->lock);
519 if (copy_to_user((void __user *) (unsigned long) cmd.response,
520 &resp, sizeof resp)) {
525 up(&ib_uverbs_idr_mutex);
/* unwind: unlink from the context and unpin the user memory */
530 spin_lock_irq(&file->ucontext->lock);
531 list_del(&obj->uobject.list);
532 spin_unlock_irq(&file->ucontext->lock);
538 up(&ib_uverbs_idr_mutex);
540 ib_umem_release(file->device->ib_dev, &obj->umem);
/*
 * DEREG_MR verb: look up the MR by handle (ownership-checked),
 * deregister it, drop the idr handle and list linkage, then release
 * the pinned umem.  All under ib_uverbs_idr_mutex.
 */
547 ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
548 const char __user *buf, int in_len,
551 struct ib_uverbs_dereg_mr cmd;
553 struct ib_umem_object *memobj;
556 if (copy_from_user(&cmd, buf, sizeof cmd))
559 down(&ib_uverbs_idr_mutex);
561 mr = idr_find(&ib_uverbs_mr_idr, cmd.mr_handle);
562 if (!mr || mr->uobject->context != file->ucontext)
/* recover the umem container before the MR (and uobject) go away */
565 memobj = container_of(mr->uobject, struct ib_umem_object, uobject);
567 ret = ib_dereg_mr(mr);
571 idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle);
573 spin_lock_irq(&file->ucontext->lock);
574 list_del(&memobj->uobject.list);
575 spin_unlock_irq(&file->ucontext->lock);
577 ib_umem_release(file->device->ib_dev, &memobj->umem);
581 up(&ib_uverbs_idr_mutex);
583 return ret ? ret : in_len;
/*
 * CREATE_CQ verb: create a completion queue via the driver's
 * create_cq(), bind it to this file's completion/async event
 * machinery, assign an idr handle, link it on the context's cq_list,
 * and return the handle.  cmd.event_handler selects which completion
 * channel the CQ reports on, so it must be < num_comp.
 */
586 ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
587 const char __user *buf, int in_len,
590 struct ib_uverbs_create_cq cmd;
591 struct ib_uverbs_create_cq_resp resp;
592 struct ib_udata udata;
593 struct ib_ucq_object *uobj;
597 if (out_len < sizeof resp)
600 if (copy_from_user(&cmd, buf, sizeof cmd))
603 INIT_UDATA(&udata, buf + sizeof cmd,
604 (unsigned long) cmd.response + sizeof resp,
605 in_len - sizeof cmd, out_len - sizeof resp);
/* reject out-of-range completion channel index */
607 if (cmd.event_handler >= file->device->num_comp)
610 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
614 uobj->uobject.user_handle = cmd.user_handle;
615 uobj->uobject.context = file->ucontext;
616 uobj->comp_events_reported = 0;
617 uobj->async_events_reported = 0;
618 INIT_LIST_HEAD(&uobj->comp_list);
619 INIT_LIST_HEAD(&uobj->async_list);
621 cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
622 file->ucontext, &udata);
628 cq->device = file->device->ib_dev;
629 cq->uobject = &uobj->uobject;
630 cq->comp_handler = ib_uverbs_comp_handler;
631 cq->event_handler = ib_uverbs_cq_event_handler;
632 cq->cq_context = file;
633 atomic_set(&cq->usecnt, 0);
636 if (!idr_pre_get(&ib_uverbs_cq_idr, GFP_KERNEL)) {
641 down(&ib_uverbs_idr_mutex);
642 ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->uobject.id);
643 up(&ib_uverbs_idr_mutex);
650 spin_lock_irq(&file->ucontext->lock);
651 list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
652 spin_unlock_irq(&file->ucontext->lock);
654 memset(&resp, 0, sizeof resp);
655 resp.cq_handle = uobj->uobject.id;
658 if (copy_to_user((void __user *) (unsigned long) cmd.response,
659 &resp, sizeof resp)) {
/* unwind: unlink from the context and release the idr handle */
667 spin_lock_irq(&file->ucontext->lock);
668 list_del(&uobj->uobject.list);
669 spin_unlock_irq(&file->ucontext->lock);
671 down(&ib_uverbs_idr_mutex);
672 idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);
673 up(&ib_uverbs_idr_mutex);
/*
 * DESTROY_CQ verb: destroy the CQ (ownership-checked), remove its
 * handle and list linkage, drain any queued completion and async
 * events still referencing it, and report how many events of each
 * kind were delivered so userspace can reconcile its counters.
 */
683 ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
684 const char __user *buf, int in_len,
687 struct ib_uverbs_destroy_cq cmd;
688 struct ib_uverbs_destroy_cq_resp resp;
690 struct ib_ucq_object *uobj;
691 struct ib_uverbs_event *evt, *tmp;
695 if (copy_from_user(&cmd, buf, sizeof cmd))
698 memset(&resp, 0, sizeof resp);
700 down(&ib_uverbs_idr_mutex);
702 cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
703 if (!cq || cq->uobject->context != file->ucontext)
706 user_handle = cq->uobject->user_handle;
707 uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
709 ret = ib_destroy_cq(cq);
713 idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);
715 spin_lock_irq(&file->ucontext->lock);
716 list_del(&uobj->uobject.list);
717 spin_unlock_irq(&file->ucontext->lock);
/* flush completion events queued for this CQ
 * NOTE(review): uses comp_file[0] only — assumes a single completion
 * channel here; verify against how CQs pick their channel at create. */
719 spin_lock_irq(&file->comp_file[0].lock);
720 list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
721 list_del(&evt->list);
724 spin_unlock_irq(&file->comp_file[0].lock);
/* flush async events queued for this CQ */
726 spin_lock_irq(&file->async_file.lock);
727 list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
728 list_del(&evt->list);
731 spin_unlock_irq(&file->async_file.lock);
733 resp.comp_events_reported = uobj->comp_events_reported;
734 resp.async_events_reported = uobj->async_events_reported;
738 if (copy_to_user((void __user *) (unsigned long) cmd.response,
743 up(&ib_uverbs_idr_mutex);
745 return ret ? ret : in_len;
/*
 * CREATE_QP verb: resolve the PD, send/recv CQs, and (optionally) SRQ
 * handles — all ownership-checked against the caller's context — build
 * an ib_qp_init_attr from the command, create the QP via the driver,
 * take references on the PD/CQs/SRQ, assign an idr handle, and return
 * the QP number and handle.  Everything from the lookups onward runs
 * under ib_uverbs_idr_mutex.
 */
748 ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
749 const char __user *buf, int in_len,
752 struct ib_uverbs_create_qp cmd;
753 struct ib_uverbs_create_qp_resp resp;
754 struct ib_udata udata;
755 struct ib_uevent_object *uobj;
757 struct ib_cq *scq, *rcq;
760 struct ib_qp_init_attr attr;
763 if (out_len < sizeof resp)
766 if (copy_from_user(&cmd, buf, sizeof cmd))
769 INIT_UDATA(&udata, buf + sizeof cmd,
770 (unsigned long) cmd.response + sizeof resp,
771 in_len - sizeof cmd, out_len - sizeof resp);
773 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
777 down(&ib_uverbs_idr_mutex);
779 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
780 scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle);
781 rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle);
782 srq = cmd.is_srq ? idr_find(&ib_uverbs_srq_idr, cmd.srq_handle) : NULL;
/* every referenced object must exist and belong to this context */
784 if (!pd || pd->uobject->context != file->ucontext ||
785 !scq || scq->uobject->context != file->ucontext ||
786 !rcq || rcq->uobject->context != file->ucontext ||
787 (cmd.is_srq && (!srq || srq->uobject->context != file->ucontext))) {
792 attr.event_handler = ib_uverbs_qp_event_handler;
793 attr.qp_context = file;
797 attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
798 attr.qp_type = cmd.qp_type;
800 attr.cap.max_send_wr = cmd.max_send_wr;
801 attr.cap.max_recv_wr = cmd.max_recv_wr;
802 attr.cap.max_send_sge = cmd.max_send_sge;
803 attr.cap.max_recv_sge = cmd.max_recv_sge;
804 attr.cap.max_inline_data = cmd.max_inline_data;
806 uobj->uobject.user_handle = cmd.user_handle;
807 uobj->uobject.context = file->ucontext;
808 uobj->events_reported = 0;
809 INIT_LIST_HEAD(&uobj->event_list);
811 qp = pd->device->create_qp(pd, &attr, &udata);
817 qp->device = pd->device;
819 qp->send_cq = attr.send_cq;
820 qp->recv_cq = attr.recv_cq;
822 qp->uobject = &uobj->uobject;
823 qp->event_handler = attr.event_handler;
824 qp->qp_context = attr.qp_context;
825 qp->qp_type = attr.qp_type;
/* hold the PD, both CQs (and the SRQ, guard on missing line) alive */
826 atomic_inc(&pd->usecnt);
827 atomic_inc(&attr.send_cq->usecnt);
828 atomic_inc(&attr.recv_cq->usecnt);
830 atomic_inc(&attr.srq->usecnt);
832 memset(&resp, 0, sizeof resp);
833 resp.qpn = qp->qp_num;
836 if (!idr_pre_get(&ib_uverbs_qp_idr, GFP_KERNEL)) {
841 ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uobject.id);
848 resp.qp_handle = uobj->uobject.id;
850 spin_lock_irq(&file->ucontext->lock);
851 list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list);
852 spin_unlock_irq(&file->ucontext->lock);
854 if (copy_to_user((void __user *) (unsigned long) cmd.response,
855 &resp, sizeof resp)) {
860 up(&ib_uverbs_idr_mutex);
/* unwind: remove the QP from the context's list */
865 spin_lock_irq(&file->ucontext->lock);
866 list_del(&uobj->uobject.list);
867 spin_unlock_irq(&file->ucontext->lock);
873 up(&ib_uverbs_idr_mutex);
/*
 * MODIFY_QP verb: copy the userspace modify command into a kernel
 * ib_qp_attr (heap-allocated — the structure is too large for the
 * stack), look up the QP by handle (ownership-checked against the
 * caller's context, under ib_uverbs_idr_mutex), and apply the
 * transition with ib_modify_qp() honoring cmd.attr_mask.
 *
 * Fix: the alternate-path P_Key index was copied from the primary
 * path's cmd.pkey_index (copy-paste error); it must come from
 * cmd.alt_pkey_index, matching the mainline kernel.
 */
879 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
880 const char __user *buf, int in_len,
883 struct ib_uverbs_modify_qp cmd;
885 struct ib_qp_attr *attr;
888 if (copy_from_user(&cmd, buf, sizeof cmd))
891 attr = kmalloc(sizeof *attr, GFP_KERNEL);
895 down(&ib_uverbs_idr_mutex);
897 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
898 if (!qp || qp->uobject->context != file->ucontext) {
903 attr->qp_state = cmd.qp_state;
904 attr->cur_qp_state = cmd.cur_qp_state;
905 attr->path_mtu = cmd.path_mtu;
906 attr->path_mig_state = cmd.path_mig_state;
907 attr->qkey = cmd.qkey;
908 attr->rq_psn = cmd.rq_psn;
909 attr->sq_psn = cmd.sq_psn;
910 attr->dest_qp_num = cmd.dest_qp_num;
911 attr->qp_access_flags = cmd.qp_access_flags;
912 attr->pkey_index = cmd.pkey_index;
913 attr->alt_pkey_index = cmd.alt_pkey_index;
914 attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
915 attr->max_rd_atomic = cmd.max_rd_atomic;
916 attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
917 attr->min_rnr_timer = cmd.min_rnr_timer;
918 attr->port_num = cmd.port_num;
919 attr->timeout = cmd.timeout;
920 attr->retry_cnt = cmd.retry_cnt;
921 attr->rnr_retry = cmd.rnr_retry;
922 attr->alt_port_num = cmd.alt_port_num;
923 attr->alt_timeout = cmd.alt_timeout;
/* primary-path address vector */
925 memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
926 attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
927 attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
928 attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
929 attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
930 attr->ah_attr.dlid = cmd.dest.dlid;
931 attr->ah_attr.sl = cmd.dest.sl;
932 attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
933 attr->ah_attr.static_rate = cmd.dest.static_rate;
934 attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
935 attr->ah_attr.port_num = cmd.dest.port_num;
/* alternate-path address vector (for path migration) */
937 memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
938 attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
939 attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
940 attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
941 attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
942 attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
943 attr->alt_ah_attr.sl = cmd.alt_dest.sl;
944 attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
945 attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
946 attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
947 attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
949 ret = ib_modify_qp(qp, attr, cmd.attr_mask);
956 up(&ib_uverbs_idr_mutex);
/*
 * DESTROY_QP verb: destroy the QP (ownership-checked), remove its
 * handle and list linkage, drain any queued async events for it, and
 * report events_reported back so userspace can reconcile its counters.
 */
962 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
963 const char __user *buf, int in_len,
966 struct ib_uverbs_destroy_qp cmd;
967 struct ib_uverbs_destroy_qp_resp resp;
969 struct ib_uevent_object *uobj;
970 struct ib_uverbs_event *evt, *tmp;
973 if (copy_from_user(&cmd, buf, sizeof cmd))
976 memset(&resp, 0, sizeof resp);
978 down(&ib_uverbs_idr_mutex);
980 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
981 if (!qp || qp->uobject->context != file->ucontext)
984 uobj = container_of(qp->uobject, struct ib_uevent_object, uobject);
986 ret = ib_destroy_qp(qp);
990 idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);
992 spin_lock_irq(&file->ucontext->lock);
993 list_del(&uobj->uobject.list);
994 spin_unlock_irq(&file->ucontext->lock);
/* flush async events still queued for this QP */
996 spin_lock_irq(&file->async_file.lock);
997 list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
998 list_del(&evt->list);
1001 spin_unlock_irq(&file->async_file.lock);
1003 resp.events_reported = uobj->events_reported;
1007 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1008 &resp, sizeof resp))
1012 up(&ib_uverbs_idr_mutex);
1014 return ret ? ret : in_len;
/*
 * ATTACH_MCAST verb: attach the QP (ownership-checked) to the
 * multicast group identified by cmd.gid / cmd.mlid.
 */
1017 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
1018 const char __user *buf, int in_len,
1021 struct ib_uverbs_attach_mcast cmd;
1025 if (copy_from_user(&cmd, buf, sizeof cmd))
1028 down(&ib_uverbs_idr_mutex);
1030 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
1031 if (qp && qp->uobject->context == file->ucontext)
1032 ret = ib_attach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
1034 up(&ib_uverbs_idr_mutex);
1036 return ret ? ret : in_len;
/*
 * DETACH_MCAST verb: mirror of ATTACH_MCAST — detach the QP
 * (ownership-checked) from the multicast group cmd.gid / cmd.mlid.
 */
1039 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
1040 const char __user *buf, int in_len,
1043 struct ib_uverbs_detach_mcast cmd;
1047 if (copy_from_user(&cmd, buf, sizeof cmd))
1050 down(&ib_uverbs_idr_mutex);
1052 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
1053 if (qp && qp->uobject->context == file->ucontext)
1054 ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
1056 up(&ib_uverbs_idr_mutex);
1058 return ret ? ret : in_len;
/*
 * CREATE_SRQ verb: create a shared receive queue on the given PD
 * (ownership-checked, under ib_uverbs_idr_mutex) via the driver's
 * create_srq(), take a PD reference, assign an idr handle, link it on
 * the context's srq_list, and return the handle.  Unwind path at the
 * tail unlinks the uobject and destroys the SRQ.
 */
1061 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
1062 const char __user *buf, int in_len,
1065 struct ib_uverbs_create_srq cmd;
1066 struct ib_uverbs_create_srq_resp resp;
1067 struct ib_udata udata;
1068 struct ib_uevent_object *uobj;
1071 struct ib_srq_init_attr attr;
1074 if (out_len < sizeof resp)
1077 if (copy_from_user(&cmd, buf, sizeof cmd))
1080 INIT_UDATA(&udata, buf + sizeof cmd,
1081 (unsigned long) cmd.response + sizeof resp,
1082 in_len - sizeof cmd, out_len - sizeof resp);
1084 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
1088 down(&ib_uverbs_idr_mutex);
1090 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
1092 if (!pd || pd->uobject->context != file->ucontext) {
1097 attr.event_handler = ib_uverbs_srq_event_handler;
1098 attr.srq_context = file;
1099 attr.attr.max_wr = cmd.max_wr;
1100 attr.attr.max_sge = cmd.max_sge;
1101 attr.attr.srq_limit = cmd.srq_limit;
1103 uobj->uobject.user_handle = cmd.user_handle;
1104 uobj->uobject.context = file->ucontext;
1105 uobj->events_reported = 0;
1106 INIT_LIST_HEAD(&uobj->event_list);
1108 srq = pd->device->create_srq(pd, &attr, &udata);
1114 srq->device = pd->device;
1116 srq->uobject = &uobj->uobject;
1117 srq->event_handler = attr.event_handler;
1118 srq->srq_context = attr.srq_context;
/* the SRQ pins its PD for its lifetime */
1119 atomic_inc(&pd->usecnt);
1120 atomic_set(&srq->usecnt, 0);
1122 memset(&resp, 0, sizeof resp);
1125 if (!idr_pre_get(&ib_uverbs_srq_idr, GFP_KERNEL)) {
1130 ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->uobject.id);
1137 resp.srq_handle = uobj->uobject.id;
1139 spin_lock_irq(&file->ucontext->lock);
1140 list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);
1141 spin_unlock_irq(&file->ucontext->lock);
1143 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1144 &resp, sizeof resp)) {
1149 up(&ib_uverbs_idr_mutex);
/* unwind: unlink from the context and destroy the new SRQ */
1154 spin_lock_irq(&file->ucontext->lock);
1155 list_del(&uobj->uobject.list);
1156 spin_unlock_irq(&file->ucontext->lock);
1159 ib_destroy_srq(srq);
1162 up(&ib_uverbs_idr_mutex);
/*
 * MODIFY_SRQ verb: look up the SRQ by handle (ownership-checked under
 * ib_uverbs_idr_mutex) and apply the new attributes with
 * ib_modify_srq() honoring cmd.attr_mask.
 */
1168 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
1169 const char __user *buf, int in_len,
1172 struct ib_uverbs_modify_srq cmd;
1174 struct ib_srq_attr attr;
1177 if (copy_from_user(&cmd, buf, sizeof cmd))
1180 down(&ib_uverbs_idr_mutex);
1182 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
1183 if (!srq || srq->uobject->context != file->ucontext) {
1188 attr.max_wr = cmd.max_wr;
1189 attr.max_sge = cmd.max_sge;
1190 attr.srq_limit = cmd.srq_limit;
1192 ret = ib_modify_srq(srq, &attr, cmd.attr_mask);
1195 up(&ib_uverbs_idr_mutex);
1197 return ret ? ret : in_len;
/*
 * DESTROY_SRQ verb: destroy the SRQ (ownership-checked), remove its
 * handle and list linkage, drain any queued async events for it, and
 * report events_reported back to userspace.
 */
1200 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
1201 const char __user *buf, int in_len,
1204 struct ib_uverbs_destroy_srq cmd;
1205 struct ib_uverbs_destroy_srq_resp resp;
1207 struct ib_uevent_object *uobj;
1208 struct ib_uverbs_event *evt, *tmp;
1211 if (copy_from_user(&cmd, buf, sizeof cmd))
1214 down(&ib_uverbs_idr_mutex);
1216 memset(&resp, 0, sizeof resp);
1218 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
1219 if (!srq || srq->uobject->context != file->ucontext)
1222 uobj = container_of(srq->uobject, struct ib_uevent_object, uobject);
1224 ret = ib_destroy_srq(srq);
1228 idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);
1230 spin_lock_irq(&file->ucontext->lock);
1231 list_del(&uobj->uobject.list);
1232 spin_unlock_irq(&file->ucontext->lock);
/* flush async events still queued for this SRQ */
1234 spin_lock_irq(&file->async_file.lock);
1235 list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
1236 list_del(&evt->list);
1239 spin_unlock_irq(&file->async_file.lock);
1241 resp.events_reported = uobj->events_reported;
1245 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1246 &resp, sizeof resp))
1250 up(&ib_uverbs_idr_mutex);
1252 return ret ? ret : in_len;