/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/uaccess.h>

#include "uverbs.h"
#include "core_priv.h"
struct uverbs_lock_class {
	struct lock_class_key	key;
	char			name[16];
};
static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class	= { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr write operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.  For read operations, lookups are done under
 *   rcu_read_lock(), and similarly the kref reference is grabbed
 *   before the rcu_read_unlock().
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
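/*
 * Illustrative sketch of the reader-side pattern this scheme implies
 * (a hypothetical caller, not code from this file):
 *
 *	uobj = idr_read_uobj(&ib_uverbs_cq_idr, handle, context, 0);
 *	if (!uobj)
 *		return -EINVAL;	  - destroyed, dead, or wrong context
 *	cq = uobj->object;
 *	... use cq with uobj->mutex held for reading ...
 *	put_uobj_read(uobj);	  - drops the rwsem, then the kref
 */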
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context     = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
	uobj->live        = 0;
}
static void release_uobj(struct kref *kref)
{
	kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
}

static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}
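/*
 * idr_add_uobj() below uses the standard idr_preload()/idr_alloc()
 * pattern: preallocate with GFP_KERNEL outside the spinlock, then
 * allocate the id with GFP_NOWAIT while ib_uverbs_idr_lock is held,
 * so the allocation itself never sleeps under the lock.
 */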
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);

	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}
void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}
static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	rcu_read_lock();
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	rcu_read_unlock();

	return uobj;
}
static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);

	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}
static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}
static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ? uobj->object : NULL;
}
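/*
 * The helpers below are thin typed wrappers around idr_read_obj() and
 * idr_write_uobj(): each one resolves a user handle in the idr for one
 * object type and leaves the uobject's rwsem held (read or write) for
 * the duration of the command; the matching put_*_read()/put_*_write()
 * releases it.
 */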
static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
	return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}

static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}
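/*
 * Each ib_uverbs_* handler below implements one write()-based uverbs
 * command: a fixed command struct is copied in from "buf", the response
 * is written to the user pointer carried in cmd.response, and any bytes
 * beyond the fixed structs are handed to the driver as ib_udata.  On
 * success a handler returns in_len, mirroring a successful write().
 */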
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata                   udata;
	struct ib_ucontext		 *ucontext;
	struct file			 *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ib_dev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	INIT_LIST_HEAD(&ucontext->xrcd_list);
	INIT_LIST_HEAD(&ucontext->rule_list);
	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;
#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}
static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver		= attr->fw_ver;
	resp->node_guid		= ib_dev->node_guid;
	resp->sys_image_guid	= attr->sys_image_guid;
	resp->max_mr_size	= attr->max_mr_size;
	resp->page_size_cap	= attr->page_size_cap;
	resp->vendor_id		= attr->vendor_id;
	resp->vendor_part_id	= attr->vendor_part_id;
	resp->hw_ver		= attr->hw_ver;
	resp->max_qp		= attr->max_qp;
	resp->max_qp_wr		= attr->max_qp_wr;
	resp->device_cap_flags	= lower_32_bits(attr->device_cap_flags);
	resp->max_sge		= attr->max_sge;
	resp->max_sge_rd	= attr->max_sge_rd;
	resp->max_cq		= attr->max_cq;
	resp->max_cqe		= attr->max_cqe;
	resp->max_mr		= attr->max_mr;
	resp->max_pd		= attr->max_pd;
	resp->max_qp_rd_atom	= attr->max_qp_rd_atom;
	resp->max_ee_rd_atom	= attr->max_ee_rd_atom;
	resp->max_res_rd_atom	= attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom	= attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom	= attr->max_ee_init_rd_atom;
	resp->atomic_cap		= attr->atomic_cap;
	resp->max_ee			= attr->max_ee;
	resp->max_rdd			= attr->max_rdd;
	resp->max_mw			= attr->max_mw;
	resp->max_raw_ipv6_qp		= attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp		= attr->max_raw_ethy_qp;
	resp->max_mcast_grp		= attr->max_mcast_grp;
	resp->max_mcast_qp_attach	= attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach	= attr->max_total_mcast_qp_attach;
	resp->max_ah			= attr->max_ah;
	resp->max_fmr			= attr->max_fmr;
	resp->max_map_per_fmr		= attr->max_map_per_fmr;
	resp->max_srq			= attr->max_srq;
	resp->max_srq_wr		= attr->max_srq_wr;
	resp->max_srq_sge		= attr->max_srq_sge;
	resp->max_pkeys			= attr->max_pkeys;
	resp->local_ca_ack_delay	= attr->local_ca_ack_delay;
	resp->phys_port_cnt		= ib_dev->phys_port_cnt;
}
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state		= attr.state;
	resp.max_mtu		= attr.max_mtu;
	resp.active_mtu		= attr.active_mtu;
	resp.gid_tbl_len	= attr.gid_tbl_len;
	resp.port_cap_flags	= attr.port_cap_flags;
	resp.max_msg_sz		= attr.max_msg_sz;
	resp.bad_pkey_cntr	= attr.bad_pkey_cntr;
	resp.qkey_viol_cntr	= attr.qkey_viol_cntr;
	resp.pkey_tbl_len	= attr.pkey_tbl_len;
	resp.lid		= attr.lid;
	resp.sm_lid		= attr.sm_lid;
	resp.lmc		= attr.lmc;
	resp.max_vl_num		= attr.max_vl_num;
	resp.sm_sl		= attr.sm_sl;
	resp.subnet_timeout	= attr.subnet_timeout;
	resp.init_type_reply	= attr.init_type_reply;
	resp.active_width	= attr.active_width;
	resp.active_speed	= attr.active_speed;
	resp.phys_state		= attr.phys_state;
	resp.link_layer		= rdma_port_get_link_layer(ib_dev,
							   cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	down_write(&uobj->mutex);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject          *uobj;
	struct ib_pd               *pd;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	pd = uobj->object;

	if (atomic_read(&pd->usecnt)) {
		ret = -EBUSY;
		goto err_put;
	}

	ret = pd->device->dealloc_pd(uobj->object);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
	if (ret)
		goto err_put;

	uobj->live = 0;
	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;

err_put:
	put_uobj_write(uobj);
	return ret;
}
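/*
 * XRC domains are shareable across processes: the xrcd_tree below maps
 * the inode of the file the user passed in to the ib_xrcd created for
 * it, so a second open of the same file yields the existing XRCD
 * rather than a new one.  The tree is an rb-tree keyed simply by the
 * inode pointer; xrcd_tree_mutex serializes all updates.
 */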
struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd  = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}
static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}
static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd	cmd;
	struct ib_uverbs_open_xrcd_resp	resp;
	struct ib_udata			udata;
	struct ib_uxrcd_object         *obj;
	struct ib_xrcd                 *xrcd = NULL;
	struct fd			f = {NULL, 0};
	struct inode                   *inode = NULL;
	int				ret = 0;
	int				new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode  = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject	   *uobj;
	struct ib_xrcd             *xrcd = NULL;
	struct inode               *inode = NULL;
	struct ib_uxrcd_object     *obj;
	int			    live;
	int                         ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd  = uobj->object;
	inode = xrcd->inode;
	obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	live = uobj->live;
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}
void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}
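/*
 * ib_uverbs_reg_mr() requires cmd.start (the user VA) and cmd.hca_va
 * (the I/O VA the HCA will use) to share the same offset within a
 * page: the HCA maps whole pages, so only the page-aligned part of
 * the two addresses may differ.  ODP access additionally requires
 * the device to advertise IB_DEVICE_ON_DEMAND_PAGING.
 */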
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr      cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_pd                *pd = NULL;
	struct ib_mr                *mr;
	struct ib_pd		    *old_pd;
	int                          ret;
	struct ib_uobject	    *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		put_pd_read(pd);

put_uobjs:
	put_uobj_write(mr->uobject);

	return ret;
}
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr             *mr;
	struct ib_uobject	 *uobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw      cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	struct ib_mw                  *mw;
	struct ib_udata		       udata;
	int                            ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device  = pd->device;
	mw->pd      = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey      = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	uverbs_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw               *mw;
	struct ib_uobject	   *uobj;
	int                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = uverbs_dealloc_mw(mw);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel	   cmd;
	struct ib_uverbs_create_comp_channel_resp  resp;
	struct file				  *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}
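/*
 * create_cq() is shared between the legacy and the extended
 * (ib_uverbs_ex_*) create-CQ commands.  The caller passes the size of
 * the command struct it actually received (cmd_sz), so optional
 * trailing fields such as cmd->flags are only read when the user
 * really supplied them, plus a callback (cb) that knows how much of
 * the response to copy back to user space.
 */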
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object           *obj;
	struct ib_uverbs_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file	   = file;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr,
			       file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe       = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return obj;

err_cb:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);

	return ERR_PTR(ret);
}
static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_ex_create_cq	cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 ucore;
	struct ib_udata                 uhw;
	struct ib_ucq_object           *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));

	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}
static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}
int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq  cmd;
	struct ib_ucq_object           *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq	cmd;
	struct ib_uverbs_resize_cq_resp	resp;
	struct ib_udata                 udata;
	struct ib_cq			*cq;
	int				ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}
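/*
 * Completions are returned to user space as struct ib_uverbs_wc, not
 * as the kernel's struct ib_wc, so copy_wc_to_user() marshals the
 * fields explicitly; note the in-kernel wc carries a qp pointer while
 * the ABI struct carries the qp_num.
 */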
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id		= wc->wr_id;
	tmp.status		= wc->status;
	tmp.opcode		= wc->opcode;
	tmp.vendor_err		= wc->vendor_err;
	tmp.byte_len		= wc->byte_len;
	tmp.ex.imm_data		= (__u32 __force) wc->ex.imm_data;
	tmp.qp_num		= wc->qp->qp_num;
	tmp.src_qp		= wc->src_qp;
	tmp.wc_flags		= wc->wc_flags;
	tmp.pkey_index		= wc->pkey_index;
	tmp.slid		= wc->slid;
	tmp.sl			= wc->sl;
	tmp.dlid_path_bits	= wc->dlid_path_bits;
	tmp.port_num		= wc->port_num;
	tmp.reserved		= 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq       cmd;
	struct ib_uverbs_poll_cq_resp  resp;
	u8 __user                     *header_ptr;
	u8 __user                     *data_ptr;
	struct ib_cq                  *cq;
	struct ib_wc                   wc;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject		*uobj;
	struct ib_cq			*cq;
	struct ib_ucq_object		*obj;
	struct ib_uverbs_event_file	*ev_file;
	int				 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq      = uobj->object;
	ev_file = cq->cq_context;
	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
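/*
 * create_qp() follows the same legacy/extended split as create_cq()
 * above.  Which objects get looked up depends on the QP type: an
 * XRC_TGT QP hangs off an XRCD (cmd->pd_handle carries the XRCD
 * handle), an XRC_INI QP has no receive side at all, and everything
 * else needs a PD, a send CQ, and optionally a distinct receive CQ
 * and an SRQ.  All of them are held for reading until the QP has been
 * created and the reference counts bumped.
 */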
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object		*obj;
	struct ib_device		*device;
	struct ib_pd			*pd = NULL;
	struct ib_xrcd			*xrcd = NULL;
	struct ib_uobject		*uninitialized_var(xrcd_uobj);
	struct ib_cq			*scq = NULL, *rcq = NULL;
	struct ib_srq			*srq = NULL;
	struct ib_qp			*qp;
	char				*buf;
	struct ib_qp_init_attr		attr;
	struct ib_uverbs_ex_create_qp_resp resp;
	int				ret;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext,
		  &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd = idr_read_xrcd(cmd->pd_handle, file->ucontext,
				     &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = idr_read_srq(cmd->srq_handle,
						   file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (cmd->recv_cq_handle != cmd->send_cq_handle) {
				rcq = idr_read_cq(cmd->recv_cq_handle,
						  file->ucontext, 0);
				if (!rcq) {
					ret = -EINVAL;
					goto err_put;
				}
			}
		}

		scq = idr_read_cq(cmd->send_cq_handle, file->ucontext, !!rcq);
		rcq = rcq ?: scq;
		pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
		if (!pd || !scq) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.xrcd	   = xrcd;
	attr.sq_sig_type   = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					       IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd->qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd->max_send_wr;
	attr.cap.max_recv_wr     = cmd->max_recv_wr;
	attr.cap.max_send_sge    = cmd->max_send_sge;
	attr.cap.max_recv_sge    = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported     = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS)) {
		ret = -EINVAL;
		goto err_put;
	}

	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, uhw);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		qp->real_qp	  = qp;
		qp->device	  = device;
		qp->pd		  = pd;
		qp->send_cq	  = attr.send_cq;
		qp->recv_cq	  = attr.recv_cq;
		qp->srq		  = attr.srq;
		qp->event_handler = attr.event_handler;
		qp->qp_context	  = attr.qp_context;
		qp->qp_type	  = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn             = qp->qp_num;
	resp.base.qp_handle       = obj->uevent.uobject.id;
	resp.base.max_recv_sge    = attr.cap.max_recv_sge;
	resp.base.max_send_sge    = attr.cap.max_send_sge;
	resp.base.max_recv_wr     = attr.cap.max_recv_wr;
	resp.base.max_send_wr     = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_cb:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_ex_create_qp	cmd_ex;
	struct ib_udata			ucore;
	struct ib_udata			uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int				err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
		   resp_size);
	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}
static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}
int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp        cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_xrcd		       *xrcd;
	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
	struct ib_qp                   *qp;
	struct ib_qp_open_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.qp_num        = cmd.qpn;
	attr.qp_type       = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn       = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                   *qp;
	struct ib_qp_attr              *attr;
	struct ib_qp_init_attr         *init_attr;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state		= attr->qp_state;
	resp.cur_qp_state	= attr->cur_qp_state;
	resp.path_mtu		= attr->path_mtu;
	resp.path_mig_state	= attr->path_mig_state;
	resp.qkey		= attr->qkey;
	resp.rq_psn		= attr->rq_psn;
	resp.sq_psn		= attr->sq_psn;
	resp.dest_qp_num	= attr->dest_qp_num;
	resp.qp_access_flags	= attr->qp_access_flags;
	resp.pkey_index		= attr->pkey_index;
	resp.alt_pkey_index	= attr->alt_pkey_index;
	resp.sq_draining	= attr->sq_draining;
	resp.max_rd_atomic	= attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer	= attr->min_rnr_timer;
	resp.port_num		= attr->port_num;
	resp.timeout		= attr->timeout;
	resp.retry_cnt		= attr->retry_cnt;
	resp.rnr_retry		= attr->rnr_retry;
	resp.alt_port_num	= attr->alt_port_num;
	resp.alt_timeout	= attr->alt_timeout;

	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label	 = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index	 = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit	 = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class	 = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid		 = attr->ah_attr.dlid;
	resp.dest.sl		 = attr->ah_attr.sl;
	resp.dest.src_path_bits	 = attr->ah_attr.src_path_bits;
	resp.dest.static_rate	 = attr->ah_attr.static_rate;
	resp.dest.is_global	 = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num	 = attr->ah_attr.port_num;

	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid	    = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl	    = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global	    = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num	    = attr->alt_ah_attr.port_num;

	resp.max_send_wr	= init_attr->cap.max_send_wr;
	resp.max_recv_wr	= init_attr->cap.max_recv_wr;
	resp.max_send_sge	= init_attr->cap.max_send_sge;
	resp.max_recv_sge	= init_attr->cap.max_recv_sge;
	resp.max_inline_data	= init_attr->cap.max_inline_data;
	resp.sq_sig_all		= init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}
/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata            udata;
	struct ib_qp              *qp;
	struct ib_qp_attr         *attr;
	int                        ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state		  = cmd.qp_state;
	attr->cur_qp_state	  = cmd.cur_qp_state;
	attr->path_mtu		  = cmd.path_mtu;
	attr->path_mig_state	  = cmd.path_mig_state;
	attr->qkey		  = cmd.qkey;
	attr->rq_psn		  = cmd.rq_psn;
	attr->sq_psn		  = cmd.sq_psn;
	attr->dest_qp_num	  = cmd.dest_qp_num;
	attr->qp_access_flags	  = cmd.qp_access_flags;
	attr->pkey_index	  = cmd.pkey_index;
	attr->alt_pkey_index	  = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic	  = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer	  = cmd.min_rnr_timer;
	attr->port_num		  = cmd.port_num;
	attr->timeout		  = cmd.timeout;
	attr->retry_cnt		  = cmd.retry_cnt;
	attr->rnr_retry		  = cmd.rnr_retry;
	attr->alt_port_num	  = cmd.alt_port_num;
	attr->alt_timeout	  = cmd.alt_timeout;

	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label	= cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index	= cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit	= cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class	= cmd.dest.traffic_class;
	attr->ah_attr.dlid		= cmd.dest.dlid;
	attr->ah_attr.sl		= cmd.dest.sl;
	attr->ah_attr.src_path_bits	= cmd.dest.src_path_bits;
	attr->ah_attr.static_rate	= cmd.dest.static_rate;
	attr->ah_attr.ah_flags		= cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num		= cmd.dest.port_num;

	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid		    = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl		    = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits	    = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate	    = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags	    = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num	    = cmd.alt_dest.port_num;

	if (qp->real_qp == qp) {
		ret = ib_resolve_eth_dmac(qp, attr, &cmd.attr_mask);
		if (ret)
			goto release_qp;
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	if (ret)
		goto release_qp;

	ret = in_len;

release_qp:
	put_qp_read(qp);

out:
	kfree(attr);

	return ret;
}
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject		*uobj;
	struct ib_qp			*qp;
	struct ib_uqp_object		*obj;
	int				 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp  = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (obj->uxrcd)
		atomic_dec(&obj->uxrcd->refcnt);

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
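/*
 * Work requests and their scatter/gather lists are carved out of a
 * single allocation: alloc_wr() rounds the wr struct up to the
 * alignment of struct ib_sge and appends room for num_sge entries,
 * so a wr's sg_list can simply point just past the wr itself.
 */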
static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
}
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	int				is_ud;
	ssize_t                         ret = -EINVAL;
	size_t                          next_size;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		if (is_ud) {
			struct ib_ud_wr *ud;

			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);
			if (!ud) {
				ret = -ENOMEM;
				goto out_put;
			}

			ud->ah = idr_read_ah(user_wr->wr.ud.ah, file->ucontext);
			if (!ud->ah) {
				kfree(ud);
				ret = -EINVAL;
				goto out_put;
			}
			ud->remote_qpn  = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

			next = &ud->wr;
		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);
			if (!rdma) {
				ret = -ENOMEM;
				goto out_put;
			}

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey        = user_wr->wr.rdma.rkey;

			next = &rdma->wr;
		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);
			if (!atomic) {
				ret = -ENOMEM;
				goto out_put;
			}

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap        = user_wr->wr.atomic.swap;
			atomic->rkey        = user_wr->wr.atomic.rkey;

			next = &atomic->wr;
		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);
			if (!next) {
				ret = -ENOMEM;
				goto out_put;
			}
		} else {
			ret = -EINVAL;
			goto out_put;
		}

		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(next_size, sizeof(struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	while (wr) {
		if (is_ud && ud_wr(wr)->ah)
			put_ah_read(ud_wr(wr)->ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
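/*
 * ib_uverbs_unmarshall_recv() converts a user array of
 * struct ib_uverbs_recv_wr (each wqe_size bytes, followed by the sge
 * array for all wrs) into a kernel-side linked list of
 * struct ib_recv_wr.  It is shared by post_recv and post_srq_recv;
 * the caller frees the list after posting.
 */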
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next, *bad_wr;
	struct ib_qp                   *qp;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next, *bad_wr;
	struct ib_srq                      *srq;
	ssize_t                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah      cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject              *uobj;
	struct ib_pd                   *pd;
	struct ib_ah                   *ah;
	struct ib_ah_attr               attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.dlid              = cmd.attr.dlid;
	attr.sl                = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num          = cmd.attr.port_num;
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memset(&attr.dmac, 0, sizeof(attr.dmac));
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}
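/*
 * Destroy an AH: take the uobject write-locked, destroy the verb object
 * first, and only unlink the uobject from the idr and the per-context
 * list once ib_destroy_ah() has succeeded.
 */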
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah               *ah;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
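/*
 * Attach a QP to a multicast group.  A per-QP mcast_list mirrors the
 * attachments so that duplicate requests can be detected and everything
 * can be detached again when the QP is torn down.
 */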
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* Attaching an already-attached group is a successful no-op. */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* Drop our bookkeeping entry for the detached group. */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}
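/*
 * Translate one userspace flow spec into its kernel counterpart.  Each
 * branch checks that the user-supplied size matches the kernel structure
 * exactly before copying the value/mask filter pair.
 */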
static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
		if (ib_spec->eth.size != kern_spec->eth.size)
			return -EINVAL;
		memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
		       sizeof(struct ib_flow_eth_filter));
		memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
		       sizeof(struct ib_flow_eth_filter));
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
		if (ib_spec->ipv4.size != kern_spec->ipv4.size)
			return -EINVAL;
		memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
		       sizeof(struct ib_flow_ipv4_filter));
		memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
		       sizeof(struct ib_flow_ipv4_filter));
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
		if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
			return -EINVAL;
		memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
		       sizeof(struct ib_flow_tcp_udp_filter));
		memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
		       sizeof(struct ib_flow_tcp_udp_filter));
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
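/*
 * Extended command: create a flow steering rule on a QP.  The
 * variable-length ib_uverbs_flow_attr is copied in, validated, and
 * converted spec by spec via kern_spec_to_ib_spec() before calling
 * ib_create_flow().  Requires CAP_NET_RAW.
 */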
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow      cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject                *uobj;
	struct ib_flow                   *flow_id;
	struct ib_uverbs_flow_attr       *kern_flow_attr;
	struct ib_flow_attr              *flow_attr;
	struct ib_qp                     *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if (!capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
		return -EINVAL;

	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj) {
		err = -ENOMEM;
		goto err_free_attr;
	}
	init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
	down_write(&uobj->mutex);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
	if (err)
		goto destroy_flow;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	put_qp_read(qp);
	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->rule_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
destroy_flow:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	put_qp_read(qp);
err_uobj:
	put_uobj_write(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}
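/* Extended command: tear down a flow steering rule by handle. */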
int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow cmd;
	struct ib_flow               *flow_id;
	struct ib_uobject            *uobj;
	int                           ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;
	flow_id = uobj->object;

	ret = ib_destroy_flow(flow_id);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return ret;
}
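/*
 * Common worker for the create_srq and create_xsrq commands.  For XRC
 * SRQs the XRCD and completion queue are looked up and reference-counted
 * in addition to the PD.
 */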
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->srq_type == IB_SRQT_XRC) {
		attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type      = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq   = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
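/*
 * Legacy create_srq entry point: build an ib_uverbs_create_xsrq command
 * with srq_type = IB_SRQT_BASIC and reuse the xsrq path above.
 */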
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_xsrq     xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	xcmd.response    = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type    = IB_SRQT_BASIC;
	xcmd.pd_handle   = cmd.pd_handle;
	xcmd.max_wr      = cmd.max_wr;
	xcmd.max_sge     = cmd.max_sge;
	xcmd.srq_limit   = cmd.srq_limit;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}
ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq     cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata             udata;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	put_srq_read(srq);

	return ret ? ret : in_len;
}
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                  *srq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	put_srq_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
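/*
 * Destroy an SRQ.  The SRQ type is sampled before ib_destroy_srq() frees
 * the object so that the XRCD reference can still be dropped for XRC
 * SRQs, and the count of asynchronous events reported on the SRQ is
 * returned to userspace.
 */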
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject                *uobj;
	struct ib_srq                    *srq;
	struct ib_uevent_object          *obj;
	int                               ret = -EINVAL;
	struct ib_usrq_object            *us;
	enum ib_srq_type                  srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	srq_type = srq->srq_type;

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (srq_type == IB_SRQT_XRC) {
		us = container_of(obj, struct ib_usrq_object, uevent);
		atomic_dec(&us->uxrcd->refcnt);
	}

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}
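/*
 * Extended query_device.  resp.response_length grows as each optional
 * field is filled in, so an older userspace with a smaller response
 * buffer simply receives a truncated (but still valid) response.
 */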
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp = { {0} };
	struct ib_uverbs_ex_query_device  cmd;
	struct ib_device_attr attr = {0};
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

	if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
		goto end;

	resp.device_cap_flags_ex = attr.device_cap_flags;
	resp.response_length += sizeof(resp.device_cap_flags_ex);

end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	return err;
}