IB/core: Add flags for on demand paging support
[linux-2.6-block.git] / drivers / infiniband / core / uverbs_cmd.c
CommitLineData
bc38a6ab
RD
1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
f7c6a7b5 3 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
eb9d3cd5 4 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
8bdb0e86 5 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
bc38a6ab
RD
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
bc38a6ab
RD
34 */
35
6b73597e 36#include <linux/file.h>
70a30e16 37#include <linux/fs.h>
5a0e3ad6 38#include <linux/slab.h>
6b73597e 39
bc38a6ab
RD
40#include <asm/uaccess.h>
41
42#include "uverbs.h"
ed4c54e5 43#include "core_priv.h"
bc38a6ab 44
3bea57a5
RD
/*
 * Per-object-type lockdep class.  Each uverbs object type gets its own
 * lock_class_key and name so that lockdep can tell apart the per-object
 * rwsems of different types (e.g. holding a PD's rwsem while taking a
 * QP's) instead of reporting false-positive deadlocks.
 */
struct uverbs_lock_class {
	struct lock_class_key	key;
	char			name[16];
};

static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class	= { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
bc38a6ab 59
9ead190b
RD
60/*
61 * The ib_uobject locking scheme is as follows:
62 *
63 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
64 * needs to be held during all idr operations. When an object is
65 * looked up, a reference must be taken on the object's kref before
66 * dropping this lock.
67 *
68 * - Each object also has an rwsem. This rwsem must be held for
69 * reading while an operation that uses the object is performed.
70 * For example, while registering an MR, the associated PD's
71 * uobject.mutex must be held for reading. The rwsem must be held
72 * for writing while initializing or destroying an object.
73 *
74 * - In addition, each object has a "live" flag. If this flag is not
75 * set, then lookups of the object will fail even if it is found in
76 * the idr. This handles a reader that blocks and does not acquire
77 * the rwsem until after the object is destroyed. The destroy
78 * operation will set the live flag to 0 and then drop the rwsem;
79 * this will allow the reader to acquire the rwsem, see that the
80 * live flag is 0, and then drop the rwsem and its reference to
81 * object. The underlying storage will not be freed until the last
82 * reference to the object is dropped.
83 */
84
/*
 * Initialize a userspace object: record the caller-supplied handle and
 * owning ucontext, set up the kref and per-object rwsem, and clear the
 * "live" flag so lookups fail until creation completes (see the locking
 * scheme comment above).
 */
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context     = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	/* Give the rwsem the type-specific lockdep class passed in. */
	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
	uobj->live = 0;
}
95
/* kref release callback: frees the uobject's storage on last put. */
static void release_uobj(struct kref *kref)
{
	kfree(container_of(kref, struct ib_uobject, ref));
}
100
/* Drop a reference; the uobject is freed when the last reference goes. */
static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}
105
/* Release a read-locked uobject: drop the rwsem, then the reference. */
static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}
111
/* Release a write-locked uobject: drop the rwsem, then the reference. */
static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}
117
/*
 * Insert @uobj into @idr and store the allocated id in uobj->id.
 *
 * idr_preload() pre-allocates outside the lock so that the actual
 * idr_alloc() can use GFP_NOWAIT while holding the ib_uverbs_idr_lock
 * spinlock.  Returns 0 on success or a negative errno.
 */
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);

	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}
134
9ead190b
RD
/* Remove @uobj's id from @idr under the global uverbs idr spinlock. */
void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}
141
/*
 * Look up an object by id and take a reference on it.
 *
 * The object is returned only if it belongs to @context, so a process
 * cannot reach another process's handles through the shared idr.  The
 * kref is taken before the idr spinlock is dropped, per the locking
 * scheme comment above.  Returns NULL if not found or owned by a
 * different ucontext.
 */
static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	spin_lock(&ib_uverbs_idr_lock);
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	spin_unlock(&ib_uverbs_idr_lock);

	return uobj;
}
159
/*
 * Look up an object and lock it for reading.
 *
 * @nested selects down_read_nested() with SINGLE_DEPTH_NESTING for
 * callers that read-lock two objects of the same lock class at once.
 * An object whose "live" flag is clear has already been destroyed, so
 * the lock and reference are dropped and NULL is returned.
 */
static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}
180
/*
 * Look up an object and lock it for writing (used by destroy/modify
 * paths).  As with idr_read_uobj(), a non-live object is treated as
 * already destroyed and NULL is returned.
 */
static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}
198
1ccf6aa1
RD
/*
 * Convenience wrapper: look up and read-lock a uobject, returning the
 * driver object it wraps (uobj->object) or NULL on failure.
 */
static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ? uobj->object : NULL;
}
207
/*
 * Type-specific lookup/put wrappers.  Each idr_read_*() returns the
 * driver object locked for reading (or NULL), and the matching
 * put_*_read() drops the lock and the reference taken at lookup.
 * Only the QP has a write variant here, used by the multicast
 * attach/detach paths that modify the QP's uobject state.
 */
static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

/* @nested: set when a second CQ of the same lock class is already held. */
static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
	return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}

/* Also hands back the uobject itself; XRCD callers need it for refcounting. */
static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}
282
bc38a6ab
RD
/*
 * GET_CONTEXT command: create the per-process ib_ucontext and the async
 * event file, and return the async event fd to userspace.
 *
 * file->mutex is held for the whole operation so only one context can
 * ever be attached to a uverbs file.  Note the ordering on the success
 * path: the fd is only made visible via fd_install() after every step
 * that can fail, so the error paths never have to revoke a live fd.
 */
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata                   udata;
	struct ib_device                 *ibdev = file->device->ib_dev;
	struct ib_ucontext		 *ucontext;
	struct file			 *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	/* A ucontext may be created only once per uverbs file. */
	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ibdev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	INIT_LIST_HEAD(&ucontext->xrcd_list);
	INIT_LIST_HEAD(&ucontext->rule_list);
	ucontext->closing = 0;

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->async_file = filp->private_data;

	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
			      ib_uverbs_event_handler);
	ret = ib_register_event_handler(&file->event_handler);
	if (ret)
		goto err_file;

	/* The event handler and the ucontext each pin their owner. */
	kref_get(&file->async_file->ref);
	kref_get(&file->ref);
	file->ucontext = ucontext;

	/* Publish the fd only now that nothing below can fail. */
	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	ibdev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}
380
5a77abf9
EC
/*
 * Fill a query_device response from the kernel's ib_device_attr.
 * node_guid and phys_port_cnt come from the ib_device itself rather
 * than the queried attributes.
 */
static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver		= attr->fw_ver;
	resp->node_guid		= file->device->ib_dev->node_guid;
	resp->sys_image_guid	= attr->sys_image_guid;
	resp->max_mr_size	= attr->max_mr_size;
	resp->page_size_cap	= attr->page_size_cap;
	resp->vendor_id		= attr->vendor_id;
	resp->vendor_part_id	= attr->vendor_part_id;
	resp->hw_ver		= attr->hw_ver;
	resp->max_qp		= attr->max_qp;
	resp->max_qp_wr		= attr->max_qp_wr;
	resp->device_cap_flags	= attr->device_cap_flags;
	resp->max_sge		= attr->max_sge;
	resp->max_sge_rd	= attr->max_sge_rd;
	resp->max_cq		= attr->max_cq;
	resp->max_cqe		= attr->max_cqe;
	resp->max_mr		= attr->max_mr;
	resp->max_pd		= attr->max_pd;
	resp->max_qp_rd_atom	= attr->max_qp_rd_atom;
	resp->max_ee_rd_atom	= attr->max_ee_rd_atom;
	resp->max_res_rd_atom	= attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom	= attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom	= attr->max_ee_init_rd_atom;
	resp->atomic_cap		= attr->atomic_cap;
	resp->max_ee			= attr->max_ee;
	resp->max_rdd			= attr->max_rdd;
	resp->max_mw			= attr->max_mw;
	resp->max_raw_ipv6_qp		= attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp		= attr->max_raw_ethy_qp;
	resp->max_mcast_grp		= attr->max_mcast_grp;
	resp->max_mcast_qp_attach	= attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach	= attr->max_total_mcast_qp_attach;
	resp->max_ah			= attr->max_ah;
	resp->max_fmr			= attr->max_fmr;
	resp->max_map_per_fmr		= attr->max_map_per_fmr;
	resp->max_srq			= attr->max_srq;
	resp->max_srq_wr		= attr->max_srq_wr;
	resp->max_srq_sge		= attr->max_srq_sge;
	resp->max_pkeys			= attr->max_pkeys;
	resp->local_ca_ack_delay	= attr->local_ca_ack_delay;
	resp->phys_port_cnt		= file->device->ib_dev->phys_port_cnt;
}
426
bc38a6ab
RD
/*
 * QUERY_DEVICE command: query the HCA's attributes and copy them back
 * to userspace.  The response is zeroed first so reserved/unwritten
 * fields do not leak kernel stack contents.
 */
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr              attr;
	int                                ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, &resp, &attr);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
455
/*
 * QUERY_PORT command: query attributes of one HCA port and copy them
 * back to userspace.  The link layer is derived separately via
 * rdma_port_get_link_layer() since ib_port_attr does not carry it.
 */
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state 	     = attr.state;
	resp.max_mtu 	     = attr.max_mtu;
	resp.active_mtu      = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = attr.port_cap_flags;
	resp.max_msg_sz      = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;
	resp.lid 	     = attr.lid;
	resp.sm_lid 	     = attr.sm_lid;
	resp.lmc 	     = attr.lmc;
	resp.max_vl_num      = attr.max_vl_num;
	resp.sm_sl 	     = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state      = attr.phys_state;
	resp.link_layer      = rdma_port_get_link_layer(file->device->ib_dev,
							cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
505
bc38a6ab
RD
/*
 * ALLOC_PD command: allocate a protection domain for the caller's
 * ucontext and hand back an idr handle.
 *
 * Standard uverbs creation pattern: the new uobject is write-locked
 * from init through the end so no other thread can observe it until
 * uobj->live is set; error paths unwind in strict reverse order.
 */
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	down_write(&uobj->mutex);

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	/* Only now may lookups of this handle succeed. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}
579
/*
 * DEALLOC_PD command: destroy a protection domain.
 *
 * Standard uverbs destroy pattern: write-lock the uobject, destroy the
 * underlying object, clear "live" only if destruction succeeded, then
 * remove the handle from the idr and the per-context list.
 */
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	ret = ib_dealloc_pd(uobj->object);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
614
53d0bd1e
SH
/*
 * Per-device rbtree entry mapping an inode (the file userspace opened
 * to name an XRC domain) to the ib_xrcd shared through that inode.
 */
struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};
620
/*
 * Insert an inode -> xrcd mapping into the device's rbtree, keyed by
 * inode pointer value.  Takes an inode reference (igrab) that is
 * dropped in xrcd_table_delete().  Returns -EEXIST if the inode is
 * already mapped, -ENOMEM on allocation failure, 0 on success.
 * Caller holds dev->xrcd_tree_mutex.
 *
 * NOTE(review): igrab() can return NULL for an inode being evicted;
 * the return value is not checked here — confirm callers guarantee a
 * held reference (they obtained the inode from an open struct file).
 */
static int xrcd_table_insert(struct ib_uverbs_device *dev,
			    struct inode *inode,
			    struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd  = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}
655
656static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
657 struct inode *inode)
658{
659 struct xrcd_table_entry *entry;
660 struct rb_node *p = dev->xrcd_tree.rb_node;
661
662 while (p) {
663 entry = rb_entry(p, struct xrcd_table_entry, node);
664
665 if (inode < entry->inode)
666 p = p->rb_left;
667 else if (inode > entry->inode)
668 p = p->rb_right;
669 else
670 return entry;
671 }
672
673 return NULL;
674}
675
676static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
677{
678 struct xrcd_table_entry *entry;
679
680 entry = xrcd_table_search(dev, inode);
681 if (!entry)
682 return NULL;
683
684 return entry->xrcd;
685}
686
687static void xrcd_table_delete(struct ib_uverbs_device *dev,
688 struct inode *inode)
689{
690 struct xrcd_table_entry *entry;
691
692 entry = xrcd_table_search(dev, inode);
693 if (entry) {
694 iput(inode);
695 rb_erase(&entry->node, &dev->xrcd_tree);
696 kfree(entry);
697 }
698}
699
/*
 * OPEN_XRCD command: open (or create) an XRC domain.
 *
 * XRC domains are shared between processes through a file descriptor:
 * if cmd.fd names an open file, its inode is used as the sharing key in
 * the per-device xrcd table.  An existing mapping is reused (unless
 * O_EXCL was asked for); otherwise a new XRCD is allocated and, when an
 * inode is involved, inserted into the table.  cmd.fd == -1 creates an
 * anonymous, non-shared XRCD.  xrcd->usecnt counts the inode-table
 * users so the domain lives until the last opener closes it.
 */
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd	cmd;
	struct ib_uverbs_open_xrcd_resp	resp;
	struct ib_udata			udata;
	struct ib_uxrcd_object         *obj;
	struct ib_xrcd                 *xrcd = NULL;
	struct fd			f = {NULL, 0};
	struct inode                   *inode = NULL;
	int				ret = 0;
	int				new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		/* No existing domain for this inode: allocate one. */
		xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
							file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode   = inode;
		xrcd->device  = file->device->ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}
836
/*
 * CLOSE_XRCD command: drop this process's handle on an XRC domain.
 *
 * The underlying XRCD is only deallocated when it is anonymous (no
 * backing inode) or when this was the last inode-table user
 * (usecnt drops to zero).  If the domain still has live child objects
 * (obj->refcnt, e.g. XRC QPs/SRQs) the close fails with -EBUSY.
 * On a failed driver dealloc the usecnt decrement is rolled back.
 */
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject           *uobj;
	struct ib_xrcd              *xrcd = NULL;
	struct inode                *inode = NULL;
	struct ib_uxrcd_object      *obj;
	int                         live;
	int                         ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd  = uobj->object;
	inode = xrcd->inode;
	obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	/* Snapshot "live" before dropping the write lock. */
	live = uobj->live;
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	/* The XRCD is gone; remove its inode mapping too. */
	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}
898
/*
 * Release one usecnt reference on @xrcd (cleanup path helper).  For an
 * inode-backed domain, deallocation and table removal happen only when
 * the last reference drops; an anonymous domain is freed immediately.
 */
void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}
913
bc38a6ab
RD
/*
 * REG_MR command: register a userspace memory region on a PD.
 *
 * Validates that the user VA and the requested HCA VA share the same
 * page offset and that the access flags are self-consistent.  For
 * on-demand paging (IB_ACCESS_ON_DEMAND) registrations the device must
 * advertise IB_DEVICE_ON_DEMAND_PAGING.  Follows the standard uverbs
 * creation pattern: the new uobject stays write-locked and non-live
 * until fully published.
 */
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	/* User VA and HCA VA must agree within the page. */
	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		struct ib_device_attr attr;

		/* ODP registration requires device support. */
		ret = ib_query_device(pd->device, &attr);
		if (ret || !(attr.device_cap_flags &
				IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
1022
7e6edb9b
MB
/*
 * REREG_MR command: change an existing MR's translation, access flags,
 * and/or PD, per the IB_MR_REREG_* bits in cmd.flags.
 *
 * The MR's uobject is held write-locked across the driver call.  A PD
 * is looked up only when IB_MR_REREG_PD is requested; on success the
 * PD usecnt bookkeeping is transferred from the old PD to the new one.
 * An MR that is currently bound (mr->usecnt != 0) cannot be reregistered.
 */
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr      cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata                udata;
	struct ib_pd                  *pd = NULL;
	struct ib_mr                  *mr;
	struct ib_pd		      *old_pd;
	int                            ret;
	struct ib_uobject	      *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	/* A translation change needs a sane, page-offset-aligned range. */
	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
			return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
			      file->ucontext);

	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	if (atomic_read(&mr->usecnt)) {
		ret = -EBUSY;
		goto put_uobj_pd;
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			/* Move the PD reference from old_pd to pd. */
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		put_pd_read(pd);

put_uobjs:

	put_uobj_write(mr->uobject);

	return ret;
}
1115
bc38a6ab
RD
/*
 * DEREG_MR command: deregister a memory region.
 *
 * Standard uverbs destroy pattern: write-lock the uobject, deregister,
 * clear "live" only on success, then remove the handle from the idr
 * and the per-context list before dropping the last reference.
 */
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr             *mr;
	struct ib_uobject	 *uobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
1153
6b52a12b
SM
1154ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
1155 const char __user *buf, int in_len,
1156 int out_len)
1157{
1158 struct ib_uverbs_alloc_mw cmd;
1159 struct ib_uverbs_alloc_mw_resp resp;
1160 struct ib_uobject *uobj;
1161 struct ib_pd *pd;
1162 struct ib_mw *mw;
1163 int ret;
1164
1165 if (out_len < sizeof(resp))
1166 return -ENOSPC;
1167
1168 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1169 return -EFAULT;
1170
1171 uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
1172 if (!uobj)
1173 return -ENOMEM;
1174
1175 init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
1176 down_write(&uobj->mutex);
1177
1178 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1179 if (!pd) {
1180 ret = -EINVAL;
1181 goto err_free;
1182 }
1183
1184 mw = pd->device->alloc_mw(pd, cmd.mw_type);
1185 if (IS_ERR(mw)) {
1186 ret = PTR_ERR(mw);
1187 goto err_put;
1188 }
1189
1190 mw->device = pd->device;
1191 mw->pd = pd;
1192 mw->uobject = uobj;
1193 atomic_inc(&pd->usecnt);
1194
1195 uobj->object = mw;
1196 ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
1197 if (ret)
1198 goto err_unalloc;
1199
1200 memset(&resp, 0, sizeof(resp));
1201 resp.rkey = mw->rkey;
1202 resp.mw_handle = uobj->id;
1203
1204 if (copy_to_user((void __user *)(unsigned long)cmd.response,
1205 &resp, sizeof(resp))) {
1206 ret = -EFAULT;
1207 goto err_copy;
1208 }
1209
1210 put_pd_read(pd);
1211
1212 mutex_lock(&file->mutex);
1213 list_add_tail(&uobj->list, &file->ucontext->mw_list);
1214 mutex_unlock(&file->mutex);
1215
1216 uobj->live = 1;
1217
1218 up_write(&uobj->mutex);
1219
1220 return in_len;
1221
1222err_copy:
1223 idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
1224
1225err_unalloc:
1226 ib_dealloc_mw(mw);
1227
1228err_put:
1229 put_pd_read(pd);
1230
1231err_free:
1232 put_uobj_write(uobj);
1233 return ret;
1234}
1235
1236ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
1237 const char __user *buf, int in_len,
1238 int out_len)
1239{
1240 struct ib_uverbs_dealloc_mw cmd;
1241 struct ib_mw *mw;
1242 struct ib_uobject *uobj;
1243 int ret = -EINVAL;
1244
1245 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1246 return -EFAULT;
1247
1248 uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
1249 if (!uobj)
1250 return -EINVAL;
1251
1252 mw = uobj->object;
1253
1254 ret = ib_dealloc_mw(mw);
1255 if (!ret)
1256 uobj->live = 0;
1257
1258 put_uobj_write(uobj);
1259
1260 if (ret)
1261 return ret;
1262
1263 idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
1264
1265 mutex_lock(&file->mutex);
1266 list_del(&uobj->list);
1267 mutex_unlock(&file->mutex);
1268
1269 put_uobj(uobj);
1270
1271 return in_len;
1272}
1273
6b73597e
RD
1274ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
1275 const char __user *buf, int in_len,
1276 int out_len)
1277{
1278 struct ib_uverbs_create_comp_channel cmd;
1279 struct ib_uverbs_create_comp_channel_resp resp;
1280 struct file *filp;
b1e4594b 1281 int ret;
6b73597e
RD
1282
1283 if (out_len < sizeof resp)
1284 return -ENOSPC;
1285
1286 if (copy_from_user(&cmd, buf, sizeof cmd))
1287 return -EFAULT;
1288
da183c7a 1289 ret = get_unused_fd_flags(O_CLOEXEC);
b1e4594b
AV
1290 if (ret < 0)
1291 return ret;
1292 resp.fd = ret;
1293
1294 filp = ib_uverbs_alloc_event_file(file, 0);
1295 if (IS_ERR(filp)) {
1296 put_unused_fd(resp.fd);
6b73597e 1297 return PTR_ERR(filp);
b1e4594b 1298 }
6b73597e
RD
1299
1300 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1301 &resp, sizeof resp)) {
1302 put_unused_fd(resp.fd);
1303 fput(filp);
1304 return -EFAULT;
1305 }
1306
1307 fd_install(resp.fd, filp);
1308 return in_len;
1309}
1310
bc38a6ab
RD
1311ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
1312 const char __user *buf, int in_len,
1313 int out_len)
1314{
1315 struct ib_uverbs_create_cq cmd;
1316 struct ib_uverbs_create_cq_resp resp;
1317 struct ib_udata udata;
9ead190b 1318 struct ib_ucq_object *obj;
6b73597e 1319 struct ib_uverbs_event_file *ev_file = NULL;
bc38a6ab
RD
1320 struct ib_cq *cq;
1321 int ret;
1322
1323 if (out_len < sizeof resp)
1324 return -ENOSPC;
1325
1326 if (copy_from_user(&cmd, buf, sizeof cmd))
1327 return -EFAULT;
1328
1329 INIT_UDATA(&udata, buf + sizeof cmd,
1330 (unsigned long) cmd.response + sizeof resp,
1331 in_len - sizeof cmd, out_len - sizeof resp);
1332
6b73597e 1333 if (cmd.comp_vector >= file->device->num_comp_vectors)
bc38a6ab
RD
1334 return -EINVAL;
1335
9ead190b
RD
1336 obj = kmalloc(sizeof *obj, GFP_KERNEL);
1337 if (!obj)
bc38a6ab
RD
1338 return -ENOMEM;
1339
3bea57a5 1340 init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
9ead190b
RD
1341 down_write(&obj->uobject.mutex);
1342
ac4e7b35
JM
1343 if (cmd.comp_channel >= 0) {
1344 ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
1345 if (!ev_file) {
1346 ret = -EINVAL;
1347 goto err;
1348 }
1349 }
1350
9ead190b
RD
1351 obj->uverbs_file = file;
1352 obj->comp_events_reported = 0;
1353 obj->async_events_reported = 0;
1354 INIT_LIST_HEAD(&obj->comp_list);
1355 INIT_LIST_HEAD(&obj->async_list);
bc38a6ab
RD
1356
1357 cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
f4fd0b22 1358 cmd.comp_vector,
bc38a6ab
RD
1359 file->ucontext, &udata);
1360 if (IS_ERR(cq)) {
1361 ret = PTR_ERR(cq);
9ead190b 1362 goto err_file;
bc38a6ab
RD
1363 }
1364
1365 cq->device = file->device->ib_dev;
9ead190b 1366 cq->uobject = &obj->uobject;
bc38a6ab
RD
1367 cq->comp_handler = ib_uverbs_comp_handler;
1368 cq->event_handler = ib_uverbs_cq_event_handler;
6b73597e 1369 cq->cq_context = ev_file;
bc38a6ab
RD
1370 atomic_set(&cq->usecnt, 0);
1371
9ead190b
RD
1372 obj->uobject.object = cq;
1373 ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
bc38a6ab 1374 if (ret)
9ead190b 1375 goto err_free;
bc38a6ab
RD
1376
1377 memset(&resp, 0, sizeof resp);
9ead190b 1378 resp.cq_handle = obj->uobject.id;
bc38a6ab
RD
1379 resp.cqe = cq->cqe;
1380
1381 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1382 &resp, sizeof resp)) {
1383 ret = -EFAULT;
9ead190b 1384 goto err_copy;
bc38a6ab
RD
1385 }
1386
95ed644f 1387 mutex_lock(&file->mutex);
9ead190b 1388 list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
95ed644f 1389 mutex_unlock(&file->mutex);
bc38a6ab 1390
9ead190b
RD
1391 obj->uobject.live = 1;
1392
1393 up_write(&obj->uobject.mutex);
bc38a6ab 1394
eb9d3cd5
RD
1395 return in_len;
1396
9ead190b
RD
1397err_copy:
1398 idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);
eb9d3cd5 1399
9ead190b 1400err_free:
bc38a6ab
RD
1401 ib_destroy_cq(cq);
1402
9ead190b 1403err_file:
ac4e7b35 1404 if (ev_file)
9ead190b
RD
1405 ib_uverbs_release_ucq(file, ev_file, obj);
1406
1407err:
1408 put_uobj_write(&obj->uobject);
bc38a6ab
RD
1409 return ret;
1410}
1411
33b9b3ee
RD
1412ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
1413 const char __user *buf, int in_len,
1414 int out_len)
1415{
1416 struct ib_uverbs_resize_cq cmd;
1417 struct ib_uverbs_resize_cq_resp resp;
1418 struct ib_udata udata;
1419 struct ib_cq *cq;
1420 int ret = -EINVAL;
1421
1422 if (copy_from_user(&cmd, buf, sizeof cmd))
1423 return -EFAULT;
1424
1425 INIT_UDATA(&udata, buf + sizeof cmd,
1426 (unsigned long) cmd.response + sizeof resp,
1427 in_len - sizeof cmd, out_len - sizeof resp);
1428
1ccf6aa1 1429 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
9ead190b
RD
1430 if (!cq)
1431 return -EINVAL;
33b9b3ee
RD
1432
1433 ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
1434 if (ret)
1435 goto out;
1436
33b9b3ee
RD
1437 resp.cqe = cq->cqe;
1438
1439 if (copy_to_user((void __user *) (unsigned long) cmd.response,
64f817ba 1440 &resp, sizeof resp.cqe))
33b9b3ee
RD
1441 ret = -EFAULT;
1442
1443out:
9ead190b 1444 put_cq_read(cq);
33b9b3ee
RD
1445
1446 return ret ? ret : in_len;
1447}
1448
7182afea
DC
1449static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
1450{
1451 struct ib_uverbs_wc tmp;
1452
1453 tmp.wr_id = wc->wr_id;
1454 tmp.status = wc->status;
1455 tmp.opcode = wc->opcode;
1456 tmp.vendor_err = wc->vendor_err;
1457 tmp.byte_len = wc->byte_len;
1458 tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
1459 tmp.qp_num = wc->qp->qp_num;
1460 tmp.src_qp = wc->src_qp;
1461 tmp.wc_flags = wc->wc_flags;
1462 tmp.pkey_index = wc->pkey_index;
1463 tmp.slid = wc->slid;
1464 tmp.sl = wc->sl;
1465 tmp.dlid_path_bits = wc->dlid_path_bits;
1466 tmp.port_num = wc->port_num;
1467 tmp.reserved = 0;
1468
1469 if (copy_to_user(dest, &tmp, sizeof tmp))
1470 return -EFAULT;
1471
1472 return 0;
1473}
1474
67cdb40c
RD
1475ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
1476 const char __user *buf, int in_len,
1477 int out_len)
1478{
1479 struct ib_uverbs_poll_cq cmd;
7182afea
DC
1480 struct ib_uverbs_poll_cq_resp resp;
1481 u8 __user *header_ptr;
1482 u8 __user *data_ptr;
67cdb40c 1483 struct ib_cq *cq;
7182afea
DC
1484 struct ib_wc wc;
1485 int ret;
67cdb40c
RD
1486
1487 if (copy_from_user(&cmd, buf, sizeof cmd))
1488 return -EFAULT;
1489
1ccf6aa1 1490 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
7182afea
DC
1491 if (!cq)
1492 return -EINVAL;
67cdb40c 1493
7182afea
DC
1494 /* we copy a struct ib_uverbs_poll_cq_resp to user space */
1495 header_ptr = (void __user *)(unsigned long) cmd.response;
1496 data_ptr = header_ptr + sizeof resp;
9ead190b 1497
7182afea
DC
1498 memset(&resp, 0, sizeof resp);
1499 while (resp.count < cmd.ne) {
1500 ret = ib_poll_cq(cq, 1, &wc);
1501 if (ret < 0)
1502 goto out_put;
1503 if (!ret)
1504 break;
1505
1506 ret = copy_wc_to_user(data_ptr, &wc);
1507 if (ret)
1508 goto out_put;
1509
1510 data_ptr += sizeof(struct ib_uverbs_wc);
1511 ++resp.count;
67cdb40c
RD
1512 }
1513
7182afea 1514 if (copy_to_user(header_ptr, &resp, sizeof resp)) {
67cdb40c 1515 ret = -EFAULT;
7182afea
DC
1516 goto out_put;
1517 }
67cdb40c 1518
7182afea 1519 ret = in_len;
67cdb40c 1520
7182afea
DC
1521out_put:
1522 put_cq_read(cq);
1523 return ret;
67cdb40c
RD
1524}
1525
1526ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
1527 const char __user *buf, int in_len,
1528 int out_len)
1529{
1530 struct ib_uverbs_req_notify_cq cmd;
1531 struct ib_cq *cq;
67cdb40c
RD
1532
1533 if (copy_from_user(&cmd, buf, sizeof cmd))
1534 return -EFAULT;
1535
1ccf6aa1 1536 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
ab108676 1537 if (!cq)
9ead190b 1538 return -EINVAL;
67cdb40c 1539
9ead190b
RD
1540 ib_req_notify_cq(cq, cmd.solicited_only ?
1541 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
1542
ab108676 1543 put_cq_read(cq);
9ead190b
RD
1544
1545 return in_len;
67cdb40c
RD
1546}
1547
bc38a6ab
RD
1548ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
1549 const char __user *buf, int in_len,
1550 int out_len)
1551{
63aaf647
RD
1552 struct ib_uverbs_destroy_cq cmd;
1553 struct ib_uverbs_destroy_cq_resp resp;
9ead190b 1554 struct ib_uobject *uobj;
63aaf647 1555 struct ib_cq *cq;
9ead190b 1556 struct ib_ucq_object *obj;
6b73597e 1557 struct ib_uverbs_event_file *ev_file;
63aaf647 1558 int ret = -EINVAL;
bc38a6ab
RD
1559
1560 if (copy_from_user(&cmd, buf, sizeof cmd))
1561 return -EFAULT;
1562
9ead190b
RD
1563 uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
1564 if (!uobj)
1565 return -EINVAL;
1566 cq = uobj->object;
1567 ev_file = cq->cq_context;
1568 obj = container_of(cq->uobject, struct ib_ucq_object, uobject);
bc38a6ab 1569
9ead190b
RD
1570 ret = ib_destroy_cq(cq);
1571 if (!ret)
1572 uobj->live = 0;
bc38a6ab 1573
9ead190b 1574 put_uobj_write(uobj);
bc38a6ab 1575
bc38a6ab 1576 if (ret)
9ead190b 1577 return ret;
bc38a6ab 1578
9ead190b 1579 idr_remove_uobj(&ib_uverbs_cq_idr, uobj);
bc38a6ab 1580
95ed644f 1581 mutex_lock(&file->mutex);
9ead190b 1582 list_del(&uobj->list);
95ed644f 1583 mutex_unlock(&file->mutex);
bc38a6ab 1584
9ead190b 1585 ib_uverbs_release_ucq(file, ev_file, obj);
63aaf647 1586
9ead190b
RD
1587 memset(&resp, 0, sizeof resp);
1588 resp.comp_events_reported = obj->comp_events_reported;
1589 resp.async_events_reported = obj->async_events_reported;
63aaf647 1590
9ead190b 1591 put_uobj(uobj);
bc38a6ab 1592
63aaf647
RD
1593 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1594 &resp, sizeof resp))
9ead190b 1595 return -EFAULT;
bc38a6ab 1596
9ead190b 1597 return in_len;
bc38a6ab
RD
1598}
1599
1600ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1601 const char __user *buf, int in_len,
1602 int out_len)
1603{
1604 struct ib_uverbs_create_qp cmd;
1605 struct ib_uverbs_create_qp_resp resp;
1606 struct ib_udata udata;
9ead190b 1607 struct ib_uqp_object *obj;
b93f3c18
SH
1608 struct ib_device *device;
1609 struct ib_pd *pd = NULL;
1610 struct ib_xrcd *xrcd = NULL;
1611 struct ib_uobject *uninitialized_var(xrcd_uobj);
1612 struct ib_cq *scq = NULL, *rcq = NULL;
9977f4f6 1613 struct ib_srq *srq = NULL;
bc38a6ab
RD
1614 struct ib_qp *qp;
1615 struct ib_qp_init_attr attr;
1616 int ret;
1617
1618 if (out_len < sizeof resp)
1619 return -ENOSPC;
1620
1621 if (copy_from_user(&cmd, buf, sizeof cmd))
1622 return -EFAULT;
1623
c938a616
OG
1624 if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
1625 return -EPERM;
1626
bc38a6ab
RD
1627 INIT_UDATA(&udata, buf + sizeof cmd,
1628 (unsigned long) cmd.response + sizeof resp,
1629 in_len - sizeof cmd, out_len - sizeof resp);
1630
846be90d 1631 obj = kzalloc(sizeof *obj, GFP_KERNEL);
9ead190b 1632 if (!obj)
bc38a6ab
RD
1633 return -ENOMEM;
1634
3bea57a5 1635 init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
9ead190b 1636 down_write(&obj->uevent.uobject.mutex);
bc38a6ab 1637
b93f3c18
SH
1638 if (cmd.qp_type == IB_QPT_XRC_TGT) {
1639 xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
1640 if (!xrcd) {
1641 ret = -EINVAL;
1642 goto err_put;
1643 }
1644 device = xrcd->device;
9977f4f6 1645 } else {
b93f3c18
SH
1646 if (cmd.qp_type == IB_QPT_XRC_INI) {
1647 cmd.max_recv_wr = cmd.max_recv_sge = 0;
1648 } else {
1649 if (cmd.is_srq) {
1650 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
1651 if (!srq || srq->srq_type != IB_SRQT_BASIC) {
1652 ret = -EINVAL;
1653 goto err_put;
1654 }
1655 }
5909ce54
RD
1656
1657 if (cmd.recv_cq_handle != cmd.send_cq_handle) {
1658 rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
1659 if (!rcq) {
1660 ret = -EINVAL;
1661 goto err_put;
1662 }
9977f4f6
SH
1663 }
1664 }
5909ce54
RD
1665
1666 scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
1667 rcq = rcq ?: scq;
1668 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1669 if (!pd || !scq) {
1670 ret = -EINVAL;
1671 goto err_put;
1672 }
1673
b93f3c18 1674 device = pd->device;
9977f4f6
SH
1675 }
1676
bc38a6ab
RD
1677 attr.event_handler = ib_uverbs_qp_event_handler;
1678 attr.qp_context = file;
1679 attr.send_cq = scq;
1680 attr.recv_cq = rcq;
f520ba5a 1681 attr.srq = srq;
b93f3c18 1682 attr.xrcd = xrcd;
bc38a6ab
RD
1683 attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
1684 attr.qp_type = cmd.qp_type;
b846f25a 1685 attr.create_flags = 0;
bc38a6ab
RD
1686
1687 attr.cap.max_send_wr = cmd.max_send_wr;
1688 attr.cap.max_recv_wr = cmd.max_recv_wr;
1689 attr.cap.max_send_sge = cmd.max_send_sge;
1690 attr.cap.max_recv_sge = cmd.max_recv_sge;
1691 attr.cap.max_inline_data = cmd.max_inline_data;
1692
9ead190b
RD
1693 obj->uevent.events_reported = 0;
1694 INIT_LIST_HEAD(&obj->uevent.event_list);
1695 INIT_LIST_HEAD(&obj->mcast_list);
bc38a6ab 1696
b93f3c18
SH
1697 if (cmd.qp_type == IB_QPT_XRC_TGT)
1698 qp = ib_create_qp(pd, &attr);
1699 else
1700 qp = device->create_qp(pd, &attr, &udata);
1701
bc38a6ab
RD
1702 if (IS_ERR(qp)) {
1703 ret = PTR_ERR(qp);
9ead190b 1704 goto err_put;
bc38a6ab
RD
1705 }
1706
b93f3c18 1707 if (cmd.qp_type != IB_QPT_XRC_TGT) {
0e0ec7e0 1708 qp->real_qp = qp;
b93f3c18
SH
1709 qp->device = device;
1710 qp->pd = pd;
1711 qp->send_cq = attr.send_cq;
1712 qp->recv_cq = attr.recv_cq;
1713 qp->srq = attr.srq;
1714 qp->event_handler = attr.event_handler;
1715 qp->qp_context = attr.qp_context;
1716 qp->qp_type = attr.qp_type;
e47e321a 1717 atomic_set(&qp->usecnt, 0);
b93f3c18
SH
1718 atomic_inc(&pd->usecnt);
1719 atomic_inc(&attr.send_cq->usecnt);
1720 if (attr.recv_cq)
1721 atomic_inc(&attr.recv_cq->usecnt);
1722 if (attr.srq)
1723 atomic_inc(&attr.srq->usecnt);
1724 }
1725 qp->uobject = &obj->uevent.uobject;
bc38a6ab 1726
9ead190b
RD
1727 obj->uevent.uobject.object = qp;
1728 ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
bc38a6ab
RD
1729 if (ret)
1730 goto err_destroy;
1731
9ead190b
RD
1732 memset(&resp, 0, sizeof resp);
1733 resp.qpn = qp->qp_num;
1734 resp.qp_handle = obj->uevent.uobject.id;
77369ed3
JM
1735 resp.max_recv_sge = attr.cap.max_recv_sge;
1736 resp.max_send_sge = attr.cap.max_send_sge;
1737 resp.max_recv_wr = attr.cap.max_recv_wr;
1738 resp.max_send_wr = attr.cap.max_send_wr;
1739 resp.max_inline_data = attr.cap.max_inline_data;
bc38a6ab 1740
bc38a6ab
RD
1741 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1742 &resp, sizeof resp)) {
1743 ret = -EFAULT;
9ead190b 1744 goto err_copy;
bc38a6ab
RD
1745 }
1746
846be90d
YH
1747 if (xrcd) {
1748 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
1749 uobject);
1750 atomic_inc(&obj->uxrcd->refcnt);
b93f3c18 1751 put_xrcd_read(xrcd_uobj);
846be90d
YH
1752 }
1753
b93f3c18
SH
1754 if (pd)
1755 put_pd_read(pd);
1756 if (scq)
1757 put_cq_read(scq);
9977f4f6 1758 if (rcq && rcq != scq)
43db2bc0 1759 put_cq_read(rcq);
9ead190b
RD
1760 if (srq)
1761 put_srq_read(srq);
1762
95ed644f 1763 mutex_lock(&file->mutex);
9ead190b 1764 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
95ed644f 1765 mutex_unlock(&file->mutex);
eb9d3cd5 1766
9ead190b
RD
1767 obj->uevent.uobject.live = 1;
1768
1769 up_write(&obj->uevent.uobject.mutex);
bc38a6ab
RD
1770
1771 return in_len;
1772
9ead190b
RD
1773err_copy:
1774 idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
bc38a6ab
RD
1775
1776err_destroy:
1777 ib_destroy_qp(qp);
1778
9ead190b 1779err_put:
b93f3c18
SH
1780 if (xrcd)
1781 put_xrcd_read(xrcd_uobj);
9ead190b
RD
1782 if (pd)
1783 put_pd_read(pd);
1784 if (scq)
1785 put_cq_read(scq);
43db2bc0 1786 if (rcq && rcq != scq)
9ead190b
RD
1787 put_cq_read(rcq);
1788 if (srq)
1789 put_srq_read(srq);
1790
1791 put_uobj_write(&obj->uevent.uobject);
bc38a6ab
RD
1792 return ret;
1793}
1794
42849b26
SH
1795ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
1796 const char __user *buf, int in_len, int out_len)
1797{
1798 struct ib_uverbs_open_qp cmd;
1799 struct ib_uverbs_create_qp_resp resp;
1800 struct ib_udata udata;
1801 struct ib_uqp_object *obj;
1802 struct ib_xrcd *xrcd;
1803 struct ib_uobject *uninitialized_var(xrcd_uobj);
1804 struct ib_qp *qp;
1805 struct ib_qp_open_attr attr;
1806 int ret;
1807
1808 if (out_len < sizeof resp)
1809 return -ENOSPC;
1810
1811 if (copy_from_user(&cmd, buf, sizeof cmd))
1812 return -EFAULT;
1813
1814 INIT_UDATA(&udata, buf + sizeof cmd,
1815 (unsigned long) cmd.response + sizeof resp,
1816 in_len - sizeof cmd, out_len - sizeof resp);
1817
1818 obj = kmalloc(sizeof *obj, GFP_KERNEL);
1819 if (!obj)
1820 return -ENOMEM;
1821
3bea57a5 1822 init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
42849b26
SH
1823 down_write(&obj->uevent.uobject.mutex);
1824
1825 xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
1826 if (!xrcd) {
1827 ret = -EINVAL;
1828 goto err_put;
1829 }
1830
1831 attr.event_handler = ib_uverbs_qp_event_handler;
1832 attr.qp_context = file;
1833 attr.qp_num = cmd.qpn;
1834 attr.qp_type = cmd.qp_type;
1835
1836 obj->uevent.events_reported = 0;
1837 INIT_LIST_HEAD(&obj->uevent.event_list);
1838 INIT_LIST_HEAD(&obj->mcast_list);
1839
1840 qp = ib_open_qp(xrcd, &attr);
1841 if (IS_ERR(qp)) {
1842 ret = PTR_ERR(qp);
1843 goto err_put;
1844 }
1845
1846 qp->uobject = &obj->uevent.uobject;
1847
1848 obj->uevent.uobject.object = qp;
1849 ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1850 if (ret)
1851 goto err_destroy;
1852
1853 memset(&resp, 0, sizeof resp);
1854 resp.qpn = qp->qp_num;
1855 resp.qp_handle = obj->uevent.uobject.id;
1856
1857 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1858 &resp, sizeof resp)) {
1859 ret = -EFAULT;
1860 goto err_remove;
1861 }
1862
846be90d
YH
1863 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
1864 atomic_inc(&obj->uxrcd->refcnt);
42849b26
SH
1865 put_xrcd_read(xrcd_uobj);
1866
1867 mutex_lock(&file->mutex);
1868 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1869 mutex_unlock(&file->mutex);
1870
1871 obj->uevent.uobject.live = 1;
1872
1873 up_write(&obj->uevent.uobject.mutex);
1874
1875 return in_len;
1876
1877err_remove:
1878 idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1879
1880err_destroy:
1881 ib_destroy_qp(qp);
1882
1883err_put:
1884 put_xrcd_read(xrcd_uobj);
1885 put_uobj_write(&obj->uevent.uobject);
1886 return ret;
1887}
1888
7ccc9a24
DB
1889ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
1890 const char __user *buf, int in_len,
1891 int out_len)
1892{
1893 struct ib_uverbs_query_qp cmd;
1894 struct ib_uverbs_query_qp_resp resp;
1895 struct ib_qp *qp;
1896 struct ib_qp_attr *attr;
1897 struct ib_qp_init_attr *init_attr;
1898 int ret;
1899
1900 if (copy_from_user(&cmd, buf, sizeof cmd))
1901 return -EFAULT;
1902
1903 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1904 init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
1905 if (!attr || !init_attr) {
1906 ret = -ENOMEM;
1907 goto out;
1908 }
1909
9ead190b
RD
1910 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1911 if (!qp) {
7ccc9a24 1912 ret = -EINVAL;
9ead190b
RD
1913 goto out;
1914 }
1915
1916 ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
7ccc9a24 1917
9ead190b 1918 put_qp_read(qp);
7ccc9a24
DB
1919
1920 if (ret)
1921 goto out;
1922
1923 memset(&resp, 0, sizeof resp);
1924
1925 resp.qp_state = attr->qp_state;
1926 resp.cur_qp_state = attr->cur_qp_state;
1927 resp.path_mtu = attr->path_mtu;
1928 resp.path_mig_state = attr->path_mig_state;
1929 resp.qkey = attr->qkey;
1930 resp.rq_psn = attr->rq_psn;
1931 resp.sq_psn = attr->sq_psn;
1932 resp.dest_qp_num = attr->dest_qp_num;
1933 resp.qp_access_flags = attr->qp_access_flags;
1934 resp.pkey_index = attr->pkey_index;
1935 resp.alt_pkey_index = attr->alt_pkey_index;
0b26c88f 1936 resp.sq_draining = attr->sq_draining;
7ccc9a24
DB
1937 resp.max_rd_atomic = attr->max_rd_atomic;
1938 resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
1939 resp.min_rnr_timer = attr->min_rnr_timer;
1940 resp.port_num = attr->port_num;
1941 resp.timeout = attr->timeout;
1942 resp.retry_cnt = attr->retry_cnt;
1943 resp.rnr_retry = attr->rnr_retry;
1944 resp.alt_port_num = attr->alt_port_num;
1945 resp.alt_timeout = attr->alt_timeout;
1946
1947 memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
1948 resp.dest.flow_label = attr->ah_attr.grh.flow_label;
1949 resp.dest.sgid_index = attr->ah_attr.grh.sgid_index;
1950 resp.dest.hop_limit = attr->ah_attr.grh.hop_limit;
1951 resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
1952 resp.dest.dlid = attr->ah_attr.dlid;
1953 resp.dest.sl = attr->ah_attr.sl;
1954 resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
1955 resp.dest.static_rate = attr->ah_attr.static_rate;
1956 resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
1957 resp.dest.port_num = attr->ah_attr.port_num;
1958
1959 memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
1960 resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
1961 resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
1962 resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
1963 resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
1964 resp.alt_dest.dlid = attr->alt_ah_attr.dlid;
1965 resp.alt_dest.sl = attr->alt_ah_attr.sl;
1966 resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
1967 resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
1968 resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
1969 resp.alt_dest.port_num = attr->alt_ah_attr.port_num;
1970
1971 resp.max_send_wr = init_attr->cap.max_send_wr;
1972 resp.max_recv_wr = init_attr->cap.max_recv_wr;
1973 resp.max_send_sge = init_attr->cap.max_send_sge;
1974 resp.max_recv_sge = init_attr->cap.max_recv_sge;
1975 resp.max_inline_data = init_attr->cap.max_inline_data;
27d56300 1976 resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
7ccc9a24
DB
1977
1978 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1979 &resp, sizeof resp))
1980 ret = -EFAULT;
1981
1982out:
1983 kfree(attr);
1984 kfree(init_attr);
1985
1986 return ret ? ret : in_len;
1987}
1988
9977f4f6
SH
1989/* Remove ignored fields set in the attribute mask */
1990static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
1991{
1992 switch (qp_type) {
1993 case IB_QPT_XRC_INI:
1994 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
b93f3c18
SH
1995 case IB_QPT_XRC_TGT:
1996 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
1997 IB_QP_RNR_RETRY);
9977f4f6
SH
1998 default:
1999 return mask;
2000 }
2001}
2002
bc38a6ab
RD
2003ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
2004 const char __user *buf, int in_len,
2005 int out_len)
2006{
2007 struct ib_uverbs_modify_qp cmd;
9bc57e2d 2008 struct ib_udata udata;
bc38a6ab
RD
2009 struct ib_qp *qp;
2010 struct ib_qp_attr *attr;
2011 int ret;
2012
2013 if (copy_from_user(&cmd, buf, sizeof cmd))
2014 return -EFAULT;
2015
9bc57e2d
RC
2016 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
2017 out_len);
2018
bc38a6ab
RD
2019 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2020 if (!attr)
2021 return -ENOMEM;
2022
9ead190b
RD
2023 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2024 if (!qp) {
bc38a6ab
RD
2025 ret = -EINVAL;
2026 goto out;
2027 }
2028
2029 attr->qp_state = cmd.qp_state;
2030 attr->cur_qp_state = cmd.cur_qp_state;
2031 attr->path_mtu = cmd.path_mtu;
2032 attr->path_mig_state = cmd.path_mig_state;
2033 attr->qkey = cmd.qkey;
2034 attr->rq_psn = cmd.rq_psn;
2035 attr->sq_psn = cmd.sq_psn;
2036 attr->dest_qp_num = cmd.dest_qp_num;
2037 attr->qp_access_flags = cmd.qp_access_flags;
2038 attr->pkey_index = cmd.pkey_index;
702b2aac 2039 attr->alt_pkey_index = cmd.alt_pkey_index;
bc38a6ab
RD
2040 attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
2041 attr->max_rd_atomic = cmd.max_rd_atomic;
2042 attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
2043 attr->min_rnr_timer = cmd.min_rnr_timer;
2044 attr->port_num = cmd.port_num;
2045 attr->timeout = cmd.timeout;
2046 attr->retry_cnt = cmd.retry_cnt;
2047 attr->rnr_retry = cmd.rnr_retry;
2048 attr->alt_port_num = cmd.alt_port_num;
2049 attr->alt_timeout = cmd.alt_timeout;
2050
2051 memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
2052 attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
2053 attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
2054 attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
2055 attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
2056 attr->ah_attr.dlid = cmd.dest.dlid;
2057 attr->ah_attr.sl = cmd.dest.sl;
2058 attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
2059 attr->ah_attr.static_rate = cmd.dest.static_rate;
2060 attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
2061 attr->ah_attr.port_num = cmd.dest.port_num;
2062
2063 memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
2064 attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
2065 attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
2066 attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
2067 attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
2068 attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
2069 attr->alt_ah_attr.sl = cmd.alt_dest.sl;
2070 attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
2071 attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
2072 attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
2073 attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
2074
0e0ec7e0 2075 if (qp->real_qp == qp) {
ed4c54e5
OG
2076 ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
2077 if (ret)
2078 goto out;
0e0ec7e0
SH
2079 ret = qp->device->modify_qp(qp, attr,
2080 modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
2081 } else {
2082 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
2083 }
9ead190b
RD
2084
2085 put_qp_read(qp);
2086
bc38a6ab
RD
2087 if (ret)
2088 goto out;
2089
2090 ret = in_len;
2091
2092out:
bc38a6ab
RD
2093 kfree(attr);
2094
2095 return ret;
2096}
2097
2098ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
2099 const char __user *buf, int in_len,
2100 int out_len)
2101{
63aaf647
RD
2102 struct ib_uverbs_destroy_qp cmd;
2103 struct ib_uverbs_destroy_qp_resp resp;
9ead190b 2104 struct ib_uobject *uobj;
63aaf647 2105 struct ib_qp *qp;
9ead190b 2106 struct ib_uqp_object *obj;
63aaf647 2107 int ret = -EINVAL;
bc38a6ab
RD
2108
2109 if (copy_from_user(&cmd, buf, sizeof cmd))
2110 return -EFAULT;
2111
63aaf647
RD
2112 memset(&resp, 0, sizeof resp);
2113
9ead190b
RD
2114 uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
2115 if (!uobj)
2116 return -EINVAL;
2117 qp = uobj->object;
2118 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
f4e40156 2119
9ead190b
RD
2120 if (!list_empty(&obj->mcast_list)) {
2121 put_uobj_write(uobj);
2122 return -EBUSY;
f4e40156 2123 }
bc38a6ab
RD
2124
2125 ret = ib_destroy_qp(qp);
9ead190b
RD
2126 if (!ret)
2127 uobj->live = 0;
2128
2129 put_uobj_write(uobj);
2130
bc38a6ab 2131 if (ret)
9ead190b 2132 return ret;
bc38a6ab 2133
846be90d
YH
2134 if (obj->uxrcd)
2135 atomic_dec(&obj->uxrcd->refcnt);
2136
9ead190b 2137 idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
bc38a6ab 2138
95ed644f 2139 mutex_lock(&file->mutex);
9ead190b 2140 list_del(&uobj->list);
95ed644f 2141 mutex_unlock(&file->mutex);
bc38a6ab 2142
9ead190b 2143 ib_uverbs_release_uevent(file, &obj->uevent);
63aaf647 2144
9ead190b 2145 resp.events_reported = obj->uevent.events_reported;
63aaf647 2146
9ead190b 2147 put_uobj(uobj);
bc38a6ab 2148
63aaf647
RD
2149 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2150 &resp, sizeof resp))
9ead190b 2151 return -EFAULT;
bc38a6ab 2152
9ead190b 2153 return in_len;
bc38a6ab
RD
2154}
2155
67cdb40c 2156ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
a74cd4af
RD
2157 const char __user *buf, int in_len,
2158 int out_len)
67cdb40c
RD
2159{
2160 struct ib_uverbs_post_send cmd;
2161 struct ib_uverbs_post_send_resp resp;
2162 struct ib_uverbs_send_wr *user_wr;
2163 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
2164 struct ib_qp *qp;
2165 int i, sg_ind;
9ead190b 2166 int is_ud;
67cdb40c
RD
2167 ssize_t ret = -EINVAL;
2168
2169 if (copy_from_user(&cmd, buf, sizeof cmd))
2170 return -EFAULT;
2171
2172 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
2173 cmd.sge_count * sizeof (struct ib_uverbs_sge))
2174 return -EINVAL;
2175
2176 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
2177 return -EINVAL;
2178
2179 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
2180 if (!user_wr)
2181 return -ENOMEM;
2182
9ead190b
RD
2183 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2184 if (!qp)
67cdb40c
RD
2185 goto out;
2186
9ead190b 2187 is_ud = qp->qp_type == IB_QPT_UD;
67cdb40c
RD
2188 sg_ind = 0;
2189 last = NULL;
2190 for (i = 0; i < cmd.wr_count; ++i) {
2191 if (copy_from_user(user_wr,
2192 buf + sizeof cmd + i * cmd.wqe_size,
2193 cmd.wqe_size)) {
2194 ret = -EFAULT;
9ead190b 2195 goto out_put;
67cdb40c
RD
2196 }
2197
2198 if (user_wr->num_sge + sg_ind > cmd.sge_count) {
2199 ret = -EINVAL;
9ead190b 2200 goto out_put;
67cdb40c
RD
2201 }
2202
2203 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2204 user_wr->num_sge * sizeof (struct ib_sge),
2205 GFP_KERNEL);
2206 if (!next) {
2207 ret = -ENOMEM;
9ead190b 2208 goto out_put;
67cdb40c
RD
2209 }
2210
2211 if (!last)
2212 wr = next;
2213 else
2214 last->next = next;
2215 last = next;
2216
2217 next->next = NULL;
2218 next->wr_id = user_wr->wr_id;
2219 next->num_sge = user_wr->num_sge;
2220 next->opcode = user_wr->opcode;
2221 next->send_flags = user_wr->send_flags;
67cdb40c 2222
9ead190b
RD
2223 if (is_ud) {
2224 next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
2225 file->ucontext);
67cdb40c
RD
2226 if (!next->wr.ud.ah) {
2227 ret = -EINVAL;
9ead190b 2228 goto out_put;
67cdb40c
RD
2229 }
2230 next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn;
2231 next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
6b7d103c
LI
2232 if (next->opcode == IB_WR_SEND_WITH_IMM)
2233 next->ex.imm_data =
2234 (__be32 __force) user_wr->ex.imm_data;
67cdb40c
RD
2235 } else {
2236 switch (next->opcode) {
67cdb40c 2237 case IB_WR_RDMA_WRITE_WITH_IMM:
0f39cf3d
RD
2238 next->ex.imm_data =
2239 (__be32 __force) user_wr->ex.imm_data;
2240 case IB_WR_RDMA_WRITE:
67cdb40c
RD
2241 case IB_WR_RDMA_READ:
2242 next->wr.rdma.remote_addr =
2243 user_wr->wr.rdma.remote_addr;
2244 next->wr.rdma.rkey =
2245 user_wr->wr.rdma.rkey;
2246 break;
0f39cf3d
RD
2247 case IB_WR_SEND_WITH_IMM:
2248 next->ex.imm_data =
2249 (__be32 __force) user_wr->ex.imm_data;
2250 break;
2251 case IB_WR_SEND_WITH_INV:
2252 next->ex.invalidate_rkey =
2253 user_wr->ex.invalidate_rkey;
2254 break;
67cdb40c
RD
2255 case IB_WR_ATOMIC_CMP_AND_SWP:
2256 case IB_WR_ATOMIC_FETCH_AND_ADD:
2257 next->wr.atomic.remote_addr =
2258 user_wr->wr.atomic.remote_addr;
2259 next->wr.atomic.compare_add =
2260 user_wr->wr.atomic.compare_add;
2261 next->wr.atomic.swap = user_wr->wr.atomic.swap;
2262 next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
2263 break;
2264 default:
2265 break;
2266 }
2267 }
2268
2269 if (next->num_sge) {
2270 next->sg_list = (void *) next +
2271 ALIGN(sizeof *next, sizeof (struct ib_sge));
2272 if (copy_from_user(next->sg_list,
2273 buf + sizeof cmd +
2274 cmd.wr_count * cmd.wqe_size +
2275 sg_ind * sizeof (struct ib_sge),
2276 next->num_sge * sizeof (struct ib_sge))) {
2277 ret = -EFAULT;
9ead190b 2278 goto out_put;
67cdb40c
RD
2279 }
2280 sg_ind += next->num_sge;
2281 } else
2282 next->sg_list = NULL;
2283 }
2284
2285 resp.bad_wr = 0;
0e0ec7e0 2286 ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
67cdb40c
RD
2287 if (ret)
2288 for (next = wr; next; next = next->next) {
2289 ++resp.bad_wr;
2290 if (next == bad_wr)
2291 break;
2292 }
2293
2294 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2295 &resp, sizeof resp))
2296 ret = -EFAULT;
2297
9ead190b
RD
2298out_put:
2299 put_qp_read(qp);
67cdb40c
RD
2300
2301 while (wr) {
9ead190b
RD
2302 if (is_ud && wr->wr.ud.ah)
2303 put_ah_read(wr->wr.ud.ah);
67cdb40c
RD
2304 next = wr->next;
2305 kfree(wr);
2306 wr = next;
2307 }
2308
18320828 2309out:
67cdb40c
RD
2310 kfree(user_wr);
2311
2312 return ret ? ret : in_len;
2313}
2314
2315static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
2316 int in_len,
2317 u32 wr_count,
2318 u32 sge_count,
2319 u32 wqe_size)
2320{
2321 struct ib_uverbs_recv_wr *user_wr;
2322 struct ib_recv_wr *wr = NULL, *last, *next;
2323 int sg_ind;
2324 int i;
2325 int ret;
2326
2327 if (in_len < wqe_size * wr_count +
2328 sge_count * sizeof (struct ib_uverbs_sge))
2329 return ERR_PTR(-EINVAL);
2330
2331 if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
2332 return ERR_PTR(-EINVAL);
2333
2334 user_wr = kmalloc(wqe_size, GFP_KERNEL);
2335 if (!user_wr)
2336 return ERR_PTR(-ENOMEM);
2337
2338 sg_ind = 0;
2339 last = NULL;
2340 for (i = 0; i < wr_count; ++i) {
2341 if (copy_from_user(user_wr, buf + i * wqe_size,
2342 wqe_size)) {
2343 ret = -EFAULT;
2344 goto err;
2345 }
2346
2347 if (user_wr->num_sge + sg_ind > sge_count) {
2348 ret = -EINVAL;
2349 goto err;
2350 }
2351
2352 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2353 user_wr->num_sge * sizeof (struct ib_sge),
2354 GFP_KERNEL);
2355 if (!next) {
2356 ret = -ENOMEM;
2357 goto err;
2358 }
2359
2360 if (!last)
2361 wr = next;
2362 else
2363 last->next = next;
2364 last = next;
2365
2366 next->next = NULL;
2367 next->wr_id = user_wr->wr_id;
2368 next->num_sge = user_wr->num_sge;
2369
2370 if (next->num_sge) {
2371 next->sg_list = (void *) next +
2372 ALIGN(sizeof *next, sizeof (struct ib_sge));
2373 if (copy_from_user(next->sg_list,
2374 buf + wr_count * wqe_size +
2375 sg_ind * sizeof (struct ib_sge),
2376 next->num_sge * sizeof (struct ib_sge))) {
2377 ret = -EFAULT;
2378 goto err;
2379 }
2380 sg_ind += next->num_sge;
2381 } else
2382 next->sg_list = NULL;
2383 }
2384
2385 kfree(user_wr);
2386 return wr;
2387
2388err:
2389 kfree(user_wr);
2390
2391 while (wr) {
2392 next = wr->next;
2393 kfree(wr);
2394 wr = next;
2395 }
2396
2397 return ERR_PTR(ret);
2398}
2399
2400ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
a74cd4af
RD
2401 const char __user *buf, int in_len,
2402 int out_len)
67cdb40c
RD
2403{
2404 struct ib_uverbs_post_recv cmd;
2405 struct ib_uverbs_post_recv_resp resp;
2406 struct ib_recv_wr *wr, *next, *bad_wr;
2407 struct ib_qp *qp;
2408 ssize_t ret = -EINVAL;
2409
2410 if (copy_from_user(&cmd, buf, sizeof cmd))
2411 return -EFAULT;
2412
2413 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2414 in_len - sizeof cmd, cmd.wr_count,
2415 cmd.sge_count, cmd.wqe_size);
2416 if (IS_ERR(wr))
2417 return PTR_ERR(wr);
2418
9ead190b
RD
2419 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2420 if (!qp)
67cdb40c
RD
2421 goto out;
2422
2423 resp.bad_wr = 0;
0e0ec7e0 2424 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
9ead190b
RD
2425
2426 put_qp_read(qp);
2427
67cdb40c
RD
2428 if (ret)
2429 for (next = wr; next; next = next->next) {
2430 ++resp.bad_wr;
2431 if (next == bad_wr)
2432 break;
2433 }
2434
67cdb40c
RD
2435 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2436 &resp, sizeof resp))
2437 ret = -EFAULT;
2438
2439out:
67cdb40c
RD
2440 while (wr) {
2441 next = wr->next;
2442 kfree(wr);
2443 wr = next;
2444 }
2445
2446 return ret ? ret : in_len;
2447}
2448
2449ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
a74cd4af
RD
2450 const char __user *buf, int in_len,
2451 int out_len)
67cdb40c
RD
2452{
2453 struct ib_uverbs_post_srq_recv cmd;
2454 struct ib_uverbs_post_srq_recv_resp resp;
2455 struct ib_recv_wr *wr, *next, *bad_wr;
2456 struct ib_srq *srq;
2457 ssize_t ret = -EINVAL;
2458
2459 if (copy_from_user(&cmd, buf, sizeof cmd))
2460 return -EFAULT;
2461
2462 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2463 in_len - sizeof cmd, cmd.wr_count,
2464 cmd.sge_count, cmd.wqe_size);
2465 if (IS_ERR(wr))
2466 return PTR_ERR(wr);
2467
9ead190b
RD
2468 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2469 if (!srq)
67cdb40c
RD
2470 goto out;
2471
2472 resp.bad_wr = 0;
2473 ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
9ead190b
RD
2474
2475 put_srq_read(srq);
2476
67cdb40c
RD
2477 if (ret)
2478 for (next = wr; next; next = next->next) {
2479 ++resp.bad_wr;
2480 if (next == bad_wr)
2481 break;
2482 }
2483
67cdb40c
RD
2484 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2485 &resp, sizeof resp))
2486 ret = -EFAULT;
2487
2488out:
67cdb40c
RD
2489 while (wr) {
2490 next = wr->next;
2491 kfree(wr);
2492 wr = next;
2493 }
2494
2495 return ret ? ret : in_len;
2496}
2497
2498ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
2499 const char __user *buf, int in_len,
2500 int out_len)
2501{
2502 struct ib_uverbs_create_ah cmd;
2503 struct ib_uverbs_create_ah_resp resp;
2504 struct ib_uobject *uobj;
2505 struct ib_pd *pd;
2506 struct ib_ah *ah;
2507 struct ib_ah_attr attr;
2508 int ret;
2509
2510 if (out_len < sizeof resp)
2511 return -ENOSPC;
2512
2513 if (copy_from_user(&cmd, buf, sizeof cmd))
2514 return -EFAULT;
2515
2516 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
2517 if (!uobj)
2518 return -ENOMEM;
2519
3bea57a5 2520 init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
9ead190b 2521 down_write(&uobj->mutex);
67cdb40c 2522
9ead190b
RD
2523 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
2524 if (!pd) {
67cdb40c 2525 ret = -EINVAL;
9ead190b 2526 goto err;
67cdb40c
RD
2527 }
2528
67cdb40c
RD
2529 attr.dlid = cmd.attr.dlid;
2530 attr.sl = cmd.attr.sl;
2531 attr.src_path_bits = cmd.attr.src_path_bits;
2532 attr.static_rate = cmd.attr.static_rate;
ea5d4a6a 2533 attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
67cdb40c
RD
2534 attr.port_num = cmd.attr.port_num;
2535 attr.grh.flow_label = cmd.attr.grh.flow_label;
2536 attr.grh.sgid_index = cmd.attr.grh.sgid_index;
2537 attr.grh.hop_limit = cmd.attr.grh.hop_limit;
2538 attr.grh.traffic_class = cmd.attr.grh.traffic_class;
8b0f93d9
DS
2539 attr.vlan_id = 0;
2540 memset(&attr.dmac, 0, sizeof(attr.dmac));
67cdb40c
RD
2541 memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
2542
2543 ah = ib_create_ah(pd, &attr);
2544 if (IS_ERR(ah)) {
2545 ret = PTR_ERR(ah);
ec924b47 2546 goto err_put;
67cdb40c
RD
2547 }
2548
9ead190b
RD
2549 ah->uobject = uobj;
2550 uobj->object = ah;
67cdb40c 2551
9ead190b 2552 ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
67cdb40c
RD
2553 if (ret)
2554 goto err_destroy;
2555
2556 resp.ah_handle = uobj->id;
2557
2558 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2559 &resp, sizeof resp)) {
2560 ret = -EFAULT;
9ead190b 2561 goto err_copy;
67cdb40c
RD
2562 }
2563
9ead190b
RD
2564 put_pd_read(pd);
2565
95ed644f 2566 mutex_lock(&file->mutex);
67cdb40c 2567 list_add_tail(&uobj->list, &file->ucontext->ah_list);
95ed644f 2568 mutex_unlock(&file->mutex);
67cdb40c 2569
9ead190b
RD
2570 uobj->live = 1;
2571
2572 up_write(&uobj->mutex);
67cdb40c
RD
2573
2574 return in_len;
2575
9ead190b
RD
2576err_copy:
2577 idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
67cdb40c
RD
2578
2579err_destroy:
2580 ib_destroy_ah(ah);
2581
ec924b47
MT
2582err_put:
2583 put_pd_read(pd);
2584
9ead190b
RD
2585err:
2586 put_uobj_write(uobj);
67cdb40c
RD
2587 return ret;
2588}
2589
2590ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
2591 const char __user *buf, int in_len, int out_len)
2592{
2593 struct ib_uverbs_destroy_ah cmd;
2594 struct ib_ah *ah;
2595 struct ib_uobject *uobj;
9ead190b 2596 int ret;
67cdb40c
RD
2597
2598 if (copy_from_user(&cmd, buf, sizeof cmd))
2599 return -EFAULT;
2600
9ead190b
RD
2601 uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
2602 if (!uobj)
2603 return -EINVAL;
2604 ah = uobj->object;
67cdb40c 2605
9ead190b
RD
2606 ret = ib_destroy_ah(ah);
2607 if (!ret)
2608 uobj->live = 0;
67cdb40c 2609
9ead190b 2610 put_uobj_write(uobj);
67cdb40c 2611
67cdb40c 2612 if (ret)
9ead190b 2613 return ret;
67cdb40c 2614
9ead190b 2615 idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
67cdb40c 2616
95ed644f 2617 mutex_lock(&file->mutex);
67cdb40c 2618 list_del(&uobj->list);
95ed644f 2619 mutex_unlock(&file->mutex);
67cdb40c 2620
9ead190b 2621 put_uobj(uobj);
67cdb40c 2622
9ead190b 2623 return in_len;
67cdb40c
RD
2624}
2625
bc38a6ab
RD
2626ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
2627 const char __user *buf, int in_len,
2628 int out_len)
2629{
2630 struct ib_uverbs_attach_mcast cmd;
2631 struct ib_qp *qp;
9ead190b 2632 struct ib_uqp_object *obj;
f4e40156 2633 struct ib_uverbs_mcast_entry *mcast;
9ead190b 2634 int ret;
bc38a6ab
RD
2635
2636 if (copy_from_user(&cmd, buf, sizeof cmd))
2637 return -EFAULT;
2638
e214a0fe 2639 qp = idr_write_qp(cmd.qp_handle, file->ucontext);
9ead190b
RD
2640 if (!qp)
2641 return -EINVAL;
f4e40156 2642
9ead190b 2643 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
f4e40156 2644
9ead190b 2645 list_for_each_entry(mcast, &obj->mcast_list, list)
f4e40156
JM
2646 if (cmd.mlid == mcast->lid &&
2647 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2648 ret = 0;
9ead190b 2649 goto out_put;
f4e40156
JM
2650 }
2651
2652 mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2653 if (!mcast) {
2654 ret = -ENOMEM;
9ead190b 2655 goto out_put;
f4e40156
JM
2656 }
2657
2658 mcast->lid = cmd.mlid;
2659 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
bc38a6ab 2660
f4e40156 2661 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
9ead190b
RD
2662 if (!ret)
2663 list_add_tail(&mcast->list, &obj->mcast_list);
2664 else
f4e40156
JM
2665 kfree(mcast);
2666
9ead190b 2667out_put:
e214a0fe 2668 put_qp_write(qp);
bc38a6ab
RD
2669
2670 return ret ? ret : in_len;
2671}
2672
2673ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
2674 const char __user *buf, int in_len,
2675 int out_len)
2676{
2677 struct ib_uverbs_detach_mcast cmd;
9ead190b 2678 struct ib_uqp_object *obj;
bc38a6ab 2679 struct ib_qp *qp;
f4e40156 2680 struct ib_uverbs_mcast_entry *mcast;
bc38a6ab
RD
2681 int ret = -EINVAL;
2682
2683 if (copy_from_user(&cmd, buf, sizeof cmd))
2684 return -EFAULT;
2685
e214a0fe 2686 qp = idr_write_qp(cmd.qp_handle, file->ucontext);
9ead190b
RD
2687 if (!qp)
2688 return -EINVAL;
bc38a6ab 2689
f4e40156
JM
2690 ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
2691 if (ret)
9ead190b 2692 goto out_put;
f4e40156 2693
9ead190b 2694 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
f4e40156 2695
9ead190b 2696 list_for_each_entry(mcast, &obj->mcast_list, list)
f4e40156
JM
2697 if (cmd.mlid == mcast->lid &&
2698 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2699 list_del(&mcast->list);
2700 kfree(mcast);
2701 break;
2702 }
2703
9ead190b 2704out_put:
e214a0fe 2705 put_qp_write(qp);
bc38a6ab
RD
2706
2707 return ret ? ret : in_len;
2708}
f520ba5a 2709
b68c9560 2710static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
436f2ad0
HHZ
2711 union ib_flow_spec *ib_spec)
2712{
c780d82a
YD
2713 if (kern_spec->reserved)
2714 return -EINVAL;
2715
436f2ad0
HHZ
2716 ib_spec->type = kern_spec->type;
2717
2718 switch (ib_spec->type) {
2719 case IB_FLOW_SPEC_ETH:
2720 ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
2721 if (ib_spec->eth.size != kern_spec->eth.size)
2722 return -EINVAL;
2723 memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
2724 sizeof(struct ib_flow_eth_filter));
2725 memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
2726 sizeof(struct ib_flow_eth_filter));
2727 break;
2728 case IB_FLOW_SPEC_IPV4:
2729 ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
2730 if (ib_spec->ipv4.size != kern_spec->ipv4.size)
2731 return -EINVAL;
2732 memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
2733 sizeof(struct ib_flow_ipv4_filter));
2734 memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
2735 sizeof(struct ib_flow_ipv4_filter));
2736 break;
2737 case IB_FLOW_SPEC_TCP:
2738 case IB_FLOW_SPEC_UDP:
2739 ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
2740 if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
2741 return -EINVAL;
2742 memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
2743 sizeof(struct ib_flow_tcp_udp_filter));
2744 memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
2745 sizeof(struct ib_flow_tcp_udp_filter));
2746 break;
2747 default:
2748 return -EINVAL;
2749 }
2750 return 0;
2751}
2752
f21519b2
YD
2753int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
2754 struct ib_udata *ucore,
2755 struct ib_udata *uhw)
436f2ad0
HHZ
2756{
2757 struct ib_uverbs_create_flow cmd;
2758 struct ib_uverbs_create_flow_resp resp;
2759 struct ib_uobject *uobj;
2760 struct ib_flow *flow_id;
d82693da 2761 struct ib_uverbs_flow_attr *kern_flow_attr;
436f2ad0
HHZ
2762 struct ib_flow_attr *flow_attr;
2763 struct ib_qp *qp;
2764 int err = 0;
2765 void *kern_spec;
2766 void *ib_spec;
2767 int i;
436f2ad0 2768
6bcca3d4
YD
2769 if (ucore->inlen < sizeof(cmd))
2770 return -EINVAL;
2771
f21519b2 2772 if (ucore->outlen < sizeof(resp))
436f2ad0
HHZ
2773 return -ENOSPC;
2774
f21519b2
YD
2775 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
2776 if (err)
2777 return err;
2778
2779 ucore->inbuf += sizeof(cmd);
2780 ucore->inlen -= sizeof(cmd);
436f2ad0 2781
22878dbc
MB
2782 if (cmd.comp_mask)
2783 return -EINVAL;
2784
436f2ad0
HHZ
2785 if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
2786 !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
2787 return -EPERM;
2788
f8848274 2789 if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
22878dbc
MB
2790 return -EINVAL;
2791
f21519b2 2792 if (cmd.flow_attr.size > ucore->inlen ||
f8848274 2793 cmd.flow_attr.size >
b68c9560 2794 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
22878dbc
MB
2795 return -EINVAL;
2796
c780d82a
YD
2797 if (cmd.flow_attr.reserved[0] ||
2798 cmd.flow_attr.reserved[1])
2799 return -EINVAL;
2800
436f2ad0 2801 if (cmd.flow_attr.num_of_specs) {
f8848274
MB
2802 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
2803 GFP_KERNEL);
436f2ad0
HHZ
2804 if (!kern_flow_attr)
2805 return -ENOMEM;
2806
2807 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
f21519b2
YD
2808 err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
2809 cmd.flow_attr.size);
2810 if (err)
436f2ad0 2811 goto err_free_attr;
436f2ad0
HHZ
2812 } else {
2813 kern_flow_attr = &cmd.flow_attr;
436f2ad0
HHZ
2814 }
2815
2816 uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
2817 if (!uobj) {
2818 err = -ENOMEM;
2819 goto err_free_attr;
2820 }
2821 init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
2822 down_write(&uobj->mutex);
2823
2824 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2825 if (!qp) {
2826 err = -EINVAL;
2827 goto err_uobj;
2828 }
2829
f8848274 2830 flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
436f2ad0
HHZ
2831 if (!flow_attr) {
2832 err = -ENOMEM;
2833 goto err_put;
2834 }
2835
2836 flow_attr->type = kern_flow_attr->type;
2837 flow_attr->priority = kern_flow_attr->priority;
2838 flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
2839 flow_attr->port = kern_flow_attr->port;
2840 flow_attr->flags = kern_flow_attr->flags;
2841 flow_attr->size = sizeof(*flow_attr);
2842
2843 kern_spec = kern_flow_attr + 1;
2844 ib_spec = flow_attr + 1;
f8848274 2845 for (i = 0; i < flow_attr->num_of_specs &&
b68c9560 2846 cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
f8848274 2847 cmd.flow_attr.size >=
b68c9560 2848 ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
436f2ad0
HHZ
2849 err = kern_spec_to_ib_spec(kern_spec, ib_spec);
2850 if (err)
2851 goto err_free;
2852 flow_attr->size +=
2853 ((union ib_flow_spec *) ib_spec)->size;
b68c9560
YD
2854 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
2855 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
436f2ad0
HHZ
2856 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
2857 }
f8848274
MB
2858 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
2859 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
2860 i, cmd.flow_attr.size);
98a37510 2861 err = -EINVAL;
436f2ad0
HHZ
2862 goto err_free;
2863 }
2864 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
2865 if (IS_ERR(flow_id)) {
2866 err = PTR_ERR(flow_id);
2867 goto err_free;
2868 }
2869 flow_id->qp = qp;
2870 flow_id->uobject = uobj;
2871 uobj->object = flow_id;
2872
2873 err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
2874 if (err)
2875 goto destroy_flow;
2876
2877 memset(&resp, 0, sizeof(resp));
2878 resp.flow_handle = uobj->id;
2879
f21519b2
YD
2880 err = ib_copy_to_udata(ucore,
2881 &resp, sizeof(resp));
2882 if (err)
436f2ad0 2883 goto err_copy;
436f2ad0
HHZ
2884
2885 put_qp_read(qp);
2886 mutex_lock(&file->mutex);
2887 list_add_tail(&uobj->list, &file->ucontext->rule_list);
2888 mutex_unlock(&file->mutex);
2889
2890 uobj->live = 1;
2891
2892 up_write(&uobj->mutex);
2893 kfree(flow_attr);
2894 if (cmd.flow_attr.num_of_specs)
2895 kfree(kern_flow_attr);
f21519b2 2896 return 0;
436f2ad0
HHZ
2897err_copy:
2898 idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
2899destroy_flow:
2900 ib_destroy_flow(flow_id);
2901err_free:
2902 kfree(flow_attr);
2903err_put:
2904 put_qp_read(qp);
2905err_uobj:
2906 put_uobj_write(uobj);
2907err_free_attr:
2908 if (cmd.flow_attr.num_of_specs)
2909 kfree(kern_flow_attr);
2910 return err;
2911}
2912
f21519b2
YD
2913int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
2914 struct ib_udata *ucore,
2915 struct ib_udata *uhw)
2916{
436f2ad0
HHZ
2917 struct ib_uverbs_destroy_flow cmd;
2918 struct ib_flow *flow_id;
2919 struct ib_uobject *uobj;
2920 int ret;
2921
6bcca3d4
YD
2922 if (ucore->inlen < sizeof(cmd))
2923 return -EINVAL;
2924
f21519b2
YD
2925 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
2926 if (ret)
2927 return ret;
436f2ad0 2928
2782c2d3
YD
2929 if (cmd.comp_mask)
2930 return -EINVAL;
2931
436f2ad0
HHZ
2932 uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
2933 file->ucontext);
2934 if (!uobj)
2935 return -EINVAL;
2936 flow_id = uobj->object;
2937
2938 ret = ib_destroy_flow(flow_id);
2939 if (!ret)
2940 uobj->live = 0;
2941
2942 put_uobj_write(uobj);
2943
2944 idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
2945
2946 mutex_lock(&file->mutex);
2947 list_del(&uobj->list);
2948 mutex_unlock(&file->mutex);
2949
2950 put_uobj(uobj);
2951
f21519b2 2952 return ret;
436f2ad0
HHZ
2953}
2954
c89d1bed
SH
2955static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
2956 struct ib_uverbs_create_xsrq *cmd,
2957 struct ib_udata *udata)
f520ba5a 2958{
f520ba5a 2959 struct ib_uverbs_create_srq_resp resp;
8541f8de 2960 struct ib_usrq_object *obj;
f520ba5a
RD
2961 struct ib_pd *pd;
2962 struct ib_srq *srq;
8541f8de 2963 struct ib_uobject *uninitialized_var(xrcd_uobj);
f520ba5a
RD
2964 struct ib_srq_init_attr attr;
2965 int ret;
2966
9ead190b
RD
2967 obj = kmalloc(sizeof *obj, GFP_KERNEL);
2968 if (!obj)
f520ba5a
RD
2969 return -ENOMEM;
2970
3bea57a5 2971 init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
8541f8de 2972 down_write(&obj->uevent.uobject.mutex);
f520ba5a 2973
8541f8de 2974 if (cmd->srq_type == IB_SRQT_XRC) {
8541f8de
SH
2975 attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
2976 if (!attr.ext.xrc.xrcd) {
2977 ret = -EINVAL;
5909ce54 2978 goto err;
8541f8de
SH
2979 }
2980
2981 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
2982 atomic_inc(&obj->uxrcd->refcnt);
5909ce54
RD
2983
2984 attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
2985 if (!attr.ext.xrc.cq) {
2986 ret = -EINVAL;
2987 goto err_put_xrcd;
2988 }
2989 }
2990
2991 pd = idr_read_pd(cmd->pd_handle, file->ucontext);
2992 if (!pd) {
2993 ret = -EINVAL;
2994 goto err_put_cq;
8541f8de
SH
2995 }
2996
f520ba5a
RD
2997 attr.event_handler = ib_uverbs_srq_event_handler;
2998 attr.srq_context = file;
8541f8de
SH
2999 attr.srq_type = cmd->srq_type;
3000 attr.attr.max_wr = cmd->max_wr;
3001 attr.attr.max_sge = cmd->max_sge;
3002 attr.attr.srq_limit = cmd->srq_limit;
f520ba5a 3003
8541f8de
SH
3004 obj->uevent.events_reported = 0;
3005 INIT_LIST_HEAD(&obj->uevent.event_list);
f520ba5a 3006
8541f8de 3007 srq = pd->device->create_srq(pd, &attr, udata);
f520ba5a
RD
3008 if (IS_ERR(srq)) {
3009 ret = PTR_ERR(srq);
ec924b47 3010 goto err_put;
f520ba5a
RD
3011 }
3012
8541f8de
SH
3013 srq->device = pd->device;
3014 srq->pd = pd;
3015 srq->srq_type = cmd->srq_type;
3016 srq->uobject = &obj->uevent.uobject;
f520ba5a
RD
3017 srq->event_handler = attr.event_handler;
3018 srq->srq_context = attr.srq_context;
8541f8de
SH
3019
3020 if (cmd->srq_type == IB_SRQT_XRC) {
3021 srq->ext.xrc.cq = attr.ext.xrc.cq;
3022 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
3023 atomic_inc(&attr.ext.xrc.cq->usecnt);
3024 atomic_inc(&attr.ext.xrc.xrcd->usecnt);
3025 }
3026
f520ba5a
RD
3027 atomic_inc(&pd->usecnt);
3028 atomic_set(&srq->usecnt, 0);
3029
8541f8de
SH
3030 obj->uevent.uobject.object = srq;
3031 ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
f520ba5a
RD
3032 if (ret)
3033 goto err_destroy;
3034
9ead190b 3035 memset(&resp, 0, sizeof resp);
8541f8de 3036 resp.srq_handle = obj->uevent.uobject.id;
ea88fd16
DB
3037 resp.max_wr = attr.attr.max_wr;
3038 resp.max_sge = attr.attr.max_sge;
8541f8de
SH
3039 if (cmd->srq_type == IB_SRQT_XRC)
3040 resp.srqn = srq->ext.xrc.srq_num;
f520ba5a 3041
8541f8de 3042 if (copy_to_user((void __user *) (unsigned long) cmd->response,
f520ba5a
RD
3043 &resp, sizeof resp)) {
3044 ret = -EFAULT;
9ead190b 3045 goto err_copy;
f520ba5a
RD
3046 }
3047
8541f8de
SH
3048 if (cmd->srq_type == IB_SRQT_XRC) {
3049 put_uobj_read(xrcd_uobj);
3050 put_cq_read(attr.ext.xrc.cq);
3051 }
9ead190b
RD
3052 put_pd_read(pd);
3053
95ed644f 3054 mutex_lock(&file->mutex);
8541f8de 3055 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
95ed644f 3056 mutex_unlock(&file->mutex);
eb9d3cd5 3057
8541f8de 3058 obj->uevent.uobject.live = 1;
9ead190b 3059
8541f8de 3060 up_write(&obj->uevent.uobject.mutex);
f520ba5a 3061
8541f8de 3062 return 0;
f520ba5a 3063
9ead190b 3064err_copy:
8541f8de 3065 idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
f520ba5a
RD
3066
3067err_destroy:
3068 ib_destroy_srq(srq);
3069
ec924b47 3070err_put:
5909ce54 3071 put_pd_read(pd);
8541f8de
SH
3072
3073err_put_cq:
3074 if (cmd->srq_type == IB_SRQT_XRC)
3075 put_cq_read(attr.ext.xrc.cq);
3076
5909ce54
RD
3077err_put_xrcd:
3078 if (cmd->srq_type == IB_SRQT_XRC) {
3079 atomic_dec(&obj->uxrcd->refcnt);
3080 put_uobj_read(xrcd_uobj);
3081 }
ec924b47 3082
9ead190b 3083err:
8541f8de 3084 put_uobj_write(&obj->uevent.uobject);
f520ba5a
RD
3085 return ret;
3086}
3087
8541f8de
SH
3088ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
3089 const char __user *buf, int in_len,
3090 int out_len)
3091{
3092 struct ib_uverbs_create_srq cmd;
3093 struct ib_uverbs_create_xsrq xcmd;
3094 struct ib_uverbs_create_srq_resp resp;
3095 struct ib_udata udata;
3096 int ret;
3097
3098 if (out_len < sizeof resp)
3099 return -ENOSPC;
3100
3101 if (copy_from_user(&cmd, buf, sizeof cmd))
3102 return -EFAULT;
3103
3104 xcmd.response = cmd.response;
3105 xcmd.user_handle = cmd.user_handle;
3106 xcmd.srq_type = IB_SRQT_BASIC;
3107 xcmd.pd_handle = cmd.pd_handle;
3108 xcmd.max_wr = cmd.max_wr;
3109 xcmd.max_sge = cmd.max_sge;
3110 xcmd.srq_limit = cmd.srq_limit;
3111
3112 INIT_UDATA(&udata, buf + sizeof cmd,
3113 (unsigned long) cmd.response + sizeof resp,
3114 in_len - sizeof cmd, out_len - sizeof resp);
3115
3116 ret = __uverbs_create_xsrq(file, &xcmd, &udata);
3117 if (ret)
3118 return ret;
3119
3120 return in_len;
3121}
3122
3123ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
3124 const char __user *buf, int in_len, int out_len)
3125{
3126 struct ib_uverbs_create_xsrq cmd;
3127 struct ib_uverbs_create_srq_resp resp;
3128 struct ib_udata udata;
3129 int ret;
3130
3131 if (out_len < sizeof resp)
3132 return -ENOSPC;
3133
3134 if (copy_from_user(&cmd, buf, sizeof cmd))
3135 return -EFAULT;
3136
3137 INIT_UDATA(&udata, buf + sizeof cmd,
3138 (unsigned long) cmd.response + sizeof resp,
3139 in_len - sizeof cmd, out_len - sizeof resp);
3140
3141 ret = __uverbs_create_xsrq(file, &cmd, &udata);
3142 if (ret)
3143 return ret;
3144
3145 return in_len;
3146}
3147
f520ba5a
RD
3148ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
3149 const char __user *buf, int in_len,
3150 int out_len)
3151{
3152 struct ib_uverbs_modify_srq cmd;
9bc57e2d 3153 struct ib_udata udata;
f520ba5a
RD
3154 struct ib_srq *srq;
3155 struct ib_srq_attr attr;
3156 int ret;
3157
3158 if (copy_from_user(&cmd, buf, sizeof cmd))
3159 return -EFAULT;
3160
9bc57e2d
RC
3161 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
3162 out_len);
3163
9ead190b
RD
3164 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3165 if (!srq)
3166 return -EINVAL;
f520ba5a
RD
3167
3168 attr.max_wr = cmd.max_wr;
f520ba5a
RD
3169 attr.srq_limit = cmd.srq_limit;
3170
9bc57e2d 3171 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
f520ba5a 3172
9ead190b 3173 put_srq_read(srq);
f520ba5a
RD
3174
3175 return ret ? ret : in_len;
3176}
3177
8bdb0e86
DB
3178ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
3179 const char __user *buf,
3180 int in_len, int out_len)
3181{
3182 struct ib_uverbs_query_srq cmd;
3183 struct ib_uverbs_query_srq_resp resp;
3184 struct ib_srq_attr attr;
3185 struct ib_srq *srq;
3186 int ret;
3187
3188 if (out_len < sizeof resp)
3189 return -ENOSPC;
3190
3191 if (copy_from_user(&cmd, buf, sizeof cmd))
3192 return -EFAULT;
3193
9ead190b
RD
3194 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3195 if (!srq)
3196 return -EINVAL;
8bdb0e86 3197
9ead190b 3198 ret = ib_query_srq(srq, &attr);
8bdb0e86 3199
9ead190b 3200 put_srq_read(srq);
8bdb0e86
DB
3201
3202 if (ret)
9ead190b 3203 return ret;
8bdb0e86
DB
3204
3205 memset(&resp, 0, sizeof resp);
3206
3207 resp.max_wr = attr.max_wr;
3208 resp.max_sge = attr.max_sge;
3209 resp.srq_limit = attr.srq_limit;
3210
3211 if (copy_to_user((void __user *) (unsigned long) cmd.response,
3212 &resp, sizeof resp))
9ead190b 3213 return -EFAULT;
8bdb0e86 3214
9ead190b 3215 return in_len;
8bdb0e86
DB
3216}
3217
f520ba5a
RD
3218ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
3219 const char __user *buf, int in_len,
3220 int out_len)
3221{
63aaf647
RD
3222 struct ib_uverbs_destroy_srq cmd;
3223 struct ib_uverbs_destroy_srq_resp resp;
9ead190b 3224 struct ib_uobject *uobj;
63aaf647 3225 struct ib_srq *srq;
9ead190b 3226 struct ib_uevent_object *obj;
63aaf647 3227 int ret = -EINVAL;
846be90d
YH
3228 struct ib_usrq_object *us;
3229 enum ib_srq_type srq_type;
f520ba5a
RD
3230
3231 if (copy_from_user(&cmd, buf, sizeof cmd))
3232 return -EFAULT;
3233
9ead190b
RD
3234 uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
3235 if (!uobj)
3236 return -EINVAL;
3237 srq = uobj->object;
3238 obj = container_of(uobj, struct ib_uevent_object, uobject);
846be90d 3239 srq_type = srq->srq_type;
63aaf647 3240
9ead190b
RD
3241 ret = ib_destroy_srq(srq);
3242 if (!ret)
3243 uobj->live = 0;
f520ba5a 3244
9ead190b 3245 put_uobj_write(uobj);
f520ba5a 3246
f520ba5a 3247 if (ret)
9ead190b 3248 return ret;
f520ba5a 3249
846be90d
YH
3250 if (srq_type == IB_SRQT_XRC) {
3251 us = container_of(obj, struct ib_usrq_object, uevent);
3252 atomic_dec(&us->uxrcd->refcnt);
3253 }
3254
9ead190b 3255 idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
f520ba5a 3256
95ed644f 3257 mutex_lock(&file->mutex);
9ead190b 3258 list_del(&uobj->list);
95ed644f 3259 mutex_unlock(&file->mutex);
f520ba5a 3260
9ead190b 3261 ib_uverbs_release_uevent(file, obj);
63aaf647 3262
9ead190b
RD
3263 memset(&resp, 0, sizeof resp);
3264 resp.events_reported = obj->events_reported;
63aaf647 3265
9ead190b 3266 put_uobj(uobj);
f520ba5a 3267
63aaf647
RD
3268 if (copy_to_user((void __user *) (unsigned long) cmd.response,
3269 &resp, sizeof resp))
3270 ret = -EFAULT;
3271
f520ba5a
RD
3272 return ret ? ret : in_len;
3273}
5a77abf9
EC
3274
3275int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
3276 struct ib_udata *ucore,
3277 struct ib_udata *uhw)
3278{
3279 struct ib_uverbs_ex_query_device_resp resp;
3280 struct ib_uverbs_ex_query_device cmd;
3281 struct ib_device_attr attr;
3282 struct ib_device *device;
3283 int err;
3284
3285 device = file->device->ib_dev;
3286 if (ucore->inlen < sizeof(cmd))
3287 return -EINVAL;
3288
3289 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3290 if (err)
3291 return err;
3292
3293 if (cmd.reserved)
3294 return -EINVAL;
3295
3296 err = device->query_device(device, &attr);
3297 if (err)
3298 return err;
3299
3300 memset(&resp, 0, sizeof(resp));
3301 copy_query_dev_fields(file, &resp.base, &attr);
3302 resp.comp_mask = 0;
3303
860f10a7
SG
3304#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3305 if (cmd.comp_mask & IB_USER_VERBS_EX_QUERY_DEVICE_ODP) {
3306 resp.odp_caps.general_caps = attr.odp_caps.general_caps;
3307 resp.odp_caps.per_transport_caps.rc_odp_caps =
3308 attr.odp_caps.per_transport_caps.rc_odp_caps;
3309 resp.odp_caps.per_transport_caps.uc_odp_caps =
3310 attr.odp_caps.per_transport_caps.uc_odp_caps;
3311 resp.odp_caps.per_transport_caps.ud_odp_caps =
3312 attr.odp_caps.per_transport_caps.ud_odp_caps;
3313 resp.comp_mask |= IB_USER_VERBS_EX_QUERY_DEVICE_ODP;
3314 }
3315#endif
3316
5a77abf9
EC
3317 err = ib_copy_to_udata(ucore, &resp, sizeof(resp));
3318 if (err)
3319 return err;
3320
3321 return 0;
3322}