drivers/infiniband/core/uverbs_cmd.c
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"

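/*
 * Resolve a completion-channel fd into its event file.  An extra reference
 * is taken on the uobject before the read lock is dropped, so the caller
 * ends up holding its own reference to the returned event file.
 */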
static struct ib_uverbs_completion_event_file *
ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
{
	struct ib_uobject *uobj = uobj_get_read(uobj_get_type(comp_channel),
						fd, context);
	struct ib_uobject_file *uobj_file;

	if (IS_ERR(uobj))
		return (void *)uobj;

	uverbs_uobject_get(uobj);
	uobj_put_read(uobj);

	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
	return container_of(uobj_file, struct ib_uverbs_completion_event_file,
			    uobj_file);
}

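/*
 * GET_CONTEXT: allocate the per-process ib_ucontext, charge it against the
 * RDMA cgroup, and hand an async event fd back to userspace.  Only one
 * context may be created per uverbs file.
 */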
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_ucontext *ucontext;
	struct file *filp;
	struct ib_rdmacg_object cg_obj;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
	if (ret)
		goto err;

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err_alloc;
	}

	ucontext->device = ib_dev;
	ucontext->cg_obj = cg_obj;
	/* ufile is required when some objects are released */
	ucontext->ufile = file;
	uverbs_initialize_ucontext(ucontext);

	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT_CACHED;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err_alloc:
	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;

	if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) {
		resp.lid = OPA_TO_IB_UCAST_LID(attr.lid);
		resp.sm_lid = OPA_TO_IB_UCAST_LID(attr.sm_lid);
	} else {
		resp.lid = ib_lid_cpu16(attr.lid);
		resp.sm_lid = ib_lid_cpu16(attr.sm_lid);
	}
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

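/*
 * PD allocation follows the common uverbs object pattern: a uobject is
 * reserved with uobj_alloc(), the driver PD is created and attached to it,
 * and the allocation is finalized with uobj_alloc_commit() or rolled back
 * with uobj_alloc_abort() on any failure.
 */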
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	uobj = uobj_alloc(uobj_get_type(pd), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;
	pd->res.type = RDMA_RESTRACK_PD;
	rdma_restrack_add(&pd->res);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dealloc_pd(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(pd), cmd.pd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

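/*
 * XRC domains can be shared between processes through a file descriptor:
 * the inode behind the fd is used as the key in a per-device rb-tree, so
 * every open of the same file resolves to the same ib_xrcd.
 */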
struct xrcd_table_entry {
	struct rb_node node;
	struct ib_xrcd *xrcd;
	struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}

ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = (struct ib_uxrcd_object *)uobj_alloc(uobj_get_type(xrcd),
						   file->ucontext);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_tree_mutex_unlock;
	}

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_dealloc_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	uobj_alloc_commit(&obj->uobject);

	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_dealloc_xrcd:
	ib_dealloc_xrcd(xrcd);

err:
	uobj_alloc_abort(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			   struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why)
{
	struct inode *inode;
	int ret;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return 0;

	ret = ib_dealloc_xrcd(xrcd);

	if (why == RDMA_REMOVE_DESTROY && ret)
		atomic_inc(&xrcd->usecnt);
	else if (inode)
		xrcd_table_delete(dev, inode);

	return ret;
}

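/*
 * REG_MR: the user virtual address and the HCA virtual address must share
 * the same offset within a page, and the requested access flags are
 * validated before the driver's reg_user_mr() is called.
 */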
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = uobj_alloc(uobj_get_type(mr), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mr;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dereg_mr(mr);

err_put:
	uobj_put_obj_read(pd);

err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata udata;
	struct ib_pd *pd = NULL;
	struct ib_mr *mr;
	struct ib_pd *old_pd;
	int ret;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

put_uobjs:
	uobj_put_write(uobj);

	return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(mw), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	uverbs_dealloc_mw(mw);
err_put:
	uobj_put_obj_read(pd);
err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mw), cmd.mw_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct ib_uobject *uobj;
	struct ib_uverbs_completion_event_file *ev_file;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(comp_channel), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	resp.fd = uobj->id;

	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
			       uobj_file.uobj);
	ib_uverbs_init_event_queue(&ev_file->ev_queue);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		uobj_alloc_abort(uobj);
		return -EFAULT;
	}

	uobj_alloc_commit(uobj);
	return in_len;
}

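/*
 * create_cq() is shared by the legacy CREATE_CQ path and the extended
 * EX_CREATE_CQ path; the caller-supplied callback copies the response out
 * in the layout each ABI expects.
 */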
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object *obj;
	struct ib_uverbs_completion_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (!ib_dev->create_cq)
		return ERR_PTR(-EOPNOTSUPP);

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = (struct ib_ucq_object *)uobj_alloc(uobj_get_type(cq),
						 file->ucontext);
	if (IS_ERR(obj))
		return obj;

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel,
						     file->ucontext);
		if (IS_ERR(ev_file)) {
			ret = PTR_ERR(ev_file);
			goto err;
		}
	}

	obj->uobject.user_handle = cmd->user_handle;
	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_add(&cq->res);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	uobj_alloc_commit(&obj->uobject);
	return obj;

err_cb:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	uobj_alloc_abort(&obj->uobject);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_udata uhw;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), sizeof(resp));

	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq cmd;
	struct ib_ucq_object *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	return PTR_ERR_OR_ZERO(obj);
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp = {};
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	uobj_put_obj_read(cq);

	return ret ? ret : in_len;
}

static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
			   struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	if (rdma_cap_opa_ah(ib_dev, wc->port_num))
		tmp.slid = OPA_TO_IB_UCAST_LID(wc->slid);
	else
		tmp.slid = ib_lid_cpu16(wc->slid);
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = u64_to_user_ptr(cmd.response);
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(ib_dev, data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	uobj_put_obj_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	uobj_put_obj_read(cq);

	return in_len;
}

ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(cq), cmd.cq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);
	cq = uobj->object;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	uverbs_uobject_put(uobj);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

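/*
 * create_qp() backs both the legacy and the extended create-QP commands.
 * It looks up every user handle referenced by the request (PD, CQs, SRQ,
 * XRCD or RWQ indirection table), builds the ib_qp_init_attr, and only
 * commits the new QP handle once the response has been copied out.
 */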
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	char *buf;
	struct ib_qp_init_attr attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	obj->uxrcd = NULL;
	obj->uevent.uobject.user_handle = cmd->user_handle;
	mutex_init(&obj->mcast_lock);

	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
		      (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = uobj_get_obj_read(rwq_ind_table,
					    cmd->rwq_ind_tbl_handle,
					    file->ucontext);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if (cmd_sz > sizeof(*cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(*cmd),
				 cmd_sz - sizeof(*cmd))) {
		ret = -EOPNOTSUPP;
		goto err_put;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->pd_handle,
					  file->ucontext);

		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err_put;
		}

		xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = uobj_get_obj_read(srq, cmd->srq_handle,
							file->ucontext);
				if (!srq || srq->srq_type == IB_SRQT_XRC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = uobj_get_obj_read(cq, cmd->recv_cq_handle,
								file->ucontext);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = uobj_get_obj_read(cq, cmd->send_cq_handle,
						file->ucontext);
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					     IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd->qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd->max_send_wr;
	attr.cap.max_recv_wr = cmd->max_recv_wr;
	attr.cap.max_send_sge = cmd->max_send_sge;
	attr.cap.max_recv_sge = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS |
				  IB_QP_CREATE_CVLAN_STRIPPING |
				  IB_QP_CREATE_SOURCE_QPN |
				  IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
		if (!capable(CAP_NET_RAW)) {
			ret = -EPERM;
			goto err_put;
		}

		attr.source_qpn = cmd->source_qpn;
	}

	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = _ib_create_qp(device, pd, &attr, uhw,
				   &obj->uevent.uobject);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		ret = ib_create_qp_security(qp, device);
		if (ret)
			goto err_cb;

		qp->real_qp = qp;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->rwq_ind_tbl = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		qp->port = 0;
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	} else {
		/* It is done in _ib_create_qp for other QP types */
		qp->uobject = &obj->uevent.uobject;
	}

	obj->uevent.uobject.object = qp;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn = qp->qp_num;
	resp.base.qp_handle = obj->uevent.uobject.id;
	resp.base.max_recv_sge = attr.cap.max_recv_sge;
	resp.base.max_send_sge = attr.cap.max_send_sge;
	resp.base.max_recv_wr = attr.cap.max_recv_wr;
	resp.base.max_send_wr = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;
err_cb:
	ib_destroy_qp(qp);

err_put:
	if (!IS_ERR(xrcd_uobj))
		uobj_put_read(xrcd_uobj);
	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata ucore;
	struct ib_udata uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), resp_size);
	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}

static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}

ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp *qp;
	struct ib_qp_open_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd.pd_handle,
				  file->ucontext);
	if (IS_ERR(xrcd_uobj)) {
		ret = -EINVAL;
		goto err_put;
	}

	xrcd = (struct ib_xrcd *)xrcd_uobj->object;
	if (!xrcd) {
		ret = -EINVAL;
		goto err_xrcd;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_xrcd;
	}

	obj->uevent.uobject.object = qp;
	obj->uevent.uobject.user_handle = cmd.user_handle;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_destroy;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	qp->uobject = &obj->uevent.uobject;
	uobj_put_read(xrcd_uobj);

	uobj_alloc_commit(&obj->uevent.uobject);

	return in_len;

err_destroy:
	ib_destroy_qp(qp);
err_xrcd:
	uobj_put_read(xrcd_uobj);
err_put:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

89caa053
PP
1818static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
1819 struct rdma_ah_attr *rdma_attr)
1820{
1821 const struct ib_global_route *grh;
1822
1823 uverb_attr->dlid = rdma_ah_get_dlid(rdma_attr);
1824 uverb_attr->sl = rdma_ah_get_sl(rdma_attr);
1825 uverb_attr->src_path_bits = rdma_ah_get_path_bits(rdma_attr);
1826 uverb_attr->static_rate = rdma_ah_get_static_rate(rdma_attr);
1827 uverb_attr->is_global = !!(rdma_ah_get_ah_flags(rdma_attr) &
1828 IB_AH_GRH);
1829 if (uverb_attr->is_global) {
1830 grh = rdma_ah_read_grh(rdma_attr);
1831 memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
1832 uverb_attr->flow_label = grh->flow_label;
1833 uverb_attr->sgid_index = grh->sgid_index;
1834 uverb_attr->hop_limit = grh->hop_limit;
1835 uverb_attr->traffic_class = grh->traffic_class;
1836 }
1837 uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr);
1838}
1839
7ccc9a24 1840ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
057aec0d 1841 struct ib_device *ib_dev,
7ccc9a24
DB
1842 const char __user *buf, int in_len,
1843 int out_len)
1844{
1845 struct ib_uverbs_query_qp cmd;
1846 struct ib_uverbs_query_qp_resp resp;
1847 struct ib_qp *qp;
1848 struct ib_qp_attr *attr;
1849 struct ib_qp_init_attr *init_attr;
1850 int ret;
1851
1852 if (copy_from_user(&cmd, buf, sizeof cmd))
1853 return -EFAULT;
1854
1855 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1856 init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
1857 if (!attr || !init_attr) {
1858 ret = -ENOMEM;
1859 goto out;
1860 }
1861
fd3c7904 1862 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
9ead190b 1863 if (!qp) {
7ccc9a24 1864 ret = -EINVAL;
9ead190b
RD
1865 goto out;
1866 }
1867
1868 ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
7ccc9a24 1869
fd3c7904 1870 uobj_put_obj_read(qp);
7ccc9a24
DB
1871
1872 if (ret)
1873 goto out;
1874
1875 memset(&resp, 0, sizeof resp);
1876
1877 resp.qp_state = attr->qp_state;
1878 resp.cur_qp_state = attr->cur_qp_state;
1879 resp.path_mtu = attr->path_mtu;
1880 resp.path_mig_state = attr->path_mig_state;
1881 resp.qkey = attr->qkey;
1882 resp.rq_psn = attr->rq_psn;
1883 resp.sq_psn = attr->sq_psn;
1884 resp.dest_qp_num = attr->dest_qp_num;
1885 resp.qp_access_flags = attr->qp_access_flags;
1886 resp.pkey_index = attr->pkey_index;
1887 resp.alt_pkey_index = attr->alt_pkey_index;
1888 resp.sq_draining = attr->sq_draining;
1889 resp.max_rd_atomic = attr->max_rd_atomic;
1890 resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
1891 resp.min_rnr_timer = attr->min_rnr_timer;
1892 resp.port_num = attr->port_num;
1893 resp.timeout = attr->timeout;
1894 resp.retry_cnt = attr->retry_cnt;
1895 resp.rnr_retry = attr->rnr_retry;
1896 resp.alt_port_num = attr->alt_port_num;
1897 resp.alt_timeout = attr->alt_timeout;
1898
1899 copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
1900 copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);
1901
1902 resp.max_send_wr = init_attr->cap.max_send_wr;
1903 resp.max_recv_wr = init_attr->cap.max_recv_wr;
1904 resp.max_send_sge = init_attr->cap.max_send_sge;
1905 resp.max_recv_sge = init_attr->cap.max_recv_sge;
1906 resp.max_inline_data = init_attr->cap.max_inline_data;
1907 resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1908
1909 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
1910 ret = -EFAULT;
1911
1912out:
1913 kfree(attr);
1914 kfree(init_attr);
1915
1916 return ret ? ret : in_len;
1917}
1918
1919/* Remove ignored fields set in the attribute mask */
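/*
 * Illustrative example: on an XRC initiator QP, a userspace mask of
 * IB_QP_STATE | IB_QP_MIN_RNR_TIMER is reduced to IB_QP_STATE, since the
 * receive-side minimum RNR timer does not apply to the send-only XRC INI.
 */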
1920static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
1921{
1922 switch (qp_type) {
1923 case IB_QPT_XRC_INI:
1924 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
1925 case IB_QPT_XRC_TGT:
1926 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
1927 IB_QP_RNR_RETRY);
1928 default:
1929 return mask;
1930 }
1931}
1932
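/*
 * Inverse of copy_ah_attr_to_uverbs(): build a kernel rdma_ah_attr from the
 * userspace ib_uverbs_qp_dest, resolving the AH type from the port number
 * and filling the GRH only when the destination is marked global.
 */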
1933static void copy_ah_attr_from_uverbs(struct ib_device *dev,
1934 struct rdma_ah_attr *rdma_attr,
1935 struct ib_uverbs_qp_dest *uverb_attr)
1936{
1937 rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
1938 if (uverb_attr->is_global) {
1939 rdma_ah_set_grh(rdma_attr, NULL,
1940 uverb_attr->flow_label,
1941 uverb_attr->sgid_index,
1942 uverb_attr->hop_limit,
1943 uverb_attr->traffic_class);
1944 rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
1945 } else {
1946 rdma_ah_set_ah_flags(rdma_attr, 0);
1947 }
1948 rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
1949 rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
1950 rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
1951 rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
1952 rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
1953 rdma_ah_set_make_grd(rdma_attr, false);
1954}
1955
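/*
 * Common worker for MODIFY_QP and its extended variant: validate the port
 * numbers referenced by the attribute mask, convert the userspace attributes
 * into an ib_qp_attr, and hand the filtered mask to the driver through
 * ib_modify_qp_with_udata().
 */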
1956static int modify_qp(struct ib_uverbs_file *file,
1957 struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
1958 {
1959 struct ib_qp_attr *attr;
1960 struct ib_qp *qp;
1961 int ret;
1962
1963 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1964 if (!attr)
1965 return -ENOMEM;
1966
1967 qp = uobj_get_obj_read(qp, cmd->base.qp_handle, file->ucontext);
1968 if (!qp) {
1969 ret = -EINVAL;
1970 goto out;
1971 }
1972
1973 if ((cmd->base.attr_mask & IB_QP_PORT) &&
1974 !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
1975 ret = -EINVAL;
1976 goto release_qp;
1977 }
1978
1979 if ((cmd->base.attr_mask & IB_QP_AV) &&
1980 !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
1981 ret = -EINVAL;
1982 goto release_qp;
1983 }
1984
1985 if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
1986 (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
1987 !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {
1988 ret = -EINVAL;
1989 goto release_qp;
1990 }
1991
1992 attr->qp_state = cmd->base.qp_state;
1993 attr->cur_qp_state = cmd->base.cur_qp_state;
1994 attr->path_mtu = cmd->base.path_mtu;
1995 attr->path_mig_state = cmd->base.path_mig_state;
1996 attr->qkey = cmd->base.qkey;
1997 attr->rq_psn = cmd->base.rq_psn;
1998 attr->sq_psn = cmd->base.sq_psn;
1999 attr->dest_qp_num = cmd->base.dest_qp_num;
2000 attr->qp_access_flags = cmd->base.qp_access_flags;
2001 attr->pkey_index = cmd->base.pkey_index;
2002 attr->alt_pkey_index = cmd->base.alt_pkey_index;
2003 attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
2004 attr->max_rd_atomic = cmd->base.max_rd_atomic;
2005 attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
2006 attr->min_rnr_timer = cmd->base.min_rnr_timer;
2007 attr->port_num = cmd->base.port_num;
2008 attr->timeout = cmd->base.timeout;
2009 attr->retry_cnt = cmd->base.retry_cnt;
2010 attr->rnr_retry = cmd->base.rnr_retry;
2011 attr->alt_port_num = cmd->base.alt_port_num;
2012 attr->alt_timeout = cmd->base.alt_timeout;
2013 attr->rate_limit = cmd->rate_limit;
2014
2015 if (cmd->base.attr_mask & IB_QP_AV)
2016 copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
2017 &cmd->base.dest);
2018
2019 if (cmd->base.attr_mask & IB_QP_ALT_PATH)
2020 copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
2021 &cmd->base.alt_dest);
2022
2023 ret = ib_modify_qp_with_udata(qp, attr,
2024 modify_qp_mask(qp->qp_type,
2025 cmd->base.attr_mask),
2026 udata);
2027
2028 release_qp:
2029 uobj_put_obj_read(qp);
2030 out:
2031 kfree(attr);
2032
2033 return ret;
2034}
2035
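/*
 * Legacy write() entry point: only attribute bits up to
 * IB_USER_LEGACY_LAST_QP_ATTR_MASK are accepted here; newer bits must go
 * through the extended command below.
 */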
2036ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
2037 struct ib_device *ib_dev,
2038 const char __user *buf, int in_len,
2039 int out_len)
2040{
2041 struct ib_uverbs_ex_modify_qp cmd = {};
2042 struct ib_udata udata;
2043 int ret;
2044
2045 if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
2046 return -EFAULT;
2047
2048 if (cmd.base.attr_mask &
2049 ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
2050 return -EOPNOTSUPP;
2051
2052 ib_uverbs_init_udata(&udata, buf + sizeof(cmd.base), NULL,
2053 in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr),
2054 out_len);
2055
2056 ret = modify_qp(file, &cmd, &udata);
2057 if (ret)
2058 return ret;
2059
2060 return in_len;
2061}
2062
2063int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
2064 struct ib_device *ib_dev,
2065 struct ib_udata *ucore,
2066 struct ib_udata *uhw)
2067{
2068 struct ib_uverbs_ex_modify_qp cmd = {};
2069 int ret;
2070
2071 /*
2072 * Last bit is reserved for extending the attr_mask by
2073 * using another field.
2074 */
2075 BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));
2076
2077 if (ucore->inlen < sizeof(cmd.base))
2078 return -EINVAL;
2079
2080 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
2081 if (ret)
2082 return ret;
2083
2084 if (cmd.base.attr_mask &
2085 ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
2086 return -EOPNOTSUPP;
2087
2088 if (ucore->inlen > sizeof(cmd)) {
2089 if (!ib_is_udata_cleared(ucore, sizeof(cmd),
2090 ucore->inlen - sizeof(cmd)))
2091 return -EOPNOTSUPP;
2092 }
2093
2094 ret = modify_qp(file, &cmd, uhw);
2095
2096 return ret;
2097}
2098
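/*
 * DESTROY_QP: the uobject is looked up for write, and an extra reference is
 * held across uobj_remove_commit() so events_reported can still be read for
 * the response after the QP itself has been destroyed.
 */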
2099 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
2100 struct ib_device *ib_dev,
2101 const char __user *buf, int in_len,
2102 int out_len)
2103{
2104 struct ib_uverbs_destroy_qp cmd;
2105 struct ib_uverbs_destroy_qp_resp resp;
2106 struct ib_uobject *uobj;
2107 struct ib_uqp_object *obj;
2108 int ret = -EINVAL;
2109
2110 if (copy_from_user(&cmd, buf, sizeof cmd))
2111 return -EFAULT;
2112
2113 memset(&resp, 0, sizeof resp);
2114
2115 uobj = uobj_get_write(uobj_get_type(qp), cmd.qp_handle,
2116 file->ucontext);
2117 if (IS_ERR(uobj))
2118 return PTR_ERR(uobj);
2119
2120 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
2121 /*
2122 * Make sure we don't free the memory in remove_commit as we still
2123 * need the uobject memory to create the response.
2124 */
2125 uverbs_uobject_get(uobj);
2126
2127 ret = uobj_remove_commit(uobj);
2128 if (ret) {
2129 uverbs_uobject_put(uobj);
2130 return ret;
2131 }
2132
2133 resp.events_reported = obj->uevent.events_reported;
2134 uverbs_uobject_put(uobj);
2135
2136 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
2137 return -EFAULT;
2138
2139 return in_len;
2140}
2141
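/*
 * Allocate one kernel work request plus its scatter/gather array in a single
 * allocation. The guard on num_sge is an overflow check: roughly, it refuses
 * any num_sge for which ALIGN(wr_size, sizeof(struct ib_sge)) +
 * num_sge * sizeof(struct ib_sge) would wrap past U32_MAX.
 */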
2142 static void *alloc_wr(size_t wr_size, __u32 num_sge)
2143 {
2144 if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
2145 sizeof (struct ib_sge))
2146 return NULL;
2147
2148 return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
2149 num_sge * sizeof (struct ib_sge), GFP_KERNEL);
2150 }
2151
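/*
 * POST_SEND: rebuild the userspace work request list as a chain of kernel
 * ib_send_wr structures (UD, RDMA, atomic or plain sends), taking a read
 * reference on any AH referenced by UD requests, then hand the chain to the
 * driver's post_send(). On failure, resp.bad_wr reports the 1-based position
 * of the work request that failed.
 */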
2152 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2153 struct ib_device *ib_dev,
2154 const char __user *buf, int in_len,
2155 int out_len)
2156{
2157 struct ib_uverbs_post_send cmd;
2158 struct ib_uverbs_post_send_resp resp;
2159 struct ib_uverbs_send_wr *user_wr;
2160 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
2161 struct ib_qp *qp;
2162 int i, sg_ind;
2163 int is_ud;
2164 ssize_t ret = -EINVAL;
2165 size_t next_size;
2166
2167 if (copy_from_user(&cmd, buf, sizeof cmd))
2168 return -EFAULT;
2169
2170 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
2171 cmd.sge_count * sizeof (struct ib_uverbs_sge))
2172 return -EINVAL;
2173
2174 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
2175 return -EINVAL;
2176
2177 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
2178 if (!user_wr)
2179 return -ENOMEM;
2180
2181 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
2182 if (!qp)
2183 goto out;
2184
2185 is_ud = qp->qp_type == IB_QPT_UD;
2186 sg_ind = 0;
2187 last = NULL;
2188 for (i = 0; i < cmd.wr_count; ++i) {
2189 if (copy_from_user(user_wr,
2190 buf + sizeof cmd + i * cmd.wqe_size,
2191 cmd.wqe_size)) {
2192 ret = -EFAULT;
2193 goto out_put;
2194 }
2195
2196 if (user_wr->num_sge + sg_ind > cmd.sge_count) {
2197 ret = -EINVAL;
2198 goto out_put;
2199 }
2200
2201 if (is_ud) {
2202 struct ib_ud_wr *ud;
2203
2204 if (user_wr->opcode != IB_WR_SEND &&
2205 user_wr->opcode != IB_WR_SEND_WITH_IMM) {
2206 ret = -EINVAL;
2207 goto out_put;
2208 }
2209
2210 next_size = sizeof(*ud);
2211 ud = alloc_wr(next_size, user_wr->num_sge);
2212 if (!ud) {
2213 ret = -ENOMEM;
2214 goto out_put;
2215 }
2216
2217 ud->ah = uobj_get_obj_read(ah, user_wr->wr.ud.ah,
2218 file->ucontext);
2219 if (!ud->ah) {
2220 kfree(ud);
2221 ret = -EINVAL;
2222 goto out_put;
2223 }
2224 ud->remote_qpn = user_wr->wr.ud.remote_qpn;
2225 ud->remote_qkey = user_wr->wr.ud.remote_qkey;
2226
2227 next = &ud->wr;
2228 } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2229 user_wr->opcode == IB_WR_RDMA_WRITE ||
2230 user_wr->opcode == IB_WR_RDMA_READ) {
2231 struct ib_rdma_wr *rdma;
2232
2233 next_size = sizeof(*rdma);
2234 rdma = alloc_wr(next_size, user_wr->num_sge);
2235 if (!rdma) {
2236 ret = -ENOMEM;
2237 goto out_put;
2238 }
2239
2240 rdma->remote_addr = user_wr->wr.rdma.remote_addr;
2241 rdma->rkey = user_wr->wr.rdma.rkey;
2242
2243 next = &rdma->wr;
2244 } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2245 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2246 struct ib_atomic_wr *atomic;
2247
2248 next_size = sizeof(*atomic);
2249 atomic = alloc_wr(next_size, user_wr->num_sge);
2250 if (!atomic) {
2251 ret = -ENOMEM;
2252 goto out_put;
2253 }
2254
2255 atomic->remote_addr = user_wr->wr.atomic.remote_addr;
2256 atomic->compare_add = user_wr->wr.atomic.compare_add;
2257 atomic->swap = user_wr->wr.atomic.swap;
2258 atomic->rkey = user_wr->wr.atomic.rkey;
2259
2260 next = &atomic->wr;
2261 } else if (user_wr->opcode == IB_WR_SEND ||
2262 user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2263 user_wr->opcode == IB_WR_SEND_WITH_INV) {
2264 next_size = sizeof(*next);
2265 next = alloc_wr(next_size, user_wr->num_sge);
2266 if (!next) {
2267 ret = -ENOMEM;
2268 goto out_put;
2269 }
2270 } else {
2271 ret = -EINVAL;
2272 goto out_put;
2273 }
2274
2275 if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2276 user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
2277 next->ex.imm_data =
2278 (__be32 __force) user_wr->ex.imm_data;
2279 } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
2280 next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
2281 }
2282
2283 if (!last)
2284 wr = next;
2285 else
2286 last->next = next;
2287 last = next;
2288
2289 next->next = NULL;
2290 next->wr_id = user_wr->wr_id;
2291 next->num_sge = user_wr->num_sge;
2292 next->opcode = user_wr->opcode;
2293 next->send_flags = user_wr->send_flags;
2294
2295 if (next->num_sge) {
2296 next->sg_list = (void *) next +
2297 ALIGN(next_size, sizeof(struct ib_sge));
2298 if (copy_from_user(next->sg_list,
2299 buf + sizeof cmd +
2300 cmd.wr_count * cmd.wqe_size +
2301 sg_ind * sizeof (struct ib_sge),
2302 next->num_sge * sizeof (struct ib_sge))) {
2303 ret = -EFAULT;
2304 goto out_put;
2305 }
2306 sg_ind += next->num_sge;
2307 } else
2308 next->sg_list = NULL;
2309 }
2310
2311 resp.bad_wr = 0;
2312 ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
2313 if (ret)
2314 for (next = wr; next; next = next->next) {
2315 ++resp.bad_wr;
2316 if (next == bad_wr)
2317 break;
2318 }
2319
2320 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
2321 ret = -EFAULT;
2322
2323 out_put:
2324 uobj_put_obj_read(qp);
2325
2326 while (wr) {
2327 if (is_ud && ud_wr(wr)->ah)
2328 uobj_put_obj_read(ud_wr(wr)->ah);
2329 next = wr->next;
2330 kfree(wr);
2331 wr = next;
2332 }
2333
2334 out:
2335 kfree(user_wr);
2336
2337 return ret ? ret : in_len;
2338}
2339
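/*
 * Copy a userspace array of ib_uverbs_recv_wr (plus trailing SGEs) into a
 * kernel linked list of ib_recv_wr, validating per-WR SGE counts against the
 * totals declared in the command. Used by both POST_RECV and POST_SRQ_RECV.
 */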
2340static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
2341 int in_len,
2342 u32 wr_count,
2343 u32 sge_count,
2344 u32 wqe_size)
2345{
2346 struct ib_uverbs_recv_wr *user_wr;
2347 struct ib_recv_wr *wr = NULL, *last, *next;
2348 int sg_ind;
2349 int i;
2350 int ret;
2351
2352 if (in_len < wqe_size * wr_count +
2353 sge_count * sizeof (struct ib_uverbs_sge))
2354 return ERR_PTR(-EINVAL);
2355
2356 if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
2357 return ERR_PTR(-EINVAL);
2358
2359 user_wr = kmalloc(wqe_size, GFP_KERNEL);
2360 if (!user_wr)
2361 return ERR_PTR(-ENOMEM);
2362
2363 sg_ind = 0;
2364 last = NULL;
2365 for (i = 0; i < wr_count; ++i) {
2366 if (copy_from_user(user_wr, buf + i * wqe_size,
2367 wqe_size)) {
2368 ret = -EFAULT;
2369 goto err;
2370 }
2371
2372 if (user_wr->num_sge + sg_ind > sge_count) {
2373 ret = -EINVAL;
2374 goto err;
2375 }
2376
2377 if (user_wr->num_sge >=
2378 (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
2379 sizeof (struct ib_sge)) {
2380 ret = -EINVAL;
2381 goto err;
2382 }
2383
2384 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2385 user_wr->num_sge * sizeof (struct ib_sge),
2386 GFP_KERNEL);
2387 if (!next) {
2388 ret = -ENOMEM;
2389 goto err;
2390 }
2391
2392 if (!last)
2393 wr = next;
2394 else
2395 last->next = next;
2396 last = next;
2397
2398 next->next = NULL;
2399 next->wr_id = user_wr->wr_id;
2400 next->num_sge = user_wr->num_sge;
2401
2402 if (next->num_sge) {
2403 next->sg_list = (void *) next +
2404 ALIGN(sizeof *next, sizeof (struct ib_sge));
2405 if (copy_from_user(next->sg_list,
2406 buf + wr_count * wqe_size +
2407 sg_ind * sizeof (struct ib_sge),
2408 next->num_sge * sizeof (struct ib_sge))) {
2409 ret = -EFAULT;
2410 goto err;
2411 }
2412 sg_ind += next->num_sge;
2413 } else
2414 next->sg_list = NULL;
2415 }
2416
2417 kfree(user_wr);
2418 return wr;
2419
2420err:
2421 kfree(user_wr);
2422
2423 while (wr) {
2424 next = wr->next;
2425 kfree(wr);
2426 wr = next;
2427 }
2428
2429 return ERR_PTR(ret);
2430}
2431
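/*
 * POST_RECV: unmarshal the receive work requests, post them to the QP's
 * real_qp, and report the position of the first failing request in
 * resp.bad_wr, mirroring POST_SEND above.
 */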
2432ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
2433 struct ib_device *ib_dev,
2434 const char __user *buf, int in_len,
2435 int out_len)
2436{
2437 struct ib_uverbs_post_recv cmd;
2438 struct ib_uverbs_post_recv_resp resp;
2439 struct ib_recv_wr *wr, *next, *bad_wr;
2440 struct ib_qp *qp;
2441 ssize_t ret = -EINVAL;
2442
2443 if (copy_from_user(&cmd, buf, sizeof cmd))
2444 return -EFAULT;
2445
2446 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2447 in_len - sizeof cmd, cmd.wr_count,
2448 cmd.sge_count, cmd.wqe_size);
2449 if (IS_ERR(wr))
2450 return PTR_ERR(wr);
2451
fd3c7904 2452 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
9ead190b 2453 if (!qp)
67cdb40c
RD
2454 goto out;
2455
2456 resp.bad_wr = 0;
0e0ec7e0 2457 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
9ead190b 2458
fd3c7904
MB
2459 uobj_put_obj_read(qp);
2460 if (ret) {
67cdb40c
RD
2461 for (next = wr; next; next = next->next) {
2462 ++resp.bad_wr;
2463 if (next == bad_wr)
2464 break;
2465 }
fd3c7904 2466 }
67cdb40c 2467
40a20339 2468 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
67cdb40c
RD
2469 ret = -EFAULT;
2470
2471out:
67cdb40c
RD
2472 while (wr) {
2473 next = wr->next;
2474 kfree(wr);
2475 wr = next;
2476 }
2477
2478 return ret ? ret : in_len;
2479}
2480
2481ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
057aec0d 2482 struct ib_device *ib_dev,
a74cd4af
RD
2483 const char __user *buf, int in_len,
2484 int out_len)
67cdb40c
RD
2485{
2486 struct ib_uverbs_post_srq_recv cmd;
2487 struct ib_uverbs_post_srq_recv_resp resp;
2488 struct ib_recv_wr *wr, *next, *bad_wr;
2489 struct ib_srq *srq;
2490 ssize_t ret = -EINVAL;
2491
2492 if (copy_from_user(&cmd, buf, sizeof cmd))
2493 return -EFAULT;
2494
2495 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2496 in_len - sizeof cmd, cmd.wr_count,
2497 cmd.sge_count, cmd.wqe_size);
2498 if (IS_ERR(wr))
2499 return PTR_ERR(wr);
2500
fd3c7904 2501 srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
9ead190b 2502 if (!srq)
67cdb40c
RD
2503 goto out;
2504
2505 resp.bad_wr = 0;
2506 ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
9ead190b 2507
fd3c7904 2508 uobj_put_obj_read(srq);
9ead190b 2509
67cdb40c
RD
2510 if (ret)
2511 for (next = wr; next; next = next->next) {
2512 ++resp.bad_wr;
2513 if (next == bad_wr)
2514 break;
2515 }
2516
40a20339 2517 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
67cdb40c
RD
2518 ret = -EFAULT;
2519
2520out:
67cdb40c
RD
2521 while (wr) {
2522 next = wr->next;
2523 kfree(wr);
2524 wr = next;
2525 }
2526
2527 return ret ? ret : in_len;
2528}
2529
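/*
 * CREATE_AH: allocate an AH uobject, build an rdma_ah_attr from the
 * userspace address vector (including the GRH when is_global is set), and
 * create the address handle through rdma_create_user_ah().
 */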
2530ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
2531 struct ib_device *ib_dev,
2532 const char __user *buf, int in_len,
2533 int out_len)
2534{
2535 struct ib_uverbs_create_ah cmd;
2536 struct ib_uverbs_create_ah_resp resp;
2537 struct ib_uobject *uobj;
2538 struct ib_pd *pd;
2539 struct ib_ah *ah;
90898850 2540 struct rdma_ah_attr attr;
67cdb40c 2541 int ret;
477864c8 2542 struct ib_udata udata;
67cdb40c
RD
2543
2544 if (out_len < sizeof resp)
2545 return -ENOSPC;
2546
2547 if (copy_from_user(&cmd, buf, sizeof cmd))
2548 return -EFAULT;
2549
5ecce4c9
BP
2550 if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
2551 return -EINVAL;
2552
40a20339
AB
2553 ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
2554 u64_to_user_ptr(cmd.response) + sizeof(resp),
e093111d
AR
2555 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
2556 out_len - sizeof(resp));
477864c8 2557
fd3c7904
MB
2558 uobj = uobj_alloc(uobj_get_type(ah), file->ucontext);
2559 if (IS_ERR(uobj))
2560 return PTR_ERR(uobj);
67cdb40c 2561
fd3c7904 2562 pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
9ead190b 2563 if (!pd) {
67cdb40c 2564 ret = -EINVAL;
9ead190b 2565 goto err;
67cdb40c
RD
2566 }
2567
44c58487 2568 attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
d98bb7f7 2569 rdma_ah_set_make_grd(&attr, false);
d8966fcd
DC
2570 rdma_ah_set_dlid(&attr, cmd.attr.dlid);
2571 rdma_ah_set_sl(&attr, cmd.attr.sl);
2572 rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
2573 rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
2574 rdma_ah_set_port_num(&attr, cmd.attr.port_num);
2575
4ba66093 2576 if (cmd.attr.is_global) {
d8966fcd
DC
2577 rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
2578 cmd.attr.grh.sgid_index,
2579 cmd.attr.grh.hop_limit,
2580 cmd.attr.grh.traffic_class);
2581 rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
4ba66093 2582 } else {
d8966fcd 2583 rdma_ah_set_ah_flags(&attr, 0);
4ba66093 2584 }
477864c8 2585
5cda6587 2586 ah = rdma_create_user_ah(pd, &attr, &udata);
67cdb40c
RD
2587 if (IS_ERR(ah)) {
2588 ret = PTR_ERR(ah);
fd3c7904 2589 goto err_put;
67cdb40c
RD
2590 }
2591
9ead190b 2592 ah->uobject = uobj;
fd3c7904 2593 uobj->user_handle = cmd.user_handle;
9ead190b 2594 uobj->object = ah;
67cdb40c 2595
67cdb40c
RD
2596 resp.ah_handle = uobj->id;
2597
40a20339 2598 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
67cdb40c 2599 ret = -EFAULT;
9ead190b 2600 goto err_copy;
67cdb40c
RD
2601 }
2602
fd3c7904
MB
2603 uobj_put_obj_read(pd);
2604 uobj_alloc_commit(uobj);
67cdb40c
RD
2605
2606 return in_len;
2607
9ead190b 2608err_copy:
36523159 2609 rdma_destroy_ah(ah);
67cdb40c 2610
fd3c7904
MB
2611err_put:
2612 uobj_put_obj_read(pd);
ec924b47 2613
9ead190b 2614err:
fd3c7904 2615 uobj_alloc_abort(uobj);
67cdb40c
RD
2616 return ret;
2617}
2618
2619ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
057aec0d 2620 struct ib_device *ib_dev,
67cdb40c
RD
2621 const char __user *buf, int in_len, int out_len)
2622{
2623 struct ib_uverbs_destroy_ah cmd;
67cdb40c 2624 struct ib_uobject *uobj;
9ead190b 2625 int ret;
67cdb40c
RD
2626
2627 if (copy_from_user(&cmd, buf, sizeof cmd))
2628 return -EFAULT;
2629
fd3c7904
MB
2630 uobj = uobj_get_write(uobj_get_type(ah), cmd.ah_handle,
2631 file->ucontext);
2632 if (IS_ERR(uobj))
2633 return PTR_ERR(uobj);
67cdb40c 2634
fd3c7904
MB
2635 ret = uobj_remove_commit(uobj);
2636 return ret ?: in_len;
67cdb40c
RD
2637}
2638
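/*
 * ATTACH_MCAST: attach the QP to a multicast group. The (gid, mlid) pair is
 * also recorded on the uobject's mcast_list so the attachment can be undone
 * later when the QP uobject is torn down; a group that is already on the
 * list is treated as a successful no-op.
 */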
2639 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
2640 struct ib_device *ib_dev,
2641 const char __user *buf, int in_len,
2642 int out_len)
2643{
2644 struct ib_uverbs_attach_mcast cmd;
2645 struct ib_qp *qp;
9ead190b 2646 struct ib_uqp_object *obj;
f4e40156 2647 struct ib_uverbs_mcast_entry *mcast;
9ead190b 2648 int ret;
bc38a6ab
RD
2649
2650 if (copy_from_user(&cmd, buf, sizeof cmd))
2651 return -EFAULT;
2652
fd3c7904 2653 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
9ead190b
RD
2654 if (!qp)
2655 return -EINVAL;
f4e40156 2656
9ead190b 2657 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
f4e40156 2658
f48b7269 2659 mutex_lock(&obj->mcast_lock);
9ead190b 2660 list_for_each_entry(mcast, &obj->mcast_list, list)
f4e40156
JM
2661 if (cmd.mlid == mcast->lid &&
2662 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2663 ret = 0;
9ead190b 2664 goto out_put;
f4e40156
JM
2665 }
2666
2667 mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2668 if (!mcast) {
2669 ret = -ENOMEM;
9ead190b 2670 goto out_put;
f4e40156
JM
2671 }
2672
2673 mcast->lid = cmd.mlid;
2674 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
bc38a6ab 2675
f4e40156 2676 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
9ead190b
RD
2677 if (!ret)
2678 list_add_tail(&mcast->list, &obj->mcast_list);
2679 else
f4e40156
JM
2680 kfree(mcast);
2681
9ead190b 2682out_put:
f48b7269 2683 mutex_unlock(&obj->mcast_lock);
fd3c7904 2684 uobj_put_obj_read(qp);
bc38a6ab
RD
2685
2686 return ret ? ret : in_len;
2687}
2688
2689ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
057aec0d 2690 struct ib_device *ib_dev,
bc38a6ab
RD
2691 const char __user *buf, int in_len,
2692 int out_len)
2693{
2694 struct ib_uverbs_detach_mcast cmd;
9ead190b 2695 struct ib_uqp_object *obj;
bc38a6ab 2696 struct ib_qp *qp;
f4e40156 2697 struct ib_uverbs_mcast_entry *mcast;
bc38a6ab 2698 int ret = -EINVAL;
20c7840a 2699 bool found = false;
bc38a6ab
RD
2700
2701 if (copy_from_user(&cmd, buf, sizeof cmd))
2702 return -EFAULT;
2703
fd3c7904 2704 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
9ead190b
RD
2705 if (!qp)
2706 return -EINVAL;
bc38a6ab 2707
fd3c7904 2708 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
f48b7269 2709 mutex_lock(&obj->mcast_lock);
fd3c7904 2710
9ead190b 2711 list_for_each_entry(mcast, &obj->mcast_list, list)
f4e40156
JM
2712 if (cmd.mlid == mcast->lid &&
2713 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2714 list_del(&mcast->list);
2715 kfree(mcast);
20c7840a 2716 found = true;
f4e40156
JM
2717 break;
2718 }
2719
20c7840a
MR
2720 if (!found) {
2721 ret = -EINVAL;
2722 goto out_put;
2723 }
2724
2725 ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);
2726
9ead190b 2727out_put:
f48b7269 2728 mutex_unlock(&obj->mcast_lock);
fd3c7904 2729 uobj_put_obj_read(qp);
bc38a6ab
RD
2730 return ret ? ret : in_len;
2731}
2732
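/*
 * Convert an action-type flow specification (flow tag or drop) from its
 * uverbs representation, checking that the userspace size matches the
 * expected structure for that action.
 */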
2733static int kern_spec_to_ib_spec_action(struct ib_uverbs_flow_spec *kern_spec,
2734 union ib_flow_spec *ib_spec)
2735{
2736 ib_spec->type = kern_spec->type;
2737 switch (ib_spec->type) {
2738 case IB_FLOW_SPEC_ACTION_TAG:
2739 if (kern_spec->flow_tag.size !=
2740 sizeof(struct ib_uverbs_flow_spec_action_tag))
2741 return -EINVAL;
2742
2743 ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
2744 ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
2745 break;
483a3966
SS
2746 case IB_FLOW_SPEC_ACTION_DROP:
2747 if (kern_spec->drop.size !=
2748 sizeof(struct ib_uverbs_flow_spec_action_drop))
2749 return -EINVAL;
2750
2751 ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
2752 break;
94e03f11
MR
2753 default:
2754 return -EINVAL;
2755 }
2756 return 0;
2757}
2758
15dfbd6b
MG
2759static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
2760{
2761 /* Returns user space filter size, includes padding */
2762 return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
2763}
2764
2765static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
2766 u16 ib_real_filter_sz)
2767{
2768 /*
2769 * User space filter structures must be 64 bit aligned, otherwise this
2770 * may pass, but we won't handle additional new attributes.
2771 */
2772
2773 if (kern_filter_size > ib_real_filter_sz) {
2774 if (memchr_inv(kern_spec_filter +
2775 ib_real_filter_sz, 0,
2776 kern_filter_size - ib_real_filter_sz))
2777 return -EINVAL;
2778 return ib_real_filter_sz;
2779 }
2780 return kern_filter_size;
2781}
2782
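/*
 * Convert a filter-type flow specification (ETH/IPv4/IPv6/TCP/UDP/VXLAN).
 * The userspace val/mask pair may be longer than the kernel filter; any
 * trailing bytes beyond the kernel's real_sz must be zero, which is what
 * spec_filter_size() enforces before the val and mask are copied.
 */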
2783 static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
2784 union ib_flow_spec *ib_spec)
2785 {
2786 ssize_t actual_filter_sz;
2787 ssize_t kern_filter_sz;
2788 ssize_t ib_filter_sz;
2789 void *kern_spec_mask;
2790 void *kern_spec_val;
2791
c780d82a
YD
2792 if (kern_spec->reserved)
2793 return -EINVAL;
2794
436f2ad0
HHZ
2795 ib_spec->type = kern_spec->type;
2796
15dfbd6b
MG
2797 kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
2798 /* User flow spec size must be aligned to 4 bytes */
2799 if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
2800 return -EINVAL;
2801
2802 kern_spec_val = (void *)kern_spec +
2803 sizeof(struct ib_uverbs_flow_spec_hdr);
2804 kern_spec_mask = kern_spec_val + kern_filter_sz;
fbf46860
MR
2805 if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
2806 return -EINVAL;
15dfbd6b 2807
fbf46860 2808 switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
436f2ad0 2809 case IB_FLOW_SPEC_ETH:
15dfbd6b
MG
2810 ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
2811 actual_filter_sz = spec_filter_size(kern_spec_mask,
2812 kern_filter_sz,
2813 ib_filter_sz);
2814 if (actual_filter_sz <= 0)
436f2ad0 2815 return -EINVAL;
15dfbd6b
MG
2816 ib_spec->size = sizeof(struct ib_flow_spec_eth);
2817 memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
2818 memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
436f2ad0
HHZ
2819 break;
2820 case IB_FLOW_SPEC_IPV4:
15dfbd6b
MG
2821 ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
2822 actual_filter_sz = spec_filter_size(kern_spec_mask,
2823 kern_filter_sz,
2824 ib_filter_sz);
2825 if (actual_filter_sz <= 0)
436f2ad0 2826 return -EINVAL;
15dfbd6b
MG
2827 ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
2828 memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
2829 memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
436f2ad0 2830 break;
4c2aae71 2831 case IB_FLOW_SPEC_IPV6:
15dfbd6b
MG
2832 ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
2833 actual_filter_sz = spec_filter_size(kern_spec_mask,
2834 kern_filter_sz,
2835 ib_filter_sz);
2836 if (actual_filter_sz <= 0)
4c2aae71 2837 return -EINVAL;
15dfbd6b
MG
2838 ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
2839 memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
2840 memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);
a72c6a2b
MG
2841
2842 if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
2843 (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
2844 return -EINVAL;
4c2aae71 2845 break;
436f2ad0
HHZ
2846 case IB_FLOW_SPEC_TCP:
2847 case IB_FLOW_SPEC_UDP:
15dfbd6b
MG
2848 ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
2849 actual_filter_sz = spec_filter_size(kern_spec_mask,
2850 kern_filter_sz,
2851 ib_filter_sz);
2852 if (actual_filter_sz <= 0)
436f2ad0 2853 return -EINVAL;
15dfbd6b
MG
2854 ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
2855 memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
2856 memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
436f2ad0 2857 break;
0dbf3332
MR
2858 case IB_FLOW_SPEC_VXLAN_TUNNEL:
2859 ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
2860 actual_filter_sz = spec_filter_size(kern_spec_mask,
2861 kern_filter_sz,
2862 ib_filter_sz);
2863 if (actual_filter_sz <= 0)
2864 return -EINVAL;
2865 ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
2866 memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
2867 memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);
2868
2869 if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
2870 (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
2871 return -EINVAL;
2872 break;
436f2ad0
HHZ
2873 default:
2874 return -EINVAL;
2875 }
2876 return 0;
2877}
2878
94e03f11
MR
2879static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
2880 union ib_flow_spec *ib_spec)
2881{
2882 if (kern_spec->reserved)
2883 return -EINVAL;
2884
2885 if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
2886 return kern_spec_to_ib_spec_action(kern_spec, ib_spec);
2887 else
2888 return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
2889}
2890
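/*
 * EX_CREATE_WQ: validate the variable-length command, look up the PD and CQ,
 * ask the driver to create the work queue, and wire up the usage counters
 * before returning the new handle to userspace.
 */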
2891int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
2892 struct ib_device *ib_dev,
2893 struct ib_udata *ucore,
2894 struct ib_udata *uhw)
2895{
2896 struct ib_uverbs_ex_create_wq cmd = {};
2897 struct ib_uverbs_ex_create_wq_resp resp = {};
2898 struct ib_uwq_object *obj;
2899 int err = 0;
2900 struct ib_cq *cq;
2901 struct ib_pd *pd;
2902 struct ib_wq *wq;
2903 struct ib_wq_init_attr wq_init_attr = {};
2904 size_t required_cmd_sz;
2905 size_t required_resp_len;
2906
2907 required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
2908 required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);
2909
2910 if (ucore->inlen < required_cmd_sz)
2911 return -EINVAL;
2912
2913 if (ucore->outlen < required_resp_len)
2914 return -ENOSPC;
2915
2916 if (ucore->inlen > sizeof(cmd) &&
2917 !ib_is_udata_cleared(ucore, sizeof(cmd),
2918 ucore->inlen - sizeof(cmd)))
2919 return -EOPNOTSUPP;
2920
2921 err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
2922 if (err)
2923 return err;
2924
2925 if (cmd.comp_mask)
2926 return -EOPNOTSUPP;
2927
fd3c7904
MB
2928 obj = (struct ib_uwq_object *)uobj_alloc(uobj_get_type(wq),
2929 file->ucontext);
2930 if (IS_ERR(obj))
2931 return PTR_ERR(obj);
f213c052 2932
fd3c7904 2933 pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
f213c052
YH
2934 if (!pd) {
2935 err = -EINVAL;
2936 goto err_uobj;
2937 }
2938
fd3c7904 2939 cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
f213c052
YH
2940 if (!cq) {
2941 err = -EINVAL;
2942 goto err_put_pd;
2943 }
2944
2945 wq_init_attr.cq = cq;
2946 wq_init_attr.max_sge = cmd.max_sge;
2947 wq_init_attr.max_wr = cmd.max_wr;
2948 wq_init_attr.wq_context = file;
2949 wq_init_attr.wq_type = cmd.wq_type;
2950 wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
af1cb95d
NO
2951 if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) +
2952 sizeof(cmd.create_flags)))
2953 wq_init_attr.create_flags = cmd.create_flags;
f213c052
YH
2954 obj->uevent.events_reported = 0;
2955 INIT_LIST_HEAD(&obj->uevent.event_list);
21885586
LR
2956
2957 if (!pd->device->create_wq) {
2958 err = -EOPNOTSUPP;
2959 goto err_put_cq;
2960 }
f213c052
YH
2961 wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
2962 if (IS_ERR(wq)) {
2963 err = PTR_ERR(wq);
2964 goto err_put_cq;
2965 }
2966
2967 wq->uobject = &obj->uevent.uobject;
2968 obj->uevent.uobject.object = wq;
2969 wq->wq_type = wq_init_attr.wq_type;
2970 wq->cq = cq;
2971 wq->pd = pd;
2972 wq->device = pd->device;
2973 wq->wq_context = wq_init_attr.wq_context;
2974 atomic_set(&wq->usecnt, 0);
2975 atomic_inc(&pd->usecnt);
2976 atomic_inc(&cq->usecnt);
2979
2980 memset(&resp, 0, sizeof(resp));
2981 resp.wq_handle = obj->uevent.uobject.id;
2982 resp.max_sge = wq_init_attr.max_sge;
2983 resp.max_wr = wq_init_attr.max_wr;
2984 resp.wqn = wq->wq_num;
2985 resp.response_length = required_resp_len;
2986 err = ib_copy_to_udata(ucore,
2987 &resp, resp.response_length);
2988 if (err)
2989 goto err_copy;
2990
fd3c7904
MB
2991 uobj_put_obj_read(pd);
2992 uobj_put_obj_read(cq);
2993 uobj_alloc_commit(&obj->uevent.uobject);
f213c052
YH
2994 return 0;
2995
2996err_copy:
f213c052
YH
2997 ib_destroy_wq(wq);
2998err_put_cq:
fd3c7904 2999 uobj_put_obj_read(cq);
f213c052 3000err_put_pd:
fd3c7904 3001 uobj_put_obj_read(pd);
f213c052 3002err_uobj:
fd3c7904 3003 uobj_alloc_abort(&obj->uevent.uobject);
f213c052
YH
3004
3005 return err;
3006}
3007
3008int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
3009 struct ib_device *ib_dev,
3010 struct ib_udata *ucore,
3011 struct ib_udata *uhw)
3012{
3013 struct ib_uverbs_ex_destroy_wq cmd = {};
3014 struct ib_uverbs_ex_destroy_wq_resp resp = {};
f213c052
YH
3015 struct ib_uobject *uobj;
3016 struct ib_uwq_object *obj;
3017 size_t required_cmd_sz;
3018 size_t required_resp_len;
3019 int ret;
3020
3021 required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
3022 required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
3023
3024 if (ucore->inlen < required_cmd_sz)
3025 return -EINVAL;
3026
3027 if (ucore->outlen < required_resp_len)
3028 return -ENOSPC;
3029
3030 if (ucore->inlen > sizeof(cmd) &&
3031 !ib_is_udata_cleared(ucore, sizeof(cmd),
3032 ucore->inlen - sizeof(cmd)))
3033 return -EOPNOTSUPP;
3034
3035 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3036 if (ret)
3037 return ret;
3038
3039 if (cmd.comp_mask)
3040 return -EOPNOTSUPP;
3041
3042 resp.response_length = required_resp_len;
fd3c7904
MB
3043 uobj = uobj_get_write(uobj_get_type(wq), cmd.wq_handle,
3044 file->ucontext);
3045 if (IS_ERR(uobj))
3046 return PTR_ERR(uobj);
f213c052 3047
f213c052 3048 obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
3049 /*
3050 * Make sure we don't free the memory in remove_commit as we still
3051 * need the uobject memory to create the response.
3052 */
3053 uverbs_uobject_get(uobj);
f213c052 3054
fd3c7904 3055 ret = uobj_remove_commit(uobj);
f213c052 3056 resp.events_reported = obj->uevent.events_reported;
fd3c7904 3057 uverbs_uobject_put(uobj);
f213c052
YH
3058 if (ret)
3059 return ret;
3060
c52d8114 3061 return ib_copy_to_udata(ucore, &resp, resp.response_length);
f213c052
YH
3062}
3063
3064int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
3065 struct ib_device *ib_dev,
3066 struct ib_udata *ucore,
3067 struct ib_udata *uhw)
3068{
3069 struct ib_uverbs_ex_modify_wq cmd = {};
3070 struct ib_wq *wq;
3071 struct ib_wq_attr wq_attr = {};
3072 size_t required_cmd_sz;
3073 int ret;
3074
3075 required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
3076 if (ucore->inlen < required_cmd_sz)
3077 return -EINVAL;
3078
3079 if (ucore->inlen > sizeof(cmd) &&
3080 !ib_is_udata_cleared(ucore, sizeof(cmd),
3081 ucore->inlen - sizeof(cmd)))
3082 return -EOPNOTSUPP;
3083
3084 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3085 if (ret)
3086 return ret;
3087
3088 if (!cmd.attr_mask)
3089 return -EINVAL;
3090
af1cb95d 3091 if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
f213c052
YH
3092 return -EINVAL;
3093
fd3c7904 3094 wq = uobj_get_obj_read(wq, cmd.wq_handle, file->ucontext);
f213c052
YH
3095 if (!wq)
3096 return -EINVAL;
3097
3098 wq_attr.curr_wq_state = cmd.curr_wq_state;
3099 wq_attr.wq_state = cmd.wq_state;
af1cb95d
NO
3100 if (cmd.attr_mask & IB_WQ_FLAGS) {
3101 wq_attr.flags = cmd.flags;
3102 wq_attr.flags_mask = cmd.flags_mask;
3103 }
21885586
LR
3104 if (!wq->device->modify_wq) {
3105 ret = -EOPNOTSUPP;
3106 goto out;
3107 }
f213c052 3108 ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
21885586 3109out:
fd3c7904 3110 uobj_put_obj_read(wq);
f213c052
YH
3111 return ret;
3112}
3113
de019a94
YH
3114int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
3115 struct ib_device *ib_dev,
3116 struct ib_udata *ucore,
3117 struct ib_udata *uhw)
3118{
3119 struct ib_uverbs_ex_create_rwq_ind_table cmd = {};
3120 struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
3121 struct ib_uobject *uobj;
3122 int err = 0;
3123 struct ib_rwq_ind_table_init_attr init_attr = {};
3124 struct ib_rwq_ind_table *rwq_ind_tbl;
3125 struct ib_wq **wqs = NULL;
3126 u32 *wqs_handles = NULL;
3127 struct ib_wq *wq = NULL;
3128 int i, j, num_read_wqs;
3129 u32 num_wq_handles;
3130 u32 expected_in_size;
3131 size_t required_cmd_sz_header;
3132 size_t required_resp_len;
3133
3134 required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
3135 required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);
3136
3137 if (ucore->inlen < required_cmd_sz_header)
3138 return -EINVAL;
3139
3140 if (ucore->outlen < required_resp_len)
3141 return -ENOSPC;
3142
3143 err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
3144 if (err)
3145 return err;
3146
3147 ucore->inbuf += required_cmd_sz_header;
3148 ucore->inlen -= required_cmd_sz_header;
3149
3150 if (cmd.comp_mask)
3151 return -EOPNOTSUPP;
3152
3153 if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
3154 return -EINVAL;
3155
3156 num_wq_handles = 1 << cmd.log_ind_tbl_size;
3157 expected_in_size = num_wq_handles * sizeof(__u32);
3158 if (num_wq_handles == 1)
3159 /* input size for wq handles is u64 aligned */
3160 expected_in_size += sizeof(__u32);
3161
3162 if (ucore->inlen < expected_in_size)
3163 return -EINVAL;
3164
3165 if (ucore->inlen > expected_in_size &&
3166 !ib_is_udata_cleared(ucore, expected_in_size,
3167 ucore->inlen - expected_in_size))
3168 return -EOPNOTSUPP;
3169
3170 wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
3171 GFP_KERNEL);
3172 if (!wqs_handles)
3173 return -ENOMEM;
3174
3175 err = ib_copy_from_udata(wqs_handles, ucore,
3176 num_wq_handles * sizeof(__u32));
3177 if (err)
3178 goto err_free;
3179
3180 wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
3181 if (!wqs) {
3182 err = -ENOMEM;
3183 goto err_free;
3184 }
3185
3186 for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
3187 num_read_wqs++) {
fd3c7904
MB
3188 wq = uobj_get_obj_read(wq, wqs_handles[num_read_wqs],
3189 file->ucontext);
de019a94
YH
3190 if (!wq) {
3191 err = -EINVAL;
3192 goto put_wqs;
3193 }
3194
3195 wqs[num_read_wqs] = wq;
3196 }
3197
fd3c7904
MB
3198 uobj = uobj_alloc(uobj_get_type(rwq_ind_table), file->ucontext);
3199 if (IS_ERR(uobj)) {
3200 err = PTR_ERR(uobj);
de019a94
YH
3201 goto put_wqs;
3202 }
3203
de019a94
YH
3204 init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
3205 init_attr.ind_tbl = wqs;
21885586
LR
3206
3207 if (!ib_dev->create_rwq_ind_table) {
3208 err = -EOPNOTSUPP;
3209 goto err_uobj;
3210 }
de019a94
YH
3211 rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);
3212
3213 if (IS_ERR(rwq_ind_tbl)) {
3214 err = PTR_ERR(rwq_ind_tbl);
3215 goto err_uobj;
3216 }
3217
3218 rwq_ind_tbl->ind_tbl = wqs;
3219 rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
3220 rwq_ind_tbl->uobject = uobj;
3221 uobj->object = rwq_ind_tbl;
3222 rwq_ind_tbl->device = ib_dev;
3223 atomic_set(&rwq_ind_tbl->usecnt, 0);
3224
3225 for (i = 0; i < num_wq_handles; i++)
3226 atomic_inc(&wqs[i]->usecnt);
3227
de019a94
YH
3228 resp.ind_tbl_handle = uobj->id;
3229 resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
3230 resp.response_length = required_resp_len;
3231
3232 err = ib_copy_to_udata(ucore,
3233 &resp, resp.response_length);
3234 if (err)
3235 goto err_copy;
3236
3237 kfree(wqs_handles);
3238
3239 for (j = 0; j < num_read_wqs; j++)
fd3c7904 3240 uobj_put_obj_read(wqs[j]);
de019a94 3241
fd3c7904 3242 uobj_alloc_commit(uobj);
de019a94
YH
3243 return 0;
3244
3245err_copy:
de019a94
YH
3246 ib_destroy_rwq_ind_table(rwq_ind_tbl);
3247err_uobj:
fd3c7904 3248 uobj_alloc_abort(uobj);
de019a94
YH
3249put_wqs:
3250 for (j = 0; j < num_read_wqs; j++)
fd3c7904 3251 uobj_put_obj_read(wqs[j]);
de019a94
YH
3252err_free:
3253 kfree(wqs_handles);
3254 kfree(wqs);
3255 return err;
3256}
3257
3258int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
3259 struct ib_device *ib_dev,
3260 struct ib_udata *ucore,
3261 struct ib_udata *uhw)
3262{
3263 struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
de019a94
YH
3264 struct ib_uobject *uobj;
3265 int ret;
de019a94
YH
3266 size_t required_cmd_sz;
3267
3268 required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);
3269
3270 if (ucore->inlen < required_cmd_sz)
3271 return -EINVAL;
3272
3273 if (ucore->inlen > sizeof(cmd) &&
3274 !ib_is_udata_cleared(ucore, sizeof(cmd),
3275 ucore->inlen - sizeof(cmd)))
3276 return -EOPNOTSUPP;
3277
3278 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3279 if (ret)
3280 return ret;
3281
3282 if (cmd.comp_mask)
3283 return -EOPNOTSUPP;
3284
fd3c7904
MB
3285 uobj = uobj_get_write(uobj_get_type(rwq_ind_table), cmd.ind_tbl_handle,
3286 file->ucontext);
3287 if (IS_ERR(uobj))
3288 return PTR_ERR(uobj);
de019a94 3289
fd3c7904 3290 return uobj_remove_commit(uobj);
3291}
3292
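/*
 * EX_CREATE_FLOW: copy the flow attribute and its trailing list of flow
 * specs from userspace (CAP_NET_RAW required), translate each spec with
 * kern_spec_to_ib_spec(), and install the resulting ib_flow_attr on the QP.
 */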
3293 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
3294 struct ib_device *ib_dev,
3295 struct ib_udata *ucore,
3296 struct ib_udata *uhw)
436f2ad0
HHZ
3297{
3298 struct ib_uverbs_create_flow cmd;
3299 struct ib_uverbs_create_flow_resp resp;
3300 struct ib_uobject *uobj;
3301 struct ib_flow *flow_id;
d82693da 3302 struct ib_uverbs_flow_attr *kern_flow_attr;
436f2ad0
HHZ
3303 struct ib_flow_attr *flow_attr;
3304 struct ib_qp *qp;
3305 int err = 0;
3306 void *kern_spec;
3307 void *ib_spec;
3308 int i;
436f2ad0 3309
6bcca3d4
YD
3310 if (ucore->inlen < sizeof(cmd))
3311 return -EINVAL;
3312
f21519b2 3313 if (ucore->outlen < sizeof(resp))
436f2ad0
HHZ
3314 return -ENOSPC;
3315
f21519b2
YD
3316 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3317 if (err)
3318 return err;
3319
3320 ucore->inbuf += sizeof(cmd);
3321 ucore->inlen -= sizeof(cmd);
436f2ad0 3322
22878dbc
MB
3323 if (cmd.comp_mask)
3324 return -EINVAL;
3325
e3b6d8cf 3326 if (!capable(CAP_NET_RAW))
436f2ad0
HHZ
3327 return -EPERM;
3328
a3100a78
MV
3329 if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
3330 return -EINVAL;
3331
3332 if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
3333 ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
3334 (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
3335 return -EINVAL;
3336
f8848274 3337 if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
22878dbc
MB
3338 return -EINVAL;
3339
f21519b2 3340 if (cmd.flow_attr.size > ucore->inlen ||
f8848274 3341 cmd.flow_attr.size >
b68c9560 3342 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
22878dbc
MB
3343 return -EINVAL;
3344
c780d82a
YD
3345 if (cmd.flow_attr.reserved[0] ||
3346 cmd.flow_attr.reserved[1])
3347 return -EINVAL;
3348
436f2ad0 3349 if (cmd.flow_attr.num_of_specs) {
f8848274
MB
3350 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
3351 GFP_KERNEL);
436f2ad0
HHZ
3352 if (!kern_flow_attr)
3353 return -ENOMEM;
3354
3355 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
f21519b2
YD
3356 err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
3357 cmd.flow_attr.size);
3358 if (err)
436f2ad0 3359 goto err_free_attr;
436f2ad0
HHZ
3360 } else {
3361 kern_flow_attr = &cmd.flow_attr;
436f2ad0
HHZ
3362 }
3363
fd3c7904
MB
3364 uobj = uobj_alloc(uobj_get_type(flow), file->ucontext);
3365 if (IS_ERR(uobj)) {
3366 err = PTR_ERR(uobj);
436f2ad0
HHZ
3367 goto err_free_attr;
3368 }
436f2ad0 3369
fd3c7904 3370 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
436f2ad0
HHZ
3371 if (!qp) {
3372 err = -EINVAL;
3373 goto err_uobj;
3374 }
3375
15dfbd6b
MG
3376 flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
3377 sizeof(union ib_flow_spec), GFP_KERNEL);
436f2ad0
HHZ
3378 if (!flow_attr) {
3379 err = -ENOMEM;
3380 goto err_put;
3381 }
3382
3383 flow_attr->type = kern_flow_attr->type;
3384 flow_attr->priority = kern_flow_attr->priority;
3385 flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
3386 flow_attr->port = kern_flow_attr->port;
3387 flow_attr->flags = kern_flow_attr->flags;
3388 flow_attr->size = sizeof(*flow_attr);
3389
3390 kern_spec = kern_flow_attr + 1;
3391 ib_spec = flow_attr + 1;
f8848274 3392 for (i = 0; i < flow_attr->num_of_specs &&
b68c9560 3393 cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
f8848274 3394 cmd.flow_attr.size >=
b68c9560 3395 ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
436f2ad0
HHZ
3396 err = kern_spec_to_ib_spec(kern_spec, ib_spec);
3397 if (err)
3398 goto err_free;
3399 flow_attr->size +=
3400 ((union ib_flow_spec *) ib_spec)->size;
b68c9560
YD
3401 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
3402 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
436f2ad0
HHZ
3403 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
3404 }
f8848274
MB
3405 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
3406 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
3407 i, cmd.flow_attr.size);
98a37510 3408 err = -EINVAL;
436f2ad0
HHZ
3409 goto err_free;
3410 }
3411 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
3412 if (IS_ERR(flow_id)) {
3413 err = PTR_ERR(flow_id);
fd3c7904 3414 goto err_free;
436f2ad0 3415 }
436f2ad0
HHZ
3416 flow_id->uobject = uobj;
3417 uobj->object = flow_id;
3418
436f2ad0
HHZ
3419 memset(&resp, 0, sizeof(resp));
3420 resp.flow_handle = uobj->id;
3421
f21519b2
YD
3422 err = ib_copy_to_udata(ucore,
3423 &resp, sizeof(resp));
3424 if (err)
436f2ad0 3425 goto err_copy;
436f2ad0 3426
fd3c7904
MB
3427 uobj_put_obj_read(qp);
3428 uobj_alloc_commit(uobj);
436f2ad0
HHZ
3429 kfree(flow_attr);
3430 if (cmd.flow_attr.num_of_specs)
3431 kfree(kern_flow_attr);
f21519b2 3432 return 0;
436f2ad0 3433err_copy:
436f2ad0
HHZ
3434 ib_destroy_flow(flow_id);
3435err_free:
3436 kfree(flow_attr);
3437err_put:
fd3c7904 3438 uobj_put_obj_read(qp);
436f2ad0 3439err_uobj:
fd3c7904 3440 uobj_alloc_abort(uobj);
436f2ad0
HHZ
3441err_free_attr:
3442 if (cmd.flow_attr.num_of_specs)
3443 kfree(kern_flow_attr);
3444 return err;
3445}
3446
f21519b2 3447int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
057aec0d 3448 struct ib_device *ib_dev,
f21519b2
YD
3449 struct ib_udata *ucore,
3450 struct ib_udata *uhw)
3451{
436f2ad0 3452 struct ib_uverbs_destroy_flow cmd;
436f2ad0
HHZ
3453 struct ib_uobject *uobj;
3454 int ret;
3455
6bcca3d4
YD
3456 if (ucore->inlen < sizeof(cmd))
3457 return -EINVAL;
3458
f21519b2
YD
3459 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3460 if (ret)
3461 return ret;
436f2ad0 3462
2782c2d3
YD
3463 if (cmd.comp_mask)
3464 return -EINVAL;
3465
fd3c7904
MB
3466 uobj = uobj_get_write(uobj_get_type(flow), cmd.flow_handle,
3467 file->ucontext);
3468 if (IS_ERR(uobj))
3469 return PTR_ERR(uobj);
436f2ad0 3470
fd3c7904 3471 ret = uobj_remove_commit(uobj);
f21519b2 3472 return ret;
3473}
3474
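/*
 * Common SRQ creation path shared by CREATE_SRQ and CREATE_XSRQ: basic,
 * XRC and tag-matching SRQs all funnel through here, differing only in
 * which of the XRCD/CQ handles must be looked up first.
 */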
3475 static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
3476 struct ib_device *ib_dev,
3477 struct ib_uverbs_create_xsrq *cmd,
3478 struct ib_udata *udata)
f520ba5a 3479{
f520ba5a 3480 struct ib_uverbs_create_srq_resp resp;
8541f8de 3481 struct ib_usrq_object *obj;
f520ba5a
RD
3482 struct ib_pd *pd;
3483 struct ib_srq *srq;
8541f8de 3484 struct ib_uobject *uninitialized_var(xrcd_uobj);
f520ba5a
RD
3485 struct ib_srq_init_attr attr;
3486 int ret;
3487
fd3c7904
MB
3488 obj = (struct ib_usrq_object *)uobj_alloc(uobj_get_type(srq),
3489 file->ucontext);
3490 if (IS_ERR(obj))
3491 return PTR_ERR(obj);
f520ba5a 3492
38eb44fa
AK
3493 if (cmd->srq_type == IB_SRQT_TM)
3494 attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;
3495
8541f8de 3496 if (cmd->srq_type == IB_SRQT_XRC) {
fd3c7904
MB
3497 xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->xrcd_handle,
3498 file->ucontext);
3499 if (IS_ERR(xrcd_uobj)) {
8541f8de 3500 ret = -EINVAL;
5909ce54 3501 goto err;
8541f8de
SH
3502 }
3503
fd3c7904
MB
3504 attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
3505 if (!attr.ext.xrc.xrcd) {
3506 ret = -EINVAL;
3507 goto err_put_xrcd;
3508 }
3509
8541f8de
SH
3510 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
3511 atomic_inc(&obj->uxrcd->refcnt);
1a56ff6d 3512 }
5909ce54 3513
1a56ff6d
AK
3514 if (ib_srq_has_cq(cmd->srq_type)) {
3515 attr.ext.cq = uobj_get_obj_read(cq, cmd->cq_handle,
3516 file->ucontext);
3517 if (!attr.ext.cq) {
5909ce54
RD
3518 ret = -EINVAL;
3519 goto err_put_xrcd;
3520 }
3521 }
3522
fd3c7904 3523 pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
5909ce54
RD
3524 if (!pd) {
3525 ret = -EINVAL;
3526 goto err_put_cq;
8541f8de
SH
3527 }
3528
f520ba5a
RD
3529 attr.event_handler = ib_uverbs_srq_event_handler;
3530 attr.srq_context = file;
8541f8de
SH
3531 attr.srq_type = cmd->srq_type;
3532 attr.attr.max_wr = cmd->max_wr;
3533 attr.attr.max_sge = cmd->max_sge;
3534 attr.attr.srq_limit = cmd->srq_limit;
f520ba5a 3535
8541f8de
SH
3536 obj->uevent.events_reported = 0;
3537 INIT_LIST_HEAD(&obj->uevent.event_list);
f520ba5a 3538
8541f8de 3539 srq = pd->device->create_srq(pd, &attr, udata);
f520ba5a
RD
3540 if (IS_ERR(srq)) {
3541 ret = PTR_ERR(srq);
ec924b47 3542 goto err_put;
f520ba5a
RD
3543 }
3544
8541f8de
SH
3545 srq->device = pd->device;
3546 srq->pd = pd;
3547 srq->srq_type = cmd->srq_type;
3548 srq->uobject = &obj->uevent.uobject;
f520ba5a
RD
3549 srq->event_handler = attr.event_handler;
3550 srq->srq_context = attr.srq_context;
8541f8de 3551
1a56ff6d
AK
3552 if (ib_srq_has_cq(cmd->srq_type)) {
3553 srq->ext.cq = attr.ext.cq;
3554 atomic_inc(&attr.ext.cq->usecnt);
3555 }
3556
8541f8de 3557 if (cmd->srq_type == IB_SRQT_XRC) {
8541f8de 3558 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
8541f8de
SH
3559 atomic_inc(&attr.ext.xrc.xrcd->usecnt);
3560 }
3561
f520ba5a
RD
3562 atomic_inc(&pd->usecnt);
3563 atomic_set(&srq->usecnt, 0);
3564
8541f8de 3565 obj->uevent.uobject.object = srq;
fd3c7904 3566 obj->uevent.uobject.user_handle = cmd->user_handle;
f520ba5a 3567
9ead190b 3568 memset(&resp, 0, sizeof resp);
8541f8de 3569 resp.srq_handle = obj->uevent.uobject.id;
ea88fd16
DB
3570 resp.max_wr = attr.attr.max_wr;
3571 resp.max_sge = attr.attr.max_sge;
8541f8de
SH
3572 if (cmd->srq_type == IB_SRQT_XRC)
3573 resp.srqn = srq->ext.xrc.srq_num;
f520ba5a 3574
8541f8de 3575 if (copy_to_user((void __user *) (unsigned long) cmd->response,
f520ba5a
RD
3576 &resp, sizeof resp)) {
3577 ret = -EFAULT;
9ead190b 3578 goto err_copy;
f520ba5a
RD
3579 }
3580
1a56ff6d 3581 if (cmd->srq_type == IB_SRQT_XRC)
fd3c7904 3582 uobj_put_read(xrcd_uobj);
1a56ff6d
AK
3583
3584 if (ib_srq_has_cq(cmd->srq_type))
3585 uobj_put_obj_read(attr.ext.cq);
3586
fd3c7904
MB
3587 uobj_put_obj_read(pd);
3588 uobj_alloc_commit(&obj->uevent.uobject);
f520ba5a 3589
8541f8de 3590 return 0;
f520ba5a 3591
9ead190b 3592err_copy:
f520ba5a
RD
3593 ib_destroy_srq(srq);
3594
ec924b47 3595err_put:
fd3c7904 3596 uobj_put_obj_read(pd);
8541f8de
SH
3597
3598err_put_cq:
1a56ff6d
AK
3599 if (ib_srq_has_cq(cmd->srq_type))
3600 uobj_put_obj_read(attr.ext.cq);
8541f8de 3601
5909ce54
RD
3602err_put_xrcd:
3603 if (cmd->srq_type == IB_SRQT_XRC) {
3604 atomic_dec(&obj->uxrcd->refcnt);
fd3c7904 3605 uobj_put_read(xrcd_uobj);
5909ce54 3606 }
ec924b47 3607
9ead190b 3608err:
fd3c7904 3609 uobj_alloc_abort(&obj->uevent.uobject);
f520ba5a
RD
3610 return ret;
3611}
3612
8541f8de 3613ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
057aec0d 3614 struct ib_device *ib_dev,
8541f8de
SH
3615 const char __user *buf, int in_len,
3616 int out_len)
3617{
3618 struct ib_uverbs_create_srq cmd;
3619 struct ib_uverbs_create_xsrq xcmd;
3620 struct ib_uverbs_create_srq_resp resp;
3621 struct ib_udata udata;
3622 int ret;
3623
3624 if (out_len < sizeof resp)
3625 return -ENOSPC;
3626
3627 if (copy_from_user(&cmd, buf, sizeof cmd))
3628 return -EFAULT;
3629
38eb44fa 3630 memset(&xcmd, 0, sizeof(xcmd));
8541f8de
SH
3631 xcmd.response = cmd.response;
3632 xcmd.user_handle = cmd.user_handle;
3633 xcmd.srq_type = IB_SRQT_BASIC;
3634 xcmd.pd_handle = cmd.pd_handle;
3635 xcmd.max_wr = cmd.max_wr;
3636 xcmd.max_sge = cmd.max_sge;
3637 xcmd.srq_limit = cmd.srq_limit;
3638
40a20339
AB
3639 ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
3640 u64_to_user_ptr(cmd.response) + sizeof(resp),
e093111d
AR
3641 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
3642 out_len - sizeof(resp));
8541f8de 3643
057aec0d 3644 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
8541f8de
SH
3645 if (ret)
3646 return ret;
3647
3648 return in_len;
3649}
3650
3651ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
057aec0d 3652 struct ib_device *ib_dev,
8541f8de
SH
3653 const char __user *buf, int in_len, int out_len)
3654{
3655 struct ib_uverbs_create_xsrq cmd;
3656 struct ib_uverbs_create_srq_resp resp;
3657 struct ib_udata udata;
3658 int ret;
3659
3660 if (out_len < sizeof resp)
3661 return -ENOSPC;
3662
3663 if (copy_from_user(&cmd, buf, sizeof cmd))
3664 return -EFAULT;
3665
40a20339
AB
3666 ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
3667 u64_to_user_ptr(cmd.response) + sizeof(resp),
e093111d
AR
3668 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
3669 out_len - sizeof(resp));
8541f8de 3670
057aec0d 3671 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
8541f8de
SH
3672 if (ret)
3673 return ret;
3674
3675 return in_len;
3676}
3677
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata             udata;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	uobj_put_obj_read(srq);

	return ret ? ret : in_len;
}

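/*
 * QUERY_SRQ returns the current max_wr/max_sge/srq_limit values; the SRQ
 * object is only held for read for the duration of ib_query_srq().
 */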
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                   *srq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	uobj_put_obj_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

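/*
 * DESTROY_SRQ looks the uobject up for write since the object is going
 * away; the count of asynchronous events reported on the SRQ is copied
 * back to userspace after the removal has been committed.
 */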
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject                *uobj;
	struct ib_uevent_object          *obj;
	int                               ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(srq), cmd.srq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uevent_object, uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}
	resp.events_reported = obj->events_reported;
	uverbs_uobject_put(uobj);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
		return -EFAULT;

	return in_len;
}

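/*
 * The extended QUERY_DEVICE response grows field by field: each optional
 * attribute is copied only if the user-supplied output buffer is large
 * enough for it, and resp.response_length tracks exactly how many bytes
 * are returned, so older userspace with a smaller response struct keeps
 * working unchanged.
 */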
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp = { {0} };
	struct ib_uverbs_ex_query_device  cmd;
	struct ib_device_attr attr = {0};
	int err;

	if (!ib_dev->query_device)
		return -EOPNOTSUPP;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

	if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
		goto end;

	resp.device_cap_flags_ex = attr.device_cap_flags;
	resp.response_length += sizeof(resp.device_cap_flags_ex);

	if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
		goto end;

	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
	resp.rss_caps.max_rwq_indirection_tables =
		attr.rss_caps.max_rwq_indirection_tables;
	resp.rss_caps.max_rwq_indirection_table_size =
		attr.rss_caps.max_rwq_indirection_table_size;

	resp.response_length += sizeof(resp.rss_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
		goto end;

	resp.max_wq_type_rq = attr.max_wq_type_rq;
	resp.response_length += sizeof(resp.max_wq_type_rq);

	if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps))
		goto end;

	resp.raw_packet_caps = attr.raw_packet_caps;
	resp.response_length += sizeof(resp.raw_packet_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
		goto end;

	resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size;
	resp.tm_caps.max_num_tags      = attr.tm_caps.max_num_tags;
	resp.tm_caps.max_ops           = attr.tm_caps.max_ops;
	resp.tm_caps.max_sge           = attr.tm_caps.max_sge;
	resp.tm_caps.flags             = attr.tm_caps.flags;
	resp.response_length += sizeof(resp.tm_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.cq_moderation_caps))
		goto end;

	resp.cq_moderation_caps.max_cq_moderation_count =
		attr.cq_caps.max_cq_moderation_count;
	resp.cq_moderation_caps.max_cq_moderation_period =
		attr.cq_caps.max_cq_moderation_period;
	resp.response_length += sizeof(resp.cq_moderation_caps);
end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	return err;
}

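/*
 * MODIFY_CQ currently supports only CQ moderation (IB_CQ_MODERATE); any
 * input bytes beyond the known command layout must be zero so that future
 * extensions of the command remain detectable.
 */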
int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_cq cmd = {};
	struct ib_cq *cq;
	size_t required_cmd_sz;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), reserved) +
				sizeof(cmd.reserved);
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	/* sanity checks */
	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (!cmd.attr_mask || cmd.reserved)
		return -EINVAL;

	if (cmd.attr_mask > IB_CQ_MODERATE)
		return -EOPNOTSUPP;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);

	uobj_put_obj_read(cq);

	return ret;
}