Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux...
[linux-block.git] / drivers / infiniband / core / uverbs_cmd.c
CommitLineData
bc38a6ab
RD
1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
f7c6a7b5 3 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
eb9d3cd5 4 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
8bdb0e86 5 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
bc38a6ab
RD
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
bc38a6ab
RD
34 */
35
6b73597e 36#include <linux/file.h>
70a30e16 37#include <linux/fs.h>
5a0e3ad6 38#include <linux/slab.h>
8ada2c1c 39#include <linux/sched.h>
6b73597e 40
7c0f6ba6 41#include <linux/uaccess.h>
bc38a6ab 42
fd3c7904
MB
43#include <rdma/uverbs_types.h>
44#include <rdma/uverbs_std_types.h>
45#include "rdma_core.h"
46
bc38a6ab 47#include "uverbs.h"
ed4c54e5 48#include "core_priv.h"
bc38a6ab 49
1e7710f3
MB
50static struct ib_uverbs_completion_event_file *
51ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
52{
53 struct ib_uobject *uobj = uobj_get_read(uobj_get_type(comp_channel),
54 fd, context);
55 struct ib_uobject_file *uobj_file;
56
57 if (IS_ERR(uobj))
58 return (void *)uobj;
59
60 uverbs_uobject_get(uobj);
61 uobj_put_read(uobj);
62
63 uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
64 return container_of(uobj_file, struct ib_uverbs_completion_event_file,
65 uobj_file);
66}
67
bc38a6ab 68ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
057aec0d 69 struct ib_device *ib_dev,
bc38a6ab
RD
70 const char __user *buf,
71 int in_len, int out_len)
72{
73 struct ib_uverbs_get_context cmd;
74 struct ib_uverbs_get_context_resp resp;
75 struct ib_udata udata;
63c47c28 76 struct ib_ucontext *ucontext;
6b73597e 77 struct file *filp;
43579b5f 78 struct ib_rdmacg_object cg_obj;
63c47c28 79 int ret;
bc38a6ab
RD
80
81 if (out_len < sizeof resp)
82 return -ENOSPC;
83
84 if (copy_from_user(&cmd, buf, sizeof cmd))
85 return -EFAULT;
86
95ed644f 87 mutex_lock(&file->mutex);
63c47c28
RD
88
89 if (file->ucontext) {
90 ret = -EINVAL;
91 goto err;
92 }
93
bc38a6ab
RD
94 INIT_UDATA(&udata, buf + sizeof cmd,
95 (unsigned long) cmd.response + sizeof resp,
96 in_len - sizeof cmd, out_len - sizeof resp);
97
43579b5f
PP
98 ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
99 if (ret)
100 goto err;
101
057aec0d 102 ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
77f76013 103 if (IS_ERR(ucontext)) {
df42245a 104 ret = PTR_ERR(ucontext);
43579b5f 105 goto err_alloc;
77f76013 106 }
bc38a6ab 107
057aec0d 108 ucontext->device = ib_dev;
43579b5f 109 ucontext->cg_obj = cg_obj;
771addf6
MB
110 /* ufile is required when some objects are released */
111 ucontext->ufile = file;
fd3c7904
MB
112 uverbs_initialize_ucontext(ucontext);
113
8ada2c1c
SR
114 rcu_read_lock();
115 ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
116 rcu_read_unlock();
f7c6a7b5 117 ucontext->closing = 0;
bc38a6ab 118
882214e2
HE
119#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
120 ucontext->umem_tree = RB_ROOT;
121 init_rwsem(&ucontext->umem_rwsem);
122 ucontext->odp_mrs_count = 0;
123 INIT_LIST_HEAD(&ucontext->no_private_counters);
124
86bee4c9 125 if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
882214e2
HE
126 ucontext->invalidate_range = NULL;
127
128#endif
129
6b73597e
RD
130 resp.num_comp_vectors = file->device->num_comp_vectors;
131
da183c7a 132 ret = get_unused_fd_flags(O_CLOEXEC);
b1e4594b
AV
133 if (ret < 0)
134 goto err_free;
135 resp.async_fd = ret;
136
1e7710f3 137 filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
6b73597e
RD
138 if (IS_ERR(filp)) {
139 ret = PTR_ERR(filp);
b1e4594b 140 goto err_fd;
6b73597e 141 }
bc38a6ab
RD
142
143 if (copy_to_user((void __user *) (unsigned long) cmd.response,
63c47c28
RD
144 &resp, sizeof resp)) {
145 ret = -EFAULT;
6b73597e 146 goto err_file;
63c47c28
RD
147 }
148
70a30e16 149 file->ucontext = ucontext;
6b73597e
RD
150
151 fd_install(resp.async_fd, filp);
152
95ed644f 153 mutex_unlock(&file->mutex);
bc38a6ab
RD
154
155 return in_len;
156
6b73597e 157err_file:
03c40442 158 ib_uverbs_free_async_event_file(file);
6b73597e
RD
159 fput(filp);
160
b1e4594b
AV
161err_fd:
162 put_unused_fd(resp.async_fd);
163
63c47c28 164err_free:
8ada2c1c 165 put_pid(ucontext->tgid);
057aec0d 166 ib_dev->dealloc_ucontext(ucontext);
bc38a6ab 167
43579b5f
PP
168err_alloc:
169 ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
170
63c47c28 171err:
95ed644f 172 mutex_unlock(&file->mutex);
63c47c28 173 return ret;
bc38a6ab
RD
174}
175
02d1aa7a 176static void copy_query_dev_fields(struct ib_uverbs_file *file,
057aec0d 177 struct ib_device *ib_dev,
02d1aa7a
EC
178 struct ib_uverbs_query_device_resp *resp,
179 struct ib_device_attr *attr)
180{
181 resp->fw_ver = attr->fw_ver;
057aec0d 182 resp->node_guid = ib_dev->node_guid;
02d1aa7a
EC
183 resp->sys_image_guid = attr->sys_image_guid;
184 resp->max_mr_size = attr->max_mr_size;
185 resp->page_size_cap = attr->page_size_cap;
186 resp->vendor_id = attr->vendor_id;
187 resp->vendor_part_id = attr->vendor_part_id;
188 resp->hw_ver = attr->hw_ver;
189 resp->max_qp = attr->max_qp;
190 resp->max_qp_wr = attr->max_qp_wr;
fb532d6a 191 resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
02d1aa7a
EC
192 resp->max_sge = attr->max_sge;
193 resp->max_sge_rd = attr->max_sge_rd;
194 resp->max_cq = attr->max_cq;
195 resp->max_cqe = attr->max_cqe;
196 resp->max_mr = attr->max_mr;
197 resp->max_pd = attr->max_pd;
198 resp->max_qp_rd_atom = attr->max_qp_rd_atom;
199 resp->max_ee_rd_atom = attr->max_ee_rd_atom;
200 resp->max_res_rd_atom = attr->max_res_rd_atom;
201 resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
202 resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
203 resp->atomic_cap = attr->atomic_cap;
204 resp->max_ee = attr->max_ee;
205 resp->max_rdd = attr->max_rdd;
206 resp->max_mw = attr->max_mw;
207 resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
208 resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
209 resp->max_mcast_grp = attr->max_mcast_grp;
210 resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
211 resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
212 resp->max_ah = attr->max_ah;
213 resp->max_fmr = attr->max_fmr;
214 resp->max_map_per_fmr = attr->max_map_per_fmr;
215 resp->max_srq = attr->max_srq;
216 resp->max_srq_wr = attr->max_srq_wr;
217 resp->max_srq_sge = attr->max_srq_sge;
218 resp->max_pkeys = attr->max_pkeys;
219 resp->local_ca_ack_delay = attr->local_ca_ack_delay;
057aec0d 220 resp->phys_port_cnt = ib_dev->phys_port_cnt;
02d1aa7a
EC
221}
222
bc38a6ab 223ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
057aec0d 224 struct ib_device *ib_dev,
bc38a6ab
RD
225 const char __user *buf,
226 int in_len, int out_len)
227{
228 struct ib_uverbs_query_device cmd;
229 struct ib_uverbs_query_device_resp resp;
bc38a6ab
RD
230
231 if (out_len < sizeof resp)
232 return -ENOSPC;
233
234 if (copy_from_user(&cmd, buf, sizeof cmd))
235 return -EFAULT;
236
bc38a6ab 237 memset(&resp, 0, sizeof resp);
86bee4c9 238 copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);
bc38a6ab
RD
239
240 if (copy_to_user((void __user *) (unsigned long) cmd.response,
241 &resp, sizeof resp))
242 return -EFAULT;
243
244 return in_len;
245}
246
247ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
057aec0d 248 struct ib_device *ib_dev,
bc38a6ab
RD
249 const char __user *buf,
250 int in_len, int out_len)
251{
252 struct ib_uverbs_query_port cmd;
253 struct ib_uverbs_query_port_resp resp;
254 struct ib_port_attr attr;
255 int ret;
256
257 if (out_len < sizeof resp)
258 return -ENOSPC;
259
260 if (copy_from_user(&cmd, buf, sizeof cmd))
261 return -EFAULT;
262
057aec0d 263 ret = ib_query_port(ib_dev, cmd.port_num, &attr);
bc38a6ab
RD
264 if (ret)
265 return ret;
266
267 memset(&resp, 0, sizeof resp);
268
269 resp.state = attr.state;
270 resp.max_mtu = attr.max_mtu;
271 resp.active_mtu = attr.active_mtu;
272 resp.gid_tbl_len = attr.gid_tbl_len;
273 resp.port_cap_flags = attr.port_cap_flags;
274 resp.max_msg_sz = attr.max_msg_sz;
275 resp.bad_pkey_cntr = attr.bad_pkey_cntr;
276 resp.qkey_viol_cntr = attr.qkey_viol_cntr;
277 resp.pkey_tbl_len = attr.pkey_tbl_len;
278 resp.lid = attr.lid;
279 resp.sm_lid = attr.sm_lid;
280 resp.lmc = attr.lmc;
281 resp.max_vl_num = attr.max_vl_num;
282 resp.sm_sl = attr.sm_sl;
283 resp.subnet_timeout = attr.subnet_timeout;
284 resp.init_type_reply = attr.init_type_reply;
285 resp.active_width = attr.active_width;
286 resp.active_speed = attr.active_speed;
287 resp.phys_state = attr.phys_state;
057aec0d 288 resp.link_layer = rdma_port_get_link_layer(ib_dev,
2420b60b 289 cmd.port_num);
bc38a6ab
RD
290
291 if (copy_to_user((void __user *) (unsigned long) cmd.response,
292 &resp, sizeof resp))
293 return -EFAULT;
294
295 return in_len;
296}
297
bc38a6ab 298ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
057aec0d 299 struct ib_device *ib_dev,
bc38a6ab
RD
300 const char __user *buf,
301 int in_len, int out_len)
302{
303 struct ib_uverbs_alloc_pd cmd;
304 struct ib_uverbs_alloc_pd_resp resp;
305 struct ib_udata udata;
306 struct ib_uobject *uobj;
307 struct ib_pd *pd;
308 int ret;
309
310 if (out_len < sizeof resp)
311 return -ENOSPC;
312
313 if (copy_from_user(&cmd, buf, sizeof cmd))
314 return -EFAULT;
315
316 INIT_UDATA(&udata, buf + sizeof cmd,
317 (unsigned long) cmd.response + sizeof resp,
318 in_len - sizeof cmd, out_len - sizeof resp);
319
fd3c7904
MB
320 uobj = uobj_alloc(uobj_get_type(pd), file->ucontext);
321 if (IS_ERR(uobj))
322 return PTR_ERR(uobj);
bc38a6ab 323
057aec0d 324 pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
bc38a6ab
RD
325 if (IS_ERR(pd)) {
326 ret = PTR_ERR(pd);
327 goto err;
328 }
329
057aec0d 330 pd->device = ib_dev;
bc38a6ab 331 pd->uobject = uobj;
50d46335 332 pd->__internal_mr = NULL;
bc38a6ab
RD
333 atomic_set(&pd->usecnt, 0);
334
9ead190b 335 uobj->object = pd;
bc38a6ab
RD
336 memset(&resp, 0, sizeof resp);
337 resp.pd_handle = uobj->id;
338
339 if (copy_to_user((void __user *) (unsigned long) cmd.response,
340 &resp, sizeof resp)) {
341 ret = -EFAULT;
9ead190b 342 goto err_copy;
bc38a6ab
RD
343 }
344
fd3c7904 345 uobj_alloc_commit(uobj);
bc38a6ab 346
eb9d3cd5
RD
347 return in_len;
348
9ead190b 349err_copy:
bc38a6ab
RD
350 ib_dealloc_pd(pd);
351
352err:
fd3c7904 353 uobj_alloc_abort(uobj);
bc38a6ab
RD
354 return ret;
355}
356
357ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
057aec0d 358 struct ib_device *ib_dev,
bc38a6ab
RD
359 const char __user *buf,
360 int in_len, int out_len)
361{
362 struct ib_uverbs_dealloc_pd cmd;
bc38a6ab 363 struct ib_uobject *uobj;
9ead190b 364 int ret;
bc38a6ab
RD
365
366 if (copy_from_user(&cmd, buf, sizeof cmd))
367 return -EFAULT;
368
fd3c7904
MB
369 uobj = uobj_get_write(uobj_get_type(pd), cmd.pd_handle,
370 file->ucontext);
371 if (IS_ERR(uobj))
372 return PTR_ERR(uobj);
bc38a6ab 373
fd3c7904 374 ret = uobj_remove_commit(uobj);
bc38a6ab 375
fd3c7904 376 return ret ?: in_len;
bc38a6ab
RD
377}
378
53d0bd1e
SH
379struct xrcd_table_entry {
380 struct rb_node node;
381 struct ib_xrcd *xrcd;
382 struct inode *inode;
383};
384
385static int xrcd_table_insert(struct ib_uverbs_device *dev,
386 struct inode *inode,
387 struct ib_xrcd *xrcd)
388{
389 struct xrcd_table_entry *entry, *scan;
390 struct rb_node **p = &dev->xrcd_tree.rb_node;
391 struct rb_node *parent = NULL;
392
393 entry = kmalloc(sizeof *entry, GFP_KERNEL);
394 if (!entry)
395 return -ENOMEM;
396
397 entry->xrcd = xrcd;
398 entry->inode = inode;
399
400 while (*p) {
401 parent = *p;
402 scan = rb_entry(parent, struct xrcd_table_entry, node);
403
404 if (inode < scan->inode) {
405 p = &(*p)->rb_left;
406 } else if (inode > scan->inode) {
407 p = &(*p)->rb_right;
408 } else {
409 kfree(entry);
410 return -EEXIST;
411 }
412 }
413
414 rb_link_node(&entry->node, parent, p);
415 rb_insert_color(&entry->node, &dev->xrcd_tree);
416 igrab(inode);
417 return 0;
418}
419
420static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
421 struct inode *inode)
422{
423 struct xrcd_table_entry *entry;
424 struct rb_node *p = dev->xrcd_tree.rb_node;
425
426 while (p) {
427 entry = rb_entry(p, struct xrcd_table_entry, node);
428
429 if (inode < entry->inode)
430 p = p->rb_left;
431 else if (inode > entry->inode)
432 p = p->rb_right;
433 else
434 return entry;
435 }
436
437 return NULL;
438}
439
440static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
441{
442 struct xrcd_table_entry *entry;
443
444 entry = xrcd_table_search(dev, inode);
445 if (!entry)
446 return NULL;
447
448 return entry->xrcd;
449}
450
451static void xrcd_table_delete(struct ib_uverbs_device *dev,
452 struct inode *inode)
453{
454 struct xrcd_table_entry *entry;
455
456 entry = xrcd_table_search(dev, inode);
457 if (entry) {
458 iput(inode);
459 rb_erase(&entry->node, &dev->xrcd_tree);
460 kfree(entry);
461 }
462}
463
464ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
057aec0d 465 struct ib_device *ib_dev,
53d0bd1e
SH
466 const char __user *buf, int in_len,
467 int out_len)
468{
469 struct ib_uverbs_open_xrcd cmd;
470 struct ib_uverbs_open_xrcd_resp resp;
471 struct ib_udata udata;
472 struct ib_uxrcd_object *obj;
473 struct ib_xrcd *xrcd = NULL;
2903ff01 474 struct fd f = {NULL, 0};
53d0bd1e 475 struct inode *inode = NULL;
2903ff01 476 int ret = 0;
53d0bd1e
SH
477 int new_xrcd = 0;
478
479 if (out_len < sizeof resp)
480 return -ENOSPC;
481
482 if (copy_from_user(&cmd, buf, sizeof cmd))
483 return -EFAULT;
484
485 INIT_UDATA(&udata, buf + sizeof cmd,
486 (unsigned long) cmd.response + sizeof resp,
487 in_len - sizeof cmd, out_len - sizeof resp);
488
489 mutex_lock(&file->device->xrcd_tree_mutex);
490
491 if (cmd.fd != -1) {
492 /* search for file descriptor */
2903ff01
AV
493 f = fdget(cmd.fd);
494 if (!f.file) {
53d0bd1e
SH
495 ret = -EBADF;
496 goto err_tree_mutex_unlock;
497 }
498
496ad9aa 499 inode = file_inode(f.file);
53d0bd1e
SH
500 xrcd = find_xrcd(file->device, inode);
501 if (!xrcd && !(cmd.oflags & O_CREAT)) {
502 /* no file descriptor. Need CREATE flag */
503 ret = -EAGAIN;
504 goto err_tree_mutex_unlock;
505 }
506
507 if (xrcd && cmd.oflags & O_EXCL) {
508 ret = -EINVAL;
509 goto err_tree_mutex_unlock;
510 }
511 }
512
fd3c7904
MB
513 obj = (struct ib_uxrcd_object *)uobj_alloc(uobj_get_type(xrcd),
514 file->ucontext);
515 if (IS_ERR(obj)) {
516 ret = PTR_ERR(obj);
53d0bd1e
SH
517 goto err_tree_mutex_unlock;
518 }
519
53d0bd1e 520 if (!xrcd) {
057aec0d 521 xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
53d0bd1e
SH
522 if (IS_ERR(xrcd)) {
523 ret = PTR_ERR(xrcd);
524 goto err;
525 }
526
527 xrcd->inode = inode;
057aec0d 528 xrcd->device = ib_dev;
53d0bd1e
SH
529 atomic_set(&xrcd->usecnt, 0);
530 mutex_init(&xrcd->tgt_qp_mutex);
531 INIT_LIST_HEAD(&xrcd->tgt_qp_list);
532 new_xrcd = 1;
533 }
534
535 atomic_set(&obj->refcnt, 0);
536 obj->uobject.object = xrcd;
53d0bd1e
SH
537 memset(&resp, 0, sizeof resp);
538 resp.xrcd_handle = obj->uobject.id;
539
540 if (inode) {
541 if (new_xrcd) {
542 /* create new inode/xrcd table entry */
543 ret = xrcd_table_insert(file->device, inode, xrcd);
544 if (ret)
fd3c7904 545 goto err_dealloc_xrcd;
53d0bd1e
SH
546 }
547 atomic_inc(&xrcd->usecnt);
548 }
549
550 if (copy_to_user((void __user *) (unsigned long) cmd.response,
551 &resp, sizeof resp)) {
552 ret = -EFAULT;
553 goto err_copy;
554 }
555
2903ff01
AV
556 if (f.file)
557 fdput(f);
53d0bd1e 558
fd3c7904 559 uobj_alloc_commit(&obj->uobject);
53d0bd1e
SH
560
561 mutex_unlock(&file->device->xrcd_tree_mutex);
562 return in_len;
563
564err_copy:
565 if (inode) {
566 if (new_xrcd)
567 xrcd_table_delete(file->device, inode);
568 atomic_dec(&xrcd->usecnt);
569 }
570
fd3c7904 571err_dealloc_xrcd:
53d0bd1e
SH
572 ib_dealloc_xrcd(xrcd);
573
574err:
fd3c7904 575 uobj_alloc_abort(&obj->uobject);
53d0bd1e
SH
576
577err_tree_mutex_unlock:
2903ff01
AV
578 if (f.file)
579 fdput(f);
53d0bd1e
SH
580
581 mutex_unlock(&file->device->xrcd_tree_mutex);
582
583 return ret;
584}
585
586ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
057aec0d 587 struct ib_device *ib_dev,
53d0bd1e
SH
588 const char __user *buf, int in_len,
589 int out_len)
590{
591 struct ib_uverbs_close_xrcd cmd;
592 struct ib_uobject *uobj;
53d0bd1e
SH
593 int ret = 0;
594
595 if (copy_from_user(&cmd, buf, sizeof cmd))
596 return -EFAULT;
597
fd3c7904
MB
598 uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
599 file->ucontext);
600 if (IS_ERR(uobj)) {
601 mutex_unlock(&file->device->xrcd_tree_mutex);
602 return PTR_ERR(uobj);
53d0bd1e
SH
603 }
604
fd3c7904
MB
605 ret = uobj_remove_commit(uobj);
606 return ret ?: in_len;
53d0bd1e
SH
607}
608
6be60aed
MB
609int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
610 struct ib_xrcd *xrcd,
611 enum rdma_remove_reason why)
53d0bd1e
SH
612{
613 struct inode *inode;
6be60aed 614 int ret;
53d0bd1e
SH
615
616 inode = xrcd->inode;
617 if (inode && !atomic_dec_and_test(&xrcd->usecnt))
6be60aed 618 return 0;
53d0bd1e 619
6be60aed 620 ret = ib_dealloc_xrcd(xrcd);
53d0bd1e 621
6be60aed
MB
622 if (why == RDMA_REMOVE_DESTROY && ret)
623 atomic_inc(&xrcd->usecnt);
624 else if (inode)
53d0bd1e 625 xrcd_table_delete(dev, inode);
6be60aed
MB
626
627 return ret;
53d0bd1e
SH
628}
629
bc38a6ab 630ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
057aec0d 631 struct ib_device *ib_dev,
bc38a6ab
RD
632 const char __user *buf, int in_len,
633 int out_len)
634{
635 struct ib_uverbs_reg_mr cmd;
636 struct ib_uverbs_reg_mr_resp resp;
637 struct ib_udata udata;
f7c6a7b5 638 struct ib_uobject *uobj;
bc38a6ab
RD
639 struct ib_pd *pd;
640 struct ib_mr *mr;
641 int ret;
642
643 if (out_len < sizeof resp)
644 return -ENOSPC;
645
646 if (copy_from_user(&cmd, buf, sizeof cmd))
647 return -EFAULT;
648
649 INIT_UDATA(&udata, buf + sizeof cmd,
650 (unsigned long) cmd.response + sizeof resp,
651 in_len - sizeof cmd, out_len - sizeof resp);
652
653 if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
654 return -EINVAL;
655
1c636f80
EC
656 ret = ib_check_mr_access(cmd.access_flags);
657 if (ret)
658 return ret;
f575394f 659
fd3c7904
MB
660 uobj = uobj_alloc(uobj_get_type(mr), file->ucontext);
661 if (IS_ERR(uobj))
662 return PTR_ERR(uobj);
bc38a6ab 663
fd3c7904 664 pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
aaf1aef5
RD
665 if (!pd) {
666 ret = -EINVAL;
f7c6a7b5 667 goto err_free;
aaf1aef5 668 }
bc38a6ab 669
860f10a7 670 if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
86bee4c9
OG
671 if (!(pd->device->attrs.device_cap_flags &
672 IB_DEVICE_ON_DEMAND_PAGING)) {
860f10a7
SG
673 pr_debug("ODP support not available\n");
674 ret = -EINVAL;
675 goto err_put;
676 }
677 }
678
f7c6a7b5
RD
679 mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
680 cmd.access_flags, &udata);
bc38a6ab
RD
681 if (IS_ERR(mr)) {
682 ret = PTR_ERR(mr);
9ead190b 683 goto err_put;
bc38a6ab
RD
684 }
685
686 mr->device = pd->device;
687 mr->pd = pd;
f7c6a7b5 688 mr->uobject = uobj;
bc38a6ab 689 atomic_inc(&pd->usecnt);
bc38a6ab 690
f7c6a7b5 691 uobj->object = mr;
bc38a6ab 692
9ead190b
RD
693 memset(&resp, 0, sizeof resp);
694 resp.lkey = mr->lkey;
695 resp.rkey = mr->rkey;
f7c6a7b5 696 resp.mr_handle = uobj->id;
bc38a6ab 697
bc38a6ab
RD
698 if (copy_to_user((void __user *) (unsigned long) cmd.response,
699 &resp, sizeof resp)) {
700 ret = -EFAULT;
9ead190b 701 goto err_copy;
bc38a6ab
RD
702 }
703
fd3c7904 704 uobj_put_obj_read(pd);
eb9d3cd5 705
fd3c7904 706 uobj_alloc_commit(uobj);
bc38a6ab
RD
707
708 return in_len;
709
9ead190b 710err_copy:
bc38a6ab
RD
711 ib_dereg_mr(mr);
712
9ead190b 713err_put:
fd3c7904 714 uobj_put_obj_read(pd);
bc38a6ab 715
bc38a6ab 716err_free:
fd3c7904 717 uobj_alloc_abort(uobj);
bc38a6ab
RD
718 return ret;
719}
720
7e6edb9b 721ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
057aec0d 722 struct ib_device *ib_dev,
7e6edb9b
MB
723 const char __user *buf, int in_len,
724 int out_len)
725{
726 struct ib_uverbs_rereg_mr cmd;
727 struct ib_uverbs_rereg_mr_resp resp;
728 struct ib_udata udata;
729 struct ib_pd *pd = NULL;
730 struct ib_mr *mr;
731 struct ib_pd *old_pd;
732 int ret;
733 struct ib_uobject *uobj;
734
735 if (out_len < sizeof(resp))
736 return -ENOSPC;
737
738 if (copy_from_user(&cmd, buf, sizeof(cmd)))
739 return -EFAULT;
740
741 INIT_UDATA(&udata, buf + sizeof(cmd),
742 (unsigned long) cmd.response + sizeof(resp),
743 in_len - sizeof(cmd), out_len - sizeof(resp));
744
745 if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
746 return -EINVAL;
747
748 if ((cmd.flags & IB_MR_REREG_TRANS) &&
749 (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
750 (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
751 return -EINVAL;
752
fd3c7904
MB
753 uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
754 file->ucontext);
755 if (IS_ERR(uobj))
756 return PTR_ERR(uobj);
7e6edb9b
MB
757
758 mr = uobj->object;
759
760 if (cmd.flags & IB_MR_REREG_ACCESS) {
761 ret = ib_check_mr_access(cmd.access_flags);
762 if (ret)
763 goto put_uobjs;
764 }
765
766 if (cmd.flags & IB_MR_REREG_PD) {
fd3c7904 767 pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
7e6edb9b
MB
768 if (!pd) {
769 ret = -EINVAL;
770 goto put_uobjs;
771 }
772 }
773
7e6edb9b
MB
774 old_pd = mr->pd;
775 ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
776 cmd.length, cmd.hca_va,
777 cmd.access_flags, pd, &udata);
778 if (!ret) {
779 if (cmd.flags & IB_MR_REREG_PD) {
780 atomic_inc(&pd->usecnt);
781 mr->pd = pd;
782 atomic_dec(&old_pd->usecnt);
783 }
784 } else {
785 goto put_uobj_pd;
786 }
787
788 memset(&resp, 0, sizeof(resp));
789 resp.lkey = mr->lkey;
790 resp.rkey = mr->rkey;
791
792 if (copy_to_user((void __user *)(unsigned long)cmd.response,
793 &resp, sizeof(resp)))
794 ret = -EFAULT;
795 else
796 ret = in_len;
797
798put_uobj_pd:
799 if (cmd.flags & IB_MR_REREG_PD)
fd3c7904 800 uobj_put_obj_read(pd);
7e6edb9b
MB
801
802put_uobjs:
fd3c7904 803 uobj_put_write(uobj);
7e6edb9b
MB
804
805 return ret;
806}
807
bc38a6ab 808ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
057aec0d 809 struct ib_device *ib_dev,
bc38a6ab
RD
810 const char __user *buf, int in_len,
811 int out_len)
812{
813 struct ib_uverbs_dereg_mr cmd;
9ead190b 814 struct ib_uobject *uobj;
bc38a6ab
RD
815 int ret = -EINVAL;
816
817 if (copy_from_user(&cmd, buf, sizeof cmd))
818 return -EFAULT;
819
fd3c7904
MB
820 uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
821 file->ucontext);
822 if (IS_ERR(uobj))
823 return PTR_ERR(uobj);
9ead190b 824
fd3c7904 825 ret = uobj_remove_commit(uobj);
bc38a6ab 826
fd3c7904 827 return ret ?: in_len;
bc38a6ab
RD
828}
829
6b52a12b 830ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
057aec0d
YH
831 struct ib_device *ib_dev,
832 const char __user *buf, int in_len,
833 int out_len)
6b52a12b
SM
834{
835 struct ib_uverbs_alloc_mw cmd;
836 struct ib_uverbs_alloc_mw_resp resp;
837 struct ib_uobject *uobj;
838 struct ib_pd *pd;
839 struct ib_mw *mw;
b2a239df 840 struct ib_udata udata;
6b52a12b
SM
841 int ret;
842
843 if (out_len < sizeof(resp))
844 return -ENOSPC;
845
846 if (copy_from_user(&cmd, buf, sizeof(cmd)))
847 return -EFAULT;
848
fd3c7904
MB
849 uobj = uobj_alloc(uobj_get_type(mw), file->ucontext);
850 if (IS_ERR(uobj))
851 return PTR_ERR(uobj);
6b52a12b 852
fd3c7904 853 pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
6b52a12b
SM
854 if (!pd) {
855 ret = -EINVAL;
856 goto err_free;
857 }
858
b2a239df
MB
859 INIT_UDATA(&udata, buf + sizeof(cmd),
860 (unsigned long)cmd.response + sizeof(resp),
861 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
862 out_len - sizeof(resp));
863
864 mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
6b52a12b
SM
865 if (IS_ERR(mw)) {
866 ret = PTR_ERR(mw);
867 goto err_put;
868 }
869
870 mw->device = pd->device;
871 mw->pd = pd;
872 mw->uobject = uobj;
873 atomic_inc(&pd->usecnt);
874
875 uobj->object = mw;
6b52a12b
SM
876
877 memset(&resp, 0, sizeof(resp));
878 resp.rkey = mw->rkey;
879 resp.mw_handle = uobj->id;
880
881 if (copy_to_user((void __user *)(unsigned long)cmd.response,
882 &resp, sizeof(resp))) {
883 ret = -EFAULT;
884 goto err_copy;
885 }
886
fd3c7904
MB
887 uobj_put_obj_read(pd);
888 uobj_alloc_commit(uobj);
6b52a12b
SM
889
890 return in_len;
891
892err_copy:
feb7c1e3 893 uverbs_dealloc_mw(mw);
6b52a12b 894err_put:
fd3c7904 895 uobj_put_obj_read(pd);
6b52a12b 896err_free:
fd3c7904 897 uobj_alloc_abort(uobj);
6b52a12b
SM
898 return ret;
899}
900
901ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
057aec0d
YH
902 struct ib_device *ib_dev,
903 const char __user *buf, int in_len,
904 int out_len)
6b52a12b
SM
905{
906 struct ib_uverbs_dealloc_mw cmd;
6b52a12b
SM
907 struct ib_uobject *uobj;
908 int ret = -EINVAL;
909
910 if (copy_from_user(&cmd, buf, sizeof(cmd)))
911 return -EFAULT;
912
fd3c7904
MB
913 uobj = uobj_get_write(uobj_get_type(mw), cmd.mw_handle,
914 file->ucontext);
915 if (IS_ERR(uobj))
916 return PTR_ERR(uobj);
6b52a12b 917
fd3c7904
MB
918 ret = uobj_remove_commit(uobj);
919 return ret ?: in_len;
6b52a12b
SM
920}
921
6b73597e 922ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
057aec0d 923 struct ib_device *ib_dev,
6b73597e
RD
924 const char __user *buf, int in_len,
925 int out_len)
926{
927 struct ib_uverbs_create_comp_channel cmd;
928 struct ib_uverbs_create_comp_channel_resp resp;
1e7710f3
MB
929 struct ib_uobject *uobj;
930 struct ib_uverbs_completion_event_file *ev_file;
6b73597e
RD
931
932 if (out_len < sizeof resp)
933 return -ENOSPC;
934
935 if (copy_from_user(&cmd, buf, sizeof cmd))
936 return -EFAULT;
937
1e7710f3
MB
938 uobj = uobj_alloc(uobj_get_type(comp_channel), file->ucontext);
939 if (IS_ERR(uobj))
940 return PTR_ERR(uobj);
b1e4594b 941
1e7710f3
MB
942 resp.fd = uobj->id;
943
944 ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
945 uobj_file.uobj);
db1b5ddd 946 ib_uverbs_init_event_queue(&ev_file->ev_queue);
6b73597e
RD
947
948 if (copy_to_user((void __user *) (unsigned long) cmd.response,
949 &resp, sizeof resp)) {
1e7710f3 950 uobj_alloc_abort(uobj);
6b73597e
RD
951 return -EFAULT;
952 }
953
1e7710f3 954 uobj_alloc_commit(uobj);
6b73597e
RD
955 return in_len;
956}
957
565197dd 958static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
057aec0d 959 struct ib_device *ib_dev,
565197dd
MB
960 struct ib_udata *ucore,
961 struct ib_udata *uhw,
962 struct ib_uverbs_ex_create_cq *cmd,
963 size_t cmd_sz,
964 int (*cb)(struct ib_uverbs_file *file,
965 struct ib_ucq_object *obj,
966 struct ib_uverbs_ex_create_cq_resp *resp,
967 struct ib_udata *udata,
968 void *context),
969 void *context)
bc38a6ab 970{
9ead190b 971 struct ib_ucq_object *obj;
1e7710f3 972 struct ib_uverbs_completion_event_file *ev_file = NULL;
bc38a6ab
RD
973 struct ib_cq *cq;
974 int ret;
565197dd 975 struct ib_uverbs_ex_create_cq_resp resp;
bcf4c1ea 976 struct ib_cq_init_attr attr = {};
bc38a6ab 977
565197dd
MB
978 if (cmd->comp_vector >= file->device->num_comp_vectors)
979 return ERR_PTR(-EINVAL);
bc38a6ab 980
fd3c7904
MB
981 obj = (struct ib_ucq_object *)uobj_alloc(uobj_get_type(cq),
982 file->ucontext);
983 if (IS_ERR(obj))
984 return obj;
9ead190b 985
565197dd 986 if (cmd->comp_channel >= 0) {
1e7710f3
MB
987 ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel,
988 file->ucontext);
989 if (IS_ERR(ev_file)) {
990 ret = PTR_ERR(ev_file);
ac4e7b35
JM
991 goto err;
992 }
993 }
994
fd3c7904 995 obj->uobject.user_handle = cmd->user_handle;
9ead190b
RD
996 obj->uverbs_file = file;
997 obj->comp_events_reported = 0;
998 obj->async_events_reported = 0;
999 INIT_LIST_HEAD(&obj->comp_list);
1000 INIT_LIST_HEAD(&obj->async_list);
bc38a6ab 1001
565197dd
MB
1002 attr.cqe = cmd->cqe;
1003 attr.comp_vector = cmd->comp_vector;
1004
1005 if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
1006 attr.flags = cmd->flags;
1007
fd3c7904 1008 cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);
bc38a6ab
RD
1009 if (IS_ERR(cq)) {
1010 ret = PTR_ERR(cq);
9ead190b 1011 goto err_file;
bc38a6ab
RD
1012 }
1013
057aec0d 1014 cq->device = ib_dev;
9ead190b 1015 cq->uobject = &obj->uobject;
bc38a6ab
RD
1016 cq->comp_handler = ib_uverbs_comp_handler;
1017 cq->event_handler = ib_uverbs_cq_event_handler;
db1b5ddd 1018 cq->cq_context = &ev_file->ev_queue;
bc38a6ab
RD
1019 atomic_set(&cq->usecnt, 0);
1020
9ead190b 1021 obj->uobject.object = cq;
bc38a6ab 1022 memset(&resp, 0, sizeof resp);
565197dd
MB
1023 resp.base.cq_handle = obj->uobject.id;
1024 resp.base.cqe = cq->cqe;
bc38a6ab 1025
565197dd
MB
1026 resp.response_length = offsetof(typeof(resp), response_length) +
1027 sizeof(resp.response_length);
1028
1029 ret = cb(file, obj, &resp, ucore, context);
1030 if (ret)
1031 goto err_cb;
bc38a6ab 1032
fd3c7904 1033 uobj_alloc_commit(&obj->uobject);
bc38a6ab 1034
565197dd 1035 return obj;
eb9d3cd5 1036
565197dd 1037err_cb:
bc38a6ab
RD
1038 ib_destroy_cq(cq);
1039
9ead190b 1040err_file:
ac4e7b35 1041 if (ev_file)
9ead190b
RD
1042 ib_uverbs_release_ucq(file, ev_file, obj);
1043
1044err:
fd3c7904 1045 uobj_alloc_abort(&obj->uobject);
565197dd
MB
1046
1047 return ERR_PTR(ret);
1048}
1049
1050static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
1051 struct ib_ucq_object *obj,
1052 struct ib_uverbs_ex_create_cq_resp *resp,
1053 struct ib_udata *ucore, void *context)
1054{
1055 if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
1056 return -EFAULT;
1057
1058 return 0;
1059}
1060
1061ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
057aec0d 1062 struct ib_device *ib_dev,
565197dd
MB
1063 const char __user *buf, int in_len,
1064 int out_len)
1065{
1066 struct ib_uverbs_create_cq cmd;
1067 struct ib_uverbs_ex_create_cq cmd_ex;
1068 struct ib_uverbs_create_cq_resp resp;
1069 struct ib_udata ucore;
1070 struct ib_udata uhw;
1071 struct ib_ucq_object *obj;
1072
1073 if (out_len < sizeof(resp))
1074 return -ENOSPC;
1075
1076 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1077 return -EFAULT;
1078
5d1e6235 1079 INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));
565197dd
MB
1080
1081 INIT_UDATA(&uhw, buf + sizeof(cmd),
1082 (unsigned long)cmd.response + sizeof(resp),
1083 in_len - sizeof(cmd), out_len - sizeof(resp));
1084
1085 memset(&cmd_ex, 0, sizeof(cmd_ex));
1086 cmd_ex.user_handle = cmd.user_handle;
1087 cmd_ex.cqe = cmd.cqe;
1088 cmd_ex.comp_vector = cmd.comp_vector;
1089 cmd_ex.comp_channel = cmd.comp_channel;
1090
057aec0d 1091 obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
565197dd
MB
1092 offsetof(typeof(cmd_ex), comp_channel) +
1093 sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
1094 NULL);
1095
1096 if (IS_ERR(obj))
1097 return PTR_ERR(obj);
1098
1099 return in_len;
1100}
1101
1102static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
1103 struct ib_ucq_object *obj,
1104 struct ib_uverbs_ex_create_cq_resp *resp,
1105 struct ib_udata *ucore, void *context)
1106{
1107 if (ib_copy_to_udata(ucore, resp, resp->response_length))
1108 return -EFAULT;
1109
1110 return 0;
1111}
1112
1113int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
057aec0d 1114 struct ib_device *ib_dev,
565197dd
MB
1115 struct ib_udata *ucore,
1116 struct ib_udata *uhw)
1117{
1118 struct ib_uverbs_ex_create_cq_resp resp;
1119 struct ib_uverbs_ex_create_cq cmd;
1120 struct ib_ucq_object *obj;
1121 int err;
1122
1123 if (ucore->inlen < sizeof(cmd))
1124 return -EINVAL;
1125
1126 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
1127 if (err)
1128 return err;
1129
1130 if (cmd.comp_mask)
1131 return -EINVAL;
1132
1133 if (cmd.reserved)
1134 return -EINVAL;
1135
1136 if (ucore->outlen < (offsetof(typeof(resp), response_length) +
1137 sizeof(resp.response_length)))
1138 return -ENOSPC;
1139
057aec0d 1140 obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
565197dd
MB
1141 min(ucore->inlen, sizeof(cmd)),
1142 ib_uverbs_ex_create_cq_cb, NULL);
1143
1144 if (IS_ERR(obj))
1145 return PTR_ERR(obj);
1146
1147 return 0;
bc38a6ab
RD
1148}
1149
33b9b3ee 1150ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
057aec0d 1151 struct ib_device *ib_dev,
33b9b3ee
RD
1152 const char __user *buf, int in_len,
1153 int out_len)
1154{
1155 struct ib_uverbs_resize_cq cmd;
1156 struct ib_uverbs_resize_cq_resp resp;
1157 struct ib_udata udata;
1158 struct ib_cq *cq;
1159 int ret = -EINVAL;
1160
1161 if (copy_from_user(&cmd, buf, sizeof cmd))
1162 return -EFAULT;
1163
1164 INIT_UDATA(&udata, buf + sizeof cmd,
1165 (unsigned long) cmd.response + sizeof resp,
1166 in_len - sizeof cmd, out_len - sizeof resp);
1167
fd3c7904 1168 cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
9ead190b
RD
1169 if (!cq)
1170 return -EINVAL;
33b9b3ee
RD
1171
1172 ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
1173 if (ret)
1174 goto out;
1175
33b9b3ee
RD
1176 resp.cqe = cq->cqe;
1177
1178 if (copy_to_user((void __user *) (unsigned long) cmd.response,
64f817ba 1179 &resp, sizeof resp.cqe))
33b9b3ee
RD
1180 ret = -EFAULT;
1181
1182out:
fd3c7904 1183 uobj_put_obj_read(cq);
33b9b3ee
RD
1184
1185 return ret ? ret : in_len;
1186}
1187
7182afea
DC
1188static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
1189{
1190 struct ib_uverbs_wc tmp;
1191
1192 tmp.wr_id = wc->wr_id;
1193 tmp.status = wc->status;
1194 tmp.opcode = wc->opcode;
1195 tmp.vendor_err = wc->vendor_err;
1196 tmp.byte_len = wc->byte_len;
1197 tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
1198 tmp.qp_num = wc->qp->qp_num;
1199 tmp.src_qp = wc->src_qp;
1200 tmp.wc_flags = wc->wc_flags;
1201 tmp.pkey_index = wc->pkey_index;
1202 tmp.slid = wc->slid;
1203 tmp.sl = wc->sl;
1204 tmp.dlid_path_bits = wc->dlid_path_bits;
1205 tmp.port_num = wc->port_num;
1206 tmp.reserved = 0;
1207
1208 if (copy_to_user(dest, &tmp, sizeof tmp))
1209 return -EFAULT;
1210
1211 return 0;
1212}
1213
67cdb40c 1214ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
057aec0d 1215 struct ib_device *ib_dev,
67cdb40c
RD
1216 const char __user *buf, int in_len,
1217 int out_len)
1218{
1219 struct ib_uverbs_poll_cq cmd;
7182afea
DC
1220 struct ib_uverbs_poll_cq_resp resp;
1221 u8 __user *header_ptr;
1222 u8 __user *data_ptr;
67cdb40c 1223 struct ib_cq *cq;
7182afea
DC
1224 struct ib_wc wc;
1225 int ret;
67cdb40c
RD
1226
1227 if (copy_from_user(&cmd, buf, sizeof cmd))
1228 return -EFAULT;
1229
fd3c7904 1230 cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
7182afea
DC
1231 if (!cq)
1232 return -EINVAL;
67cdb40c 1233
7182afea
DC
1234 /* we copy a struct ib_uverbs_poll_cq_resp to user space */
1235 header_ptr = (void __user *)(unsigned long) cmd.response;
1236 data_ptr = header_ptr + sizeof resp;
9ead190b 1237
7182afea
DC
1238 memset(&resp, 0, sizeof resp);
1239 while (resp.count < cmd.ne) {
1240 ret = ib_poll_cq(cq, 1, &wc);
1241 if (ret < 0)
1242 goto out_put;
1243 if (!ret)
1244 break;
1245
1246 ret = copy_wc_to_user(data_ptr, &wc);
1247 if (ret)
1248 goto out_put;
1249
1250 data_ptr += sizeof(struct ib_uverbs_wc);
1251 ++resp.count;
67cdb40c
RD
1252 }
1253
7182afea 1254 if (copy_to_user(header_ptr, &resp, sizeof resp)) {
67cdb40c 1255 ret = -EFAULT;
7182afea
DC
1256 goto out_put;
1257 }
67cdb40c 1258
7182afea 1259 ret = in_len;
67cdb40c 1260
7182afea 1261out_put:
fd3c7904 1262 uobj_put_obj_read(cq);
7182afea 1263 return ret;
67cdb40c
RD
1264}
1265
1266ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
057aec0d 1267 struct ib_device *ib_dev,
67cdb40c
RD
1268 const char __user *buf, int in_len,
1269 int out_len)
1270{
1271 struct ib_uverbs_req_notify_cq cmd;
1272 struct ib_cq *cq;
67cdb40c
RD
1273
1274 if (copy_from_user(&cmd, buf, sizeof cmd))
1275 return -EFAULT;
1276
fd3c7904 1277 cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
ab108676 1278 if (!cq)
9ead190b 1279 return -EINVAL;
67cdb40c 1280
9ead190b
RD
1281 ib_req_notify_cq(cq, cmd.solicited_only ?
1282 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
1283
fd3c7904 1284 uobj_put_obj_read(cq);
9ead190b
RD
1285
1286 return in_len;
67cdb40c
RD
1287}
1288
bc38a6ab 1289ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
057aec0d 1290 struct ib_device *ib_dev,
bc38a6ab
RD
1291 const char __user *buf, int in_len,
1292 int out_len)
1293{
63aaf647
RD
1294 struct ib_uverbs_destroy_cq cmd;
1295 struct ib_uverbs_destroy_cq_resp resp;
9ead190b 1296 struct ib_uobject *uobj;
63aaf647 1297 struct ib_cq *cq;
9ead190b 1298 struct ib_ucq_object *obj;
db1b5ddd 1299 struct ib_uverbs_event_queue *ev_queue;
63aaf647 1300 int ret = -EINVAL;
bc38a6ab
RD
1301
1302 if (copy_from_user(&cmd, buf, sizeof cmd))
1303 return -EFAULT;
1304
fd3c7904
MB
1305 uobj = uobj_get_write(uobj_get_type(cq), cmd.cq_handle,
1306 file->ucontext);
1307 if (IS_ERR(uobj))
1308 return PTR_ERR(uobj);
1309
1310 /*
1311 * Make sure we don't free the memory in remove_commit as we still
1312 * needs the uobject memory to create the response.
1313 */
1314 uverbs_uobject_get(uobj);
9ead190b 1315 cq = uobj->object;
db1b5ddd 1316 ev_queue = cq->cq_context;
9ead190b 1317 obj = container_of(cq->uobject, struct ib_ucq_object, uobject);
bc38a6ab 1318
fd3c7904 1319 memset(&resp, 0, sizeof(resp));
bc38a6ab 1320
fd3c7904
MB
1321 ret = uobj_remove_commit(uobj);
1322 if (ret) {
1323 uverbs_uobject_put(uobj);
9ead190b 1324 return ret;
fd3c7904 1325 }
bc38a6ab 1326
9ead190b
RD
1327 resp.comp_events_reported = obj->comp_events_reported;
1328 resp.async_events_reported = obj->async_events_reported;
63aaf647 1329
fd3c7904 1330 uverbs_uobject_put(uobj);
63aaf647
RD
1331 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1332 &resp, sizeof resp))
9ead190b 1333 return -EFAULT;
bc38a6ab 1334
9ead190b 1335 return in_len;
bc38a6ab
RD
1336}
1337
6d8a7497
EBE
1338static int create_qp(struct ib_uverbs_file *file,
1339 struct ib_udata *ucore,
1340 struct ib_udata *uhw,
1341 struct ib_uverbs_ex_create_qp *cmd,
1342 size_t cmd_sz,
1343 int (*cb)(struct ib_uverbs_file *file,
1344 struct ib_uverbs_ex_create_qp_resp *resp,
1345 struct ib_udata *udata),
1346 void *context)
bc38a6ab 1347{
6d8a7497
EBE
1348 struct ib_uqp_object *obj;
1349 struct ib_device *device;
1350 struct ib_pd *pd = NULL;
1351 struct ib_xrcd *xrcd = NULL;
fd3c7904 1352 struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT);
6d8a7497
EBE
1353 struct ib_cq *scq = NULL, *rcq = NULL;
1354 struct ib_srq *srq = NULL;
1355 struct ib_qp *qp;
1356 char *buf;
c70285f8 1357 struct ib_qp_init_attr attr = {};
6d8a7497
EBE
1358 struct ib_uverbs_ex_create_qp_resp resp;
1359 int ret;
c70285f8
YH
1360 struct ib_rwq_ind_table *ind_tbl = NULL;
1361 bool has_sq = true;
6d8a7497
EBE
1362
1363 if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
c938a616
OG
1364 return -EPERM;
1365
fd3c7904
MB
1366 obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
1367 file->ucontext);
1368 if (IS_ERR(obj))
1369 return PTR_ERR(obj);
1370 obj->uxrcd = NULL;
1371 obj->uevent.uobject.user_handle = cmd->user_handle;
f48b7269 1372 mutex_init(&obj->mcast_lock);
bc38a6ab 1373
c70285f8
YH
1374 if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
1375 sizeof(cmd->rwq_ind_tbl_handle) &&
1376 (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
fd3c7904
MB
1377 ind_tbl = uobj_get_obj_read(rwq_ind_table,
1378 cmd->rwq_ind_tbl_handle,
1379 file->ucontext);
c70285f8
YH
1380 if (!ind_tbl) {
1381 ret = -EINVAL;
1382 goto err_put;
1383 }
1384
1385 attr.rwq_ind_tbl = ind_tbl;
1386 }
1387
1388 if ((cmd_sz >= offsetof(typeof(*cmd), reserved1) +
1389 sizeof(cmd->reserved1)) && cmd->reserved1) {
1390 ret = -EOPNOTSUPP;
1391 goto err_put;
1392 }
1393
1394 if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
1395 ret = -EINVAL;
1396 goto err_put;
1397 }
1398
1399 if (ind_tbl && !cmd->max_send_wr)
1400 has_sq = false;
bc38a6ab 1401
6d8a7497 1402 if (cmd->qp_type == IB_QPT_XRC_TGT) {
fd3c7904
MB
1403 xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->pd_handle,
1404 file->ucontext);
1405
1406 if (IS_ERR(xrcd_uobj)) {
1407 ret = -EINVAL;
1408 goto err_put;
1409 }
1410
1411 xrcd = (struct ib_xrcd *)xrcd_uobj->object;
b93f3c18
SH
1412 if (!xrcd) {
1413 ret = -EINVAL;
1414 goto err_put;
1415 }
1416 device = xrcd->device;
9977f4f6 1417 } else {
6d8a7497
EBE
1418 if (cmd->qp_type == IB_QPT_XRC_INI) {
1419 cmd->max_recv_wr = 0;
1420 cmd->max_recv_sge = 0;
b93f3c18 1421 } else {
6d8a7497 1422 if (cmd->is_srq) {
fd3c7904
MB
1423 srq = uobj_get_obj_read(srq, cmd->srq_handle,
1424 file->ucontext);
b93f3c18
SH
1425 if (!srq || srq->srq_type != IB_SRQT_BASIC) {
1426 ret = -EINVAL;
1427 goto err_put;
1428 }
1429 }
5909ce54 1430
c70285f8
YH
1431 if (!ind_tbl) {
1432 if (cmd->recv_cq_handle != cmd->send_cq_handle) {
fd3c7904
MB
1433 rcq = uobj_get_obj_read(cq, cmd->recv_cq_handle,
1434 file->ucontext);
c70285f8
YH
1435 if (!rcq) {
1436 ret = -EINVAL;
1437 goto err_put;
1438 }
5909ce54 1439 }
9977f4f6
SH
1440 }
1441 }
5909ce54 1442
c70285f8 1443 if (has_sq)
fd3c7904
MB
1444 scq = uobj_get_obj_read(cq, cmd->send_cq_handle,
1445 file->ucontext);
c70285f8
YH
1446 if (!ind_tbl)
1447 rcq = rcq ?: scq;
fd3c7904 1448 pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
c70285f8 1449 if (!pd || (!scq && has_sq)) {
5909ce54
RD
1450 ret = -EINVAL;
1451 goto err_put;
1452 }
1453
b93f3c18 1454 device = pd->device;
9977f4f6
SH
1455 }
1456
bc38a6ab
RD
1457 attr.event_handler = ib_uverbs_qp_event_handler;
1458 attr.qp_context = file;
1459 attr.send_cq = scq;
1460 attr.recv_cq = rcq;
f520ba5a 1461 attr.srq = srq;
b93f3c18 1462 attr.xrcd = xrcd;
6d8a7497
EBE
1463 attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
1464 IB_SIGNAL_REQ_WR;
1465 attr.qp_type = cmd->qp_type;
b846f25a 1466 attr.create_flags = 0;
bc38a6ab 1467
6d8a7497
EBE
1468 attr.cap.max_send_wr = cmd->max_send_wr;
1469 attr.cap.max_recv_wr = cmd->max_recv_wr;
1470 attr.cap.max_send_sge = cmd->max_send_sge;
1471 attr.cap.max_recv_sge = cmd->max_recv_sge;
1472 attr.cap.max_inline_data = cmd->max_inline_data;
bc38a6ab 1473
9ead190b
RD
1474 obj->uevent.events_reported = 0;
1475 INIT_LIST_HEAD(&obj->uevent.event_list);
1476 INIT_LIST_HEAD(&obj->mcast_list);
bc38a6ab 1477
6d8a7497
EBE
1478 if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
1479 sizeof(cmd->create_flags))
1480 attr.create_flags = cmd->create_flags;
1481
8a06ce59
LR
1482 if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
1483 IB_QP_CREATE_CROSS_CHANNEL |
1484 IB_QP_CREATE_MANAGED_SEND |
b531b909 1485 IB_QP_CREATE_MANAGED_RECV |
9e1b161f
NO
1486 IB_QP_CREATE_SCATTER_FCS |
1487 IB_QP_CREATE_CVLAN_STRIPPING)) {
6d8a7497
EBE
1488 ret = -EINVAL;
1489 goto err_put;
1490 }
1491
1492 buf = (void *)cmd + sizeof(*cmd);
1493 if (cmd_sz > sizeof(*cmd))
1494 if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
1495 cmd_sz - sizeof(*cmd) - 1))) {
1496 ret = -EINVAL;
1497 goto err_put;
1498 }
1499
1500 if (cmd->qp_type == IB_QPT_XRC_TGT)
b93f3c18
SH
1501 qp = ib_create_qp(pd, &attr);
1502 else
6d8a7497 1503 qp = device->create_qp(pd, &attr, uhw);
b93f3c18 1504
bc38a6ab
RD
1505 if (IS_ERR(qp)) {
1506 ret = PTR_ERR(qp);
fd3c7904 1507 goto err_put;
bc38a6ab
RD
1508 }
1509
6d8a7497 1510 if (cmd->qp_type != IB_QPT_XRC_TGT) {
d291f1a6
DJ
1511 ret = ib_create_qp_security(qp, device);
1512 if (ret)
1513 goto err_cb;
1514
0e0ec7e0 1515 qp->real_qp = qp;
b93f3c18
SH
1516 qp->device = device;
1517 qp->pd = pd;
1518 qp->send_cq = attr.send_cq;
1519 qp->recv_cq = attr.recv_cq;
1520 qp->srq = attr.srq;
c70285f8 1521 qp->rwq_ind_tbl = ind_tbl;
b93f3c18
SH
1522 qp->event_handler = attr.event_handler;
1523 qp->qp_context = attr.qp_context;
1524 qp->qp_type = attr.qp_type;
e47e321a 1525 atomic_set(&qp->usecnt, 0);
b93f3c18 1526 atomic_inc(&pd->usecnt);
c70285f8
YH
1527 if (attr.send_cq)
1528 atomic_inc(&attr.send_cq->usecnt);
b93f3c18
SH
1529 if (attr.recv_cq)
1530 atomic_inc(&attr.recv_cq->usecnt);
1531 if (attr.srq)
1532 atomic_inc(&attr.srq->usecnt);
c70285f8
YH
1533 if (ind_tbl)
1534 atomic_inc(&ind_tbl->usecnt);
b93f3c18
SH
1535 }
1536 qp->uobject = &obj->uevent.uobject;
bc38a6ab 1537
9ead190b 1538 obj->uevent.uobject.object = qp;
bc38a6ab 1539
9ead190b 1540 memset(&resp, 0, sizeof resp);
6d8a7497
EBE
1541 resp.base.qpn = qp->qp_num;
1542 resp.base.qp_handle = obj->uevent.uobject.id;
1543 resp.base.max_recv_sge = attr.cap.max_recv_sge;
1544 resp.base.max_send_sge = attr.cap.max_send_sge;
1545 resp.base.max_recv_wr = attr.cap.max_recv_wr;
1546 resp.base.max_send_wr = attr.cap.max_send_wr;
1547 resp.base.max_inline_data = attr.cap.max_inline_data;
bc38a6ab 1548
6d8a7497
EBE
1549 resp.response_length = offsetof(typeof(resp), response_length) +
1550 sizeof(resp.response_length);
1551
1552 ret = cb(file, &resp, ucore);
1553 if (ret)
1554 goto err_cb;
bc38a6ab 1555
846be90d
YH
1556 if (xrcd) {
1557 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
1558 uobject);
1559 atomic_inc(&obj->uxrcd->refcnt);
fd3c7904 1560 uobj_put_read(xrcd_uobj);
846be90d
YH
1561 }
1562
b93f3c18 1563 if (pd)
fd3c7904 1564 uobj_put_obj_read(pd);
b93f3c18 1565 if (scq)
fd3c7904 1566 uobj_put_obj_read(scq);
9977f4f6 1567 if (rcq && rcq != scq)
fd3c7904 1568 uobj_put_obj_read(rcq);
9ead190b 1569 if (srq)
fd3c7904 1570 uobj_put_obj_read(srq);
c70285f8 1571 if (ind_tbl)
fd3c7904 1572 uobj_put_obj_read(ind_tbl);
9ead190b 1573
fd3c7904 1574 uobj_alloc_commit(&obj->uevent.uobject);
bc38a6ab 1575
6d8a7497
EBE
1576 return 0;
1577err_cb:
bc38a6ab
RD
1578 ib_destroy_qp(qp);
1579
9ead190b 1580err_put:
fd3c7904
MB
1581 if (!IS_ERR(xrcd_uobj))
1582 uobj_put_read(xrcd_uobj);
9ead190b 1583 if (pd)
fd3c7904 1584 uobj_put_obj_read(pd);
9ead190b 1585 if (scq)
fd3c7904 1586 uobj_put_obj_read(scq);
43db2bc0 1587 if (rcq && rcq != scq)
fd3c7904 1588 uobj_put_obj_read(rcq);
9ead190b 1589 if (srq)
fd3c7904 1590 uobj_put_obj_read(srq);
c70285f8 1591 if (ind_tbl)
fd3c7904 1592 uobj_put_obj_read(ind_tbl);
9ead190b 1593
fd3c7904 1594 uobj_alloc_abort(&obj->uevent.uobject);
bc38a6ab
RD
1595 return ret;
1596}
1597
6d8a7497
EBE
1598static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
1599 struct ib_uverbs_ex_create_qp_resp *resp,
1600 struct ib_udata *ucore)
1601{
1602 if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
1603 return -EFAULT;
1604
1605 return 0;
1606}
1607
1608ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1609 struct ib_device *ib_dev,
1610 const char __user *buf, int in_len,
1611 int out_len)
1612{
1613 struct ib_uverbs_create_qp cmd;
1614 struct ib_uverbs_ex_create_qp cmd_ex;
1615 struct ib_udata ucore;
1616 struct ib_udata uhw;
1617 ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
1618 int err;
1619
1620 if (out_len < resp_size)
1621 return -ENOSPC;
1622
1623 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1624 return -EFAULT;
1625
1626 INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
1627 resp_size);
1628 INIT_UDATA(&uhw, buf + sizeof(cmd),
1629 (unsigned long)cmd.response + resp_size,
3d943c9d
MD
1630 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
1631 out_len - resp_size);
6d8a7497
EBE
1632
1633 memset(&cmd_ex, 0, sizeof(cmd_ex));
1634 cmd_ex.user_handle = cmd.user_handle;
1635 cmd_ex.pd_handle = cmd.pd_handle;
1636 cmd_ex.send_cq_handle = cmd.send_cq_handle;
1637 cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
1638 cmd_ex.srq_handle = cmd.srq_handle;
1639 cmd_ex.max_send_wr = cmd.max_send_wr;
1640 cmd_ex.max_recv_wr = cmd.max_recv_wr;
1641 cmd_ex.max_send_sge = cmd.max_send_sge;
1642 cmd_ex.max_recv_sge = cmd.max_recv_sge;
1643 cmd_ex.max_inline_data = cmd.max_inline_data;
1644 cmd_ex.sq_sig_all = cmd.sq_sig_all;
1645 cmd_ex.qp_type = cmd.qp_type;
1646 cmd_ex.is_srq = cmd.is_srq;
1647
1648 err = create_qp(file, &ucore, &uhw, &cmd_ex,
1649 offsetof(typeof(cmd_ex), is_srq) +
1650 sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
1651 NULL);
1652
1653 if (err)
1654 return err;
1655
1656 return in_len;
1657}
1658
1659static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
1660 struct ib_uverbs_ex_create_qp_resp *resp,
1661 struct ib_udata *ucore)
1662{
1663 if (ib_copy_to_udata(ucore, resp, resp->response_length))
1664 return -EFAULT;
1665
1666 return 0;
1667}
1668
1669int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
1670 struct ib_device *ib_dev,
1671 struct ib_udata *ucore,
1672 struct ib_udata *uhw)
1673{
1674 struct ib_uverbs_ex_create_qp_resp resp;
1675 struct ib_uverbs_ex_create_qp cmd = {0};
1676 int err;
1677
1678 if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
1679 sizeof(cmd.comp_mask)))
1680 return -EINVAL;
1681
1682 err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
1683 if (err)
1684 return err;
1685
c70285f8 1686 if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
6d8a7497
EBE
1687 return -EINVAL;
1688
1689 if (cmd.reserved)
1690 return -EINVAL;
1691
1692 if (ucore->outlen < (offsetof(typeof(resp), response_length) +
1693 sizeof(resp.response_length)))
1694 return -ENOSPC;
1695
1696 err = create_qp(file, ucore, uhw, &cmd,
1697 min(ucore->inlen, sizeof(cmd)),
1698 ib_uverbs_ex_create_qp_cb, NULL);
1699
1700 if (err)
1701 return err;
1702
1703 return 0;
1704}
1705
42849b26 1706ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
057aec0d 1707 struct ib_device *ib_dev,
42849b26
SH
1708 const char __user *buf, int in_len, int out_len)
1709{
1710 struct ib_uverbs_open_qp cmd;
1711 struct ib_uverbs_create_qp_resp resp;
1712 struct ib_udata udata;
1713 struct ib_uqp_object *obj;
1714 struct ib_xrcd *xrcd;
1715 struct ib_uobject *uninitialized_var(xrcd_uobj);
1716 struct ib_qp *qp;
1717 struct ib_qp_open_attr attr;
1718 int ret;
1719
1720 if (out_len < sizeof resp)
1721 return -ENOSPC;
1722
1723 if (copy_from_user(&cmd, buf, sizeof cmd))
1724 return -EFAULT;
1725
1726 INIT_UDATA(&udata, buf + sizeof cmd,
1727 (unsigned long) cmd.response + sizeof resp,
1728 in_len - sizeof cmd, out_len - sizeof resp);
1729
fd3c7904
MB
1730 obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
1731 file->ucontext);
1732 if (IS_ERR(obj))
1733 return PTR_ERR(obj);
42849b26 1734
fd3c7904
MB
1735 xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd.pd_handle,
1736 file->ucontext);
1737 if (IS_ERR(xrcd_uobj)) {
1738 ret = -EINVAL;
1739 goto err_put;
1740 }
42849b26 1741
fd3c7904 1742 xrcd = (struct ib_xrcd *)xrcd_uobj->object;
42849b26
SH
1743 if (!xrcd) {
1744 ret = -EINVAL;
fd3c7904 1745 goto err_xrcd;
42849b26
SH
1746 }
1747
1748 attr.event_handler = ib_uverbs_qp_event_handler;
1749 attr.qp_context = file;
1750 attr.qp_num = cmd.qpn;
1751 attr.qp_type = cmd.qp_type;
1752
1753 obj->uevent.events_reported = 0;
1754 INIT_LIST_HEAD(&obj->uevent.event_list);
1755 INIT_LIST_HEAD(&obj->mcast_list);
1756
1757 qp = ib_open_qp(xrcd, &attr);
1758 if (IS_ERR(qp)) {
1759 ret = PTR_ERR(qp);
fd3c7904 1760 goto err_xrcd;
42849b26
SH
1761 }
1762
42849b26 1763 obj->uevent.uobject.object = qp;
fd3c7904 1764 obj->uevent.uobject.user_handle = cmd.user_handle;
42849b26
SH
1765
1766 memset(&resp, 0, sizeof resp);
1767 resp.qpn = qp->qp_num;
1768 resp.qp_handle = obj->uevent.uobject.id;
1769
1770 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1771 &resp, sizeof resp)) {
1772 ret = -EFAULT;
fd3c7904 1773 goto err_destroy;
42849b26
SH
1774 }
1775
846be90d
YH
1776 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
1777 atomic_inc(&obj->uxrcd->refcnt);
fd3c7904
MB
1778 qp->uobject = &obj->uevent.uobject;
1779 uobj_put_read(xrcd_uobj);
42849b26 1780
42849b26 1781
fd3c7904 1782 uobj_alloc_commit(&obj->uevent.uobject);
42849b26
SH
1783
1784 return in_len;
1785
42849b26
SH
1786err_destroy:
1787 ib_destroy_qp(qp);
fd3c7904
MB
1788err_xrcd:
1789 uobj_put_read(xrcd_uobj);
42849b26 1790err_put:
fd3c7904 1791 uobj_alloc_abort(&obj->uevent.uobject);
42849b26
SH
1792 return ret;
1793}
1794
7ccc9a24 1795ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
057aec0d 1796 struct ib_device *ib_dev,
7ccc9a24
DB
1797 const char __user *buf, int in_len,
1798 int out_len)
1799{
1800 struct ib_uverbs_query_qp cmd;
1801 struct ib_uverbs_query_qp_resp resp;
1802 struct ib_qp *qp;
1803 struct ib_qp_attr *attr;
1804 struct ib_qp_init_attr *init_attr;
d8966fcd 1805 const struct ib_global_route *grh;
7ccc9a24
DB
1806 int ret;
1807
1808 if (copy_from_user(&cmd, buf, sizeof cmd))
1809 return -EFAULT;
1810
1811 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1812 init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
1813 if (!attr || !init_attr) {
1814 ret = -ENOMEM;
1815 goto out;
1816 }
1817
fd3c7904 1818 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
9ead190b 1819 if (!qp) {
7ccc9a24 1820 ret = -EINVAL;
9ead190b
RD
1821 goto out;
1822 }
1823
1824 ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
7ccc9a24 1825
fd3c7904 1826 uobj_put_obj_read(qp);
7ccc9a24
DB
1827
1828 if (ret)
1829 goto out;
1830
1831 memset(&resp, 0, sizeof resp);
1832
1833 resp.qp_state = attr->qp_state;
1834 resp.cur_qp_state = attr->cur_qp_state;
1835 resp.path_mtu = attr->path_mtu;
1836 resp.path_mig_state = attr->path_mig_state;
1837 resp.qkey = attr->qkey;
1838 resp.rq_psn = attr->rq_psn;
1839 resp.sq_psn = attr->sq_psn;
1840 resp.dest_qp_num = attr->dest_qp_num;
1841 resp.qp_access_flags = attr->qp_access_flags;
1842 resp.pkey_index = attr->pkey_index;
1843 resp.alt_pkey_index = attr->alt_pkey_index;
0b26c88f 1844 resp.sq_draining = attr->sq_draining;
7ccc9a24
DB
1845 resp.max_rd_atomic = attr->max_rd_atomic;
1846 resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
1847 resp.min_rnr_timer = attr->min_rnr_timer;
1848 resp.port_num = attr->port_num;
1849 resp.timeout = attr->timeout;
1850 resp.retry_cnt = attr->retry_cnt;
1851 resp.rnr_retry = attr->rnr_retry;
1852 resp.alt_port_num = attr->alt_port_num;
1853 resp.alt_timeout = attr->alt_timeout;
1854
d8966fcd
DC
1855 resp.dest.dlid = rdma_ah_get_dlid(&attr->ah_attr);
1856 resp.dest.sl = rdma_ah_get_sl(&attr->ah_attr);
1857 resp.dest.src_path_bits = rdma_ah_get_path_bits(&attr->ah_attr);
1858 resp.dest.static_rate = rdma_ah_get_static_rate(&attr->ah_attr);
1859 resp.dest.is_global = !!(rdma_ah_get_ah_flags(&attr->ah_attr) &
1860 IB_AH_GRH);
4ba66093 1861 if (resp.dest.is_global) {
d8966fcd
DC
1862 grh = rdma_ah_read_grh(&attr->ah_attr);
1863 memcpy(resp.dest.dgid, grh->dgid.raw, 16);
1864 resp.dest.flow_label = grh->flow_label;
1865 resp.dest.sgid_index = grh->sgid_index;
1866 resp.dest.hop_limit = grh->hop_limit;
1867 resp.dest.traffic_class = grh->traffic_class;
1868 }
1869 resp.dest.port_num = rdma_ah_get_port_num(&attr->ah_attr);
1870
1871 resp.alt_dest.dlid = rdma_ah_get_dlid(&attr->alt_ah_attr);
1872 resp.alt_dest.sl = rdma_ah_get_sl(&attr->alt_ah_attr);
1873 resp.alt_dest.src_path_bits = rdma_ah_get_path_bits(&attr->alt_ah_attr);
1874 resp.alt_dest.static_rate
1875 = rdma_ah_get_static_rate(&attr->alt_ah_attr);
1876 resp.alt_dest.is_global
1877 = !!(rdma_ah_get_ah_flags(&attr->alt_ah_attr) &
1878 IB_AH_GRH);
4ba66093 1879 if (resp.alt_dest.is_global) {
d8966fcd
DC
1880 grh = rdma_ah_read_grh(&attr->alt_ah_attr);
1881 memcpy(resp.alt_dest.dgid, grh->dgid.raw, 16);
1882 resp.alt_dest.flow_label = grh->flow_label;
1883 resp.alt_dest.sgid_index = grh->sgid_index;
1884 resp.alt_dest.hop_limit = grh->hop_limit;
1885 resp.alt_dest.traffic_class = grh->traffic_class;
4ba66093 1886 }
d8966fcd 1887 resp.alt_dest.port_num = rdma_ah_get_port_num(&attr->alt_ah_attr);
7ccc9a24
DB
1888
1889 resp.max_send_wr = init_attr->cap.max_send_wr;
1890 resp.max_recv_wr = init_attr->cap.max_recv_wr;
1891 resp.max_send_sge = init_attr->cap.max_send_sge;
1892 resp.max_recv_sge = init_attr->cap.max_recv_sge;
1893 resp.max_inline_data = init_attr->cap.max_inline_data;
27d56300 1894 resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
7ccc9a24
DB
1895
1896 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1897 &resp, sizeof resp))
1898 ret = -EFAULT;
1899
1900out:
1901 kfree(attr);
1902 kfree(init_attr);
1903
1904 return ret ? ret : in_len;
1905}
1906
9977f4f6
SH
1907/* Remove ignored fields set in the attribute mask */
1908static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
1909{
1910 switch (qp_type) {
1911 case IB_QPT_XRC_INI:
1912 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
b93f3c18
SH
1913 case IB_QPT_XRC_TGT:
1914 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
1915 IB_QP_RNR_RETRY);
9977f4f6
SH
1916 default:
1917 return mask;
1918 }
1919}
1920
189aba99
BW
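/*
 * Common helper for the legacy and extended modify QP commands: copy the
 * user-supplied attributes into a struct ib_qp_attr, rebuild the primary
 * and alternate path AH attributes, and apply the change through
 * ib_security_modify_qp().  Shared QPs (real_qp != qp) are modified
 * without udata.
 */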
1921static int modify_qp(struct ib_uverbs_file *file,
1922 struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
bc38a6ab 1923{
189aba99
BW
1924 struct ib_qp_attr *attr;
1925 struct ib_qp *qp;
1926 int ret;
9bc57e2d 1927
bc38a6ab
RD
1928 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1929 if (!attr)
1930 return -ENOMEM;
1931
fd3c7904 1932 qp = uobj_get_obj_read(qp, cmd->base.qp_handle, file->ucontext);
9ead190b 1933 if (!qp) {
bc38a6ab
RD
1934 ret = -EINVAL;
1935 goto out;
1936 }
1937
189aba99
BW
1938 attr->qp_state = cmd->base.qp_state;
1939 attr->cur_qp_state = cmd->base.cur_qp_state;
1940 attr->path_mtu = cmd->base.path_mtu;
1941 attr->path_mig_state = cmd->base.path_mig_state;
1942 attr->qkey = cmd->base.qkey;
1943 attr->rq_psn = cmd->base.rq_psn;
1944 attr->sq_psn = cmd->base.sq_psn;
1945 attr->dest_qp_num = cmd->base.dest_qp_num;
1946 attr->qp_access_flags = cmd->base.qp_access_flags;
1947 attr->pkey_index = cmd->base.pkey_index;
1948 attr->alt_pkey_index = cmd->base.alt_pkey_index;
1949 attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
1950 attr->max_rd_atomic = cmd->base.max_rd_atomic;
1951 attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
1952 attr->min_rnr_timer = cmd->base.min_rnr_timer;
1953 attr->port_num = cmd->base.port_num;
1954 attr->timeout = cmd->base.timeout;
1955 attr->retry_cnt = cmd->base.retry_cnt;
1956 attr->rnr_retry = cmd->base.rnr_retry;
1957 attr->alt_port_num = cmd->base.alt_port_num;
1958 attr->alt_timeout = cmd->base.alt_timeout;
1959 attr->rate_limit = cmd->rate_limit;
1960
44c58487
DC
1961 attr->ah_attr.type = rdma_ah_find_type(qp->device,
1962 cmd->base.dest.port_num);
4ba66093 1963 if (cmd->base.dest.is_global) {
d8966fcd
DC
1964 rdma_ah_set_grh(&attr->ah_attr, NULL,
1965 cmd->base.dest.flow_label,
1966 cmd->base.dest.sgid_index,
1967 cmd->base.dest.hop_limit,
1968 cmd->base.dest.traffic_class);
1969 rdma_ah_set_dgid_raw(&attr->ah_attr, cmd->base.dest.dgid);
4ba66093 1970 } else {
d8966fcd 1971 rdma_ah_set_ah_flags(&attr->ah_attr, 0);
4ba66093 1972 }
d8966fcd
DC
1973 rdma_ah_set_dlid(&attr->ah_attr, cmd->base.dest.dlid);
1974 rdma_ah_set_sl(&attr->ah_attr, cmd->base.dest.sl);
1975 rdma_ah_set_path_bits(&attr->ah_attr, cmd->base.dest.src_path_bits);
1976 rdma_ah_set_static_rate(&attr->ah_attr, cmd->base.dest.static_rate);
1977 rdma_ah_set_port_num(&attr->ah_attr,
1978 cmd->base.dest.port_num);
189aba99 1979
44c58487
DC
1980 attr->alt_ah_attr.type = rdma_ah_find_type(qp->device,
 1981						cmd->base.alt_dest.port_num);
4ba66093 1982 if (cmd->base.alt_dest.is_global) {
d8966fcd
DC
1983 rdma_ah_set_grh(&attr->alt_ah_attr, NULL,
1984 cmd->base.alt_dest.flow_label,
1985 cmd->base.alt_dest.sgid_index,
1986 cmd->base.alt_dest.hop_limit,
1987 cmd->base.alt_dest.traffic_class);
1988 rdma_ah_set_dgid_raw(&attr->alt_ah_attr,
1989 cmd->base.alt_dest.dgid);
4ba66093 1990 } else {
d8966fcd 1991 rdma_ah_set_ah_flags(&attr->alt_ah_attr, 0);
4ba66093 1992 }
d8966fcd
DC
1993
1994 rdma_ah_set_dlid(&attr->alt_ah_attr, cmd->base.alt_dest.dlid);
1995 rdma_ah_set_sl(&attr->alt_ah_attr, cmd->base.alt_dest.sl);
1996 rdma_ah_set_path_bits(&attr->alt_ah_attr,
1997 cmd->base.alt_dest.src_path_bits);
1998 rdma_ah_set_static_rate(&attr->alt_ah_attr,
1999 cmd->base.alt_dest.static_rate);
2000 rdma_ah_set_port_num(&attr->alt_ah_attr,
2001 cmd->base.alt_dest.port_num);
bc38a6ab 2002
0e0ec7e0 2003 if (qp->real_qp == qp) {
189aba99 2004 if (cmd->base.attr_mask & IB_QP_AV) {
c90ea9d8
MS
2005 ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
2006 if (ret)
2007 goto release_qp;
2008 }
d291f1a6
DJ
2009 ret = ib_security_modify_qp(qp,
2010 attr,
189aba99
BW
2011 modify_qp_mask(qp->qp_type,
2012 cmd->base.attr_mask),
2013 udata);
0e0ec7e0 2014 } else {
d291f1a6
DJ
2015 ret = ib_security_modify_qp(qp,
2016 attr,
2017 modify_qp_mask(qp->qp_type,
2018 cmd->base.attr_mask),
2019 NULL);
0e0ec7e0 2020 }
9ead190b 2021
0fb8bcf0 2022release_qp:
fd3c7904 2023 uobj_put_obj_read(qp);
0fb8bcf0 2024
bc38a6ab 2025out:
bc38a6ab
RD
2026 kfree(attr);
2027
2028 return ret;
2029}
2030
189aba99
BW
2031ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
2032 struct ib_device *ib_dev,
2033 const char __user *buf, int in_len,
2034 int out_len)
2035{
2036 struct ib_uverbs_ex_modify_qp cmd = {};
2037 struct ib_udata udata;
2038 int ret;
2039
2040 if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
2041 return -EFAULT;
2042
2043 if (cmd.base.attr_mask &
2044 ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
2045 return -EOPNOTSUPP;
2046
2047 INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
2048 in_len - sizeof(cmd.base), out_len);
2049
2050 ret = modify_qp(file, &cmd, &udata);
2051 if (ret)
2052 return ret;
2053
2054 return in_len;
2055}
2056
2057int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
2058 struct ib_device *ib_dev,
2059 struct ib_udata *ucore,
2060 struct ib_udata *uhw)
2061{
2062 struct ib_uverbs_ex_modify_qp cmd = {};
2063 int ret;
2064
2065 /*
2066 * Last bit is reserved for extending the attr_mask by
2067 * using another field.
2068 */
2069 BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));
2070
2071 if (ucore->inlen < sizeof(cmd.base))
2072 return -EINVAL;
2073
2074 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
2075 if (ret)
2076 return ret;
2077
2078 if (cmd.base.attr_mask &
2079 ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
2080 return -EOPNOTSUPP;
2081
2082 if (ucore->inlen > sizeof(cmd)) {
2083 if (ib_is_udata_cleared(ucore, sizeof(cmd),
2084 ucore->inlen - sizeof(cmd)))
2085 return -EOPNOTSUPP;
2086 }
2087
2088 ret = modify_qp(file, &cmd, uhw);
2089
2090 return ret;
2091}
2092
bc38a6ab 2093ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
057aec0d 2094 struct ib_device *ib_dev,
bc38a6ab
RD
2095 const char __user *buf, int in_len,
2096 int out_len)
2097{
63aaf647
RD
2098 struct ib_uverbs_destroy_qp cmd;
2099 struct ib_uverbs_destroy_qp_resp resp;
9ead190b 2100 struct ib_uobject *uobj;
63aaf647 2101 struct ib_qp *qp;
9ead190b 2102 struct ib_uqp_object *obj;
63aaf647 2103 int ret = -EINVAL;
bc38a6ab
RD
2104
2105 if (copy_from_user(&cmd, buf, sizeof cmd))
2106 return -EFAULT;
2107
63aaf647
RD
2108 memset(&resp, 0, sizeof resp);
2109
fd3c7904
MB
2110 uobj = uobj_get_write(uobj_get_type(qp), cmd.qp_handle,
2111 file->ucontext);
2112 if (IS_ERR(uobj))
2113 return PTR_ERR(uobj);
2114
9ead190b
RD
2115 qp = uobj->object;
2116 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
fd3c7904
MB
2117 /*
2118 * Make sure we don't free the memory in remove_commit as we still
 2119	 * need the uobject memory to create the response.
2120 */
2121 uverbs_uobject_get(uobj);
f4e40156 2122
fd3c7904
MB
2123 ret = uobj_remove_commit(uobj);
2124 if (ret) {
2125 uverbs_uobject_put(uobj);
9ead190b 2126 return ret;
fd3c7904 2127 }
63aaf647 2128
9ead190b 2129 resp.events_reported = obj->uevent.events_reported;
fd3c7904 2130 uverbs_uobject_put(uobj);
bc38a6ab 2131
63aaf647
RD
2132 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2133 &resp, sizeof resp))
9ead190b 2134 return -EFAULT;
bc38a6ab 2135
9ead190b 2136 return in_len;
bc38a6ab
RD
2137}
2138
e622f2f4
CH
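/*
 * Allocate a send work request with room for num_sge scatter/gather
 * entries appended after the (SGE-aligned) WR structure.  Returns NULL
 * if the requested size would overflow.
 */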
2139static void *alloc_wr(size_t wr_size, __u32 num_sge)
2140{
4f7f4dcf
VT
2141 if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
2142 sizeof (struct ib_sge))
2143 return NULL;
2144
e622f2f4
CH
2145 return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
2146 num_sge * sizeof (struct ib_sge), GFP_KERNEL);
4f7f4dcf 2147}
e622f2f4 2148
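/*
 * Post send work requests on a QP.  The user buffer holds an array of
 * ib_uverbs_send_wr followed by all scatter/gather entries; each entry
 * is rebuilt as the opcode-specific kernel WR (UD, RDMA, atomic or
 * plain send) before being handed to the driver's post_send.
 */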
67cdb40c 2149ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
057aec0d 2150 struct ib_device *ib_dev,
a74cd4af
RD
2151 const char __user *buf, int in_len,
2152 int out_len)
67cdb40c
RD
2153{
2154 struct ib_uverbs_post_send cmd;
2155 struct ib_uverbs_post_send_resp resp;
2156 struct ib_uverbs_send_wr *user_wr;
2157 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
2158 struct ib_qp *qp;
2159 int i, sg_ind;
9ead190b 2160 int is_ud;
67cdb40c 2161 ssize_t ret = -EINVAL;
1d784b89 2162 size_t next_size;
67cdb40c
RD
2163
2164 if (copy_from_user(&cmd, buf, sizeof cmd))
2165 return -EFAULT;
2166
2167 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
2168 cmd.sge_count * sizeof (struct ib_uverbs_sge))
2169 return -EINVAL;
2170
2171 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
2172 return -EINVAL;
2173
2174 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
2175 if (!user_wr)
2176 return -ENOMEM;
2177
fd3c7904 2178 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
9ead190b 2179 if (!qp)
67cdb40c
RD
2180 goto out;
2181
9ead190b 2182 is_ud = qp->qp_type == IB_QPT_UD;
67cdb40c
RD
2183 sg_ind = 0;
2184 last = NULL;
2185 for (i = 0; i < cmd.wr_count; ++i) {
2186 if (copy_from_user(user_wr,
2187 buf + sizeof cmd + i * cmd.wqe_size,
2188 cmd.wqe_size)) {
2189 ret = -EFAULT;
9ead190b 2190 goto out_put;
67cdb40c
RD
2191 }
2192
2193 if (user_wr->num_sge + sg_ind > cmd.sge_count) {
2194 ret = -EINVAL;
9ead190b 2195 goto out_put;
67cdb40c
RD
2196 }
2197
e622f2f4
CH
2198 if (is_ud) {
2199 struct ib_ud_wr *ud;
2200
2201 if (user_wr->opcode != IB_WR_SEND &&
2202 user_wr->opcode != IB_WR_SEND_WITH_IMM) {
2203 ret = -EINVAL;
2204 goto out_put;
2205 }
2206
1d784b89
MM
2207 next_size = sizeof(*ud);
2208 ud = alloc_wr(next_size, user_wr->num_sge);
e622f2f4
CH
2209 if (!ud) {
2210 ret = -ENOMEM;
2211 goto out_put;
2212 }
2213
fd3c7904
MB
2214 ud->ah = uobj_get_obj_read(ah, user_wr->wr.ud.ah,
2215 file->ucontext);
e622f2f4
CH
2216 if (!ud->ah) {
2217 kfree(ud);
2218 ret = -EINVAL;
2219 goto out_put;
2220 }
2221 ud->remote_qpn = user_wr->wr.ud.remote_qpn;
2222 ud->remote_qkey = user_wr->wr.ud.remote_qkey;
2223
2224 next = &ud->wr;
2225 } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2226 user_wr->opcode == IB_WR_RDMA_WRITE ||
2227 user_wr->opcode == IB_WR_RDMA_READ) {
2228 struct ib_rdma_wr *rdma;
2229
1d784b89
MM
2230 next_size = sizeof(*rdma);
2231 rdma = alloc_wr(next_size, user_wr->num_sge);
e622f2f4
CH
2232 if (!rdma) {
2233 ret = -ENOMEM;
2234 goto out_put;
2235 }
2236
2237 rdma->remote_addr = user_wr->wr.rdma.remote_addr;
2238 rdma->rkey = user_wr->wr.rdma.rkey;
2239
2240 next = &rdma->wr;
2241 } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2242 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2243 struct ib_atomic_wr *atomic;
2244
1d784b89
MM
2245 next_size = sizeof(*atomic);
2246 atomic = alloc_wr(next_size, user_wr->num_sge);
e622f2f4
CH
2247 if (!atomic) {
2248 ret = -ENOMEM;
2249 goto out_put;
2250 }
2251
2252 atomic->remote_addr = user_wr->wr.atomic.remote_addr;
2253 atomic->compare_add = user_wr->wr.atomic.compare_add;
2254 atomic->swap = user_wr->wr.atomic.swap;
2255 atomic->rkey = user_wr->wr.atomic.rkey;
2256
2257 next = &atomic->wr;
2258 } else if (user_wr->opcode == IB_WR_SEND ||
2259 user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2260 user_wr->opcode == IB_WR_SEND_WITH_INV) {
1d784b89
MM
2261 next_size = sizeof(*next);
2262 next = alloc_wr(next_size, user_wr->num_sge);
e622f2f4
CH
2263 if (!next) {
2264 ret = -ENOMEM;
2265 goto out_put;
2266 }
2267 } else {
2268 ret = -EINVAL;
9ead190b 2269 goto out_put;
67cdb40c
RD
2270 }
2271
e622f2f4
CH
2272 if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2273 user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
2274 next->ex.imm_data =
2275 (__be32 __force) user_wr->ex.imm_data;
2276 } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
2277 next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
2278 }
2279
67cdb40c
RD
2280 if (!last)
2281 wr = next;
2282 else
2283 last->next = next;
2284 last = next;
2285
2286 next->next = NULL;
2287 next->wr_id = user_wr->wr_id;
2288 next->num_sge = user_wr->num_sge;
2289 next->opcode = user_wr->opcode;
2290 next->send_flags = user_wr->send_flags;
67cdb40c 2291
67cdb40c
RD
2292 if (next->num_sge) {
2293 next->sg_list = (void *) next +
1d784b89 2294 ALIGN(next_size, sizeof(struct ib_sge));
67cdb40c
RD
2295 if (copy_from_user(next->sg_list,
2296 buf + sizeof cmd +
2297 cmd.wr_count * cmd.wqe_size +
2298 sg_ind * sizeof (struct ib_sge),
2299 next->num_sge * sizeof (struct ib_sge))) {
2300 ret = -EFAULT;
9ead190b 2301 goto out_put;
67cdb40c
RD
2302 }
2303 sg_ind += next->num_sge;
2304 } else
2305 next->sg_list = NULL;
2306 }
2307
2308 resp.bad_wr = 0;
0e0ec7e0 2309 ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
67cdb40c
RD
2310 if (ret)
2311 for (next = wr; next; next = next->next) {
2312 ++resp.bad_wr;
2313 if (next == bad_wr)
2314 break;
2315 }
2316
2317 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2318 &resp, sizeof resp))
2319 ret = -EFAULT;
2320
9ead190b 2321out_put:
fd3c7904 2322 uobj_put_obj_read(qp);
67cdb40c
RD
2323
2324 while (wr) {
e622f2f4 2325 if (is_ud && ud_wr(wr)->ah)
fd3c7904 2326 uobj_put_obj_read(ud_wr(wr)->ah);
67cdb40c
RD
2327 next = wr->next;
2328 kfree(wr);
2329 wr = next;
2330 }
2331
18320828 2332out:
67cdb40c
RD
2333 kfree(user_wr);
2334
2335 return ret ? ret : in_len;
2336}
2337
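/*
 * Copy an array of receive work requests from userspace into a kernel
 * linked list of struct ib_recv_wr, each allocated together with its
 * scatter/gather list.  Returns the list head or an ERR_PTR() on
 * failure; the caller frees the list.
 */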
2338static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
2339 int in_len,
2340 u32 wr_count,
2341 u32 sge_count,
2342 u32 wqe_size)
2343{
2344 struct ib_uverbs_recv_wr *user_wr;
2345 struct ib_recv_wr *wr = NULL, *last, *next;
2346 int sg_ind;
2347 int i;
2348 int ret;
2349
2350 if (in_len < wqe_size * wr_count +
2351 sge_count * sizeof (struct ib_uverbs_sge))
2352 return ERR_PTR(-EINVAL);
2353
2354 if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
2355 return ERR_PTR(-EINVAL);
2356
2357 user_wr = kmalloc(wqe_size, GFP_KERNEL);
2358 if (!user_wr)
2359 return ERR_PTR(-ENOMEM);
2360
2361 sg_ind = 0;
2362 last = NULL;
2363 for (i = 0; i < wr_count; ++i) {
2364 if (copy_from_user(user_wr, buf + i * wqe_size,
2365 wqe_size)) {
2366 ret = -EFAULT;
2367 goto err;
2368 }
2369
2370 if (user_wr->num_sge + sg_ind > sge_count) {
2371 ret = -EINVAL;
2372 goto err;
2373 }
2374
4f7f4dcf
VT
2375 if (user_wr->num_sge >=
2376 (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
2377 sizeof (struct ib_sge)) {
2378 ret = -EINVAL;
2379 goto err;
2380 }
2381
67cdb40c
RD
2382 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2383 user_wr->num_sge * sizeof (struct ib_sge),
2384 GFP_KERNEL);
2385 if (!next) {
2386 ret = -ENOMEM;
2387 goto err;
2388 }
2389
2390 if (!last)
2391 wr = next;
2392 else
2393 last->next = next;
2394 last = next;
2395
2396 next->next = NULL;
2397 next->wr_id = user_wr->wr_id;
2398 next->num_sge = user_wr->num_sge;
2399
2400 if (next->num_sge) {
2401 next->sg_list = (void *) next +
2402 ALIGN(sizeof *next, sizeof (struct ib_sge));
2403 if (copy_from_user(next->sg_list,
2404 buf + wr_count * wqe_size +
2405 sg_ind * sizeof (struct ib_sge),
2406 next->num_sge * sizeof (struct ib_sge))) {
2407 ret = -EFAULT;
2408 goto err;
2409 }
2410 sg_ind += next->num_sge;
2411 } else
2412 next->sg_list = NULL;
2413 }
2414
2415 kfree(user_wr);
2416 return wr;
2417
2418err:
2419 kfree(user_wr);
2420
2421 while (wr) {
2422 next = wr->next;
2423 kfree(wr);
2424 wr = next;
2425 }
2426
2427 return ERR_PTR(ret);
2428}
2429
2430ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
057aec0d 2431 struct ib_device *ib_dev,
a74cd4af
RD
2432 const char __user *buf, int in_len,
2433 int out_len)
67cdb40c
RD
2434{
2435 struct ib_uverbs_post_recv cmd;
2436 struct ib_uverbs_post_recv_resp resp;
2437 struct ib_recv_wr *wr, *next, *bad_wr;
2438 struct ib_qp *qp;
2439 ssize_t ret = -EINVAL;
2440
2441 if (copy_from_user(&cmd, buf, sizeof cmd))
2442 return -EFAULT;
2443
2444 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2445 in_len - sizeof cmd, cmd.wr_count,
2446 cmd.sge_count, cmd.wqe_size);
2447 if (IS_ERR(wr))
2448 return PTR_ERR(wr);
2449
fd3c7904 2450 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
9ead190b 2451 if (!qp)
67cdb40c
RD
2452 goto out;
2453
2454 resp.bad_wr = 0;
0e0ec7e0 2455 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
9ead190b 2456
fd3c7904
MB
2457 uobj_put_obj_read(qp);
2458 if (ret) {
67cdb40c
RD
2459 for (next = wr; next; next = next->next) {
2460 ++resp.bad_wr;
2461 if (next == bad_wr)
2462 break;
2463 }
fd3c7904 2464 }
67cdb40c 2465
67cdb40c
RD
2466 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2467 &resp, sizeof resp))
2468 ret = -EFAULT;
2469
2470out:
67cdb40c
RD
2471 while (wr) {
2472 next = wr->next;
2473 kfree(wr);
2474 wr = next;
2475 }
2476
2477 return ret ? ret : in_len;
2478}
2479
2480ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
057aec0d 2481 struct ib_device *ib_dev,
a74cd4af
RD
2482 const char __user *buf, int in_len,
2483 int out_len)
67cdb40c
RD
2484{
2485 struct ib_uverbs_post_srq_recv cmd;
2486 struct ib_uverbs_post_srq_recv_resp resp;
2487 struct ib_recv_wr *wr, *next, *bad_wr;
2488 struct ib_srq *srq;
2489 ssize_t ret = -EINVAL;
2490
2491 if (copy_from_user(&cmd, buf, sizeof cmd))
2492 return -EFAULT;
2493
2494 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2495 in_len - sizeof cmd, cmd.wr_count,
2496 cmd.sge_count, cmd.wqe_size);
2497 if (IS_ERR(wr))
2498 return PTR_ERR(wr);
2499
fd3c7904 2500 srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
9ead190b 2501 if (!srq)
67cdb40c
RD
2502 goto out;
2503
2504 resp.bad_wr = 0;
2505 ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
9ead190b 2506
fd3c7904 2507 uobj_put_obj_read(srq);
9ead190b 2508
67cdb40c
RD
2509 if (ret)
2510 for (next = wr; next; next = next->next) {
2511 ++resp.bad_wr;
2512 if (next == bad_wr)
2513 break;
2514 }
2515
67cdb40c
RD
2516 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2517 &resp, sizeof resp))
2518 ret = -EFAULT;
2519
2520out:
67cdb40c
RD
2521 while (wr) {
2522 next = wr->next;
2523 kfree(wr);
2524 wr = next;
2525 }
2526
2527 return ret ? ret : in_len;
2528}
2529
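/*
 * Create an address handle from the user-supplied attributes.  The GRH
 * fields are filled in only when the caller marked the address as global.
 */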
2530ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
057aec0d 2531 struct ib_device *ib_dev,
67cdb40c
RD
2532 const char __user *buf, int in_len,
2533 int out_len)
2534{
2535 struct ib_uverbs_create_ah cmd;
2536 struct ib_uverbs_create_ah_resp resp;
2537 struct ib_uobject *uobj;
2538 struct ib_pd *pd;
2539 struct ib_ah *ah;
90898850 2540 struct rdma_ah_attr attr;
67cdb40c 2541 int ret;
477864c8 2542 struct ib_udata udata;
d8966fcd 2543 u8 *dmac;
67cdb40c
RD
2544
2545 if (out_len < sizeof resp)
2546 return -ENOSPC;
2547
2548 if (copy_from_user(&cmd, buf, sizeof cmd))
2549 return -EFAULT;
2550
477864c8
MS
2551 INIT_UDATA(&udata, buf + sizeof(cmd),
2552 (unsigned long)cmd.response + sizeof(resp),
2553 in_len - sizeof(cmd), out_len - sizeof(resp));
2554
fd3c7904
MB
2555 uobj = uobj_alloc(uobj_get_type(ah), file->ucontext);
2556 if (IS_ERR(uobj))
2557 return PTR_ERR(uobj);
67cdb40c 2558
fd3c7904 2559 pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
9ead190b 2560 if (!pd) {
67cdb40c 2561 ret = -EINVAL;
9ead190b 2562 goto err;
67cdb40c
RD
2563 }
2564
44c58487 2565 attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
d8966fcd
DC
2566 rdma_ah_set_dlid(&attr, cmd.attr.dlid);
2567 rdma_ah_set_sl(&attr, cmd.attr.sl);
2568 rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
2569 rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
2570 rdma_ah_set_port_num(&attr, cmd.attr.port_num);
2571
4ba66093 2572 if (cmd.attr.is_global) {
d8966fcd
DC
2573 rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
2574 cmd.attr.grh.sgid_index,
2575 cmd.attr.grh.hop_limit,
2576 cmd.attr.grh.traffic_class);
2577 rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
4ba66093 2578 } else {
d8966fcd 2579 rdma_ah_set_ah_flags(&attr, 0);
4ba66093 2580 }
d8966fcd
DC
2581 dmac = rdma_ah_retrieve_dmac(&attr);
2582 if (dmac)
2583 memset(dmac, 0, ETH_ALEN);
67cdb40c 2584
477864c8
MS
2585 ah = pd->device->create_ah(pd, &attr, &udata);
2586
67cdb40c
RD
2587 if (IS_ERR(ah)) {
2588 ret = PTR_ERR(ah);
fd3c7904 2589 goto err_put;
67cdb40c
RD
2590 }
2591
477864c8
MS
2592 ah->device = pd->device;
2593 ah->pd = pd;
2594 atomic_inc(&pd->usecnt);
9ead190b 2595 ah->uobject = uobj;
fd3c7904 2596 uobj->user_handle = cmd.user_handle;
9ead190b 2597 uobj->object = ah;
67cdb40c 2598
67cdb40c
RD
2599 resp.ah_handle = uobj->id;
2600
2601 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2602 &resp, sizeof resp)) {
2603 ret = -EFAULT;
9ead190b 2604 goto err_copy;
67cdb40c
RD
2605 }
2606
fd3c7904
MB
2607 uobj_put_obj_read(pd);
2608 uobj_alloc_commit(uobj);
67cdb40c
RD
2609
2610 return in_len;
2611
9ead190b 2612err_copy:
36523159 2613 rdma_destroy_ah(ah);
67cdb40c 2614
fd3c7904
MB
2615err_put:
2616 uobj_put_obj_read(pd);
ec924b47 2617
9ead190b 2618err:
fd3c7904 2619 uobj_alloc_abort(uobj);
67cdb40c
RD
2620 return ret;
2621}
2622
2623ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
057aec0d 2624 struct ib_device *ib_dev,
67cdb40c
RD
2625 const char __user *buf, int in_len, int out_len)
2626{
2627 struct ib_uverbs_destroy_ah cmd;
67cdb40c 2628 struct ib_uobject *uobj;
9ead190b 2629 int ret;
67cdb40c
RD
2630
2631 if (copy_from_user(&cmd, buf, sizeof cmd))
2632 return -EFAULT;
2633
fd3c7904
MB
2634 uobj = uobj_get_write(uobj_get_type(ah), cmd.ah_handle,
2635 file->ucontext);
2636 if (IS_ERR(uobj))
2637 return PTR_ERR(uobj);
67cdb40c 2638
fd3c7904
MB
2639 ret = uobj_remove_commit(uobj);
2640 return ret ?: in_len;
67cdb40c
RD
2641}
2642
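/*
 * Attach a QP to a multicast group.  Each attached (GID, LID) pair is
 * also recorded on the QP uobject's mcast_list, so repeated attaches
 * are idempotent and the attachments can be torn down with the QP.
 */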
bc38a6ab 2643ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
057aec0d 2644 struct ib_device *ib_dev,
bc38a6ab
RD
2645 const char __user *buf, int in_len,
2646 int out_len)
2647{
2648 struct ib_uverbs_attach_mcast cmd;
2649 struct ib_qp *qp;
9ead190b 2650 struct ib_uqp_object *obj;
f4e40156 2651 struct ib_uverbs_mcast_entry *mcast;
9ead190b 2652 int ret;
bc38a6ab
RD
2653
2654 if (copy_from_user(&cmd, buf, sizeof cmd))
2655 return -EFAULT;
2656
fd3c7904 2657 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
9ead190b
RD
2658 if (!qp)
2659 return -EINVAL;
f4e40156 2660
9ead190b 2661 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
f4e40156 2662
f48b7269 2663 mutex_lock(&obj->mcast_lock);
9ead190b 2664 list_for_each_entry(mcast, &obj->mcast_list, list)
f4e40156
JM
2665 if (cmd.mlid == mcast->lid &&
2666 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2667 ret = 0;
9ead190b 2668 goto out_put;
f4e40156
JM
2669 }
2670
2671 mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2672 if (!mcast) {
2673 ret = -ENOMEM;
9ead190b 2674 goto out_put;
f4e40156
JM
2675 }
2676
2677 mcast->lid = cmd.mlid;
2678 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
bc38a6ab 2679
f4e40156 2680 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
9ead190b
RD
2681 if (!ret)
2682 list_add_tail(&mcast->list, &obj->mcast_list);
2683 else
f4e40156
JM
2684 kfree(mcast);
2685
9ead190b 2686out_put:
f48b7269 2687 mutex_unlock(&obj->mcast_lock);
fd3c7904 2688 uobj_put_obj_read(qp);
bc38a6ab
RD
2689
2690 return ret ? ret : in_len;
2691}
2692
2693ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
057aec0d 2694 struct ib_device *ib_dev,
bc38a6ab
RD
2695 const char __user *buf, int in_len,
2696 int out_len)
2697{
2698 struct ib_uverbs_detach_mcast cmd;
9ead190b 2699 struct ib_uqp_object *obj;
bc38a6ab 2700 struct ib_qp *qp;
f4e40156 2701 struct ib_uverbs_mcast_entry *mcast;
bc38a6ab 2702 int ret = -EINVAL;
20c7840a 2703 bool found = false;
bc38a6ab
RD
2704
2705 if (copy_from_user(&cmd, buf, sizeof cmd))
2706 return -EFAULT;
2707
fd3c7904 2708 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
9ead190b
RD
2709 if (!qp)
2710 return -EINVAL;
bc38a6ab 2711
fd3c7904 2712 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
f48b7269 2713 mutex_lock(&obj->mcast_lock);
fd3c7904 2714
9ead190b 2715 list_for_each_entry(mcast, &obj->mcast_list, list)
f4e40156
JM
2716 if (cmd.mlid == mcast->lid &&
2717 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2718 list_del(&mcast->list);
2719 kfree(mcast);
20c7840a 2720 found = true;
f4e40156
JM
2721 break;
2722 }
2723
20c7840a
MR
2724 if (!found) {
2725 ret = -EINVAL;
2726 goto out_put;
2727 }
2728
2729 ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);
2730
9ead190b 2731out_put:
f48b7269 2732 mutex_unlock(&obj->mcast_lock);
fd3c7904 2733 uobj_put_obj_read(qp);
bc38a6ab
RD
2734 return ret ? ret : in_len;
2735}
f520ba5a 2736
94e03f11
MR
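/*
 * Convert a userspace flow "action" spec (flow tag or drop) into the
 * kernel's union ib_flow_spec, checking that the size matches the ABI.
 */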
2737static int kern_spec_to_ib_spec_action(struct ib_uverbs_flow_spec *kern_spec,
2738 union ib_flow_spec *ib_spec)
2739{
2740 ib_spec->type = kern_spec->type;
2741 switch (ib_spec->type) {
2742 case IB_FLOW_SPEC_ACTION_TAG:
2743 if (kern_spec->flow_tag.size !=
2744 sizeof(struct ib_uverbs_flow_spec_action_tag))
2745 return -EINVAL;
2746
2747 ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
2748 ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
2749 break;
483a3966
SS
2750 case IB_FLOW_SPEC_ACTION_DROP:
2751 if (kern_spec->drop.size !=
2752 sizeof(struct ib_uverbs_flow_spec_action_drop))
2753 return -EINVAL;
2754
2755 ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
2756 break;
94e03f11
MR
2757 default:
2758 return -EINVAL;
2759 }
2760 return 0;
2761}
2762
15dfbd6b
MG
2763static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
2764{
2765 /* Returns user space filter size, includes padding */
2766 return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
2767}
2768
2769static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
2770 u16 ib_real_filter_sz)
2771{
2772 /*
2773 * User space filter structures must be 64 bit aligned, otherwise this
2774 * may pass, but we won't handle additional new attributes.
2775 */
2776
2777 if (kern_filter_size > ib_real_filter_sz) {
2778 if (memchr_inv(kern_spec_filter +
2779 ib_real_filter_sz, 0,
2780 kern_filter_size - ib_real_filter_sz))
2781 return -EINVAL;
2782 return ib_real_filter_sz;
2783 }
2784 return kern_filter_size;
2785}
2786
94e03f11
MR
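/*
 * Convert a userspace flow "filter" spec (ETH/IPv4/IPv6/TCP/UDP/VXLAN)
 * into the kernel representation.  The value and mask follow the header
 * in the user buffer; trailing bytes the kernel does not understand
 * must be zero, which spec_filter_size() checks.
 */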
2787static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
2788 union ib_flow_spec *ib_spec)
436f2ad0 2789{
15dfbd6b
MG
2790 ssize_t actual_filter_sz;
2791 ssize_t kern_filter_sz;
2792 ssize_t ib_filter_sz;
2793 void *kern_spec_mask;
2794 void *kern_spec_val;
2795
c780d82a
YD
2796 if (kern_spec->reserved)
2797 return -EINVAL;
2798
436f2ad0
HHZ
2799 ib_spec->type = kern_spec->type;
2800
15dfbd6b
MG
2801 kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
2802 /* User flow spec size must be aligned to 4 bytes */
2803 if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
2804 return -EINVAL;
2805
2806 kern_spec_val = (void *)kern_spec +
2807 sizeof(struct ib_uverbs_flow_spec_hdr);
2808 kern_spec_mask = kern_spec_val + kern_filter_sz;
fbf46860
MR
2809 if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
2810 return -EINVAL;
15dfbd6b 2811
fbf46860 2812 switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
436f2ad0 2813 case IB_FLOW_SPEC_ETH:
15dfbd6b
MG
2814 ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
2815 actual_filter_sz = spec_filter_size(kern_spec_mask,
2816 kern_filter_sz,
2817 ib_filter_sz);
2818 if (actual_filter_sz <= 0)
436f2ad0 2819 return -EINVAL;
15dfbd6b
MG
2820 ib_spec->size = sizeof(struct ib_flow_spec_eth);
2821 memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
2822 memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
436f2ad0
HHZ
2823 break;
2824 case IB_FLOW_SPEC_IPV4:
15dfbd6b
MG
2825 ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
2826 actual_filter_sz = spec_filter_size(kern_spec_mask,
2827 kern_filter_sz,
2828 ib_filter_sz);
2829 if (actual_filter_sz <= 0)
436f2ad0 2830 return -EINVAL;
15dfbd6b
MG
2831 ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
2832 memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
2833 memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
436f2ad0 2834 break;
4c2aae71 2835 case IB_FLOW_SPEC_IPV6:
15dfbd6b
MG
2836 ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
2837 actual_filter_sz = spec_filter_size(kern_spec_mask,
2838 kern_filter_sz,
2839 ib_filter_sz);
2840 if (actual_filter_sz <= 0)
4c2aae71 2841 return -EINVAL;
15dfbd6b
MG
2842 ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
2843 memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
2844 memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);
a72c6a2b
MG
2845
2846 if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
2847 (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
2848 return -EINVAL;
4c2aae71 2849 break;
436f2ad0
HHZ
2850 case IB_FLOW_SPEC_TCP:
2851 case IB_FLOW_SPEC_UDP:
15dfbd6b
MG
2852 ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
2853 actual_filter_sz = spec_filter_size(kern_spec_mask,
2854 kern_filter_sz,
2855 ib_filter_sz);
2856 if (actual_filter_sz <= 0)
436f2ad0 2857 return -EINVAL;
15dfbd6b
MG
2858 ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
2859 memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
2860 memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
436f2ad0 2861 break;
0dbf3332
MR
2862 case IB_FLOW_SPEC_VXLAN_TUNNEL:
2863 ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
2864 actual_filter_sz = spec_filter_size(kern_spec_mask,
2865 kern_filter_sz,
2866 ib_filter_sz);
2867 if (actual_filter_sz <= 0)
2868 return -EINVAL;
2869 ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
2870 memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
2871 memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);
2872
2873 if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
2874 (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
2875 return -EINVAL;
2876 break;
436f2ad0
HHZ
2877 default:
2878 return -EINVAL;
2879 }
2880 return 0;
2881}
2882
94e03f11
MR
2883static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
2884 union ib_flow_spec *ib_spec)
2885{
2886 if (kern_spec->reserved)
2887 return -EINVAL;
2888
2889 if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
2890 return kern_spec_to_ib_spec_action(kern_spec, ib_spec);
2891 else
2892 return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
2893}
2894
f213c052
YH
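/*
 * Extended command: create a work queue (WQ) on the given PD and CQ.
 * resp.response_length reports how much of the response this kernel
 * actually filled in, so newer userspace can probe for optional fields.
 */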
2895int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
2896 struct ib_device *ib_dev,
2897 struct ib_udata *ucore,
2898 struct ib_udata *uhw)
2899{
2900 struct ib_uverbs_ex_create_wq cmd = {};
2901 struct ib_uverbs_ex_create_wq_resp resp = {};
2902 struct ib_uwq_object *obj;
2903 int err = 0;
2904 struct ib_cq *cq;
2905 struct ib_pd *pd;
2906 struct ib_wq *wq;
2907 struct ib_wq_init_attr wq_init_attr = {};
2908 size_t required_cmd_sz;
2909 size_t required_resp_len;
2910
2911 required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
2912 required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);
2913
2914 if (ucore->inlen < required_cmd_sz)
2915 return -EINVAL;
2916
2917 if (ucore->outlen < required_resp_len)
2918 return -ENOSPC;
2919
2920 if (ucore->inlen > sizeof(cmd) &&
2921 !ib_is_udata_cleared(ucore, sizeof(cmd),
2922 ucore->inlen - sizeof(cmd)))
2923 return -EOPNOTSUPP;
2924
2925 err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
2926 if (err)
2927 return err;
2928
2929 if (cmd.comp_mask)
2930 return -EOPNOTSUPP;
2931
fd3c7904
MB
2932 obj = (struct ib_uwq_object *)uobj_alloc(uobj_get_type(wq),
2933 file->ucontext);
2934 if (IS_ERR(obj))
2935 return PTR_ERR(obj);
f213c052 2936
fd3c7904 2937 pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
f213c052
YH
2938 if (!pd) {
2939 err = -EINVAL;
2940 goto err_uobj;
2941 }
2942
fd3c7904 2943 cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
f213c052
YH
2944 if (!cq) {
2945 err = -EINVAL;
2946 goto err_put_pd;
2947 }
2948
2949 wq_init_attr.cq = cq;
2950 wq_init_attr.max_sge = cmd.max_sge;
2951 wq_init_attr.max_wr = cmd.max_wr;
2952 wq_init_attr.wq_context = file;
2953 wq_init_attr.wq_type = cmd.wq_type;
2954 wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
af1cb95d
NO
2955 if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) +
2956 sizeof(cmd.create_flags)))
2957 wq_init_attr.create_flags = cmd.create_flags;
f213c052
YH
2958 obj->uevent.events_reported = 0;
2959 INIT_LIST_HEAD(&obj->uevent.event_list);
2960 wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
2961 if (IS_ERR(wq)) {
2962 err = PTR_ERR(wq);
2963 goto err_put_cq;
2964 }
2965
2966 wq->uobject = &obj->uevent.uobject;
2967 obj->uevent.uobject.object = wq;
2968 wq->wq_type = wq_init_attr.wq_type;
2969 wq->cq = cq;
2970 wq->pd = pd;
2971 wq->device = pd->device;
2972 wq->wq_context = wq_init_attr.wq_context;
2973 atomic_set(&wq->usecnt, 0);
2974 atomic_inc(&pd->usecnt);
2975 atomic_inc(&cq->usecnt);
2976 wq->uobject = &obj->uevent.uobject;
2977 obj->uevent.uobject.object = wq;
f213c052
YH
2978
2979 memset(&resp, 0, sizeof(resp));
2980 resp.wq_handle = obj->uevent.uobject.id;
2981 resp.max_sge = wq_init_attr.max_sge;
2982 resp.max_wr = wq_init_attr.max_wr;
2983 resp.wqn = wq->wq_num;
2984 resp.response_length = required_resp_len;
2985 err = ib_copy_to_udata(ucore,
2986 &resp, resp.response_length);
2987 if (err)
2988 goto err_copy;
2989
fd3c7904
MB
2990 uobj_put_obj_read(pd);
2991 uobj_put_obj_read(cq);
2992 uobj_alloc_commit(&obj->uevent.uobject);
f213c052
YH
2993 return 0;
2994
2995err_copy:
f213c052
YH
2996 ib_destroy_wq(wq);
2997err_put_cq:
fd3c7904 2998 uobj_put_obj_read(cq);
f213c052 2999err_put_pd:
fd3c7904 3000 uobj_put_obj_read(pd);
f213c052 3001err_uobj:
fd3c7904 3002 uobj_alloc_abort(&obj->uevent.uobject);
f213c052
YH
3003
3004 return err;
3005}
3006
3007int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
3008 struct ib_device *ib_dev,
3009 struct ib_udata *ucore,
3010 struct ib_udata *uhw)
3011{
3012 struct ib_uverbs_ex_destroy_wq cmd = {};
3013 struct ib_uverbs_ex_destroy_wq_resp resp = {};
3014 struct ib_wq *wq;
3015 struct ib_uobject *uobj;
3016 struct ib_uwq_object *obj;
3017 size_t required_cmd_sz;
3018 size_t required_resp_len;
3019 int ret;
3020
3021 required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
3022 required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
3023
3024 if (ucore->inlen < required_cmd_sz)
3025 return -EINVAL;
3026
3027 if (ucore->outlen < required_resp_len)
3028 return -ENOSPC;
3029
3030 if (ucore->inlen > sizeof(cmd) &&
3031 !ib_is_udata_cleared(ucore, sizeof(cmd),
3032 ucore->inlen - sizeof(cmd)))
3033 return -EOPNOTSUPP;
3034
3035 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3036 if (ret)
3037 return ret;
3038
3039 if (cmd.comp_mask)
3040 return -EOPNOTSUPP;
3041
3042 resp.response_length = required_resp_len;
fd3c7904
MB
3043 uobj = uobj_get_write(uobj_get_type(wq), cmd.wq_handle,
3044 file->ucontext);
3045 if (IS_ERR(uobj))
3046 return PTR_ERR(uobj);
f213c052
YH
3047
3048 wq = uobj->object;
3049 obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
fd3c7904
MB
3050 /*
3051 * Make sure we don't free the memory in remove_commit as we still
 3052	 * need the uobject memory to create the response.
3053 */
3054 uverbs_uobject_get(uobj);
f213c052 3055
fd3c7904 3056 ret = uobj_remove_commit(uobj);
f213c052 3057 resp.events_reported = obj->uevent.events_reported;
fd3c7904 3058 uverbs_uobject_put(uobj);
f213c052
YH
3059 if (ret)
3060 return ret;
3061
c52d8114 3062 return ib_copy_to_udata(ucore, &resp, resp.response_length);
f213c052
YH
3063}
3064
3065int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
3066 struct ib_device *ib_dev,
3067 struct ib_udata *ucore,
3068 struct ib_udata *uhw)
3069{
3070 struct ib_uverbs_ex_modify_wq cmd = {};
3071 struct ib_wq *wq;
3072 struct ib_wq_attr wq_attr = {};
3073 size_t required_cmd_sz;
3074 int ret;
3075
3076 required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
3077 if (ucore->inlen < required_cmd_sz)
3078 return -EINVAL;
3079
3080 if (ucore->inlen > sizeof(cmd) &&
3081 !ib_is_udata_cleared(ucore, sizeof(cmd),
3082 ucore->inlen - sizeof(cmd)))
3083 return -EOPNOTSUPP;
3084
3085 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3086 if (ret)
3087 return ret;
3088
3089 if (!cmd.attr_mask)
3090 return -EINVAL;
3091
af1cb95d 3092 if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
f213c052
YH
3093 return -EINVAL;
3094
fd3c7904 3095 wq = uobj_get_obj_read(wq, cmd.wq_handle, file->ucontext);
f213c052
YH
3096 if (!wq)
3097 return -EINVAL;
3098
3099 wq_attr.curr_wq_state = cmd.curr_wq_state;
3100 wq_attr.wq_state = cmd.wq_state;
af1cb95d
NO
3101 if (cmd.attr_mask & IB_WQ_FLAGS) {
3102 wq_attr.flags = cmd.flags;
3103 wq_attr.flags_mask = cmd.flags_mask;
3104 }
f213c052 3105 ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
fd3c7904 3106 uobj_put_obj_read(wq);
f213c052
YH
3107 return ret;
3108}
3109
de019a94
YH
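/*
 * Create an RSS indirection table from an array of WQ handles.  The
 * table holds 1 << log_ind_tbl_size entries; every referenced WQ is
 * looked up and its use count bumped for the lifetime of the table.
 */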
3110int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
3111 struct ib_device *ib_dev,
3112 struct ib_udata *ucore,
3113 struct ib_udata *uhw)
3114{
3115 struct ib_uverbs_ex_create_rwq_ind_table cmd = {};
3116 struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
3117 struct ib_uobject *uobj;
3118 int err = 0;
3119 struct ib_rwq_ind_table_init_attr init_attr = {};
3120 struct ib_rwq_ind_table *rwq_ind_tbl;
3121 struct ib_wq **wqs = NULL;
3122 u32 *wqs_handles = NULL;
3123 struct ib_wq *wq = NULL;
3124 int i, j, num_read_wqs;
3125 u32 num_wq_handles;
3126 u32 expected_in_size;
3127 size_t required_cmd_sz_header;
3128 size_t required_resp_len;
3129
3130 required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
3131 required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);
3132
3133 if (ucore->inlen < required_cmd_sz_header)
3134 return -EINVAL;
3135
3136 if (ucore->outlen < required_resp_len)
3137 return -ENOSPC;
3138
3139 err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
3140 if (err)
3141 return err;
3142
3143 ucore->inbuf += required_cmd_sz_header;
3144 ucore->inlen -= required_cmd_sz_header;
3145
3146 if (cmd.comp_mask)
3147 return -EOPNOTSUPP;
3148
3149 if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
3150 return -EINVAL;
3151
3152 num_wq_handles = 1 << cmd.log_ind_tbl_size;
3153 expected_in_size = num_wq_handles * sizeof(__u32);
3154 if (num_wq_handles == 1)
3155 /* input size for wq handles is u64 aligned */
3156 expected_in_size += sizeof(__u32);
3157
3158 if (ucore->inlen < expected_in_size)
3159 return -EINVAL;
3160
3161 if (ucore->inlen > expected_in_size &&
3162 !ib_is_udata_cleared(ucore, expected_in_size,
3163 ucore->inlen - expected_in_size))
3164 return -EOPNOTSUPP;
3165
3166 wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
3167 GFP_KERNEL);
3168 if (!wqs_handles)
3169 return -ENOMEM;
3170
3171 err = ib_copy_from_udata(wqs_handles, ucore,
3172 num_wq_handles * sizeof(__u32));
3173 if (err)
3174 goto err_free;
3175
3176 wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
3177 if (!wqs) {
3178 err = -ENOMEM;
3179 goto err_free;
3180 }
3181
3182 for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
3183 num_read_wqs++) {
fd3c7904
MB
3184 wq = uobj_get_obj_read(wq, wqs_handles[num_read_wqs],
3185 file->ucontext);
de019a94
YH
3186 if (!wq) {
3187 err = -EINVAL;
3188 goto put_wqs;
3189 }
3190
3191 wqs[num_read_wqs] = wq;
3192 }
3193
fd3c7904
MB
3194 uobj = uobj_alloc(uobj_get_type(rwq_ind_table), file->ucontext);
3195 if (IS_ERR(uobj)) {
3196 err = PTR_ERR(uobj);
de019a94
YH
3197 goto put_wqs;
3198 }
3199
de019a94
YH
3200 init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
3201 init_attr.ind_tbl = wqs;
3202 rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);
3203
3204 if (IS_ERR(rwq_ind_tbl)) {
3205 err = PTR_ERR(rwq_ind_tbl);
3206 goto err_uobj;
3207 }
3208
3209 rwq_ind_tbl->ind_tbl = wqs;
3210 rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
3211 rwq_ind_tbl->uobject = uobj;
3212 uobj->object = rwq_ind_tbl;
3213 rwq_ind_tbl->device = ib_dev;
3214 atomic_set(&rwq_ind_tbl->usecnt, 0);
3215
3216 for (i = 0; i < num_wq_handles; i++)
3217 atomic_inc(&wqs[i]->usecnt);
3218
de019a94
YH
3219 resp.ind_tbl_handle = uobj->id;
3220 resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
3221 resp.response_length = required_resp_len;
3222
3223 err = ib_copy_to_udata(ucore,
3224 &resp, resp.response_length);
3225 if (err)
3226 goto err_copy;
3227
3228 kfree(wqs_handles);
3229
3230 for (j = 0; j < num_read_wqs; j++)
fd3c7904 3231 uobj_put_obj_read(wqs[j]);
de019a94 3232
fd3c7904 3233 uobj_alloc_commit(uobj);
de019a94
YH
3234 return 0;
3235
3236err_copy:
de019a94
YH
3237 ib_destroy_rwq_ind_table(rwq_ind_tbl);
3238err_uobj:
fd3c7904 3239 uobj_alloc_abort(uobj);
de019a94
YH
3240put_wqs:
3241 for (j = 0; j < num_read_wqs; j++)
fd3c7904 3242 uobj_put_obj_read(wqs[j]);
de019a94
YH
3243err_free:
3244 kfree(wqs_handles);
3245 kfree(wqs);
3246 return err;
3247}
3248
3249int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
3250 struct ib_device *ib_dev,
3251 struct ib_udata *ucore,
3252 struct ib_udata *uhw)
3253{
3254 struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
de019a94
YH
3255 struct ib_uobject *uobj;
3256 int ret;
de019a94
YH
3257 size_t required_cmd_sz;
3258
3259 required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);
3260
3261 if (ucore->inlen < required_cmd_sz)
3262 return -EINVAL;
3263
3264 if (ucore->inlen > sizeof(cmd) &&
3265 !ib_is_udata_cleared(ucore, sizeof(cmd),
3266 ucore->inlen - sizeof(cmd)))
3267 return -EOPNOTSUPP;
3268
3269 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3270 if (ret)
3271 return ret;
3272
3273 if (cmd.comp_mask)
3274 return -EOPNOTSUPP;
3275
fd3c7904
MB
3276 uobj = uobj_get_write(uobj_get_type(rwq_ind_table), cmd.ind_tbl_handle,
3277 file->ucontext);
3278 if (IS_ERR(uobj))
3279 return PTR_ERR(uobj);
de019a94 3280
fd3c7904 3281 return uobj_remove_commit(uobj);
de019a94
YH
3282}
3283
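/*
 * Create a flow steering rule on a QP.  The variable-length list of
 * flow specs is copied from userspace, converted spec by spec with
 * kern_spec_to_ib_spec(), and handed to the driver via ib_create_flow().
 * CAP_NET_RAW is required.
 */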
f21519b2 3284int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
057aec0d 3285 struct ib_device *ib_dev,
f21519b2
YD
3286 struct ib_udata *ucore,
3287 struct ib_udata *uhw)
436f2ad0
HHZ
3288{
3289 struct ib_uverbs_create_flow cmd;
3290 struct ib_uverbs_create_flow_resp resp;
3291 struct ib_uobject *uobj;
3292 struct ib_flow *flow_id;
d82693da 3293 struct ib_uverbs_flow_attr *kern_flow_attr;
436f2ad0
HHZ
3294 struct ib_flow_attr *flow_attr;
3295 struct ib_qp *qp;
3296 int err = 0;
3297 void *kern_spec;
3298 void *ib_spec;
3299 int i;
436f2ad0 3300
6bcca3d4
YD
3301 if (ucore->inlen < sizeof(cmd))
3302 return -EINVAL;
3303
f21519b2 3304 if (ucore->outlen < sizeof(resp))
436f2ad0
HHZ
3305 return -ENOSPC;
3306
f21519b2
YD
3307 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3308 if (err)
3309 return err;
3310
3311 ucore->inbuf += sizeof(cmd);
3312 ucore->inlen -= sizeof(cmd);
436f2ad0 3313
22878dbc
MB
3314 if (cmd.comp_mask)
3315 return -EINVAL;
3316
e3b6d8cf 3317 if (!capable(CAP_NET_RAW))
436f2ad0
HHZ
3318 return -EPERM;
3319
a3100a78
MV
3320 if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
3321 return -EINVAL;
3322
3323 if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
3324 ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
3325 (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
3326 return -EINVAL;
3327
f8848274 3328 if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
22878dbc
MB
3329 return -EINVAL;
3330
f21519b2 3331 if (cmd.flow_attr.size > ucore->inlen ||
f8848274 3332 cmd.flow_attr.size >
b68c9560 3333 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
22878dbc
MB
3334 return -EINVAL;
3335
c780d82a
YD
3336 if (cmd.flow_attr.reserved[0] ||
3337 cmd.flow_attr.reserved[1])
3338 return -EINVAL;
3339
436f2ad0 3340 if (cmd.flow_attr.num_of_specs) {
f8848274
MB
3341 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
3342 GFP_KERNEL);
436f2ad0
HHZ
3343 if (!kern_flow_attr)
3344 return -ENOMEM;
3345
3346 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
f21519b2
YD
3347 err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
3348 cmd.flow_attr.size);
3349 if (err)
436f2ad0 3350 goto err_free_attr;
436f2ad0
HHZ
3351 } else {
3352 kern_flow_attr = &cmd.flow_attr;
436f2ad0
HHZ
3353 }
3354
fd3c7904
MB
3355 uobj = uobj_alloc(uobj_get_type(flow), file->ucontext);
3356 if (IS_ERR(uobj)) {
3357 err = PTR_ERR(uobj);
436f2ad0
HHZ
3358 goto err_free_attr;
3359 }
436f2ad0 3360
fd3c7904 3361 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
436f2ad0
HHZ
3362 if (!qp) {
3363 err = -EINVAL;
3364 goto err_uobj;
3365 }
3366
15dfbd6b
MG
3367 flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
3368 sizeof(union ib_flow_spec), GFP_KERNEL);
436f2ad0
HHZ
3369 if (!flow_attr) {
3370 err = -ENOMEM;
3371 goto err_put;
3372 }
3373
3374 flow_attr->type = kern_flow_attr->type;
3375 flow_attr->priority = kern_flow_attr->priority;
3376 flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
3377 flow_attr->port = kern_flow_attr->port;
3378 flow_attr->flags = kern_flow_attr->flags;
3379 flow_attr->size = sizeof(*flow_attr);
3380
3381 kern_spec = kern_flow_attr + 1;
3382 ib_spec = flow_attr + 1;
f8848274 3383 for (i = 0; i < flow_attr->num_of_specs &&
b68c9560 3384 cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
f8848274 3385 cmd.flow_attr.size >=
b68c9560 3386 ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
436f2ad0
HHZ
3387 err = kern_spec_to_ib_spec(kern_spec, ib_spec);
3388 if (err)
3389 goto err_free;
3390 flow_attr->size +=
3391 ((union ib_flow_spec *) ib_spec)->size;
b68c9560
YD
3392 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
3393 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
436f2ad0
HHZ
3394 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
3395 }
f8848274
MB
3396 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
3397 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
3398 i, cmd.flow_attr.size);
98a37510 3399 err = -EINVAL;
436f2ad0
HHZ
3400 goto err_free;
3401 }
3402 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
3403 if (IS_ERR(flow_id)) {
3404 err = PTR_ERR(flow_id);
fd3c7904 3405 goto err_free;
436f2ad0 3406 }
436f2ad0
HHZ
3407 flow_id->uobject = uobj;
3408 uobj->object = flow_id;
3409
436f2ad0
HHZ
3410 memset(&resp, 0, sizeof(resp));
3411 resp.flow_handle = uobj->id;
3412
f21519b2
YD
3413 err = ib_copy_to_udata(ucore,
3414 &resp, sizeof(resp));
3415 if (err)
436f2ad0 3416 goto err_copy;
436f2ad0 3417
fd3c7904
MB
3418 uobj_put_obj_read(qp);
3419 uobj_alloc_commit(uobj);
436f2ad0
HHZ
3420 kfree(flow_attr);
3421 if (cmd.flow_attr.num_of_specs)
3422 kfree(kern_flow_attr);
f21519b2 3423 return 0;
436f2ad0 3424err_copy:
436f2ad0
HHZ
3425 ib_destroy_flow(flow_id);
3426err_free:
3427 kfree(flow_attr);
3428err_put:
fd3c7904 3429 uobj_put_obj_read(qp);
436f2ad0 3430err_uobj:
fd3c7904 3431 uobj_alloc_abort(uobj);
436f2ad0
HHZ
3432err_free_attr:
3433 if (cmd.flow_attr.num_of_specs)
3434 kfree(kern_flow_attr);
3435 return err;
3436}
3437
f21519b2 3438int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
057aec0d 3439 struct ib_device *ib_dev,
f21519b2
YD
3440 struct ib_udata *ucore,
3441 struct ib_udata *uhw)
3442{
436f2ad0 3443 struct ib_uverbs_destroy_flow cmd;
436f2ad0
HHZ
3444 struct ib_uobject *uobj;
3445 int ret;
3446
6bcca3d4
YD
3447 if (ucore->inlen < sizeof(cmd))
3448 return -EINVAL;
3449
f21519b2
YD
3450 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3451 if (ret)
3452 return ret;
436f2ad0 3453
2782c2d3
YD
3454 if (cmd.comp_mask)
3455 return -EINVAL;
3456
fd3c7904
MB
3457 uobj = uobj_get_write(uobj_get_type(flow), cmd.flow_handle,
3458 file->ucontext);
3459 if (IS_ERR(uobj))
3460 return PTR_ERR(uobj);
436f2ad0 3461
fd3c7904 3462 ret = uobj_remove_commit(uobj);
f21519b2 3463 return ret;
436f2ad0
HHZ
3464}
3465
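/*
 * Shared SRQ creation path used by both create_srq and create_xsrq.
 * For IB_SRQT_XRC the XRCD and completion queue handles are looked up
 * and referenced in addition to the PD.
 */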
c89d1bed 3466static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
057aec0d 3467 struct ib_device *ib_dev,
c89d1bed
SH
3468 struct ib_uverbs_create_xsrq *cmd,
3469 struct ib_udata *udata)
f520ba5a 3470{
f520ba5a 3471 struct ib_uverbs_create_srq_resp resp;
8541f8de 3472 struct ib_usrq_object *obj;
f520ba5a
RD
3473 struct ib_pd *pd;
3474 struct ib_srq *srq;
8541f8de 3475 struct ib_uobject *uninitialized_var(xrcd_uobj);
f520ba5a
RD
3476 struct ib_srq_init_attr attr;
3477 int ret;
3478
fd3c7904
MB
3479 obj = (struct ib_usrq_object *)uobj_alloc(uobj_get_type(srq),
3480 file->ucontext);
3481 if (IS_ERR(obj))
3482 return PTR_ERR(obj);
f520ba5a 3483
8541f8de 3484 if (cmd->srq_type == IB_SRQT_XRC) {
fd3c7904
MB
3485 xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->xrcd_handle,
3486 file->ucontext);
3487 if (IS_ERR(xrcd_uobj)) {
8541f8de 3488 ret = -EINVAL;
5909ce54 3489 goto err;
8541f8de
SH
3490 }
3491
fd3c7904
MB
3492 attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
3493 if (!attr.ext.xrc.xrcd) {
3494 ret = -EINVAL;
3495 goto err_put_xrcd;
3496 }
3497
8541f8de
SH
3498 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
3499 atomic_inc(&obj->uxrcd->refcnt);
5909ce54 3500
fd3c7904
MB
3501 attr.ext.xrc.cq = uobj_get_obj_read(cq, cmd->cq_handle,
3502 file->ucontext);
5909ce54
RD
3503 if (!attr.ext.xrc.cq) {
3504 ret = -EINVAL;
3505 goto err_put_xrcd;
3506 }
3507 }
3508
fd3c7904 3509 pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
5909ce54
RD
3510 if (!pd) {
3511 ret = -EINVAL;
3512 goto err_put_cq;
8541f8de
SH
3513 }
3514
f520ba5a
RD
3515 attr.event_handler = ib_uverbs_srq_event_handler;
3516 attr.srq_context = file;
8541f8de
SH
3517 attr.srq_type = cmd->srq_type;
3518 attr.attr.max_wr = cmd->max_wr;
3519 attr.attr.max_sge = cmd->max_sge;
3520 attr.attr.srq_limit = cmd->srq_limit;
f520ba5a 3521
8541f8de
SH
3522 obj->uevent.events_reported = 0;
3523 INIT_LIST_HEAD(&obj->uevent.event_list);
f520ba5a 3524
8541f8de 3525 srq = pd->device->create_srq(pd, &attr, udata);
f520ba5a
RD
3526 if (IS_ERR(srq)) {
3527 ret = PTR_ERR(srq);
ec924b47 3528 goto err_put;
f520ba5a
RD
3529 }
3530
8541f8de
SH
3531 srq->device = pd->device;
3532 srq->pd = pd;
3533 srq->srq_type = cmd->srq_type;
3534 srq->uobject = &obj->uevent.uobject;
f520ba5a
RD
3535 srq->event_handler = attr.event_handler;
3536 srq->srq_context = attr.srq_context;
8541f8de
SH
3537
3538 if (cmd->srq_type == IB_SRQT_XRC) {
3539 srq->ext.xrc.cq = attr.ext.xrc.cq;
3540 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
3541 atomic_inc(&attr.ext.xrc.cq->usecnt);
3542 atomic_inc(&attr.ext.xrc.xrcd->usecnt);
3543 }
3544
f520ba5a
RD
3545 atomic_inc(&pd->usecnt);
3546 atomic_set(&srq->usecnt, 0);
3547
8541f8de 3548 obj->uevent.uobject.object = srq;
fd3c7904 3549 obj->uevent.uobject.user_handle = cmd->user_handle;
f520ba5a 3550
9ead190b 3551 memset(&resp, 0, sizeof resp);
8541f8de 3552 resp.srq_handle = obj->uevent.uobject.id;
ea88fd16
DB
3553 resp.max_wr = attr.attr.max_wr;
3554 resp.max_sge = attr.attr.max_sge;
8541f8de
SH
3555 if (cmd->srq_type == IB_SRQT_XRC)
3556 resp.srqn = srq->ext.xrc.srq_num;
f520ba5a 3557
8541f8de 3558 if (copy_to_user((void __user *) (unsigned long) cmd->response,
f520ba5a
RD
3559 &resp, sizeof resp)) {
3560 ret = -EFAULT;
9ead190b 3561 goto err_copy;
f520ba5a
RD
3562 }
3563
8541f8de 3564 if (cmd->srq_type == IB_SRQT_XRC) {
fd3c7904
MB
3565 uobj_put_read(xrcd_uobj);
3566 uobj_put_obj_read(attr.ext.xrc.cq);
8541f8de 3567 }
fd3c7904
MB
3568 uobj_put_obj_read(pd);
3569 uobj_alloc_commit(&obj->uevent.uobject);
f520ba5a 3570
8541f8de 3571 return 0;
f520ba5a 3572
9ead190b 3573err_copy:
f520ba5a
RD
3574 ib_destroy_srq(srq);
3575
ec924b47 3576err_put:
fd3c7904 3577 uobj_put_obj_read(pd);
8541f8de
SH
3578
3579err_put_cq:
3580 if (cmd->srq_type == IB_SRQT_XRC)
fd3c7904 3581 uobj_put_obj_read(attr.ext.xrc.cq);
8541f8de 3582
5909ce54
RD
3583err_put_xrcd:
3584 if (cmd->srq_type == IB_SRQT_XRC) {
3585 atomic_dec(&obj->uxrcd->refcnt);
fd3c7904 3586 uobj_put_read(xrcd_uobj);
5909ce54 3587 }
ec924b47 3588
9ead190b 3589err:
fd3c7904 3590 uobj_alloc_abort(&obj->uevent.uobject);
f520ba5a
RD
3591 return ret;
3592}
3593
8541f8de 3594ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
057aec0d 3595 struct ib_device *ib_dev,
8541f8de
SH
3596 const char __user *buf, int in_len,
3597 int out_len)
3598{
3599 struct ib_uverbs_create_srq cmd;
3600 struct ib_uverbs_create_xsrq xcmd;
3601 struct ib_uverbs_create_srq_resp resp;
3602 struct ib_udata udata;
3603 int ret;
3604
3605 if (out_len < sizeof resp)
3606 return -ENOSPC;
3607
3608 if (copy_from_user(&cmd, buf, sizeof cmd))
3609 return -EFAULT;
3610
3611 xcmd.response = cmd.response;
3612 xcmd.user_handle = cmd.user_handle;
3613 xcmd.srq_type = IB_SRQT_BASIC;
3614 xcmd.pd_handle = cmd.pd_handle;
3615 xcmd.max_wr = cmd.max_wr;
3616 xcmd.max_sge = cmd.max_sge;
3617 xcmd.srq_limit = cmd.srq_limit;
3618
3619 INIT_UDATA(&udata, buf + sizeof cmd,
3620 (unsigned long) cmd.response + sizeof resp,
3d943c9d
MD
3621 in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
3622 out_len - sizeof resp);
8541f8de 3623
057aec0d 3624 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
8541f8de
SH
3625 if (ret)
3626 return ret;
3627
3628 return in_len;
3629}
3630
3631ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
057aec0d 3632 struct ib_device *ib_dev,
8541f8de
SH
3633 const char __user *buf, int in_len, int out_len)
3634{
3635 struct ib_uverbs_create_xsrq cmd;
3636 struct ib_uverbs_create_srq_resp resp;
3637 struct ib_udata udata;
3638 int ret;
3639
3640 if (out_len < sizeof resp)
3641 return -ENOSPC;
3642
3643 if (copy_from_user(&cmd, buf, sizeof cmd))
3644 return -EFAULT;
3645
3646 INIT_UDATA(&udata, buf + sizeof cmd,
3647 (unsigned long) cmd.response + sizeof resp,
3d943c9d
MD
3648 in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
3649 out_len - sizeof resp);
8541f8de 3650
057aec0d 3651 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
8541f8de
SH
3652 if (ret)
3653 return ret;
3654
3655 return in_len;
3656}
3657
f520ba5a 3658ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
057aec0d 3659 struct ib_device *ib_dev,
f520ba5a
RD
3660 const char __user *buf, int in_len,
3661 int out_len)
3662{
3663 struct ib_uverbs_modify_srq cmd;
9bc57e2d 3664 struct ib_udata udata;
f520ba5a
RD
3665 struct ib_srq *srq;
3666 struct ib_srq_attr attr;
3667 int ret;
3668
3669 if (copy_from_user(&cmd, buf, sizeof cmd))
3670 return -EFAULT;
3671
9bc57e2d
RC
3672 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
3673 out_len);
3674
fd3c7904 3675 srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
9ead190b
RD
3676 if (!srq)
3677 return -EINVAL;
f520ba5a
RD
3678
3679 attr.max_wr = cmd.max_wr;
f520ba5a
RD
3680 attr.srq_limit = cmd.srq_limit;
3681
9bc57e2d 3682 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
f520ba5a 3683
fd3c7904 3684 uobj_put_obj_read(srq);
f520ba5a
RD
3685
3686 return ret ? ret : in_len;
3687}
3688
8bdb0e86 3689ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
057aec0d 3690 struct ib_device *ib_dev,
8bdb0e86
DB
3691 const char __user *buf,
3692 int in_len, int out_len)
3693{
3694 struct ib_uverbs_query_srq cmd;
3695 struct ib_uverbs_query_srq_resp resp;
3696 struct ib_srq_attr attr;
3697 struct ib_srq *srq;
3698 int ret;
3699
3700 if (out_len < sizeof resp)
3701 return -ENOSPC;
3702
3703 if (copy_from_user(&cmd, buf, sizeof cmd))
3704 return -EFAULT;
3705
fd3c7904 3706 srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
9ead190b
RD
3707 if (!srq)
3708 return -EINVAL;
8bdb0e86 3709
9ead190b 3710 ret = ib_query_srq(srq, &attr);
8bdb0e86 3711
fd3c7904 3712 uobj_put_obj_read(srq);
8bdb0e86
DB
3713
3714 if (ret)
9ead190b 3715 return ret;
8bdb0e86
DB
3716
3717 memset(&resp, 0, sizeof resp);
3718
3719 resp.max_wr = attr.max_wr;
3720 resp.max_sge = attr.max_sge;
3721 resp.srq_limit = attr.srq_limit;
3722
3723 if (copy_to_user((void __user *) (unsigned long) cmd.response,
3724 &resp, sizeof resp))
9ead190b 3725 return -EFAULT;
8bdb0e86 3726
9ead190b 3727 return in_len;
8bdb0e86
DB
3728}
3729
f520ba5a 3730ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
057aec0d 3731 struct ib_device *ib_dev,
f520ba5a
RD
3732 const char __user *buf, int in_len,
3733 int out_len)
3734{
63aaf647
RD
3735 struct ib_uverbs_destroy_srq cmd;
3736 struct ib_uverbs_destroy_srq_resp resp;
9ead190b 3737 struct ib_uobject *uobj;
63aaf647 3738 struct ib_srq *srq;
9ead190b 3739 struct ib_uevent_object *obj;
63aaf647 3740 int ret = -EINVAL;
846be90d 3741 enum ib_srq_type srq_type;
f520ba5a
RD
3742
3743 if (copy_from_user(&cmd, buf, sizeof cmd))
3744 return -EFAULT;
3745
fd3c7904
MB
3746 uobj = uobj_get_write(uobj_get_type(srq), cmd.srq_handle,
3747 file->ucontext);
3748 if (IS_ERR(uobj))
3749 return PTR_ERR(uobj);
3750
9ead190b
RD
3751 srq = uobj->object;
3752 obj = container_of(uobj, struct ib_uevent_object, uobject);
846be90d 3753 srq_type = srq->srq_type;
fd3c7904
MB
3754 /*
3755 * Make sure we don't free the memory in remove_commit as we still
3756 * need the uobject memory to create the response.
3757 */
3758 uverbs_uobject_get(uobj);
63aaf647 3759
fd3c7904 3760 memset(&resp, 0, sizeof(resp));
f520ba5a 3761
fd3c7904
MB
3762 ret = uobj_remove_commit(uobj);
3763 if (ret) {
3764 uverbs_uobject_put(uobj);
9ead190b 3765 return ret;
846be90d 3766 }
9ead190b 3767 resp.events_reported = obj->events_reported;
fd3c7904
MB
3768 uverbs_uobject_put(uobj);
3769 if (copy_to_user((void __user *)(unsigned long)cmd.response,
3770 &resp, sizeof(resp)))
3771 return -EFAULT;
63aaf647 3772
fd3c7904 3773 return in_len;
f520ba5a 3774}
02d1aa7a
EC
3775
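/*
 * Extended QUERY_DEVICE handler: unlike the legacy command, the response
 * is built up incrementally.  Each optional capability block (ODP caps,
 * timestamp mask, HCA core clock, extended device cap flags, RSS caps,
 * max_wq_type_rq, raw packet caps) is appended only if the caller's
 * output buffer is large enough to hold it, and resp.response_length
 * records how much was actually filled in.
 */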
3776int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
057aec0d 3777 struct ib_device *ib_dev,
02d1aa7a
EC
3778 struct ib_udata *ucore,
3779 struct ib_udata *uhw)
3780{
2953f425 3781 struct ib_uverbs_ex_query_device_resp resp = { {0} };
02d1aa7a 3782 struct ib_uverbs_ex_query_device cmd;
2953f425 3783 struct ib_device_attr attr = {0};
02d1aa7a
EC
3784 int err;
3785
02d1aa7a
EC
3786 if (ucore->inlen < sizeof(cmd))
3787 return -EINVAL;
3788
3789 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3790 if (err)
3791 return err;
3792
3793 if (cmd.comp_mask)
3794 return -EINVAL;
3795
3796 if (cmd.reserved)
3797 return -EINVAL;
3798
f4056bfd 3799 resp.response_length = offsetof(typeof(resp), odp_caps);
02d1aa7a
EC
3800
3801 if (ucore->outlen < resp.response_length)
3802 return -ENOSPC;
3803
057aec0d 3804 err = ib_dev->query_device(ib_dev, &attr, uhw);
02d1aa7a
EC
3805 if (err)
3806 return err;
3807
057aec0d 3808 copy_query_dev_fields(file, ib_dev, &resp.base, &attr);
02d1aa7a 3809
f4056bfd
HE
3810 if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
3811 goto end;
3812
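	/*
	 * If on-demand paging support is compiled out, odp_caps stays zeroed
	 * but is still reported, so response_length grows either way.
	 */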
3813#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3814 resp.odp_caps.general_caps = attr.odp_caps.general_caps;
3815 resp.odp_caps.per_transport_caps.rc_odp_caps =
3816 attr.odp_caps.per_transport_caps.rc_odp_caps;
3817 resp.odp_caps.per_transport_caps.uc_odp_caps =
3818 attr.odp_caps.per_transport_caps.uc_odp_caps;
3819 resp.odp_caps.per_transport_caps.ud_odp_caps =
3820 attr.odp_caps.per_transport_caps.ud_odp_caps;
f4056bfd
HE
3821#endif
3822 resp.response_length += sizeof(resp.odp_caps);
3823
24306dc6
MB
3824 if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
3825 goto end;
3826
3827 resp.timestamp_mask = attr.timestamp_mask;
3828 resp.response_length += sizeof(resp.timestamp_mask);
3829
3830 if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
3831 goto end;
3832
3833 resp.hca_core_clock = attr.hca_core_clock;
3834 resp.response_length += sizeof(resp.hca_core_clock);
3835
0b24e5ac
MD
3836 if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
3837 goto end;
3838
3839 resp.device_cap_flags_ex = attr.device_cap_flags;
3840 resp.response_length += sizeof(resp.device_cap_flags_ex);
47adf2f4
YH
3841
3842 if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
3843 goto end;
3844
3845 resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
3846 resp.rss_caps.max_rwq_indirection_tables =
3847 attr.rss_caps.max_rwq_indirection_tables;
3848 resp.rss_caps.max_rwq_indirection_table_size =
3849 attr.rss_caps.max_rwq_indirection_table_size;
3850
3851 resp.response_length += sizeof(resp.rss_caps);
3852
3853 if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
3854 goto end;
3855
3856 resp.max_wq_type_rq = attr.max_wq_type_rq;
3857 resp.response_length += sizeof(resp.max_wq_type_rq);
5f23d426
NO
3858
3859 if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps))
3860 goto end;
3861
3862 resp.raw_packet_caps = attr.raw_packet_caps;
3863 resp.response_length += sizeof(resp.raw_packet_caps);
f4056bfd 3864end:
02d1aa7a 3865 err = ib_copy_to_udata(ucore, &resp, resp.response_length);
2953f425 3866 return err;
02d1aa7a 3867}