/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"

static struct ib_uverbs_completion_event_file *
ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
{
	struct ib_uobject *uobj = uobj_get_read(uobj_get_type(comp_channel),
						fd, context);
	struct ib_uobject_file *uobj_file;

	if (IS_ERR(uobj))
		return (void *)uobj;

	uverbs_uobject_get(uobj);
	uobj_put_read(uobj);

	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
	return container_of(uobj_file, struct ib_uverbs_completion_event_file,
			    uobj_file);
}

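/*
 * GET_CONTEXT: charge the rdma cgroup, ask the driver for a new ucontext
 * for this uverbs file and hand an async event fd back to userspace.
 * Only one ucontext may exist per file; file->mutex guards the check.
 */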
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_ucontext *ucontext;
	struct file *filp;
	struct ib_rdmacg_object cg_obj;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
	if (ret)
		goto err;

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err_alloc;
	}

	ucontext->device = ib_dev;
	ucontext->cg_obj = cg_obj;
	/* ufile is required when some objects are released */
	ucontext->ufile = file;
	uverbs_initialize_ucontext(ucontext);

	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT_CACHED;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err_alloc:
	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

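/*
 * QUERY_PORT: translate the driver's ib_port_attr into the uverbs response.
 * On ports using OPA addressing the LIDs are folded back into the 16-bit
 * IB unicast LID space expected by the ABI.
 */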
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;

	if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) {
		resp.lid = OPA_TO_IB_UCAST_LID(attr.lid);
		resp.sm_lid = OPA_TO_IB_UCAST_LID(attr.sm_lid);
	} else {
		resp.lid = ib_lid_cpu16(attr.lid);
		resp.sm_lid = ib_lid_cpu16(attr.sm_lid);
	}
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

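/*
 * ALLOC_PD / DEALLOC_PD: wrap a driver protection domain in a uobject and
 * return its handle; destruction goes through uobj_remove_commit().
 */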
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	uobj = uobj_alloc(uobj_get_type(pd), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dealloc_pd(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(pd), cmd.pd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

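/*
 * XRCDs opened through a file descriptor are tracked in a per-device
 * rb-tree keyed by inode, so processes opening the same file share a
 * single ib_xrcd.
 */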
struct xrcd_table_entry {
	struct rb_node node;
	struct ib_xrcd *xrcd;
	struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}

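/*
 * OPEN_XRCD: look up (or create) the XRCD tied to the inode behind cmd.fd,
 * or allocate an anonymous XRCD when cmd.fd == -1.
 */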
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = (struct ib_uxrcd_object *)uobj_alloc(uobj_get_type(xrcd),
						   file->ucontext);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_tree_mutex_unlock;
	}

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_dealloc_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	uobj_alloc_commit(&obj->uobject);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_dealloc_xrcd:
	ib_dealloc_xrcd(xrcd);

err:
	uobj_alloc_abort(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
			      file->ucontext);
	if (IS_ERR(uobj)) {
		mutex_unlock(&file->device->xrcd_tree_mutex);
		return PTR_ERR(uobj);
	}

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			   struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why)
{
	struct inode *inode;
	int ret;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return 0;

	ret = ib_dealloc_xrcd(xrcd);

	if (why == RDMA_REMOVE_DESTROY && ret)
		atomic_inc(&xrcd->usecnt);
	else if (inode)
		xrcd_table_delete(dev, inode);

	return ret;
}

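/*
 * REG_MR: register a user memory region against the given PD. The start
 * address and the requested HCA virtual address must share the same page
 * offset, and ODP registration is only allowed if the device supports it.
 */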
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = uobj_alloc(uobj_get_type(mr), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mr;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dereg_mr(mr);

err_put:
	uobj_put_obj_read(pd);

err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

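/*
 * REREG_MR: change an existing MR's translation, access flags and/or PD,
 * as selected by the IB_MR_REREG_* bits in cmd.flags.
 */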
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata udata;
	struct ib_pd *pd = NULL;
	struct ib_mr *mr;
	struct ib_pd *old_pd;
	int ret;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

put_uobjs:
	uobj_put_write(uobj);

	return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

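/*
 * ALLOC_MW / DEALLOC_MW: allocate a memory window on the given PD and
 * return its rkey and handle to userspace.
 */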
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(mw), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	uverbs_dealloc_mw(mw);
err_put:
	uobj_put_obj_read(pd);
err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mw), cmd.mw_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

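/*
 * CREATE_COMP_CHANNEL: create a completion event file; the fd returned to
 * userspace is the id of the fd-type uobject.
 */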
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct ib_uobject *uobj;
	struct ib_uverbs_completion_event_file *ev_file;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(comp_channel), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	resp.fd = uobj->id;

	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
			       uobj_file.uobj);
	ib_uverbs_init_event_queue(&ev_file->ev_queue);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		uobj_alloc_abort(uobj);
		return -EFAULT;
	}

	uobj_alloc_commit(uobj);
	return in_len;
}

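/*
 * Common CQ creation path shared by the plain and extended create-CQ
 * commands; the cb() argument copies the (possibly extended) response
 * back to userspace.
 */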
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object *obj;
	struct ib_uverbs_completion_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = (struct ib_ucq_object *)uobj_alloc(uobj_get_type(cq),
						 file->ucontext);
	if (IS_ERR(obj))
		return obj;

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel,
						     file->ucontext);
		if (IS_ERR(ev_file)) {
			ret = PTR_ERR(ev_file);
			goto err;
		}
	}

	obj->uobject.user_handle = cmd->user_handle;
	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	uobj_alloc_commit(&obj->uobject);

	return obj;

err_cb:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	uobj_alloc_abort(&obj->uobject);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_udata uhw;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), sizeof(resp));

	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq cmd;
	struct ib_ucq_object *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp = {};
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	uobj_put_obj_read(cq);

	return ret ? ret : in_len;
}

static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
			   struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	if (rdma_cap_opa_ah(ib_dev, wc->port_num))
		tmp.slid = OPA_TO_IB_UCAST_LID(wc->slid);
	else
		tmp.slid = ib_lid_cpu16(wc->slid);
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

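/*
 * POLL_CQ: poll up to cmd.ne completions one at a time and copy each
 * work completion into the response buffer right after the header.
 */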
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = u64_to_user_ptr(cmd.response);
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(ib_dev, data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	uobj_put_obj_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	uobj_put_obj_read(cq);

	return in_len;
}

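/*
 * DESTROY_CQ: tear the CQ down via uobj_remove_commit(); an extra uobject
 * reference is held so the event counters can still be reported afterwards.
 */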
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(cq), cmd.cq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);
	cq = uobj->object;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	uverbs_uobject_put(uobj);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

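/*
 * Common QP creation path shared by the plain and extended create-QP
 * commands: resolve every referenced uobject (PD, CQs, SRQ, XRCD, RWQ
 * indirection table) before calling into the driver.
 */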
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	char *buf;
	struct ib_qp_init_attr attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	obj->uxrcd = NULL;
	obj->uevent.uobject.user_handle = cmd->user_handle;
	mutex_init(&obj->mcast_lock);

	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
	    (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = uobj_get_obj_read(rwq_ind_table,
					    cmd->rwq_ind_tbl_handle,
					    file->ucontext);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if (cmd_sz > sizeof(*cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(*cmd),
				 cmd_sz - sizeof(*cmd))) {
		ret = -EOPNOTSUPP;
		goto err_put;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->pd_handle,
					  file->ucontext);

		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err_put;
		}

		xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = uobj_get_obj_read(srq, cmd->srq_handle,
							file->ucontext);
				if (!srq || srq->srq_type == IB_SRQT_XRC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = uobj_get_obj_read(cq, cmd->recv_cq_handle,
								file->ucontext);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = uobj_get_obj_read(cq, cmd->send_cq_handle,
						file->ucontext);
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					     IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd->qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd->max_send_wr;
	attr.cap.max_recv_wr = cmd->max_recv_wr;
	attr.cap.max_send_sge = cmd->max_send_sge;
	attr.cap.max_recv_sge = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS |
				  IB_QP_CREATE_CVLAN_STRIPPING |
				  IB_QP_CREATE_SOURCE_QPN)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
		if (!capable(CAP_NET_RAW)) {
			ret = -EPERM;
			goto err_put;
		}

		attr.source_qpn = cmd->source_qpn;
	}

	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, uhw);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		ret = ib_create_qp_security(qp, device);
		if (ret)
			goto err_cb;

		qp->real_qp = qp;
		qp->device = device;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->rwq_ind_tbl = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		qp->port = 0;
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn = qp->qp_num;
	resp.base.qp_handle = obj->uevent.uobject.id;
	resp.base.max_recv_sge = attr.cap.max_recv_sge;
	resp.base.max_send_sge = attr.cap.max_send_sge;
	resp.base.max_recv_wr = attr.cap.max_recv_wr;
	resp.base.max_send_wr = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;
err_cb:
	ib_destroy_qp(qp);

err_put:
	if (!IS_ERR(xrcd_uobj))
		uobj_put_read(xrcd_uobj);
	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata ucore;
	struct ib_udata uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), resp_size);
	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + resp_size,
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}

static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}

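/*
 * OPEN_QP: open a reference to an existing XRC target QP, identified by
 * its qpn inside the given XRCD.
 */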
42849b26 1723ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
057aec0d 1724 struct ib_device *ib_dev,
42849b26
SH
1725 const char __user *buf, int in_len, int out_len)
1726{
1727 struct ib_uverbs_open_qp cmd;
1728 struct ib_uverbs_create_qp_resp resp;
1729 struct ib_udata udata;
1730 struct ib_uqp_object *obj;
1731 struct ib_xrcd *xrcd;
1732 struct ib_uobject *uninitialized_var(xrcd_uobj);
1733 struct ib_qp *qp;
1734 struct ib_qp_open_attr attr;
1735 int ret;
1736
1737 if (out_len < sizeof resp)
1738 return -ENOSPC;
1739
1740 if (copy_from_user(&cmd, buf, sizeof cmd))
1741 return -EFAULT;
1742
40a20339
AB
1743 ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
1744 u64_to_user_ptr(cmd.response) + sizeof(resp),
e093111d
AR
1745 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
1746 out_len - sizeof(resp));
42849b26 1747
fd3c7904
MB
1748 obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
1749 file->ucontext);
1750 if (IS_ERR(obj))
1751 return PTR_ERR(obj);
42849b26 1752
fd3c7904
MB
1753 xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd.pd_handle,
1754 file->ucontext);
1755 if (IS_ERR(xrcd_uobj)) {
1756 ret = -EINVAL;
1757 goto err_put;
1758 }
42849b26 1759
fd3c7904 1760 xrcd = (struct ib_xrcd *)xrcd_uobj->object;
42849b26
SH
1761 if (!xrcd) {
1762 ret = -EINVAL;
fd3c7904 1763 goto err_xrcd;
42849b26
SH
1764 }
1765
1766 attr.event_handler = ib_uverbs_qp_event_handler;
1767 attr.qp_context = file;
1768 attr.qp_num = cmd.qpn;
1769 attr.qp_type = cmd.qp_type;
1770
1771 obj->uevent.events_reported = 0;
1772 INIT_LIST_HEAD(&obj->uevent.event_list);
1773 INIT_LIST_HEAD(&obj->mcast_list);
1774
1775 qp = ib_open_qp(xrcd, &attr);
1776 if (IS_ERR(qp)) {
1777 ret = PTR_ERR(qp);
fd3c7904 1778 goto err_xrcd;
42849b26
SH
1779 }
1780
42849b26 1781 obj->uevent.uobject.object = qp;
fd3c7904 1782 obj->uevent.uobject.user_handle = cmd.user_handle;
42849b26
SH
1783
1784 memset(&resp, 0, sizeof resp);
1785 resp.qpn = qp->qp_num;
1786 resp.qp_handle = obj->uevent.uobject.id;
1787
40a20339 1788 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
42849b26 1789 ret = -EFAULT;
fd3c7904 1790 goto err_destroy;
42849b26
SH
1791 }
1792
846be90d
YH
1793 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
1794 atomic_inc(&obj->uxrcd->refcnt);
fd3c7904
MB
1795 qp->uobject = &obj->uevent.uobject;
1796 uobj_put_read(xrcd_uobj);
42849b26 1797
42849b26 1798
fd3c7904 1799 uobj_alloc_commit(&obj->uevent.uobject);
42849b26
SH
1800
1801 return in_len;
1802
42849b26
SH
1803err_destroy:
1804 ib_destroy_qp(qp);
fd3c7904
MB
1805err_xrcd:
1806 uobj_put_read(xrcd_uobj);
42849b26 1807err_put:
fd3c7904 1808 uobj_alloc_abort(&obj->uevent.uobject);
42849b26
SH
1809 return ret;
1810}
1811
89caa053
PP
1812static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
1813 struct rdma_ah_attr *rdma_attr)
1814{
1815 const struct ib_global_route *grh;
1816
1817 uverb_attr->dlid = rdma_ah_get_dlid(rdma_attr);
1818 uverb_attr->sl = rdma_ah_get_sl(rdma_attr);
1819 uverb_attr->src_path_bits = rdma_ah_get_path_bits(rdma_attr);
1820 uverb_attr->static_rate = rdma_ah_get_static_rate(rdma_attr);
1821 uverb_attr->is_global = !!(rdma_ah_get_ah_flags(rdma_attr) &
1822 IB_AH_GRH);
1823 if (uverb_attr->is_global) {
1824 grh = rdma_ah_read_grh(rdma_attr);
1825 memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
1826 uverb_attr->flow_label = grh->flow_label;
1827 uverb_attr->sgid_index = grh->sgid_index;
1828 uverb_attr->hop_limit = grh->hop_limit;
1829 uverb_attr->traffic_class = grh->traffic_class;
1830 }
1831 uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr);
1832}
1833
7ccc9a24 1834ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
057aec0d 1835 struct ib_device *ib_dev,
7ccc9a24
DB
1836 const char __user *buf, int in_len,
1837 int out_len)
1838{
1839 struct ib_uverbs_query_qp cmd;
1840 struct ib_uverbs_query_qp_resp resp;
1841 struct ib_qp *qp;
1842 struct ib_qp_attr *attr;
1843 struct ib_qp_init_attr *init_attr;
1844 int ret;
1845
1846 if (copy_from_user(&cmd, buf, sizeof cmd))
1847 return -EFAULT;
1848
1849 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1850 init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
1851 if (!attr || !init_attr) {
1852 ret = -ENOMEM;
1853 goto out;
1854 }
1855
fd3c7904 1856 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
9ead190b 1857 if (!qp) {
7ccc9a24 1858 ret = -EINVAL;
9ead190b
RD
1859 goto out;
1860 }
1861
1862 ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
7ccc9a24 1863
fd3c7904 1864 uobj_put_obj_read(qp);
7ccc9a24
DB
1865
1866 if (ret)
1867 goto out;
1868
1869 memset(&resp, 0, sizeof resp);
1870
1871 resp.qp_state = attr->qp_state;
1872 resp.cur_qp_state = attr->cur_qp_state;
1873 resp.path_mtu = attr->path_mtu;
1874 resp.path_mig_state = attr->path_mig_state;
1875 resp.qkey = attr->qkey;
1876 resp.rq_psn = attr->rq_psn;
1877 resp.sq_psn = attr->sq_psn;
1878 resp.dest_qp_num = attr->dest_qp_num;
1879 resp.qp_access_flags = attr->qp_access_flags;
1880 resp.pkey_index = attr->pkey_index;
1881 resp.alt_pkey_index = attr->alt_pkey_index;
0b26c88f 1882 resp.sq_draining = attr->sq_draining;
7ccc9a24
DB
1883 resp.max_rd_atomic = attr->max_rd_atomic;
1884 resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
1885 resp.min_rnr_timer = attr->min_rnr_timer;
1886 resp.port_num = attr->port_num;
1887 resp.timeout = attr->timeout;
1888 resp.retry_cnt = attr->retry_cnt;
1889 resp.rnr_retry = attr->rnr_retry;
1890 resp.alt_port_num = attr->alt_port_num;
1891 resp.alt_timeout = attr->alt_timeout;
1892
89caa053
PP
1893 copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
1894 copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);
7ccc9a24
DB
1895
1896 resp.max_send_wr = init_attr->cap.max_send_wr;
1897 resp.max_recv_wr = init_attr->cap.max_recv_wr;
1898 resp.max_send_sge = init_attr->cap.max_send_sge;
1899 resp.max_recv_sge = init_attr->cap.max_recv_sge;
1900 resp.max_inline_data = init_attr->cap.max_inline_data;
27d56300 1901 resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
7ccc9a24 1902
40a20339 1903 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
7ccc9a24
DB
1904 ret = -EFAULT;
1905
1906out:
1907 kfree(attr);
1908 kfree(init_attr);
1909
1910 return ret ? ret : in_len;
1911}
1912
9977f4f6
SH
1913/* Remove ignored fields set in the attribute mask */
1914static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
1915{
1916 switch (qp_type) {
1917 case IB_QPT_XRC_INI:
1918 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
b93f3c18
SH
1919 case IB_QPT_XRC_TGT:
1920 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
1921 IB_QP_RNR_RETRY);
9977f4f6
SH
1922 default:
1923 return mask;
1924 }
1925}
1926
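/*
 * Inverse of copy_ah_attr_to_uverbs(): build a kernel rdma_ah_attr from
 * the user-supplied destination, clearing the AH flags when no GRH was
 * requested.
 */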
89caa053
PP
1927static void copy_ah_attr_from_uverbs(struct ib_device *dev,
1928 struct rdma_ah_attr *rdma_attr,
1929 struct ib_uverbs_qp_dest *uverb_attr)
1930{
1931 rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
1932 if (uverb_attr->is_global) {
1933 rdma_ah_set_grh(rdma_attr, NULL,
1934 uverb_attr->flow_label,
1935 uverb_attr->sgid_index,
1936 uverb_attr->hop_limit,
1937 uverb_attr->traffic_class);
1938 rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
1939 } else {
1940 rdma_ah_set_ah_flags(rdma_attr, 0);
1941 }
1942 rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
1943 rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
1944 rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
1945 rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
1946 rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
1947 rdma_ah_set_make_grd(rdma_attr, false);
1948}
1949
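/*
 * Common MODIFY_QP path shared by the legacy and extended commands:
 * translate the user attribute block, validate the port number when
 * IB_QP_PORT is set, and hand the result to ib_modify_qp_with_udata().
 */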
189aba99
BW
1950static int modify_qp(struct ib_uverbs_file *file,
1951 struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
bc38a6ab 1952{
189aba99
BW
1953 struct ib_qp_attr *attr;
1954 struct ib_qp *qp;
1955 int ret;
9bc57e2d 1956
bc38a6ab
RD
1957 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1958 if (!attr)
1959 return -ENOMEM;
1960
fd3c7904 1961 qp = uobj_get_obj_read(qp, cmd->base.qp_handle, file->ucontext);
9ead190b 1962 if (!qp) {
bc38a6ab
RD
1963 ret = -EINVAL;
1964 goto out;
1965 }
1966
5a7a88f1
IM
1967 if ((cmd->base.attr_mask & IB_QP_PORT) &&
1968 !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
5ecce4c9
BP
1969 ret = -EINVAL;
1970 goto release_qp;
1971 }
1972
189aba99
BW
1973 attr->qp_state = cmd->base.qp_state;
1974 attr->cur_qp_state = cmd->base.cur_qp_state;
1975 attr->path_mtu = cmd->base.path_mtu;
1976 attr->path_mig_state = cmd->base.path_mig_state;
1977 attr->qkey = cmd->base.qkey;
1978 attr->rq_psn = cmd->base.rq_psn;
1979 attr->sq_psn = cmd->base.sq_psn;
1980 attr->dest_qp_num = cmd->base.dest_qp_num;
1981 attr->qp_access_flags = cmd->base.qp_access_flags;
1982 attr->pkey_index = cmd->base.pkey_index;
1983 attr->alt_pkey_index = cmd->base.alt_pkey_index;
1984 attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
1985 attr->max_rd_atomic = cmd->base.max_rd_atomic;
1986 attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
1987 attr->min_rnr_timer = cmd->base.min_rnr_timer;
1988 attr->port_num = cmd->base.port_num;
1989 attr->timeout = cmd->base.timeout;
1990 attr->retry_cnt = cmd->base.retry_cnt;
1991 attr->rnr_retry = cmd->base.rnr_retry;
1992 attr->alt_port_num = cmd->base.alt_port_num;
1993 attr->alt_timeout = cmd->base.alt_timeout;
1994 attr->rate_limit = cmd->rate_limit;
1995
498ca3c8 1996 if (cmd->base.attr_mask & IB_QP_AV)
89caa053
PP
1997 copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
1998 &cmd->base.dest);
189aba99 1999
498ca3c8 2000 if (cmd->base.attr_mask & IB_QP_ALT_PATH)
89caa053
PP
2001 copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
2002 &cmd->base.alt_dest);
bc38a6ab 2003
f7c8f2e9
PP
2004 ret = ib_modify_qp_with_udata(qp, attr,
2005 modify_qp_mask(qp->qp_type,
2006 cmd->base.attr_mask),
2007 udata);
9ead190b 2008
0fb8bcf0 2009release_qp:
fd3c7904 2010 uobj_put_obj_read(qp);
bc38a6ab 2011out:
bc38a6ab
RD
2012 kfree(attr);
2013
2014 return ret;
2015}
2016
189aba99
BW
2017ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
2018 struct ib_device *ib_dev,
2019 const char __user *buf, int in_len,
2020 int out_len)
2021{
2022 struct ib_uverbs_ex_modify_qp cmd = {};
2023 struct ib_udata udata;
2024 int ret;
2025
2026 if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
2027 return -EFAULT;
2028
2029 if (cmd.base.attr_mask &
2030 ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
2031 return -EOPNOTSUPP;
2032
40a20339 2033 ib_uverbs_init_udata(&udata, buf + sizeof(cmd.base), NULL,
e093111d
AR
2034 in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr),
2035 out_len);
189aba99
BW
2036
2037 ret = modify_qp(file, &cmd, &udata);
2038 if (ret)
2039 return ret;
2040
2041 return in_len;
2042}
2043
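/*
 * Extended MODIFY_QP: accepts the full (non-legacy) attribute mask and
 * rejects trailing input bytes that are not zeroed.
 */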
2044int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
2045 struct ib_device *ib_dev,
2046 struct ib_udata *ucore,
2047 struct ib_udata *uhw)
2048{
2049 struct ib_uverbs_ex_modify_qp cmd = {};
2050 int ret;
2051
2052 /*
2053 * Last bit is reserved for extending the attr_mask by
2054 * using another field.
2055 */
2056 BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));
2057
2058 if (ucore->inlen < sizeof(cmd.base))
2059 return -EINVAL;
2060
2061 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
2062 if (ret)
2063 return ret;
2064
2065 if (cmd.base.attr_mask &
2066 ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
2067 return -EOPNOTSUPP;
2068
2069 if (ucore->inlen > sizeof(cmd)) {
2070 if (ib_is_udata_cleared(ucore, sizeof(cmd),
2071 ucore->inlen - sizeof(cmd)))
2072 return -EOPNOTSUPP;
2073 }
2074
2075 ret = modify_qp(file, &cmd, uhw);
2076
2077 return ret;
2078}
2079
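/*
 * DESTROY_QP: grab the uobject for writing, destroy the QP through
 * uobj_remove_commit(), and report the number of async events raised
 * on it back to user space.
 */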
bc38a6ab 2080ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
057aec0d 2081 struct ib_device *ib_dev,
bc38a6ab
RD
2082 const char __user *buf, int in_len,
2083 int out_len)
2084{
63aaf647
RD
2085 struct ib_uverbs_destroy_qp cmd;
2086 struct ib_uverbs_destroy_qp_resp resp;
9ead190b 2087 struct ib_uobject *uobj;
9ead190b 2088 struct ib_uqp_object *obj;
63aaf647 2089 int ret = -EINVAL;
bc38a6ab
RD
2090
2091 if (copy_from_user(&cmd, buf, sizeof cmd))
2092 return -EFAULT;
2093
63aaf647
RD
2094 memset(&resp, 0, sizeof resp);
2095
fd3c7904
MB
2096 uobj = uobj_get_write(uobj_get_type(qp), cmd.qp_handle,
2097 file->ucontext);
2098 if (IS_ERR(uobj))
2099 return PTR_ERR(uobj);
2100
9ead190b 2101 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
fd3c7904
MB
2102 /*
2103 * Make sure we don't free the memory in remove_commit as we still
2104 * need the uobject memory to create the response.
2105 */
2106 uverbs_uobject_get(uobj);
f4e40156 2107
fd3c7904
MB
2108 ret = uobj_remove_commit(uobj);
2109 if (ret) {
2110 uverbs_uobject_put(uobj);
9ead190b 2111 return ret;
fd3c7904 2112 }
63aaf647 2113
9ead190b 2114 resp.events_reported = obj->uevent.events_reported;
fd3c7904 2115 uverbs_uobject_put(uobj);
bc38a6ab 2116
40a20339 2117 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
9ead190b 2118 return -EFAULT;
bc38a6ab 2119
9ead190b 2120 return in_len;
bc38a6ab
RD
2121}
2122
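/*
 * Allocate a work request together with room for its scatter/gather
 * list, refusing sge counts that would overflow the allocation size.
 */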
e622f2f4
CH
2123static void *alloc_wr(size_t wr_size, __u32 num_sge)
2124{
4f7f4dcf
VT
2125 if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
2126 sizeof (struct ib_sge))
2127 return NULL;
2128
e622f2f4
CH
2129 return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
2130 num_sge * sizeof (struct ib_sge), GFP_KERNEL);
4f7f4dcf 2131}
e622f2f4 2132
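/*
 * POST_SEND: the user buffer is laid out as
 *
 *	struct ib_uverbs_post_send cmd;
 *	wr[wr_count]   - each cmd.wqe_size bytes
 *	sge[sge_count] - struct ib_uverbs_sge entries
 *
 * Each WR is rebuilt as the matching kernel type (UD, RDMA, atomic or
 * plain send), chained and posted; on failure resp.bad_wr counts the
 * WRs up to and including the one that was rejected.
 */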
67cdb40c 2133ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
057aec0d 2134 struct ib_device *ib_dev,
a74cd4af
RD
2135 const char __user *buf, int in_len,
2136 int out_len)
67cdb40c
RD
2137{
2138 struct ib_uverbs_post_send cmd;
2139 struct ib_uverbs_post_send_resp resp;
2140 struct ib_uverbs_send_wr *user_wr;
2141 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
2142 struct ib_qp *qp;
2143 int i, sg_ind;
9ead190b 2144 int is_ud;
67cdb40c 2145 ssize_t ret = -EINVAL;
1d784b89 2146 size_t next_size;
67cdb40c
RD
2147
2148 if (copy_from_user(&cmd, buf, sizeof cmd))
2149 return -EFAULT;
2150
2151 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
2152 cmd.sge_count * sizeof (struct ib_uverbs_sge))
2153 return -EINVAL;
2154
2155 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
2156 return -EINVAL;
2157
2158 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
2159 if (!user_wr)
2160 return -ENOMEM;
2161
fd3c7904 2162 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
9ead190b 2163 if (!qp)
67cdb40c
RD
2164 goto out;
2165
9ead190b 2166 is_ud = qp->qp_type == IB_QPT_UD;
67cdb40c
RD
2167 sg_ind = 0;
2168 last = NULL;
2169 for (i = 0; i < cmd.wr_count; ++i) {
2170 if (copy_from_user(user_wr,
2171 buf + sizeof cmd + i * cmd.wqe_size,
2172 cmd.wqe_size)) {
2173 ret = -EFAULT;
9ead190b 2174 goto out_put;
67cdb40c
RD
2175 }
2176
2177 if (user_wr->num_sge + sg_ind > cmd.sge_count) {
2178 ret = -EINVAL;
9ead190b 2179 goto out_put;
67cdb40c
RD
2180 }
2181
e622f2f4
CH
2182 if (is_ud) {
2183 struct ib_ud_wr *ud;
2184
2185 if (user_wr->opcode != IB_WR_SEND &&
2186 user_wr->opcode != IB_WR_SEND_WITH_IMM) {
2187 ret = -EINVAL;
2188 goto out_put;
2189 }
2190
1d784b89
MM
2191 next_size = sizeof(*ud);
2192 ud = alloc_wr(next_size, user_wr->num_sge);
e622f2f4
CH
2193 if (!ud) {
2194 ret = -ENOMEM;
2195 goto out_put;
2196 }
2197
fd3c7904
MB
2198 ud->ah = uobj_get_obj_read(ah, user_wr->wr.ud.ah,
2199 file->ucontext);
e622f2f4
CH
2200 if (!ud->ah) {
2201 kfree(ud);
2202 ret = -EINVAL;
2203 goto out_put;
2204 }
2205 ud->remote_qpn = user_wr->wr.ud.remote_qpn;
2206 ud->remote_qkey = user_wr->wr.ud.remote_qkey;
2207
2208 next = &ud->wr;
2209 } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2210 user_wr->opcode == IB_WR_RDMA_WRITE ||
2211 user_wr->opcode == IB_WR_RDMA_READ) {
2212 struct ib_rdma_wr *rdma;
2213
1d784b89
MM
2214 next_size = sizeof(*rdma);
2215 rdma = alloc_wr(next_size, user_wr->num_sge);
e622f2f4
CH
2216 if (!rdma) {
2217 ret = -ENOMEM;
2218 goto out_put;
2219 }
2220
2221 rdma->remote_addr = user_wr->wr.rdma.remote_addr;
2222 rdma->rkey = user_wr->wr.rdma.rkey;
2223
2224 next = &rdma->wr;
2225 } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2226 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2227 struct ib_atomic_wr *atomic;
2228
1d784b89
MM
2229 next_size = sizeof(*atomic);
2230 atomic = alloc_wr(next_size, user_wr->num_sge);
e622f2f4
CH
2231 if (!atomic) {
2232 ret = -ENOMEM;
2233 goto out_put;
2234 }
2235
2236 atomic->remote_addr = user_wr->wr.atomic.remote_addr;
2237 atomic->compare_add = user_wr->wr.atomic.compare_add;
2238 atomic->swap = user_wr->wr.atomic.swap;
2239 atomic->rkey = user_wr->wr.atomic.rkey;
2240
2241 next = &atomic->wr;
2242 } else if (user_wr->opcode == IB_WR_SEND ||
2243 user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2244 user_wr->opcode == IB_WR_SEND_WITH_INV) {
1d784b89
MM
2245 next_size = sizeof(*next);
2246 next = alloc_wr(next_size, user_wr->num_sge);
e622f2f4
CH
2247 if (!next) {
2248 ret = -ENOMEM;
2249 goto out_put;
2250 }
2251 } else {
2252 ret = -EINVAL;
9ead190b 2253 goto out_put;
67cdb40c
RD
2254 }
2255
e622f2f4
CH
2256 if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2257 user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
2258 next->ex.imm_data =
2259 (__be32 __force) user_wr->ex.imm_data;
2260 } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
2261 next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
2262 }
2263
67cdb40c
RD
2264 if (!last)
2265 wr = next;
2266 else
2267 last->next = next;
2268 last = next;
2269
2270 next->next = NULL;
2271 next->wr_id = user_wr->wr_id;
2272 next->num_sge = user_wr->num_sge;
2273 next->opcode = user_wr->opcode;
2274 next->send_flags = user_wr->send_flags;
67cdb40c 2275
67cdb40c
RD
2276 if (next->num_sge) {
2277 next->sg_list = (void *) next +
1d784b89 2278 ALIGN(next_size, sizeof(struct ib_sge));
67cdb40c
RD
2279 if (copy_from_user(next->sg_list,
2280 buf + sizeof cmd +
2281 cmd.wr_count * cmd.wqe_size +
2282 sg_ind * sizeof (struct ib_sge),
2283 next->num_sge * sizeof (struct ib_sge))) {
2284 ret = -EFAULT;
9ead190b 2285 goto out_put;
67cdb40c
RD
2286 }
2287 sg_ind += next->num_sge;
2288 } else
2289 next->sg_list = NULL;
2290 }
2291
2292 resp.bad_wr = 0;
0e0ec7e0 2293 ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
67cdb40c
RD
2294 if (ret)
2295 for (next = wr; next; next = next->next) {
2296 ++resp.bad_wr;
2297 if (next == bad_wr)
2298 break;
2299 }
2300
40a20339 2301 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
67cdb40c
RD
2302 ret = -EFAULT;
2303
9ead190b 2304out_put:
fd3c7904 2305 uobj_put_obj_read(qp);
67cdb40c
RD
2306
2307 while (wr) {
e622f2f4 2308 if (is_ud && ud_wr(wr)->ah)
fd3c7904 2309 uobj_put_obj_read(ud_wr(wr)->ah);
67cdb40c
RD
2310 next = wr->next;
2311 kfree(wr);
2312 wr = next;
2313 }
2314
18320828 2315out:
67cdb40c
RD
2316 kfree(user_wr);
2317
2318 return ret ? ret : in_len;
2319}
2320
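/*
 * Copy a chain of user receive WRs and their SGE arrays into kernel
 * memory, validating the per-WR sge counts against sge_count; shared by
 * POST_RECV and POST_SRQ_RECV.
 */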
2321static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
2322 int in_len,
2323 u32 wr_count,
2324 u32 sge_count,
2325 u32 wqe_size)
2326{
2327 struct ib_uverbs_recv_wr *user_wr;
2328 struct ib_recv_wr *wr = NULL, *last, *next;
2329 int sg_ind;
2330 int i;
2331 int ret;
2332
2333 if (in_len < wqe_size * wr_count +
2334 sge_count * sizeof (struct ib_uverbs_sge))
2335 return ERR_PTR(-EINVAL);
2336
2337 if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
2338 return ERR_PTR(-EINVAL);
2339
2340 user_wr = kmalloc(wqe_size, GFP_KERNEL);
2341 if (!user_wr)
2342 return ERR_PTR(-ENOMEM);
2343
2344 sg_ind = 0;
2345 last = NULL;
2346 for (i = 0; i < wr_count; ++i) {
2347 if (copy_from_user(user_wr, buf + i * wqe_size,
2348 wqe_size)) {
2349 ret = -EFAULT;
2350 goto err;
2351 }
2352
2353 if (user_wr->num_sge + sg_ind > sge_count) {
2354 ret = -EINVAL;
2355 goto err;
2356 }
2357
4f7f4dcf
VT
2358 if (user_wr->num_sge >=
2359 (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
2360 sizeof (struct ib_sge)) {
2361 ret = -EINVAL;
2362 goto err;
2363 }
2364
67cdb40c
RD
2365 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2366 user_wr->num_sge * sizeof (struct ib_sge),
2367 GFP_KERNEL);
2368 if (!next) {
2369 ret = -ENOMEM;
2370 goto err;
2371 }
2372
2373 if (!last)
2374 wr = next;
2375 else
2376 last->next = next;
2377 last = next;
2378
2379 next->next = NULL;
2380 next->wr_id = user_wr->wr_id;
2381 next->num_sge = user_wr->num_sge;
2382
2383 if (next->num_sge) {
2384 next->sg_list = (void *) next +
2385 ALIGN(sizeof *next, sizeof (struct ib_sge));
2386 if (copy_from_user(next->sg_list,
2387 buf + wr_count * wqe_size +
2388 sg_ind * sizeof (struct ib_sge),
2389 next->num_sge * sizeof (struct ib_sge))) {
2390 ret = -EFAULT;
2391 goto err;
2392 }
2393 sg_ind += next->num_sge;
2394 } else
2395 next->sg_list = NULL;
2396 }
2397
2398 kfree(user_wr);
2399 return wr;
2400
2401err:
2402 kfree(user_wr);
2403
2404 while (wr) {
2405 next = wr->next;
2406 kfree(wr);
2407 wr = next;
2408 }
2409
2410 return ERR_PTR(ret);
2411}
2412
2413ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
057aec0d 2414 struct ib_device *ib_dev,
a74cd4af
RD
2415 const char __user *buf, int in_len,
2416 int out_len)
67cdb40c
RD
2417{
2418 struct ib_uverbs_post_recv cmd;
2419 struct ib_uverbs_post_recv_resp resp;
2420 struct ib_recv_wr *wr, *next, *bad_wr;
2421 struct ib_qp *qp;
2422 ssize_t ret = -EINVAL;
2423
2424 if (copy_from_user(&cmd, buf, sizeof cmd))
2425 return -EFAULT;
2426
2427 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2428 in_len - sizeof cmd, cmd.wr_count,
2429 cmd.sge_count, cmd.wqe_size);
2430 if (IS_ERR(wr))
2431 return PTR_ERR(wr);
2432
fd3c7904 2433 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
9ead190b 2434 if (!qp)
67cdb40c
RD
2435 goto out;
2436
2437 resp.bad_wr = 0;
0e0ec7e0 2438 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
9ead190b 2439
fd3c7904
MB
2440 uobj_put_obj_read(qp);
2441 if (ret) {
67cdb40c
RD
2442 for (next = wr; next; next = next->next) {
2443 ++resp.bad_wr;
2444 if (next == bad_wr)
2445 break;
2446 }
fd3c7904 2447 }
67cdb40c 2448
40a20339 2449 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
67cdb40c
RD
2450 ret = -EFAULT;
2451
2452out:
67cdb40c
RD
2453 while (wr) {
2454 next = wr->next;
2455 kfree(wr);
2456 wr = next;
2457 }
2458
2459 return ret ? ret : in_len;
2460}
2461
2462ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
057aec0d 2463 struct ib_device *ib_dev,
a74cd4af
RD
2464 const char __user *buf, int in_len,
2465 int out_len)
67cdb40c
RD
2466{
2467 struct ib_uverbs_post_srq_recv cmd;
2468 struct ib_uverbs_post_srq_recv_resp resp;
2469 struct ib_recv_wr *wr, *next, *bad_wr;
2470 struct ib_srq *srq;
2471 ssize_t ret = -EINVAL;
2472
2473 if (copy_from_user(&cmd, buf, sizeof cmd))
2474 return -EFAULT;
2475
2476 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2477 in_len - sizeof cmd, cmd.wr_count,
2478 cmd.sge_count, cmd.wqe_size);
2479 if (IS_ERR(wr))
2480 return PTR_ERR(wr);
2481
fd3c7904 2482 srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
9ead190b 2483 if (!srq)
67cdb40c
RD
2484 goto out;
2485
2486 resp.bad_wr = 0;
2487 ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
9ead190b 2488
fd3c7904 2489 uobj_put_obj_read(srq);
9ead190b 2490
67cdb40c
RD
2491 if (ret)
2492 for (next = wr; next; next = next->next) {
2493 ++resp.bad_wr;
2494 if (next == bad_wr)
2495 break;
2496 }
2497
40a20339 2498 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
67cdb40c
RD
2499 ret = -EFAULT;
2500
2501out:
67cdb40c
RD
2502 while (wr) {
2503 next = wr->next;
2504 kfree(wr);
2505 wr = next;
2506 }
2507
2508 return ret ? ret : in_len;
2509}
2510
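/*
 * CREATE_AH: validate the port number, translate the user address
 * vector into an rdma_ah_attr and create the AH through
 * rdma_create_user_ah().
 */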
2511ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
057aec0d 2512 struct ib_device *ib_dev,
67cdb40c
RD
2513 const char __user *buf, int in_len,
2514 int out_len)
2515{
2516 struct ib_uverbs_create_ah cmd;
2517 struct ib_uverbs_create_ah_resp resp;
2518 struct ib_uobject *uobj;
2519 struct ib_pd *pd;
2520 struct ib_ah *ah;
90898850 2521 struct rdma_ah_attr attr;
67cdb40c 2522 int ret;
477864c8 2523 struct ib_udata udata;
67cdb40c
RD
2524
2525 if (out_len < sizeof resp)
2526 return -ENOSPC;
2527
2528 if (copy_from_user(&cmd, buf, sizeof cmd))
2529 return -EFAULT;
2530
5ecce4c9
BP
2531 if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
2532 return -EINVAL;
2533
40a20339
AB
2534 ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
2535 u64_to_user_ptr(cmd.response) + sizeof(resp),
e093111d
AR
2536 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
2537 out_len - sizeof(resp));
477864c8 2538
fd3c7904
MB
2539 uobj = uobj_alloc(uobj_get_type(ah), file->ucontext);
2540 if (IS_ERR(uobj))
2541 return PTR_ERR(uobj);
67cdb40c 2542
fd3c7904 2543 pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
9ead190b 2544 if (!pd) {
67cdb40c 2545 ret = -EINVAL;
9ead190b 2546 goto err;
67cdb40c
RD
2547 }
2548
44c58487 2549 attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
d98bb7f7 2550 rdma_ah_set_make_grd(&attr, false);
d8966fcd
DC
2551 rdma_ah_set_dlid(&attr, cmd.attr.dlid);
2552 rdma_ah_set_sl(&attr, cmd.attr.sl);
2553 rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
2554 rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
2555 rdma_ah_set_port_num(&attr, cmd.attr.port_num);
2556
4ba66093 2557 if (cmd.attr.is_global) {
d8966fcd
DC
2558 rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
2559 cmd.attr.grh.sgid_index,
2560 cmd.attr.grh.hop_limit,
2561 cmd.attr.grh.traffic_class);
2562 rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
4ba66093 2563 } else {
d8966fcd 2564 rdma_ah_set_ah_flags(&attr, 0);
4ba66093 2565 }
477864c8 2566
5cda6587 2567 ah = rdma_create_user_ah(pd, &attr, &udata);
67cdb40c
RD
2568 if (IS_ERR(ah)) {
2569 ret = PTR_ERR(ah);
fd3c7904 2570 goto err_put;
67cdb40c
RD
2571 }
2572
9ead190b 2573 ah->uobject = uobj;
fd3c7904 2574 uobj->user_handle = cmd.user_handle;
9ead190b 2575 uobj->object = ah;
67cdb40c 2576
67cdb40c
RD
2577 resp.ah_handle = uobj->id;
2578
40a20339 2579 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
67cdb40c 2580 ret = -EFAULT;
9ead190b 2581 goto err_copy;
67cdb40c
RD
2582 }
2583
fd3c7904
MB
2584 uobj_put_obj_read(pd);
2585 uobj_alloc_commit(uobj);
67cdb40c
RD
2586
2587 return in_len;
2588
9ead190b 2589err_copy:
36523159 2590 rdma_destroy_ah(ah);
67cdb40c 2591
fd3c7904
MB
2592err_put:
2593 uobj_put_obj_read(pd);
ec924b47 2594
9ead190b 2595err:
fd3c7904 2596 uobj_alloc_abort(uobj);
67cdb40c
RD
2597 return ret;
2598}
2599
2600ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
057aec0d 2601 struct ib_device *ib_dev,
67cdb40c
RD
2602 const char __user *buf, int in_len, int out_len)
2603{
2604 struct ib_uverbs_destroy_ah cmd;
67cdb40c 2605 struct ib_uobject *uobj;
9ead190b 2606 int ret;
67cdb40c
RD
2607
2608 if (copy_from_user(&cmd, buf, sizeof cmd))
2609 return -EFAULT;
2610
fd3c7904
MB
2611 uobj = uobj_get_write(uobj_get_type(ah), cmd.ah_handle,
2612 file->ucontext);
2613 if (IS_ERR(uobj))
2614 return PTR_ERR(uobj);
67cdb40c 2615
fd3c7904
MB
2616 ret = uobj_remove_commit(uobj);
2617 return ret ?: in_len;
67cdb40c
RD
2618}
2619
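/*
 * ATTACH_MCAST: attach the QP to a multicast group, recording the
 * (gid, mlid) pair on the uobject's mcast_list; attaching the same
 * group twice is treated as success.
 */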
bc38a6ab 2620ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
057aec0d 2621 struct ib_device *ib_dev,
bc38a6ab
RD
2622 const char __user *buf, int in_len,
2623 int out_len)
2624{
2625 struct ib_uverbs_attach_mcast cmd;
2626 struct ib_qp *qp;
9ead190b 2627 struct ib_uqp_object *obj;
f4e40156 2628 struct ib_uverbs_mcast_entry *mcast;
9ead190b 2629 int ret;
bc38a6ab
RD
2630
2631 if (copy_from_user(&cmd, buf, sizeof cmd))
2632 return -EFAULT;
2633
fd3c7904 2634 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
9ead190b
RD
2635 if (!qp)
2636 return -EINVAL;
f4e40156 2637
9ead190b 2638 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
f4e40156 2639
f48b7269 2640 mutex_lock(&obj->mcast_lock);
9ead190b 2641 list_for_each_entry(mcast, &obj->mcast_list, list)
f4e40156
JM
2642 if (cmd.mlid == mcast->lid &&
2643 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2644 ret = 0;
9ead190b 2645 goto out_put;
f4e40156
JM
2646 }
2647
2648 mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2649 if (!mcast) {
2650 ret = -ENOMEM;
9ead190b 2651 goto out_put;
f4e40156
JM
2652 }
2653
2654 mcast->lid = cmd.mlid;
2655 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
bc38a6ab 2656
f4e40156 2657 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
9ead190b
RD
2658 if (!ret)
2659 list_add_tail(&mcast->list, &obj->mcast_list);
2660 else
f4e40156
JM
2661 kfree(mcast);
2662
9ead190b 2663out_put:
f48b7269 2664 mutex_unlock(&obj->mcast_lock);
fd3c7904 2665 uobj_put_obj_read(qp);
bc38a6ab
RD
2666
2667 return ret ? ret : in_len;
2668}
2669
2670ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
057aec0d 2671 struct ib_device *ib_dev,
bc38a6ab
RD
2672 const char __user *buf, int in_len,
2673 int out_len)
2674{
2675 struct ib_uverbs_detach_mcast cmd;
9ead190b 2676 struct ib_uqp_object *obj;
bc38a6ab 2677 struct ib_qp *qp;
f4e40156 2678 struct ib_uverbs_mcast_entry *mcast;
bc38a6ab 2679 int ret = -EINVAL;
20c7840a 2680 bool found = false;
bc38a6ab
RD
2681
2682 if (copy_from_user(&cmd, buf, sizeof cmd))
2683 return -EFAULT;
2684
fd3c7904 2685 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
9ead190b
RD
2686 if (!qp)
2687 return -EINVAL;
bc38a6ab 2688
fd3c7904 2689 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
f48b7269 2690 mutex_lock(&obj->mcast_lock);
fd3c7904 2691
9ead190b 2692 list_for_each_entry(mcast, &obj->mcast_list, list)
f4e40156
JM
2693 if (cmd.mlid == mcast->lid &&
2694 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2695 list_del(&mcast->list);
2696 kfree(mcast);
20c7840a 2697 found = true;
f4e40156
JM
2698 break;
2699 }
2700
20c7840a
MR
2701 if (!found) {
2702 ret = -EINVAL;
2703 goto out_put;
2704 }
2705
2706 ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);
2707
9ead190b 2708out_put:
f48b7269 2709 mutex_unlock(&obj->mcast_lock);
fd3c7904 2710 uobj_put_obj_read(qp);
bc38a6ab
RD
2711 return ret ? ret : in_len;
2712}
f520ba5a 2713
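/*
 * Translate a user flow-spec action (flow tag or drop) into the kernel
 * union ib_flow_spec, checking that the user-supplied size matches the
 * expected structure.
 */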
94e03f11
MR
2714static int kern_spec_to_ib_spec_action(struct ib_uverbs_flow_spec *kern_spec,
2715 union ib_flow_spec *ib_spec)
2716{
2717 ib_spec->type = kern_spec->type;
2718 switch (ib_spec->type) {
2719 case IB_FLOW_SPEC_ACTION_TAG:
2720 if (kern_spec->flow_tag.size !=
2721 sizeof(struct ib_uverbs_flow_spec_action_tag))
2722 return -EINVAL;
2723
2724 ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
2725 ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
2726 break;
483a3966
SS
2727 case IB_FLOW_SPEC_ACTION_DROP:
2728 if (kern_spec->drop.size !=
2729 sizeof(struct ib_uverbs_flow_spec_action_drop))
2730 return -EINVAL;
2731
2732 ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
2733 break;
94e03f11
MR
2734 default:
2735 return -EINVAL;
2736 }
2737 return 0;
2738}
2739
15dfbd6b
MG
2740static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
2741{
2742 /* Returns user space filter size, includes padding */
2743 return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
2744}
2745
2746static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
2747 u16 ib_real_filter_sz)
2748{
2749 /*
2750 * User space filter structures must be 64 bit aligned, otherwise this
2751 * may pass, but we won't handle additional new attributes.
2752 */
2753
2754 if (kern_filter_size > ib_real_filter_sz) {
2755 if (memchr_inv(kern_spec_filter +
2756 ib_real_filter_sz, 0,
2757 kern_filter_size - ib_real_filter_sz))
2758 return -EINVAL;
2759 return ib_real_filter_sz;
2760 }
2761 return kern_filter_size;
2762}
2763
94e03f11
MR
2764static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
2765 union ib_flow_spec *ib_spec)
436f2ad0 2766{
15dfbd6b
MG
2767 ssize_t actual_filter_sz;
2768 ssize_t kern_filter_sz;
2769 ssize_t ib_filter_sz;
2770 void *kern_spec_mask;
2771 void *kern_spec_val;
2772
c780d82a
YD
2773 if (kern_spec->reserved)
2774 return -EINVAL;
2775
436f2ad0
HHZ
2776 ib_spec->type = kern_spec->type;
2777
15dfbd6b
MG
2778 kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
2779 /* User flow spec size must be aligned to 4 bytes */
2780 if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
2781 return -EINVAL;
2782
2783 kern_spec_val = (void *)kern_spec +
2784 sizeof(struct ib_uverbs_flow_spec_hdr);
2785 kern_spec_mask = kern_spec_val + kern_filter_sz;
fbf46860
MR
2786 if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
2787 return -EINVAL;
15dfbd6b 2788
fbf46860 2789 switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
436f2ad0 2790 case IB_FLOW_SPEC_ETH:
15dfbd6b
MG
2791 ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
2792 actual_filter_sz = spec_filter_size(kern_spec_mask,
2793 kern_filter_sz,
2794 ib_filter_sz);
2795 if (actual_filter_sz <= 0)
436f2ad0 2796 return -EINVAL;
15dfbd6b
MG
2797 ib_spec->size = sizeof(struct ib_flow_spec_eth);
2798 memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
2799 memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
436f2ad0
HHZ
2800 break;
2801 case IB_FLOW_SPEC_IPV4:
15dfbd6b
MG
2802 ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
2803 actual_filter_sz = spec_filter_size(kern_spec_mask,
2804 kern_filter_sz,
2805 ib_filter_sz);
2806 if (actual_filter_sz <= 0)
436f2ad0 2807 return -EINVAL;
15dfbd6b
MG
2808 ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
2809 memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
2810 memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
436f2ad0 2811 break;
4c2aae71 2812 case IB_FLOW_SPEC_IPV6:
15dfbd6b
MG
2813 ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
2814 actual_filter_sz = spec_filter_size(kern_spec_mask,
2815 kern_filter_sz,
2816 ib_filter_sz);
2817 if (actual_filter_sz <= 0)
4c2aae71 2818 return -EINVAL;
15dfbd6b
MG
2819 ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
2820 memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
2821 memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);
a72c6a2b
MG
2822
2823 if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
2824 (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
2825 return -EINVAL;
4c2aae71 2826 break;
436f2ad0
HHZ
2827 case IB_FLOW_SPEC_TCP:
2828 case IB_FLOW_SPEC_UDP:
15dfbd6b
MG
2829 ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
2830 actual_filter_sz = spec_filter_size(kern_spec_mask,
2831 kern_filter_sz,
2832 ib_filter_sz);
2833 if (actual_filter_sz <= 0)
436f2ad0 2834 return -EINVAL;
15dfbd6b
MG
2835 ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
2836 memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
2837 memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
436f2ad0 2838 break;
0dbf3332
MR
2839 case IB_FLOW_SPEC_VXLAN_TUNNEL:
2840 ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
2841 actual_filter_sz = spec_filter_size(kern_spec_mask,
2842 kern_filter_sz,
2843 ib_filter_sz);
2844 if (actual_filter_sz <= 0)
2845 return -EINVAL;
2846 ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
2847 memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
2848 memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);
2849
2850 if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
2851 (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
2852 return -EINVAL;
2853 break;
436f2ad0
HHZ
2854 default:
2855 return -EINVAL;
2856 }
2857 return 0;
2858}
2859
94e03f11
MR
2860static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
2861 union ib_flow_spec *ib_spec)
2862{
2863 if (kern_spec->reserved)
2864 return -EINVAL;
2865
2866 if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
2867 return kern_spec_to_ib_spec_action(kern_spec, ib_spec);
2868 else
2869 return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
2870}
2871
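/*
 * EX_CREATE_WQ: read the extensible command from ucore, look up the PD
 * and CQ, create the work queue and return its handle, wqn and the
 * capabilities actually granted.
 */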
f213c052
YH
2872int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
2873 struct ib_device *ib_dev,
2874 struct ib_udata *ucore,
2875 struct ib_udata *uhw)
2876{
2877 struct ib_uverbs_ex_create_wq cmd = {};
2878 struct ib_uverbs_ex_create_wq_resp resp = {};
2879 struct ib_uwq_object *obj;
2880 int err = 0;
2881 struct ib_cq *cq;
2882 struct ib_pd *pd;
2883 struct ib_wq *wq;
2884 struct ib_wq_init_attr wq_init_attr = {};
2885 size_t required_cmd_sz;
2886 size_t required_resp_len;
2887
2888 required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
2889 required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);
2890
2891 if (ucore->inlen < required_cmd_sz)
2892 return -EINVAL;
2893
2894 if (ucore->outlen < required_resp_len)
2895 return -ENOSPC;
2896
2897 if (ucore->inlen > sizeof(cmd) &&
2898 !ib_is_udata_cleared(ucore, sizeof(cmd),
2899 ucore->inlen - sizeof(cmd)))
2900 return -EOPNOTSUPP;
2901
2902 err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
2903 if (err)
2904 return err;
2905
2906 if (cmd.comp_mask)
2907 return -EOPNOTSUPP;
2908
fd3c7904
MB
2909 obj = (struct ib_uwq_object *)uobj_alloc(uobj_get_type(wq),
2910 file->ucontext);
2911 if (IS_ERR(obj))
2912 return PTR_ERR(obj);
f213c052 2913
fd3c7904 2914 pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
f213c052
YH
2915 if (!pd) {
2916 err = -EINVAL;
2917 goto err_uobj;
2918 }
2919
fd3c7904 2920 cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
f213c052
YH
2921 if (!cq) {
2922 err = -EINVAL;
2923 goto err_put_pd;
2924 }
2925
2926 wq_init_attr.cq = cq;
2927 wq_init_attr.max_sge = cmd.max_sge;
2928 wq_init_attr.max_wr = cmd.max_wr;
2929 wq_init_attr.wq_context = file;
2930 wq_init_attr.wq_type = cmd.wq_type;
2931 wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
af1cb95d
NO
2932 if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) +
2933 sizeof(cmd.create_flags)))
2934 wq_init_attr.create_flags = cmd.create_flags;
f213c052
YH
2935 obj->uevent.events_reported = 0;
2936 INIT_LIST_HEAD(&obj->uevent.event_list);
2937 wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
2938 if (IS_ERR(wq)) {
2939 err = PTR_ERR(wq);
2940 goto err_put_cq;
2941 }
2942
2943 wq->uobject = &obj->uevent.uobject;
2944 obj->uevent.uobject.object = wq;
2945 wq->wq_type = wq_init_attr.wq_type;
2946 wq->cq = cq;
2947 wq->pd = pd;
2948 wq->device = pd->device;
2949 wq->wq_context = wq_init_attr.wq_context;
2950 atomic_set(&wq->usecnt, 0);
2951 atomic_inc(&pd->usecnt);
2952 atomic_inc(&cq->usecnt);
2953 wq->uobject = &obj->uevent.uobject;
2954 obj->uevent.uobject.object = wq;
f213c052
YH
2955
2956 memset(&resp, 0, sizeof(resp));
2957 resp.wq_handle = obj->uevent.uobject.id;
2958 resp.max_sge = wq_init_attr.max_sge;
2959 resp.max_wr = wq_init_attr.max_wr;
2960 resp.wqn = wq->wq_num;
2961 resp.response_length = required_resp_len;
2962 err = ib_copy_to_udata(ucore,
2963 &resp, resp.response_length);
2964 if (err)
2965 goto err_copy;
2966
fd3c7904
MB
2967 uobj_put_obj_read(pd);
2968 uobj_put_obj_read(cq);
2969 uobj_alloc_commit(&obj->uevent.uobject);
f213c052
YH
2970 return 0;
2971
2972err_copy:
f213c052
YH
2973 ib_destroy_wq(wq);
2974err_put_cq:
fd3c7904 2975 uobj_put_obj_read(cq);
f213c052 2976err_put_pd:
fd3c7904 2977 uobj_put_obj_read(pd);
f213c052 2978err_uobj:
fd3c7904 2979 uobj_alloc_abort(&obj->uevent.uobject);
f213c052
YH
2980
2981 return err;
2982}
2983
2984int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
2985 struct ib_device *ib_dev,
2986 struct ib_udata *ucore,
2987 struct ib_udata *uhw)
2988{
2989 struct ib_uverbs_ex_destroy_wq cmd = {};
2990 struct ib_uverbs_ex_destroy_wq_resp resp = {};
f213c052
YH
2991 struct ib_uobject *uobj;
2992 struct ib_uwq_object *obj;
2993 size_t required_cmd_sz;
2994 size_t required_resp_len;
2995 int ret;
2996
2997 required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
2998 required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
2999
3000 if (ucore->inlen < required_cmd_sz)
3001 return -EINVAL;
3002
3003 if (ucore->outlen < required_resp_len)
3004 return -ENOSPC;
3005
3006 if (ucore->inlen > sizeof(cmd) &&
3007 !ib_is_udata_cleared(ucore, sizeof(cmd),
3008 ucore->inlen - sizeof(cmd)))
3009 return -EOPNOTSUPP;
3010
3011 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3012 if (ret)
3013 return ret;
3014
3015 if (cmd.comp_mask)
3016 return -EOPNOTSUPP;
3017
3018 resp.response_length = required_resp_len;
fd3c7904
MB
3019 uobj = uobj_get_write(uobj_get_type(wq), cmd.wq_handle,
3020 file->ucontext);
3021 if (IS_ERR(uobj))
3022 return PTR_ERR(uobj);
f213c052 3023
f213c052 3024 obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
fd3c7904
MB
3025 /*
3026 * Make sure we don't free the memory in remove_commit as we still
3027 * need the uobject memory to create the response.
3028 */
3029 uverbs_uobject_get(uobj);
f213c052 3030
fd3c7904 3031 ret = uobj_remove_commit(uobj);
f213c052 3032 resp.events_reported = obj->uevent.events_reported;
fd3c7904 3033 uverbs_uobject_put(uobj);
f213c052
YH
3034 if (ret)
3035 return ret;
3036
c52d8114 3037 return ib_copy_to_udata(ucore, &resp, resp.response_length);
f213c052
YH
3038}
3039
3040int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
3041 struct ib_device *ib_dev,
3042 struct ib_udata *ucore,
3043 struct ib_udata *uhw)
3044{
3045 struct ib_uverbs_ex_modify_wq cmd = {};
3046 struct ib_wq *wq;
3047 struct ib_wq_attr wq_attr = {};
3048 size_t required_cmd_sz;
3049 int ret;
3050
3051 required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
3052 if (ucore->inlen < required_cmd_sz)
3053 return -EINVAL;
3054
3055 if (ucore->inlen > sizeof(cmd) &&
3056 !ib_is_udata_cleared(ucore, sizeof(cmd),
3057 ucore->inlen - sizeof(cmd)))
3058 return -EOPNOTSUPP;
3059
3060 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3061 if (ret)
3062 return ret;
3063
3064 if (!cmd.attr_mask)
3065 return -EINVAL;
3066
af1cb95d 3067 if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
f213c052
YH
3068 return -EINVAL;
3069
fd3c7904 3070 wq = uobj_get_obj_read(wq, cmd.wq_handle, file->ucontext);
f213c052
YH
3071 if (!wq)
3072 return -EINVAL;
3073
3074 wq_attr.curr_wq_state = cmd.curr_wq_state;
3075 wq_attr.wq_state = cmd.wq_state;
af1cb95d
NO
3076 if (cmd.attr_mask & IB_WQ_FLAGS) {
3077 wq_attr.flags = cmd.flags;
3078 wq_attr.flags_mask = cmd.flags_mask;
3079 }
f213c052 3080 ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
fd3c7904 3081 uobj_put_obj_read(wq);
f213c052
YH
3082 return ret;
3083}
3084
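/*
 * EX_CREATE_RWQ_IND_TABLE: read 1 << log_ind_tbl_size WQ handles from
 * user space, take a read reference on each WQ and build the receive
 * work queue indirection table from them.
 */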
de019a94
YH
3085int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
3086 struct ib_device *ib_dev,
3087 struct ib_udata *ucore,
3088 struct ib_udata *uhw)
3089{
3090 struct ib_uverbs_ex_create_rwq_ind_table cmd = {};
3091 struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
3092 struct ib_uobject *uobj;
3093 int err = 0;
3094 struct ib_rwq_ind_table_init_attr init_attr = {};
3095 struct ib_rwq_ind_table *rwq_ind_tbl;
3096 struct ib_wq **wqs = NULL;
3097 u32 *wqs_handles = NULL;
3098 struct ib_wq *wq = NULL;
3099 int i, j, num_read_wqs;
3100 u32 num_wq_handles;
3101 u32 expected_in_size;
3102 size_t required_cmd_sz_header;
3103 size_t required_resp_len;
3104
3105 required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
3106 required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);
3107
3108 if (ucore->inlen < required_cmd_sz_header)
3109 return -EINVAL;
3110
3111 if (ucore->outlen < required_resp_len)
3112 return -ENOSPC;
3113
3114 err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
3115 if (err)
3116 return err;
3117
3118 ucore->inbuf += required_cmd_sz_header;
3119 ucore->inlen -= required_cmd_sz_header;
3120
3121 if (cmd.comp_mask)
3122 return -EOPNOTSUPP;
3123
3124 if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
3125 return -EINVAL;
3126
3127 num_wq_handles = 1 << cmd.log_ind_tbl_size;
3128 expected_in_size = num_wq_handles * sizeof(__u32);
3129 if (num_wq_handles == 1)
3130 /* input size for wq handles is u64 aligned */
3131 expected_in_size += sizeof(__u32);
3132
3133 if (ucore->inlen < expected_in_size)
3134 return -EINVAL;
3135
3136 if (ucore->inlen > expected_in_size &&
3137 !ib_is_udata_cleared(ucore, expected_in_size,
3138 ucore->inlen - expected_in_size))
3139 return -EOPNOTSUPP;
3140
3141 wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
3142 GFP_KERNEL);
3143 if (!wqs_handles)
3144 return -ENOMEM;
3145
3146 err = ib_copy_from_udata(wqs_handles, ucore,
3147 num_wq_handles * sizeof(__u32));
3148 if (err)
3149 goto err_free;
3150
3151 wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
3152 if (!wqs) {
3153 err = -ENOMEM;
3154 goto err_free;
3155 }
3156
3157 for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
3158 num_read_wqs++) {
fd3c7904
MB
3159 wq = uobj_get_obj_read(wq, wqs_handles[num_read_wqs],
3160 file->ucontext);
de019a94
YH
3161 if (!wq) {
3162 err = -EINVAL;
3163 goto put_wqs;
3164 }
3165
3166 wqs[num_read_wqs] = wq;
3167 }
3168
fd3c7904
MB
3169 uobj = uobj_alloc(uobj_get_type(rwq_ind_table), file->ucontext);
3170 if (IS_ERR(uobj)) {
3171 err = PTR_ERR(uobj);
de019a94
YH
3172 goto put_wqs;
3173 }
3174
de019a94
YH
3175 init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
3176 init_attr.ind_tbl = wqs;
3177 rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);
3178
3179 if (IS_ERR(rwq_ind_tbl)) {
3180 err = PTR_ERR(rwq_ind_tbl);
3181 goto err_uobj;
3182 }
3183
3184 rwq_ind_tbl->ind_tbl = wqs;
3185 rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
3186 rwq_ind_tbl->uobject = uobj;
3187 uobj->object = rwq_ind_tbl;
3188 rwq_ind_tbl->device = ib_dev;
3189 atomic_set(&rwq_ind_tbl->usecnt, 0);
3190
3191 for (i = 0; i < num_wq_handles; i++)
3192 atomic_inc(&wqs[i]->usecnt);
3193
de019a94
YH
3194 resp.ind_tbl_handle = uobj->id;
3195 resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
3196 resp.response_length = required_resp_len;
3197
3198 err = ib_copy_to_udata(ucore,
3199 &resp, resp.response_length);
3200 if (err)
3201 goto err_copy;
3202
3203 kfree(wqs_handles);
3204
3205 for (j = 0; j < num_read_wqs; j++)
fd3c7904 3206 uobj_put_obj_read(wqs[j]);
de019a94 3207
fd3c7904 3208 uobj_alloc_commit(uobj);
de019a94
YH
3209 return 0;
3210
3211err_copy:
de019a94
YH
3212 ib_destroy_rwq_ind_table(rwq_ind_tbl);
3213err_uobj:
fd3c7904 3214 uobj_alloc_abort(uobj);
de019a94
YH
3215put_wqs:
3216 for (j = 0; j < num_read_wqs; j++)
fd3c7904 3217 uobj_put_obj_read(wqs[j]);
de019a94
YH
3218err_free:
3219 kfree(wqs_handles);
3220 kfree(wqs);
3221 return err;
3222}
3223
3224int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
3225 struct ib_device *ib_dev,
3226 struct ib_udata *ucore,
3227 struct ib_udata *uhw)
3228{
3229 struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
de019a94
YH
3230 struct ib_uobject *uobj;
3231 int ret;
de019a94
YH
3232 size_t required_cmd_sz;
3233
3234 required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);
3235
3236 if (ucore->inlen < required_cmd_sz)
3237 return -EINVAL;
3238
3239 if (ucore->inlen > sizeof(cmd) &&
3240 !ib_is_udata_cleared(ucore, sizeof(cmd),
3241 ucore->inlen - sizeof(cmd)))
3242 return -EOPNOTSUPP;
3243
3244 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3245 if (ret)
3246 return ret;
3247
3248 if (cmd.comp_mask)
3249 return -EOPNOTSUPP;
3250
fd3c7904
MB
3251 uobj = uobj_get_write(uobj_get_type(rwq_ind_table), cmd.ind_tbl_handle,
3252 file->ucontext);
3253 if (IS_ERR(uobj))
3254 return PTR_ERR(uobj);
de019a94 3255
fd3c7904 3256 return uobj_remove_commit(uobj);
de019a94
YH
3257}
3258
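/*
 * EX_CREATE_FLOW (requires CAP_NET_RAW): copy the variable-length flow
 * attribute and its specs from user space, convert each spec with
 * kern_spec_to_ib_spec() and attach the resulting flow to the QP.
 */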
f21519b2 3259int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
057aec0d 3260 struct ib_device *ib_dev,
f21519b2
YD
3261 struct ib_udata *ucore,
3262 struct ib_udata *uhw)
436f2ad0
HHZ
3263{
3264 struct ib_uverbs_create_flow cmd;
3265 struct ib_uverbs_create_flow_resp resp;
3266 struct ib_uobject *uobj;
3267 struct ib_flow *flow_id;
d82693da 3268 struct ib_uverbs_flow_attr *kern_flow_attr;
436f2ad0
HHZ
3269 struct ib_flow_attr *flow_attr;
3270 struct ib_qp *qp;
3271 int err = 0;
3272 void *kern_spec;
3273 void *ib_spec;
3274 int i;
436f2ad0 3275
6bcca3d4
YD
3276 if (ucore->inlen < sizeof(cmd))
3277 return -EINVAL;
3278
f21519b2 3279 if (ucore->outlen < sizeof(resp))
436f2ad0
HHZ
3280 return -ENOSPC;
3281
f21519b2
YD
3282 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3283 if (err)
3284 return err;
3285
3286 ucore->inbuf += sizeof(cmd);
3287 ucore->inlen -= sizeof(cmd);
436f2ad0 3288
22878dbc
MB
3289 if (cmd.comp_mask)
3290 return -EINVAL;
3291
e3b6d8cf 3292 if (!capable(CAP_NET_RAW))
436f2ad0
HHZ
3293 return -EPERM;
3294
a3100a78
MV
3295 if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
3296 return -EINVAL;
3297
3298 if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
3299 ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
3300 (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
3301 return -EINVAL;
3302
f8848274 3303 if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
22878dbc
MB
3304 return -EINVAL;
3305
f21519b2 3306 if (cmd.flow_attr.size > ucore->inlen ||
f8848274 3307 cmd.flow_attr.size >
b68c9560 3308 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
22878dbc
MB
3309 return -EINVAL;
3310
c780d82a
YD
3311 if (cmd.flow_attr.reserved[0] ||
3312 cmd.flow_attr.reserved[1])
3313 return -EINVAL;
3314
436f2ad0 3315 if (cmd.flow_attr.num_of_specs) {
f8848274
MB
3316 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
3317 GFP_KERNEL);
436f2ad0
HHZ
3318 if (!kern_flow_attr)
3319 return -ENOMEM;
3320
3321 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
f21519b2
YD
3322 err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
3323 cmd.flow_attr.size);
3324 if (err)
436f2ad0 3325 goto err_free_attr;
436f2ad0
HHZ
3326 } else {
3327 kern_flow_attr = &cmd.flow_attr;
436f2ad0
HHZ
3328 }
3329
fd3c7904
MB
3330 uobj = uobj_alloc(uobj_get_type(flow), file->ucontext);
3331 if (IS_ERR(uobj)) {
3332 err = PTR_ERR(uobj);
436f2ad0
HHZ
3333 goto err_free_attr;
3334 }
436f2ad0 3335
fd3c7904 3336 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
436f2ad0
HHZ
3337 if (!qp) {
3338 err = -EINVAL;
3339 goto err_uobj;
3340 }
3341
15dfbd6b
MG
3342 flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
3343 sizeof(union ib_flow_spec), GFP_KERNEL);
436f2ad0
HHZ
3344 if (!flow_attr) {
3345 err = -ENOMEM;
3346 goto err_put;
3347 }
3348
3349 flow_attr->type = kern_flow_attr->type;
3350 flow_attr->priority = kern_flow_attr->priority;
3351 flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
3352 flow_attr->port = kern_flow_attr->port;
3353 flow_attr->flags = kern_flow_attr->flags;
3354 flow_attr->size = sizeof(*flow_attr);
3355
3356 kern_spec = kern_flow_attr + 1;
3357 ib_spec = flow_attr + 1;
f8848274 3358 for (i = 0; i < flow_attr->num_of_specs &&
b68c9560 3359 cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
f8848274 3360 cmd.flow_attr.size >=
b68c9560 3361 ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
436f2ad0
HHZ
3362 err = kern_spec_to_ib_spec(kern_spec, ib_spec);
3363 if (err)
3364 goto err_free;
3365 flow_attr->size +=
3366 ((union ib_flow_spec *) ib_spec)->size;
b68c9560
YD
3367 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
3368 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
436f2ad0
HHZ
3369 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
3370 }
f8848274
MB
3371 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
3372 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
3373 i, cmd.flow_attr.size);
98a37510 3374 err = -EINVAL;
436f2ad0
HHZ
3375 goto err_free;
3376 }
3377 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
3378 if (IS_ERR(flow_id)) {
3379 err = PTR_ERR(flow_id);
fd3c7904 3380 goto err_free;
436f2ad0 3381 }
436f2ad0
HHZ
3382 flow_id->uobject = uobj;
3383 uobj->object = flow_id;
3384
436f2ad0
HHZ
3385 memset(&resp, 0, sizeof(resp));
3386 resp.flow_handle = uobj->id;
3387
f21519b2
YD
3388 err = ib_copy_to_udata(ucore,
3389 &resp, sizeof(resp));
3390 if (err)
436f2ad0 3391 goto err_copy;
436f2ad0 3392
fd3c7904
MB
3393 uobj_put_obj_read(qp);
3394 uobj_alloc_commit(uobj);
436f2ad0
HHZ
3395 kfree(flow_attr);
3396 if (cmd.flow_attr.num_of_specs)
3397 kfree(kern_flow_attr);
f21519b2 3398 return 0;
436f2ad0 3399err_copy:
436f2ad0
HHZ
3400 ib_destroy_flow(flow_id);
3401err_free:
3402 kfree(flow_attr);
3403err_put:
fd3c7904 3404 uobj_put_obj_read(qp);
436f2ad0 3405err_uobj:
fd3c7904 3406 uobj_alloc_abort(uobj);
436f2ad0
HHZ
3407err_free_attr:
3408 if (cmd.flow_attr.num_of_specs)
3409 kfree(kern_flow_attr);
3410 return err;
3411}
3412
f21519b2 3413int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
057aec0d 3414 struct ib_device *ib_dev,
f21519b2
YD
3415 struct ib_udata *ucore,
3416 struct ib_udata *uhw)
3417{
436f2ad0 3418 struct ib_uverbs_destroy_flow cmd;
436f2ad0
HHZ
3419 struct ib_uobject *uobj;
3420 int ret;
3421
6bcca3d4
YD
3422 if (ucore->inlen < sizeof(cmd))
3423 return -EINVAL;
3424
f21519b2
YD
3425 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3426 if (ret)
3427 return ret;
436f2ad0 3428
2782c2d3
YD
3429 if (cmd.comp_mask)
3430 return -EINVAL;
3431
fd3c7904
MB
3432 uobj = uobj_get_write(uobj_get_type(flow), cmd.flow_handle,
3433 file->ucontext);
3434 if (IS_ERR(uobj))
3435 return PTR_ERR(uobj);
436f2ad0 3436
fd3c7904 3437 ret = uobj_remove_commit(uobj);
f21519b2 3438 return ret;
436f2ad0
HHZ
3439}
3440
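/*
 * Common SRQ creation path: basic, XRC and tag-matching SRQs all funnel
 * through here. XRC SRQs additionally pin the XRCD, and SRQ types that
 * use a CQ (per ib_srq_has_cq()) take a reference on it.
 */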
c89d1bed 3441static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
057aec0d 3442 struct ib_device *ib_dev,
c89d1bed
SH
3443 struct ib_uverbs_create_xsrq *cmd,
3444 struct ib_udata *udata)
f520ba5a 3445{
f520ba5a 3446 struct ib_uverbs_create_srq_resp resp;
8541f8de 3447 struct ib_usrq_object *obj;
f520ba5a
RD
3448 struct ib_pd *pd;
3449 struct ib_srq *srq;
8541f8de 3450 struct ib_uobject *uninitialized_var(xrcd_uobj);
f520ba5a
RD
3451 struct ib_srq_init_attr attr;
3452 int ret;
3453
fd3c7904
MB
3454 obj = (struct ib_usrq_object *)uobj_alloc(uobj_get_type(srq),
3455 file->ucontext);
3456 if (IS_ERR(obj))
3457 return PTR_ERR(obj);
f520ba5a 3458
38eb44fa
AK
3459 if (cmd->srq_type == IB_SRQT_TM)
3460 attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;
3461
8541f8de 3462 if (cmd->srq_type == IB_SRQT_XRC) {
fd3c7904
MB
3463 xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->xrcd_handle,
3464 file->ucontext);
3465 if (IS_ERR(xrcd_uobj)) {
8541f8de 3466 ret = -EINVAL;
5909ce54 3467 goto err;
8541f8de
SH
3468 }
3469
fd3c7904
MB
3470 attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
3471 if (!attr.ext.xrc.xrcd) {
3472 ret = -EINVAL;
3473 goto err_put_xrcd;
3474 }
3475
8541f8de
SH
3476 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
3477 atomic_inc(&obj->uxrcd->refcnt);
1a56ff6d 3478 }
5909ce54 3479
1a56ff6d
AK
3480 if (ib_srq_has_cq(cmd->srq_type)) {
3481 attr.ext.cq = uobj_get_obj_read(cq, cmd->cq_handle,
3482 file->ucontext);
3483 if (!attr.ext.cq) {
5909ce54
RD
3484 ret = -EINVAL;
3485 goto err_put_xrcd;
3486 }
3487 }
3488
fd3c7904 3489 pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
5909ce54
RD
3490 if (!pd) {
3491 ret = -EINVAL;
3492 goto err_put_cq;
8541f8de
SH
3493 }
3494
f520ba5a
RD
3495 attr.event_handler = ib_uverbs_srq_event_handler;
3496 attr.srq_context = file;
8541f8de
SH
3497 attr.srq_type = cmd->srq_type;
3498 attr.attr.max_wr = cmd->max_wr;
3499 attr.attr.max_sge = cmd->max_sge;
3500 attr.attr.srq_limit = cmd->srq_limit;
f520ba5a 3501
8541f8de
SH
3502 obj->uevent.events_reported = 0;
3503 INIT_LIST_HEAD(&obj->uevent.event_list);
f520ba5a 3504
8541f8de 3505 srq = pd->device->create_srq(pd, &attr, udata);
f520ba5a
RD
3506 if (IS_ERR(srq)) {
3507 ret = PTR_ERR(srq);
ec924b47 3508 goto err_put;
f520ba5a
RD
3509 }
3510
8541f8de
SH
3511 srq->device = pd->device;
3512 srq->pd = pd;
3513 srq->srq_type = cmd->srq_type;
3514 srq->uobject = &obj->uevent.uobject;
f520ba5a
RD
3515 srq->event_handler = attr.event_handler;
3516 srq->srq_context = attr.srq_context;
8541f8de 3517
1a56ff6d
AK
3518 if (ib_srq_has_cq(cmd->srq_type)) {
3519 srq->ext.cq = attr.ext.cq;
3520 atomic_inc(&attr.ext.cq->usecnt);
3521 }
3522
8541f8de 3523 if (cmd->srq_type == IB_SRQT_XRC) {
8541f8de 3524 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
8541f8de
SH
3525 atomic_inc(&attr.ext.xrc.xrcd->usecnt);
3526 }
3527
f520ba5a
RD
3528 atomic_inc(&pd->usecnt);
3529 atomic_set(&srq->usecnt, 0);
3530
8541f8de 3531 obj->uevent.uobject.object = srq;
fd3c7904 3532 obj->uevent.uobject.user_handle = cmd->user_handle;
f520ba5a 3533
9ead190b 3534 memset(&resp, 0, sizeof resp);
8541f8de 3535 resp.srq_handle = obj->uevent.uobject.id;
ea88fd16
DB
3536 resp.max_wr = attr.attr.max_wr;
3537 resp.max_sge = attr.attr.max_sge;
8541f8de
SH
3538 if (cmd->srq_type == IB_SRQT_XRC)
3539 resp.srqn = srq->ext.xrc.srq_num;
f520ba5a 3540
8541f8de 3541 if (copy_to_user((void __user *) (unsigned long) cmd->response,
f520ba5a
RD
3542 &resp, sizeof resp)) {
3543 ret = -EFAULT;
9ead190b 3544 goto err_copy;
f520ba5a
RD
3545 }
3546
1a56ff6d 3547 if (cmd->srq_type == IB_SRQT_XRC)
fd3c7904 3548 uobj_put_read(xrcd_uobj);
1a56ff6d
AK
3549
3550 if (ib_srq_has_cq(cmd->srq_type))
3551 uobj_put_obj_read(attr.ext.cq);
3552
fd3c7904
MB
3553 uobj_put_obj_read(pd);
3554 uobj_alloc_commit(&obj->uevent.uobject);
f520ba5a 3555
8541f8de 3556 return 0;
f520ba5a 3557
9ead190b 3558err_copy:
f520ba5a
RD
3559 ib_destroy_srq(srq);
3560
ec924b47 3561err_put:
fd3c7904 3562 uobj_put_obj_read(pd);
8541f8de
SH
3563
3564err_put_cq:
1a56ff6d
AK
3565 if (ib_srq_has_cq(cmd->srq_type))
3566 uobj_put_obj_read(attr.ext.cq);
8541f8de 3567
5909ce54
RD
3568err_put_xrcd:
3569 if (cmd->srq_type == IB_SRQT_XRC) {
3570 atomic_dec(&obj->uxrcd->refcnt);
fd3c7904 3571 uobj_put_read(xrcd_uobj);
5909ce54 3572 }
ec924b47 3573
9ead190b 3574err:
fd3c7904 3575 uobj_alloc_abort(&obj->uevent.uobject);
f520ba5a
RD
3576 return ret;
3577}
3578
8541f8de 3579ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
057aec0d 3580 struct ib_device *ib_dev,
8541f8de
SH
3581 const char __user *buf, int in_len,
3582 int out_len)
3583{
3584 struct ib_uverbs_create_srq cmd;
3585 struct ib_uverbs_create_xsrq xcmd;
3586 struct ib_uverbs_create_srq_resp resp;
3587 struct ib_udata udata;
3588 int ret;
3589
3590 if (out_len < sizeof resp)
3591 return -ENOSPC;
3592
3593 if (copy_from_user(&cmd, buf, sizeof cmd))
3594 return -EFAULT;
3595
38eb44fa 3596 memset(&xcmd, 0, sizeof(xcmd));
8541f8de
SH
3597 xcmd.response = cmd.response;
3598 xcmd.user_handle = cmd.user_handle;
3599 xcmd.srq_type = IB_SRQT_BASIC;
3600 xcmd.pd_handle = cmd.pd_handle;
3601 xcmd.max_wr = cmd.max_wr;
3602 xcmd.max_sge = cmd.max_sge;
3603 xcmd.srq_limit = cmd.srq_limit;
3604
40a20339
AB
3605 ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
3606 u64_to_user_ptr(cmd.response) + sizeof(resp),
e093111d
AR
3607 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
3608 out_len - sizeof(resp));
8541f8de 3609
057aec0d 3610 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
8541f8de
SH
3611 if (ret)
3612 return ret;
3613
3614 return in_len;
3615}
3616
3617ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
057aec0d 3618 struct ib_device *ib_dev,
8541f8de
SH
3619 const char __user *buf, int in_len, int out_len)
3620{
3621 struct ib_uverbs_create_xsrq cmd;
3622 struct ib_uverbs_create_srq_resp resp;
3623 struct ib_udata udata;
3624 int ret;
3625
3626 if (out_len < sizeof resp)
3627 return -ENOSPC;
3628
3629 if (copy_from_user(&cmd, buf, sizeof cmd))
3630 return -EFAULT;
3631
40a20339
AB
3632 ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
3633 u64_to_user_ptr(cmd.response) + sizeof(resp),
e093111d
AR
3634 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
3635 out_len - sizeof(resp));
8541f8de 3636
057aec0d 3637 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
8541f8de
SH
3638 if (ret)
3639 return ret;
3640
3641 return in_len;
3642}
3643
f520ba5a 3644ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
057aec0d 3645 struct ib_device *ib_dev,
f520ba5a
RD
3646 const char __user *buf, int in_len,
3647 int out_len)
3648{
3649 struct ib_uverbs_modify_srq cmd;
9bc57e2d 3650 struct ib_udata udata;
f520ba5a
RD
3651 struct ib_srq *srq;
3652 struct ib_srq_attr attr;
3653 int ret;
3654
3655 if (copy_from_user(&cmd, buf, sizeof cmd))
3656 return -EFAULT;
3657
40a20339 3658 ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
9bc57e2d
RC
3659 out_len);
3660
fd3c7904 3661 srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
9ead190b
RD
3662 if (!srq)
3663 return -EINVAL;
f520ba5a
RD
3664
3665 attr.max_wr = cmd.max_wr;
f520ba5a
RD
3666 attr.srq_limit = cmd.srq_limit;
3667
9bc57e2d 3668 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
f520ba5a 3669
fd3c7904 3670 uobj_put_obj_read(srq);
f520ba5a
RD
3671
3672 return ret ? ret : in_len;
3673}
3674
8bdb0e86 3675ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
057aec0d 3676 struct ib_device *ib_dev,
8bdb0e86
DB
3677 const char __user *buf,
3678 int in_len, int out_len)
3679{
3680 struct ib_uverbs_query_srq cmd;
3681 struct ib_uverbs_query_srq_resp resp;
3682 struct ib_srq_attr attr;
3683 struct ib_srq *srq;
3684 int ret;
3685
3686 if (out_len < sizeof resp)
3687 return -ENOSPC;
3688
3689 if (copy_from_user(&cmd, buf, sizeof cmd))
3690 return -EFAULT;
3691
fd3c7904 3692 srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
9ead190b
RD
3693 if (!srq)
3694 return -EINVAL;
8bdb0e86 3695
9ead190b 3696 ret = ib_query_srq(srq, &attr);
8bdb0e86 3697
fd3c7904 3698 uobj_put_obj_read(srq);
8bdb0e86
DB
3699
3700 if (ret)
9ead190b 3701 return ret;
8bdb0e86
DB
3702
3703 memset(&resp, 0, sizeof resp);
3704
3705 resp.max_wr = attr.max_wr;
3706 resp.max_sge = attr.max_sge;
3707 resp.srq_limit = attr.srq_limit;
3708
40a20339 3709 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
9ead190b 3710 return -EFAULT;
8bdb0e86 3711
9ead190b 3712 return in_len;
8bdb0e86
DB
3713}
3714
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject *uobj;
	struct ib_uevent_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(srq), cmd.srq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uevent_object, uobject);
	/*
	 * Make sure we don't free the memory in remove_commit, as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}
	resp.events_reported = obj->events_reported;
	uverbs_uobject_put(uobj);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
		return -EFAULT;

	return in_len;
}

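/*
 * Extended query-device command.  Unlike the legacy handler, the response is
 * extensible: each optional capability field is copied only if the caller's
 * output buffer is large enough to hold it, and resp.response_length records
 * how many bytes are actually returned.
 */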
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp = { {0} };
	struct ib_uverbs_ex_query_device cmd;
	struct ib_device_attr attr = {0};
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

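	/*
	 * From here on, each capability block follows the same pattern: if
	 * the user buffer cannot hold the next field, stop and return what
	 * has been filled in so far; otherwise copy the field and grow
	 * response_length by its size.
	 */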
	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

	if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
		goto end;

	resp.device_cap_flags_ex = attr.device_cap_flags;
	resp.response_length += sizeof(resp.device_cap_flags_ex);

	if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
		goto end;

	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
	resp.rss_caps.max_rwq_indirection_tables =
		attr.rss_caps.max_rwq_indirection_tables;
	resp.rss_caps.max_rwq_indirection_table_size =
		attr.rss_caps.max_rwq_indirection_table_size;

	resp.response_length += sizeof(resp.rss_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
		goto end;

	resp.max_wq_type_rq = attr.max_wq_type_rq;
	resp.response_length += sizeof(resp.max_wq_type_rq);

	if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps))
		goto end;

	resp.raw_packet_caps = attr.raw_packet_caps;
	resp.response_length += sizeof(resp.raw_packet_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
		goto end;

	resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size;
	resp.tm_caps.max_num_tags      = attr.tm_caps.max_num_tags;
	resp.tm_caps.max_ops	       = attr.tm_caps.max_ops;
	resp.tm_caps.max_sge	       = attr.tm_caps.max_sge;
	resp.tm_caps.flags	       = attr.tm_caps.flags;
	resp.response_length += sizeof(resp.tm_caps);
end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	return err;
}