/*
 * IB/uverbs: Use uverbs_api to unmarshal ioctl commands
 * drivers/infiniband/core/uverbs_ioctl_merge.c
 */
/*
 * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/uverbs_ioctl.h>
#include <rdma/rdma_user_ioctl.h>
#include <linux/bitops.h>
#include "uverbs.h"

/*
 * Attribute/method/object ids carry a namespace ("bucket") in their upper
 * bits.  These helpers split a raw id into its namespace number and its
 * in-namespace index.
 */
#define UVERBS_NUM_NS (UVERBS_ID_NS_MASK >> UVERBS_ID_NS_SHIFT)
#define GET_NS_ID(idx) (((idx) & UVERBS_ID_NS_MASK) >> UVERBS_ID_NS_SHIFT)
#define GET_ID(idx) ((idx) & ~UVERBS_ID_NS_MASK)

/*
 * Iterate the elements of the single "hash" hashes[tmpi].  The hash layout is
 * described generically by byte offsets: the element count lives at
 * @num_buckets_offset and the pointer to the element array at
 * @buckets_offset.  The trailing if() both loads the current element and
 * skips NULL entries.
 */
#define _for_each_element(elem, tmpi, tmpj, hashes, num_buckets_offset, \
			  buckets_offset)				\
	for (tmpj = 0,							\
	     elem = (*(const void ***)((hashes)[tmpi] +			\
				       (buckets_offset)))[0];		\
	     tmpj < *(size_t *)((hashes)[tmpi] + (num_buckets_offset)); \
	     tmpj++)							\
		if ((elem = ((*(const void ***)(hashes[tmpi] +		\
						(buckets_offset)))[tmpj])))

/*
 * Iterate all elements of a few @hashes. The number of given hashes is
 * indicated by @num_hashes. The offset of the number of buckets in the hash is
 * represented by @num_buckets_offset, while the offset of the buckets array in
 * the hash structure is represented by @buckets_offset. tmpi and tmpj are two
 * short (or int) based indices that are given by the user. tmpi iterates over
 * the different hashes. @elem points the current element in the hashes[tmpi]
 * bucket we are looping on. To be honest, @hashes representation isn't exactly
 * a hash, but more a collection of elements. These elements' ids are treated
 * in a hash like manner, where the first upper bits are the bucket number.
 * These elements are later mapped into a perfect-hash.
 */
#define for_each_element(elem, tmpi, tmpj, hashes, num_hashes,		\
			 num_buckets_offset, buckets_offset)		\
	for (tmpi = 0; tmpi < (num_hashes); tmpi++)			\
		_for_each_element(elem, tmpi, tmpj, hashes, num_buckets_offset,\
				  buckets_offset)

/*
 * Type-aware wrappers around get_elements_above_id().  Each one translates a
 * concrete spec structure (tree/object/method definition arrays) into the
 * generic byte offsets the helper expects.
 */
#define get_elements_iterators_entry_above(iters, num_elements, elements, \
					   num_objects_fld, objects_fld, bucket,\
					   min_id) \
	get_elements_above_id((const void **)iters, num_elements, \
			      (const void **)(elements), \
			      offsetof(typeof(**elements), \
				       num_objects_fld), \
			      offsetof(typeof(**elements), objects_fld),\
			      offsetof(typeof(***(*elements)->objects_fld), id),\
			      bucket, min_id)

/* Collect the object defs with the next smallest id above *min_id */
#define get_objects_above_id(iters, num_trees, trees, bucket, min_id) \
	get_elements_iterators_entry_above(iters, num_trees, trees, \
					   num_objects, objects, bucket, min_id)

/* Collect the method defs with the next smallest id above *min_id */
#define get_methods_above_id(method_iters, num_iters, iters, bucket, min_id)\
	get_elements_iterators_entry_above(method_iters, num_iters, iters, \
					   num_methods, methods, bucket, min_id)

/* Collect the attr defs with the next smallest id above *min_id */
#define get_attrs_above_id(attrs_iters, num_iters, iters, bucket, min_id)\
	get_elements_iterators_entry_above(attrs_iters, num_iters, iters, \
					   num_attrs, attrs, bucket, min_id)

/*
 * get_elements_above_id get a few hashes represented by @elements and
 * @num_elements. The hashes fields are described by @num_offset, @data_offset
 * and @id_offset in the same way as required by for_each_element. The function
 * returns an array of @iters, represents an array of elements in the hashes
 * buckets, which their ids are the smallest ids in all hashes but are all
 * larger than the id given by min_id. Elements are only added to the iters
 * array if their id belongs to the bucket @bucket. The number of elements in
 * the returned array is returned by the function. @min_id is also updated to
 * reflect the new min_id of all elements in iters.
 */
static size_t get_elements_above_id(const void **iters,
				    unsigned int num_elements,
				    const void **elements,
				    size_t num_offset,
				    size_t data_offset,
				    size_t id_offset,
				    u16 bucket,
				    short *min_id)
{
	size_t num_iters = 0;
	short min = SHRT_MAX;
	const void *elem;
	int i, j, last_stored = -1;
	unsigned int equal_min = 0;

	for_each_element(elem, i, j, elements, num_elements, num_offset,
			 data_offset) {
		u16 id = *(u16 *)(elem + id_offset);

		/* Only elements in the requested namespace are considered */
		if (GET_NS_ID(id) != bucket)
			continue;

		/*
		 * Skip ids below the caller-supplied floor and ids larger
		 * than the current candidate minimum (min == SHRT_MAX means
		 * no candidate has been found yet).
		 */
		if (GET_ID(id) < *min_id ||
		    (min != SHRT_MAX && GET_ID(id) > min))
			continue;

		/*
		 * We first iterate all hashes represented by @elements. When
		 * we do, we try to find an element @elem in the bucket @bucket
		 * which its id is min. Since we can't ensure the user sorted
		 * the elements in increasing order, we override this hash's
		 * minimal id element we found, if a new element with a smaller
		 * id was just found.
		 */
		iters[last_stored == i ? num_iters - 1 : num_iters++] = elem;
		last_stored = i;
		/* Count how many stored entries share the current minimum */
		if (min == GET_ID(id))
			equal_min++;
		else
			equal_min = 1;
		min = GET_ID(id);
	}

	/*
	 * We only insert to our iters array an element, if its id is smaller
	 * than all previous ids. Therefore, the final iters array is sorted so
	 * that smaller ids are in the end of the array.
	 * Therefore, we need to clean the beginning of the array to make sure
	 * all ids of final elements are equal to min.
	 */
	memmove(iters, iters + num_iters - equal_min, sizeof(*iters) * equal_min);

	*min_id = min;
	return equal_min;
}

/*
 * NOTE(review): this macro is defined again, token-identically, later in this
 * file.  The redefinition is benign in C (identical replacement lists), but
 * one of the two copies could be dropped.
 */
#define find_max_element_entry_id(num_elements, elements, num_objects_fld, \
				  objects_fld, bucket) \
	find_max_element_id(num_elements, (const void **)(elements), \
			    offsetof(typeof(**elements), num_objects_fld), \
			    offsetof(typeof(**elements), objects_fld), \
			    offsetof(typeof(***(*elements)->objects_fld), id),\
			    bucket)

168static short find_max_element_ns_id(unsigned int num_elements,
169 const void **elements,
170 size_t num_offset,
171 size_t data_offset,
172 size_t id_offset)
173{
174 short max_ns = SHRT_MIN;
175 const void *elem;
176 int i, j;
177
178 for_each_element(elem, i, j, elements, num_elements, num_offset,
179 data_offset) {
180 u16 id = *(u16 *)(elem + id_offset);
181
182 if (GET_NS_ID(id) > max_ns)
183 max_ns = GET_NS_ID(id);
184 }
185
186 return max_ns;
187}
188
189static short find_max_element_id(unsigned int num_elements,
190 const void **elements,
191 size_t num_offset,
192 size_t data_offset,
193 size_t id_offset,
194 u16 bucket)
195{
196 short max_id = SHRT_MIN;
197 const void *elem;
198 int i, j;
199
200 for_each_element(elem, i, j, elements, num_elements, num_offset,
201 data_offset) {
202 u16 id = *(u16 *)(elem + id_offset);
203
204 if (GET_NS_ID(id) == bucket &&
205 GET_ID(id) > max_id)
206 max_id = GET_ID(id);
207 }
208 return max_id;
209}
210
/*
 * NOTE(review): find_max_element_entry_id is also defined, token-identically,
 * earlier in this file; the duplicate is benign but redundant.
 */
#define find_max_element_entry_id(num_elements, elements, num_objects_fld, \
				  objects_fld, bucket) \
	find_max_element_id(num_elements, (const void **)(elements), \
			    offsetof(typeof(**elements), num_objects_fld), \
			    offsetof(typeof(**elements), objects_fld), \
			    offsetof(typeof(***(*elements)->objects_fld), id),\
			    bucket)

/* Same offset translation as above, but for the namespace-max helper */
#define find_max_element_ns_entry_id(num_elements, elements, \
				     num_objects_fld, objects_fld) \
	find_max_element_ns_id(num_elements, (const void **)(elements), \
			       offsetof(typeof(**elements), num_objects_fld),\
			       offsetof(typeof(**elements), objects_fld), \
			       offsetof(typeof(***(*elements)->objects_fld), id))

/*
 * find_max_xxxx_ns_id gets a few elements. Each element is described by an id
 * which its upper bits represents a namespace. It finds the max namespace. This
 * could be used in order to know how many buckets do we need to allocate. If no
 * elements exist, SHRT_MIN is returned. Namespace represents here different
 * buckets. The common example is "common bucket" and "driver bucket".
 *
 * find_max_xxxx_id gets a few elements and a bucket. Each element is described
 * by an id which its upper bits represent a namespace. It returns the max id
 * which is contained in the same namespace defined in @bucket. This could be
 * used in order to know how many elements do we need to allocate in the bucket.
 * If no elements exist, SHRT_MIN is returned.
 */

#define find_max_object_id(num_trees, trees, bucket)			\
		find_max_element_entry_id(num_trees, trees, num_objects,\
					  objects, bucket)
#define find_max_object_ns_id(num_trees, trees)			\
		find_max_element_ns_entry_id(num_trees, trees,		\
					     num_objects, objects)

#define find_max_method_id(num_iters, iters, bucket)			\
		find_max_element_entry_id(num_iters, iters, num_methods,\
					  methods, bucket)
#define find_max_method_ns_id(num_iters, iters)			\
		find_max_element_ns_entry_id(num_iters, iters,		\
					     num_methods, methods)

#define find_max_attr_id(num_iters, iters, bucket)			\
		find_max_element_entry_id(num_iters, iters, num_attrs,	\
					  attrs, bucket)
#define find_max_attr_ns_id(num_iters, iters)				\
		find_max_element_ns_entry_id(num_iters, iters,		\
					     num_attrs, attrs)

261static void free_method(struct uverbs_method_spec *method)
262{
263 unsigned int i;
264
265 if (!method)
266 return;
267
268 for (i = 0; i < method->num_buckets; i++)
269 kfree(method->attr_buckets[i]);
270
271 kfree(method);
272}
273
#define IS_ATTR_OBJECT(attr) ((attr)->type == UVERBS_ATTR_TYPE_IDR || \
			      (attr)->type == UVERBS_ATTR_TYPE_FD)

/*
 * This function gets array of size @num_method_defs which contains pointers to
 * method definitions @method_defs. The function allocates an
 * uverbs_method_spec structure and initializes its number of buckets and the
 * elements in buckets to the correct attributes. While doing that, it
 * validates that there aren't conflicts between attributes of different
 * method_defs.
 */
static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_method_def **method_defs,
							  size_t num_method_defs)
{
	int bucket_idx;
	int max_attr_buckets = 0;
	size_t num_attr_buckets = 0;
	int res = 0;
	struct uverbs_method_spec *method = NULL;
	const struct uverbs_attr_def **attr_defs;
	unsigned int num_of_singularities = 0;

	/* A negative result (SHRT_MIN) means no attrs at all -> 0 buckets */
	max_attr_buckets = find_max_attr_ns_id(num_method_defs, method_defs);
	if (max_attr_buckets >= 0)
		num_attr_buckets = max_attr_buckets + 1;

	method = kzalloc(struct_size(method, attr_buckets, num_attr_buckets),
			 GFP_KERNEL);
	if (!method)
		return ERR_PTR(-ENOMEM);

	method->num_buckets = num_attr_buckets;
	/* Scratch array, refilled for each attribute id being merged */
	attr_defs = kcalloc(num_method_defs, sizeof(*attr_defs), GFP_KERNEL);
	if (!attr_defs) {
		res = -ENOMEM;
		goto free_method;
	}
	for (bucket_idx = 0; bucket_idx < method->num_buckets; bucket_idx++) {
		short min_id = SHRT_MIN;
		int attr_max_bucket = 0;
		struct uverbs_attr_spec_hash *hash = NULL;

		attr_max_bucket = find_max_attr_id(num_method_defs, method_defs,
						   bucket_idx);
		if (attr_max_bucket < 0)
			continue;

		/*
		 * One allocation holds the hash header, the attrs array
		 * (padded to a long boundary) and the mandatory-attrs
		 * bitmask that follows it.
		 */
		hash = kzalloc(sizeof(*hash) +
			       ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1),
				     sizeof(long)) +
			       BITS_TO_LONGS(attr_max_bucket + 1) * sizeof(long),
			       GFP_KERNEL);
		if (!hash) {
			res = -ENOMEM;
			goto free;
		}
		hash->num_attrs = attr_max_bucket + 1;
		method->num_child_attrs += hash->num_attrs;
		/* Bitmask lives right after the (aligned) attrs array */
		hash->mandatory_attrs_bitmask = (void *)(hash + 1) +
						 ALIGN(sizeof(*hash->attrs) *
						       (attr_max_bucket + 1),
						       sizeof(long));

		method->attr_buckets[bucket_idx] = hash;

		do {
			size_t num_attr_defs;
			struct uverbs_attr_spec *attr;
			bool attr_obj_with_special_access;

			/* Gather all defs of the next smallest attr id */
			num_attr_defs =
				get_attrs_above_id(attr_defs,
						   num_method_defs,
						   method_defs,
						   bucket_idx,
						   &min_id);
			/* Last attr in bucket */
			if (!num_attr_defs)
				break;

			if (num_attr_defs > 1) {
				/*
				 * We don't allow two attribute definitions for
				 * the same attribute. This is usually a
				 * programmer error. If required, it's better to
				 * just add a new attribute to capture the new
				 * semantics.
				 */
				res = -EEXIST;
				goto free;
			}

			attr = &hash->attrs[min_id];
			memcpy(attr, &attr_defs[0]->attr, sizeof(*attr));

			attr_obj_with_special_access = IS_ATTR_OBJECT(attr) &&
				   (attr->u.obj.access == UVERBS_ACCESS_NEW ||
				    attr->u.obj.access == UVERBS_ACCESS_DESTROY);
			num_of_singularities += !!attr_obj_with_special_access;
			/*
			 * Sanity checks: at most one NEW/DESTROY object attr
			 * per method, such an attr must be mandatory, and an
			 * object attr may not carry the zero_trailing flag.
			 */
			if (WARN(num_of_singularities > 1,
				 "ib_uverbs: Method contains more than one object attr (%d) with new/destroy access\n",
				 min_id) ||
			    WARN(attr_obj_with_special_access &&
				 !attr->mandatory,
				 "ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy access but isn't mandatory\n",
				 min_id) ||
			    WARN(IS_ATTR_OBJECT(attr) &&
				 attr->zero_trailing,
				 "ib_uverbs: Tried to merge attr (%d) but it's an object with min_sz flag\n",
				 min_id)) {
				res = -EINVAL;
				goto free;
			}

			if (attr->mandatory)
				set_bit(min_id, hash->mandatory_attrs_bitmask);
			min_id++;

		} while (1);
	}
	kfree(attr_defs);
	return method;

free:
	kfree(attr_defs);
free_method:
	free_method(method);
	return ERR_PTR(res);
}

404static void free_object(struct uverbs_object_spec *object)
405{
406 unsigned int i, j;
407
408 if (!object)
409 return;
410
411 for (i = 0; i < object->num_buckets; i++) {
412 struct uverbs_method_spec_hash *method_buckets =
413 object->method_buckets[i];
414
415 if (!method_buckets)
416 continue;
417
418 for (j = 0; j < method_buckets->num_methods; j++)
419 free_method(method_buckets->methods[j]);
420
421 kfree(method_buckets);
422 }
423
424 kfree(object);
425}
426
/*
 * This function gets array of size @num_object_defs which contains pointers to
 * object definitions @object_defs. The function allocated an
 * uverbs_object_spec structure and initialize its number of buckets and the
 * elements in buckets to the correct methods. While doing that, it
 * sorts out the correct relationship between conflicts in the same method.
 */
static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_object_def **object_defs,
							    size_t num_object_defs)
{
	u16 bucket_idx;
	int max_method_buckets = 0;
	u16 num_method_buckets = 0;
	int res = 0;
	struct uverbs_object_spec *object = NULL;
	const struct uverbs_method_def **method_defs;

	/* A negative result (SHRT_MIN) means no methods at all -> 0 buckets */
	max_method_buckets = find_max_method_ns_id(num_object_defs, object_defs);
	if (max_method_buckets >= 0)
		num_method_buckets = max_method_buckets + 1;

	object = kzalloc(struct_size(object, method_buckets,
				     num_method_buckets),
			 GFP_KERNEL);
	if (!object)
		return ERR_PTR(-ENOMEM);

	object->num_buckets = num_method_buckets;
	/* Scratch array, refilled for each method id being merged */
	method_defs = kcalloc(num_object_defs, sizeof(*method_defs), GFP_KERNEL);
	if (!method_defs) {
		res = -ENOMEM;
		goto free_object;
	}

	for (bucket_idx = 0; bucket_idx < object->num_buckets; bucket_idx++) {
		short min_id = SHRT_MIN;
		int methods_max_bucket = 0;
		struct uverbs_method_spec_hash *hash = NULL;

		methods_max_bucket = find_max_method_id(num_object_defs, object_defs,
							bucket_idx);
		if (methods_max_bucket < 0)
			continue;

		hash = kzalloc(struct_size(hash, methods,
					   methods_max_bucket + 1),
			       GFP_KERNEL);
		if (!hash) {
			res = -ENOMEM;
			goto free;
		}

		hash->num_methods = methods_max_bucket + 1;
		object->method_buckets[bucket_idx] = hash;

		do {
			size_t num_method_defs;
			struct uverbs_method_spec *method;
			int i;

			/* Gather all defs of the next smallest method id */
			num_method_defs =
				get_methods_above_id(method_defs,
						     num_object_defs,
						     object_defs,
						     bucket_idx,
						     &min_id);
			/* Last method in bucket */
			if (!num_method_defs)
				break;

			method = build_method_with_attrs(method_defs,
							 num_method_defs);
			if (IS_ERR(method)) {
				res = PTR_ERR(method);
				goto free;
			}

			/*
			 * The last tree which is given as an argument to the
			 * merge overrides previous method handler.
			 * Therefore, we iterate backwards and search for the
			 * first handler which != NULL. This also defines the
			 * set of flags used for this handler.
			 */
			for (i = num_method_defs - 1;
			     i >= 0 && !method_defs[i]->handler; i--)
				;
			/* Store first so the error path frees it via free_object */
			hash->methods[min_id++] = method;
			/* NULL handler isn't allowed */
			if (WARN(i < 0,
				 "ib_uverbs: tried to merge function id %d, but all handlers are NULL\n",
				 min_id)) {
				res = -EINVAL;
				goto free;
			}
			method->handler = method_defs[i]->handler;
			method->flags = method_defs[i]->flags;

		} while (1);
	}
	kfree(method_defs);
	return object;

free:
	kfree(method_defs);
free_object:
	free_object(object);
	return ERR_PTR(res);
}

537void uverbs_free_spec_tree(struct uverbs_root_spec *root)
538{
539 unsigned int i, j;
540
541 if (!root)
542 return;
543
544 for (i = 0; i < root->num_buckets; i++) {
545 struct uverbs_object_spec_hash *object_hash =
546 root->object_buckets[i];
547
548 if (!object_hash)
549 continue;
550
551 for (j = 0; j < object_hash->num_objects; j++)
552 free_object(object_hash->objects[j]);
553
554 kfree(object_hash);
555 }
556
557 kfree(root);
558}
118620d3
MB

/*
 * Build a per-device parsing tree (root spec) by merging @num_trees object
 * tree definitions.  For each object id the definitions from all trees are
 * merged; per the in-code comments below, later trees override earlier ones.
 * Returns an ERR_PTR on allocation failure or invalid definitions.
 */
struct uverbs_root_spec *uverbs_alloc_spec_tree(unsigned int num_trees,
						const struct uverbs_object_tree_def **trees)
{
	u16 bucket_idx;
	short max_object_buckets = 0;
	size_t num_objects_buckets = 0;
	struct uverbs_root_spec *root_spec = NULL;
	const struct uverbs_object_def **object_defs;
	int i;
	int res = 0;

	max_object_buckets = find_max_object_ns_id(num_trees, trees);
	/*
	 * Devices which don't want to support ib_uverbs, should just allocate
	 * an empty parsing tree. Every user-space command won't hit any valid
	 * entry in the parsing tree and thus will fail.
	 */
	if (max_object_buckets >= 0)
		num_objects_buckets = max_object_buckets + 1;

	root_spec = kzalloc(struct_size(root_spec, object_buckets,
					num_objects_buckets),
			    GFP_KERNEL);
	if (!root_spec)
		return ERR_PTR(-ENOMEM);
	root_spec->num_buckets = num_objects_buckets;

	/* Scratch array, refilled for each object id being merged */
	object_defs = kcalloc(num_trees, sizeof(*object_defs),
			      GFP_KERNEL);
	if (!object_defs) {
		res = -ENOMEM;
		goto free_root;
	}

	for (bucket_idx = 0; bucket_idx < root_spec->num_buckets; bucket_idx++) {
		short min_id = SHRT_MIN;
		short objects_max_bucket;
		struct uverbs_object_spec_hash *hash = NULL;

		objects_max_bucket = find_max_object_id(num_trees, trees,
							bucket_idx);
		if (objects_max_bucket < 0)
			continue;

		hash = kzalloc(struct_size(hash, objects,
					   objects_max_bucket + 1),
			       GFP_KERNEL);
		if (!hash) {
			res = -ENOMEM;
			goto free;
		}
		hash->num_objects = objects_max_bucket + 1;
		root_spec->object_buckets[bucket_idx] = hash;

		do {
			size_t num_object_defs;
			struct uverbs_object_spec *object;

			/* Gather all defs of the next smallest object id */
			num_object_defs = get_objects_above_id(object_defs,
							       num_trees,
							       trees,
							       bucket_idx,
							       &min_id);
			/* Last object in bucket */
			if (!num_object_defs)
				break;

			object = build_object_with_methods(object_defs,
							   num_object_defs);
			if (IS_ERR(object)) {
				res = PTR_ERR(object);
				goto free;
			}

			/*
			 * The last tree which is given as an argument to the
			 * merge overrides previous object's type_attrs.
			 * Therefore, we iterate backwards and search for the
			 * first type_attrs which != NULL.
			 */
			for (i = num_object_defs - 1;
			     i >= 0 && !object_defs[i]->type_attrs; i--)
				;
			/*
			 * NULL is a valid type_attrs. It means an object we
			 * can't instantiate (like DEVICE).
			 */
			object->type_attrs = i < 0 ? NULL :
				object_defs[i]->type_attrs;

			hash->objects[min_id++] = object;
		} while (1);
	}

	kfree(object_defs);
	return root_spec;

free:
	kfree(object_defs);
free_root:
	uverbs_free_spec_tree(root_spec);
	return ERR_PTR(res);
}