1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
2a1d9b7f 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/module.h>
35#include <linux/string.h>
36#include <linux/errno.h>
9a6b090c 37#include <linux/kernel.h>
38#include <linux/slab.h>
39#include <linux/init.h>
9268f72d 40#include <linux/netdevice.h>
41#include <net/net_namespace.h>
42#include <net/netns/generic.h>
43#include <linux/security.h>
44#include <linux/notifier.h>
324e227e 45#include <linux/hashtable.h>
b2cbae2c 46#include <rdma/rdma_netlink.h>
47#include <rdma/ib_addr.h>
48#include <rdma/ib_cache.h>
49
50#include "core_priv.h"
41eda65c 51#include "restrack.h"
52
53MODULE_AUTHOR("Roland Dreier");
54MODULE_DESCRIPTION("core kernel InfiniBand API");
55MODULE_LICENSE("Dual BSD/GPL");
56
14d3a3b2 57struct workqueue_struct *ib_comp_wq;
f794809a 58struct workqueue_struct *ib_comp_unbound_wq;
59struct workqueue_struct *ib_wq;
60EXPORT_SYMBOL_GPL(ib_wq);
61
62/*
63 * Each of the three rwsem locks (devices, clients, client_data) protects the
64 * xarray of the same name. Specifically it allows the caller to assert that
65 * the MARK will/will not be changing under the lock, and for devices and
66 * clients, that the value in the xarray is still a valid pointer. Change of
67 * the MARK is linked to the object state, so holding the lock and testing the
68 * MARK also asserts that the contained object is in a certain state.
69 *
70 * This is used to build a two stage register/unregister flow where objects
71 * can continue to be in the xarray even though they are still in progress to
72 * register/unregister.
73 *
74 * The xarray itself provides additional locking, and restartable iteration,
75 * which is also relied on.
76 *
77 * Locks should not be nested, with the exception of client_data, which is
78 * allowed to nest under the read side of the other two locks.
79 *
80 * The devices_rwsem also protects the device name list; any change or
81 * assignment of device name must also hold the write side to guarantee unique
82 * names.
83 */
84
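/*
 * Illustrative sketch (not part of the original file): the reader pattern the
 * comment above describes takes the rwsem and then trusts the MARK, e.g.
 *
 *	down_read(&devices_rwsem);
 *	xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED)
 *		do_something(device);
 *	up_read(&devices_rwsem);
 *
 * do_something() is a placeholder; devices_rwsem and DEVICE_REGISTERED are
 * defined just below. Writers only flip the mark under the write side, which
 * is what lets an object stay in the xarray across both registration stages.
 */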
85/*
86 * devices contains devices that have had their names assigned. The
87 * devices may not be registered. Users that care about the registration
88 * status need to call ib_device_try_get() on the device to ensure it is
89 * registered, and keep it registered, for the required duration.
90 *
91 */
92static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
921eab11 93static DECLARE_RWSEM(devices_rwsem);
94#define DEVICE_REGISTERED XA_MARK_1
95
1da177e4 96static LIST_HEAD(client_list);
97#define CLIENT_REGISTERED XA_MARK_1
98static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
921eab11 99static DECLARE_RWSEM(clients_rwsem);
100
101/*
102 * If client_data is registered then the corresponding client must also still
103 * be registered.
104 */
105#define CLIENT_DATA_REGISTERED XA_MARK_1
106
107/**
108 * struct rdma_dev_net - rdma net namespace metadata for a net
109 * @net: Pointer to owner net namespace
110 * @id: xarray id to identify the net namespace.
111 */
112struct rdma_dev_net {
113 possible_net_t net;
114 u32 id;
115};
116
117static unsigned int rdma_dev_net_id;
118
119/*
120 * A list of net namespaces is maintained in an xarray. This is necessary
121 * because we can't get the locking right using the existing net ns list. We
122 * would require an init_net callback after the list is updated.
123 */
124static DEFINE_XARRAY_FLAGS(rdma_nets, XA_FLAGS_ALLOC);
125/*
126 * rwsem to protect accessing the rdma_nets xarray entries.
127 */
128static DECLARE_RWSEM(rdma_nets_rwsem);
129
cb7e0e13 130bool ib_devices_shared_netns = true;
131module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444);
132MODULE_PARM_DESC(netns_mode,
133 "Share device among net namespaces; default=1 (shared)");
134/**
135 * rdma_dev_access_netns() - Return whether a rdma device can be accessed
136 * from a specified net namespace or not.
137 * @device: Pointer to rdma device which needs to be checked
138 * @net: Pointer to net namespace for which access is to be checked
139 *
140 * rdma_dev_access_netns() - Return whether a rdma device can be accessed
141 * from a specified net namespace or not. When
142 * rdma device is in shared mode, it ignores the
143 * net namespace. When rdma device is exclusive
144 * to a net namespace, rdma device net namespace is
145 * checked against the specified one.
146 */
147bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net)
148{
149 return (ib_devices_shared_netns ||
150 net_eq(read_pnet(&dev->coredev.rdma_net), net));
151}
152EXPORT_SYMBOL(rdma_dev_access_netns);
153
154/*
155 * xarray has this behavior where it won't iterate over NULL values stored in
156 * allocated arrays. So we need our own iterator to see all values stored in
157 * the array. This does the same thing as xa_for_each except that it also
158 * returns NULL valued entries if the array is allocating. Simplified to only
159 * work on simple xarrays.
160 */
161static void *xan_find_marked(struct xarray *xa, unsigned long *indexp,
162 xa_mark_t filter)
163{
164 XA_STATE(xas, xa, *indexp);
165 void *entry;
166
167 rcu_read_lock();
168 do {
169 entry = xas_find_marked(&xas, ULONG_MAX, filter);
170 if (xa_is_zero(entry))
171 break;
172 } while (xas_retry(&xas, entry));
173 rcu_read_unlock();
174
175 if (entry) {
176 *indexp = xas.xa_index;
177 if (xa_is_zero(entry))
178 return NULL;
179 return entry;
180 }
181 return XA_ERROR(-ENOENT);
182}
183#define xan_for_each_marked(xa, index, entry, filter) \
184 for (index = 0, entry = xan_find_marked(xa, &(index), filter); \
185 !xa_is_err(entry); \
186 (index)++, entry = xan_find_marked(xa, &(index), filter))
187
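/*
 * Usage sketch (illustrative only; "dev" is an assumed struct ib_device
 * pointer): unlike xa_for_each(), the iterator above also visits allocated
 * slots that currently hold NULL, so callers must tolerate NULL entries:
 *
 *	void *client_data;
 *	unsigned long index;
 *
 *	down_read(&dev->client_data_rwsem);
 *	xan_for_each_marked (&dev->client_data, index, client_data,
 *			     CLIENT_DATA_REGISTERED) {
 *		if (!client_data)
 *			continue;
 *		pr_debug("client %lu has data\n", index);
 *	}
 *	up_read(&dev->client_data_rwsem);
 */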
188/* RCU hash table mapping netdevice pointers to struct ib_port_data */
189static DEFINE_SPINLOCK(ndev_hash_lock);
190static DECLARE_HASHTABLE(ndev_hash, 5);
191
c2261dd7 192static void free_netdevs(struct ib_device *ib_dev);
193static void ib_unregister_work(struct work_struct *work);
194static void __ib_unregister_device(struct ib_device *device);
195static int ib_security_change(struct notifier_block *nb, unsigned long event,
196 void *lsm_data);
197static void ib_policy_change_task(struct work_struct *work);
198static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);
199
200static void __ibdev_printk(const char *level, const struct ib_device *ibdev,
201 struct va_format *vaf)
202{
203 if (ibdev && ibdev->dev.parent)
204 dev_printk_emit(level[1] - '0',
205 ibdev->dev.parent,
206 "%s %s %s: %pV",
207 dev_driver_string(ibdev->dev.parent),
208 dev_name(ibdev->dev.parent),
209 dev_name(&ibdev->dev),
210 vaf);
211 else if (ibdev)
212 printk("%s%s: %pV",
213 level, dev_name(&ibdev->dev), vaf);
214 else
215 printk("%s(NULL ib_device): %pV", level, vaf);
216}
217
218void ibdev_printk(const char *level, const struct ib_device *ibdev,
219 const char *format, ...)
220{
221 struct va_format vaf;
222 va_list args;
223
224 va_start(args, format);
225
226 vaf.fmt = format;
227 vaf.va = &args;
228
229 __ibdev_printk(level, ibdev, &vaf);
230
231 va_end(args);
232}
233EXPORT_SYMBOL(ibdev_printk);
234
235#define define_ibdev_printk_level(func, level) \
236void func(const struct ib_device *ibdev, const char *fmt, ...) \
237{ \
238 struct va_format vaf; \
239 va_list args; \
240 \
241 va_start(args, fmt); \
242 \
243 vaf.fmt = fmt; \
244 vaf.va = &args; \
245 \
246 __ibdev_printk(level, ibdev, &vaf); \
247 \
248 va_end(args); \
249} \
250EXPORT_SYMBOL(func);
251
252define_ibdev_printk_level(ibdev_emerg, KERN_EMERG);
253define_ibdev_printk_level(ibdev_alert, KERN_ALERT);
254define_ibdev_printk_level(ibdev_crit, KERN_CRIT);
255define_ibdev_printk_level(ibdev_err, KERN_ERR);
256define_ibdev_printk_level(ibdev_warn, KERN_WARNING);
257define_ibdev_printk_level(ibdev_notice, KERN_NOTICE);
258define_ibdev_printk_level(ibdev_info, KERN_INFO);
259
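/*
 * Example (sketch, not from the original source): providers and ULPs use
 * these wrappers to get consistently prefixed messages, e.g.
 *
 *	ibdev_warn(ibdev, "port %u: link went down (reason %d)\n",
 *		   port_num, reason);
 *
 * port_num and reason are placeholder variables. When the ib_device has a
 * parent struct device, the message is emitted via dev_printk_emit() with
 * the driver, parent and ibdev names, as implemented in __ibdev_printk().
 */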
260static struct notifier_block ibdev_lsm_nb = {
261 .notifier_call = ib_security_change,
262};
1da177e4 263
264static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
265 struct net *net);
266
267/* Pointer to the RCU head at the start of the ib_port_data array */
268struct ib_port_data_rcu {
269 struct rcu_head rcu_head;
270 struct ib_port_data pdata[];
271};
272
273static int ib_device_check_mandatory(struct ib_device *device)
274{
3023a1e9 275#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x }
276 static const struct {
277 size_t offset;
278 char *name;
279 } mandatory_table[] = {
280 IB_MANDATORY_FUNC(query_device),
281 IB_MANDATORY_FUNC(query_port),
282 IB_MANDATORY_FUNC(query_pkey),
283 IB_MANDATORY_FUNC(alloc_pd),
284 IB_MANDATORY_FUNC(dealloc_pd),
285 IB_MANDATORY_FUNC(create_qp),
286 IB_MANDATORY_FUNC(modify_qp),
287 IB_MANDATORY_FUNC(destroy_qp),
288 IB_MANDATORY_FUNC(post_send),
289 IB_MANDATORY_FUNC(post_recv),
290 IB_MANDATORY_FUNC(create_cq),
291 IB_MANDATORY_FUNC(destroy_cq),
292 IB_MANDATORY_FUNC(poll_cq),
293 IB_MANDATORY_FUNC(req_notify_cq),
294 IB_MANDATORY_FUNC(get_dma_mr),
295 IB_MANDATORY_FUNC(dereg_mr),
296 IB_MANDATORY_FUNC(get_port_immutable)
297 };
298 int i;
299
6780c4fa 300 device->kverbs_provider = true;
9a6b090c 301 for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
302 if (!*(void **) ((void *) &device->ops +
303 mandatory_table[i].offset)) {
304 device->kverbs_provider = false;
305 break;
306 }
307 }
308
309 return 0;
310}
311
f8978bd9 312/*
313 * Caller must perform ib_device_put() to return the device reference count
314 * when ib_device_get_by_index() returns valid device pointer.
f8978bd9 315 */
37eeab55 316struct ib_device *ib_device_get_by_index(const struct net *net, u32 index)
317{
318 struct ib_device *device;
319
921eab11 320 down_read(&devices_rwsem);
0df91bb6 321 device = xa_load(&devices, index);
01b67117 322 if (device) {
323 if (!rdma_dev_access_netns(device, net)) {
324 device = NULL;
325 goto out;
326 }
327
d79af724 328 if (!ib_device_try_get(device))
329 device = NULL;
330 }
37eeab55 331out:
921eab11 332 up_read(&devices_rwsem);
333 return device;
334}
335
336/**
337 * ib_device_put - Release IB device reference
338 * @device: device whose reference to be released
339 *
340 * ib_device_put() releases reference to the IB device to allow it to be
341 * unregistered and eventually freed.
342 */
343void ib_device_put(struct ib_device *device)
344{
345 if (refcount_dec_and_test(&device->refcount))
346 complete(&device->unreg_completion);
347}
d79af724 348EXPORT_SYMBOL(ib_device_put);
01b67117 349
350static struct ib_device *__ib_device_get_by_name(const char *name)
351{
352 struct ib_device *device;
0df91bb6 353 unsigned long index;
1da177e4 354
0df91bb6 355 xa_for_each (&devices, index, device)
896de009 356 if (!strcmp(name, dev_name(&device->dev)))
357 return device;
358
359 return NULL;
360}
361
362/**
363 * ib_device_get_by_name - Find an IB device by name
364 * @name: The name to look for
365 * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
366 *
367 * Find and hold an ib_device by its name. The caller must call
368 * ib_device_put() on the returned pointer.
369 */
370struct ib_device *ib_device_get_by_name(const char *name,
371 enum rdma_driver_id driver_id)
372{
373 struct ib_device *device;
374
375 down_read(&devices_rwsem);
376 device = __ib_device_get_by_name(name);
377 if (device && driver_id != RDMA_DRIVER_UNKNOWN &&
378 device->driver_id != driver_id)
379 device = NULL;
380
381 if (device) {
382 if (!ib_device_try_get(device))
383 device = NULL;
384 }
385 up_read(&devices_rwsem);
386 return device;
387}
388EXPORT_SYMBOL(ib_device_get_by_name);
389
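/*
 * Usage sketch (illustrative; the device name "mlx5_0" is an assumption):
 * a successful lookup must always be paired with ib_device_put():
 *
 *	struct ib_device *dev;
 *
 *	dev = ib_device_get_by_name("mlx5_0", RDMA_DRIVER_UNKNOWN);
 *	if (dev) {
 *		dev_info(&dev->dev, "found %s\n", dev_name(&dev->dev));
 *		ib_device_put(dev);
 *	}
 */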
390static int rename_compat_devs(struct ib_device *device)
391{
392 struct ib_core_device *cdev;
393 unsigned long index;
394 int ret = 0;
395
396 mutex_lock(&device->compat_devs_mutex);
397 xa_for_each (&device->compat_devs, index, cdev) {
398 ret = device_rename(&cdev->dev, dev_name(&device->dev));
399 if (ret) {
400 dev_warn(&cdev->dev,
401 "Fail to rename compatdev to new name %s\n",
402 dev_name(&device->dev));
403 break;
404 }
405 }
406 mutex_unlock(&device->compat_devs_mutex);
407 return ret;
408}
409
410int ib_device_rename(struct ib_device *ibdev, const char *name)
411{
e3593b56 412 int ret;
d21943dd 413
921eab11 414 down_write(&devices_rwsem);
415 if (!strcmp(name, dev_name(&ibdev->dev))) {
416 ret = 0;
417 goto out;
418 }
419
420 if (__ib_device_get_by_name(name)) {
421 ret = -EEXIST;
422 goto out;
423 }
424
425 ret = device_rename(&ibdev->dev, name);
426 if (ret)
427 goto out;
428 strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
4e0f7b90 429 ret = rename_compat_devs(ibdev);
d21943dd 430out:
921eab11 431 up_write(&devices_rwsem);
432 return ret;
433}
434
e349f858 435static int alloc_name(struct ib_device *ibdev, const char *name)
1da177e4 436{
1da177e4 437 struct ib_device *device;
0df91bb6 438 unsigned long index;
439 struct ida inuse;
440 int rc;
441 int i;
442
921eab11 443 lockdep_assert_held_exclusive(&devices_rwsem);
3b88afd3 444 ida_init(&inuse);
0df91bb6 445 xa_for_each (&devices, index, device) {
446 char buf[IB_DEVICE_NAME_MAX];
447
896de009 448 if (sscanf(dev_name(&device->dev), name, &i) != 1)
1da177e4 449 continue;
3b88afd3 450 if (i < 0 || i >= INT_MAX)
451 continue;
452 snprintf(buf, sizeof buf, name, i);
453 if (strcmp(buf, dev_name(&device->dev)) != 0)
454 continue;
455
456 rc = ida_alloc_range(&inuse, i, i, GFP_KERNEL);
457 if (rc < 0)
458 goto out;
459 }
460
461 rc = ida_alloc(&inuse, GFP_KERNEL);
462 if (rc < 0)
463 goto out;
1da177e4 464
465 rc = dev_set_name(&ibdev->dev, name, rc);
466out:
467 ida_destroy(&inuse);
468 return rc;
469}
470
471static void ib_device_release(struct device *device)
472{
473 struct ib_device *dev = container_of(device, struct ib_device, dev);
474
c2261dd7 475 free_netdevs(dev);
652432f3 476 WARN_ON(refcount_read(&dev->refcount));
d45f89d5 477 ib_cache_release_one(dev);
b34b269a 478 ib_security_release_port_pkey_list(dev);
4e0f7b90 479 xa_destroy(&dev->compat_devs);
0df91bb6 480 xa_destroy(&dev->client_data);
481 if (dev->port_data)
482 kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu,
483 pdata[0]),
484 rcu_head);
485 kfree_rcu(dev, rcu_head);
486}
487
488static int ib_device_uevent(struct device *device,
489 struct kobj_uevent_env *env)
490{
896de009 491 if (add_uevent_var(env, "NAME=%s", dev_name(device)))
492 return -ENOMEM;
493
494 /*
495 * It would be nice to pass the node GUID with the event...
496 */
497
498 return 0;
499}
500
501static const void *net_namespace(struct device *d)
502{
503 struct ib_core_device *coredev =
504 container_of(d, struct ib_core_device, dev);
505
506 return read_pnet(&coredev->rdma_net);
507}
508
509static struct class ib_class = {
510 .name = "infiniband",
511 .dev_release = ib_device_release,
512 .dev_uevent = ib_device_uevent,
513 .ns_type = &net_ns_type_operations,
514 .namespace = net_namespace,
515};
516
cebe556b 517static void rdma_init_coredev(struct ib_core_device *coredev,
4e0f7b90 518 struct ib_device *dev, struct net *net)
519{
520 /* This BUILD_BUG_ON is intended to catch layout change
521 * of union of ib_core_device and device.
522 * dev must be the first element as ib_core and provider
523 * drivers use it. Adding anything in ib_core_device before
524 * device will break this assumption.
525 */
526 BUILD_BUG_ON(offsetof(struct ib_device, coredev.dev) !=
527 offsetof(struct ib_device, dev));
528
529 coredev->dev.class = &ib_class;
530 coredev->dev.groups = dev->groups;
531 device_initialize(&coredev->dev);
532 coredev->owner = dev;
533 INIT_LIST_HEAD(&coredev->port_list);
4e0f7b90 534 write_pnet(&coredev->rdma_net, net);
535}
536
1da177e4 537/**
459cc69f 538 * _ib_alloc_device - allocate an IB device struct
539 * @size:size of structure to allocate
540 *
541 * Low-level drivers should use ib_alloc_device() to allocate &struct
542 * ib_device. @size is the size of the structure to be allocated,
543 * including any private data used by the low-level driver.
544 * ib_dealloc_device() must be used to free structures allocated with
545 * ib_alloc_device().
546 */
459cc69f 547struct ib_device *_ib_alloc_device(size_t size)
1da177e4 548{
549 struct ib_device *device;
550
551 if (WARN_ON(size < sizeof(struct ib_device)))
552 return NULL;
553
554 device = kzalloc(size, GFP_KERNEL);
555 if (!device)
556 return NULL;
557
558 if (rdma_restrack_init(device)) {
559 kfree(device);
560 return NULL;
561 }
02d8883f 562
5f8f5499 563 device->groups[0] = &ib_dev_attr_group;
4e0f7b90 564 rdma_init_coredev(&device->coredev, device, &init_net);
55aeed06 565
566 INIT_LIST_HEAD(&device->event_handler_list);
567 spin_lock_init(&device->event_handler_lock);
d0899892 568 mutex_init(&device->unregistration_lock);
569 /*
570 * client_data needs to be allocated with XA_FLAGS_ALLOC because we don't
571 * want our mark to be destroyed if the user stores NULL in the client data.
572 */
573 xa_init_flags(&device->client_data, XA_FLAGS_ALLOC);
921eab11 574 init_rwsem(&device->client_data_rwsem);
575 xa_init_flags(&device->compat_devs, XA_FLAGS_ALLOC);
576 mutex_init(&device->compat_devs_mutex);
01b67117 577 init_completion(&device->unreg_completion);
d0899892 578 INIT_WORK(&device->unregistration_work, ib_unregister_work);
1da177e4 579
55aeed06 580 return device;
1da177e4 581}
459cc69f 582EXPORT_SYMBOL(_ib_alloc_device);
583
584/**
585 * ib_dealloc_device - free an IB device struct
586 * @device:structure to free
587 *
588 * Free a structure allocated with ib_alloc_device().
589 */
590void ib_dealloc_device(struct ib_device *device)
591{
592 if (device->ops.dealloc_driver)
593 device->ops.dealloc_driver(device);
594
595 /*
596 * ib_unregister_driver() requires all devices to remain in the xarray
597 * while their ops are callable. The last op we call is dealloc_driver
598 * above. This is needed to create a fence on op callbacks prior to
599 * allowing the driver module to unload.
600 */
601 down_write(&devices_rwsem);
602 if (xa_load(&devices, device->index) == device)
603 xa_erase(&devices, device->index);
604 up_write(&devices_rwsem);
605
606 /* Expedite releasing netdev references */
607 free_netdevs(device);
608
4e0f7b90 609 WARN_ON(!xa_empty(&device->compat_devs));
0df91bb6 610 WARN_ON(!xa_empty(&device->client_data));
652432f3 611 WARN_ON(refcount_read(&device->refcount));
0ad699c0 612 rdma_restrack_clean(device);
e155755e 613 /* Balances with device_initialize */
924b8900 614 put_device(&device->dev);
615}
616EXPORT_SYMBOL(ib_dealloc_device);
617
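/*
 * Lifecycle sketch (illustrative, not from this file): providers normally
 * embed struct ib_device in a private structure and allocate it through the
 * ib_alloc_device() wrapper from <rdma/ib_verbs.h>; "struct my_dev" and its
 * "ibdev" member are assumed names:
 *
 *	struct my_dev *mdev;
 *
 *	mdev = ib_alloc_device(my_dev, ibdev);
 *	if (!mdev)
 *		return -ENOMEM;
 *
 * On error paths before ib_register_device() succeeds the driver frees it
 * again with ib_dealloc_device(&mdev->ibdev).
 */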
618/*
619 * add_client_context() and remove_client_context() must be safe against
620 * parallel calls on the same device - registration/unregistration of both the
621 * device and client can be occurring in parallel.
622 *
623 * The routines need to be a fence, any caller must not return until the add
624 * or remove is fully completed.
625 */
626static int add_client_context(struct ib_device *device,
627 struct ib_client *client)
1da177e4 628{
921eab11 629 int ret = 0;
1da177e4 630
6780c4fa 631 if (!device->kverbs_provider && !client->no_kverbs_req)
632 return 0;
633
634 down_write(&device->client_data_rwsem);
635 /*
636 * Another caller to add_client_context got here first and has already
637 * completely initialized context.
638 */
639 if (xa_get_mark(&device->client_data, client->client_id,
640 CLIENT_DATA_REGISTERED))
641 goto out;
642
643 ret = xa_err(xa_store(&device->client_data, client->client_id, NULL,
644 GFP_KERNEL));
645 if (ret)
646 goto out;
647 downgrade_write(&device->client_data_rwsem);
648 if (client->add)
649 client->add(device);
650
651 /* Readers shall not see a client until add has been completed */
652 xa_set_mark(&device->client_data, client->client_id,
653 CLIENT_DATA_REGISTERED);
654 up_read(&device->client_data_rwsem);
655 return 0;
656
657out:
658 up_write(&device->client_data_rwsem);
659 return ret;
660}
661
662static void remove_client_context(struct ib_device *device,
663 unsigned int client_id)
664{
665 struct ib_client *client;
666 void *client_data;
6780c4fa 667
668 down_write(&device->client_data_rwsem);
669 if (!xa_get_mark(&device->client_data, client_id,
670 CLIENT_DATA_REGISTERED)) {
671 up_write(&device->client_data_rwsem);
672 return;
673 }
674 client_data = xa_load(&device->client_data, client_id);
675 xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
676 client = xa_load(&clients, client_id);
677 downgrade_write(&device->client_data_rwsem);
1da177e4 678
679 /*
680 * Notice we cannot be holding any exclusive locks when calling the
681 * remove callback as the remove callback can recurse back into any
682 * public functions in this module and thus try for any locks those
683 * functions take.
684 *
685 * For this reason clients and drivers should not call the
686 * unregistration functions while holding any locks.
687 *
688 * It is tempting to drop the client_data_rwsem too, but this is required
689 * to ensure that unregister_client does not return until all clients
690 * are completely unregistered, which is required to avoid module
691 * unloading races.
692 */
693 if (client->remove)
694 client->remove(device, client_data);
695
696 xa_erase(&device->client_data, client_id);
697 up_read(&device->client_data_rwsem);
698}
699
c2261dd7 700static int alloc_port_data(struct ib_device *device)
5eb620c8 701{
324e227e 702 struct ib_port_data_rcu *pdata_rcu;
ea1075ed 703 unsigned int port;
704
705 if (device->port_data)
706 return 0;
707
708 /* This can only be called once the physical port range is defined */
709 if (WARN_ON(!device->phys_port_cnt))
710 return -EINVAL;
7738613e 711
712 /*
713 * device->port_data is indexed directly by the port number to make
714 * access to this data as efficient as possible.
715 *
716 * Therefore port_data is declared as a 1 based array with potential
717 * empty slots at the beginning.
7738613e 718 */
719 pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
720 rdma_end_port(device) + 1),
721 GFP_KERNEL);
722 if (!pdata_rcu)
55aeed06 723 return -ENOMEM;
724 /*
725 * The rcu_head is put in front of the port data array and the stored
726 * pointer is adjusted since we never need to see that member until
727 * kfree_rcu.
728 */
729 device->port_data = pdata_rcu->pdata;
5eb620c8 730
ea1075ed 731 rdma_for_each_port (device, port) {
732 struct ib_port_data *pdata = &device->port_data[port];
733
324e227e 734 pdata->ib_dev = device;
735 spin_lock_init(&pdata->pkey_list_lock);
736 INIT_LIST_HEAD(&pdata->pkey_list);
c2261dd7 737 spin_lock_init(&pdata->netdev_lock);
324e227e 738 INIT_HLIST_NODE(&pdata->ndev_hash_link);
739 }
740 return 0;
741}
742
743static int verify_immutable(const struct ib_device *dev, u8 port)
744{
745 return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
746 rdma_max_mad_size(dev, port) != 0);
747}
748
749static int setup_port_data(struct ib_device *device)
750{
751 unsigned int port;
752 int ret;
753
754 ret = alloc_port_data(device);
755 if (ret)
756 return ret;
757
758 rdma_for_each_port (device, port) {
759 struct ib_port_data *pdata = &device->port_data[port];
760
761 ret = device->ops.get_port_immutable(device, port,
762 &pdata->immutable);
5eb620c8 763 if (ret)
55aeed06 764 return ret;
337877a4 765
766 if (verify_immutable(device, port))
767 return -EINVAL;
5eb620c8 768 }
55aeed06 769 return 0;
770}
771
9abb0d1b 772void ib_get_device_fw_str(struct ib_device *dev, char *str)
5fa76c20 773{
774 if (dev->ops.get_dev_fw_str)
775 dev->ops.get_dev_fw_str(dev, str);
776 else
777 str[0] = '\0';
778}
779EXPORT_SYMBOL(ib_get_device_fw_str);
780
781static void ib_policy_change_task(struct work_struct *work)
782{
783 struct ib_device *dev;
0df91bb6 784 unsigned long index;
8f408ab6 785
921eab11 786 down_read(&devices_rwsem);
0df91bb6 787 xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
ea1075ed 788 unsigned int i;
8f408ab6 789
ea1075ed 790 rdma_for_each_port (dev, i) {
791 u64 sp;
792 int ret = ib_get_cached_subnet_prefix(dev,
793 i,
794 &sp);
795
796 WARN_ONCE(ret,
797 "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
798 ret);
799 if (!ret)
800 ib_security_cache_change(dev, i, sp);
801 }
802 }
921eab11 803 up_read(&devices_rwsem);
804}
805
806static int ib_security_change(struct notifier_block *nb, unsigned long event,
807 void *lsm_data)
808{
809 if (event != LSM_POLICY_CHANGE)
810 return NOTIFY_DONE;
811
812 schedule_work(&ib_policy_change_work);
c66f6741 813 ib_mad_agent_security_change();
814
815 return NOTIFY_OK;
816}
817
818static void compatdev_release(struct device *dev)
819{
820 struct ib_core_device *cdev =
821 container_of(dev, struct ib_core_device, dev);
822
823 kfree(cdev);
824}
825
826static int add_one_compat_dev(struct ib_device *device,
827 struct rdma_dev_net *rnet)
828{
829 struct ib_core_device *cdev;
830 int ret;
831
2b34c558 832 lockdep_assert_held(&rdma_nets_rwsem);
833 if (!ib_devices_shared_netns)
834 return 0;
835
836 /*
837 * Create and add compat device in all namespaces other than where it
838 * is currently bound to.
839 */
840 if (net_eq(read_pnet(&rnet->net),
841 read_pnet(&device->coredev.rdma_net)))
842 return 0;
843
844 /*
845 * The first of init_net() or ib_register_device() to take the
846 * compat_devs_mutex wins and gets to add the device. Others will wait
847 * for completion here.
848 */
849 mutex_lock(&device->compat_devs_mutex);
850 cdev = xa_load(&device->compat_devs, rnet->id);
851 if (cdev) {
852 ret = 0;
853 goto done;
854 }
855 ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL);
856 if (ret)
857 goto done;
858
859 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
860 if (!cdev) {
861 ret = -ENOMEM;
862 goto cdev_err;
863 }
864
865 cdev->dev.parent = device->dev.parent;
866 rdma_init_coredev(cdev, device, read_pnet(&rnet->net));
867 cdev->dev.release = compatdev_release;
868 dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
869
870 ret = device_add(&cdev->dev);
871 if (ret)
872 goto add_err;
eb15c78b 873 ret = ib_setup_port_attrs(cdev);
874 if (ret)
875 goto port_err;
876
877 ret = xa_err(xa_store(&device->compat_devs, rnet->id,
878 cdev, GFP_KERNEL));
879 if (ret)
880 goto insert_err;
881
882 mutex_unlock(&device->compat_devs_mutex);
883 return 0;
884
885insert_err:
886 ib_free_port_attrs(cdev);
887port_err:
888 device_del(&cdev->dev);
889add_err:
890 put_device(&cdev->dev);
891cdev_err:
892 xa_release(&device->compat_devs, rnet->id);
893done:
894 mutex_unlock(&device->compat_devs_mutex);
895 return ret;
896}
897
898static void remove_one_compat_dev(struct ib_device *device, u32 id)
899{
900 struct ib_core_device *cdev;
901
902 mutex_lock(&device->compat_devs_mutex);
903 cdev = xa_erase(&device->compat_devs, id);
904 mutex_unlock(&device->compat_devs_mutex);
905 if (cdev) {
5417783e 906 ib_free_port_attrs(cdev);
907 device_del(&cdev->dev);
908 put_device(&cdev->dev);
909 }
910}
911
912static void remove_compat_devs(struct ib_device *device)
913{
914 struct ib_core_device *cdev;
915 unsigned long index;
916
917 xa_for_each (&device->compat_devs, index, cdev)
918 remove_one_compat_dev(device, index);
919}
920
921static int add_compat_devs(struct ib_device *device)
922{
923 struct rdma_dev_net *rnet;
924 unsigned long index;
925 int ret = 0;
926
927 lockdep_assert_held(&devices_rwsem);
928
929 down_read(&rdma_nets_rwsem);
930 xa_for_each (&rdma_nets, index, rnet) {
931 ret = add_one_compat_dev(device, rnet);
932 if (ret)
933 break;
934 }
935 up_read(&rdma_nets_rwsem);
936 return ret;
937}
938
939static void remove_all_compat_devs(void)
940{
941 struct ib_compat_device *cdev;
942 struct ib_device *dev;
943 unsigned long index;
944
945 down_read(&devices_rwsem);
946 xa_for_each (&devices, index, dev) {
947 unsigned long c_index = 0;
948
949 /* Hold nets_rwsem so that any other thread modifying this
950 * system param can sync with this thread.
951 */
952 down_read(&rdma_nets_rwsem);
953 xa_for_each (&dev->compat_devs, c_index, cdev)
954 remove_one_compat_dev(dev, c_index);
955 up_read(&rdma_nets_rwsem);
956 }
957 up_read(&devices_rwsem);
958}
959
960static int add_all_compat_devs(void)
961{
962 struct rdma_dev_net *rnet;
963 struct ib_device *dev;
964 unsigned long index;
965 int ret = 0;
966
967 down_read(&devices_rwsem);
968 xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
969 unsigned long net_index = 0;
970
971 /* Hold nets_rwsem so that any other thread modifying this
972 * system param can sync with this thread.
973 */
974 down_read(&rdma_nets_rwsem);
975 xa_for_each (&rdma_nets, net_index, rnet) {
976 ret = add_one_compat_dev(dev, rnet);
977 if (ret)
978 break;
979 }
980 up_read(&rdma_nets_rwsem);
981 }
982 up_read(&devices_rwsem);
983 if (ret)
984 remove_all_compat_devs();
985 return ret;
986}
987
988int rdma_compatdev_set(u8 enable)
989{
990 struct rdma_dev_net *rnet;
991 unsigned long index;
992 int ret = 0;
993
994 down_write(&rdma_nets_rwsem);
995 if (ib_devices_shared_netns == enable) {
996 up_write(&rdma_nets_rwsem);
997 return 0;
998 }
999
1000 /* enable/disable of compat devices is not supported
1001 * when more than default init_net exists.
1002 */
1003 xa_for_each (&rdma_nets, index, rnet) {
1004 ret++;
1005 break;
1006 }
1007 if (!ret)
1008 ib_devices_shared_netns = enable;
1009 up_write(&rdma_nets_rwsem);
1010 if (ret)
1011 return -EBUSY;
1012
1013 if (enable)
1014 ret = add_all_compat_devs();
1015 else
1016 remove_all_compat_devs();
1017 return ret;
1018}
1019
1020static void rdma_dev_exit_net(struct net *net)
1021{
1022 struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
1023 struct ib_device *dev;
1024 unsigned long index;
1025 int ret;
1026
1027 down_write(&rdma_nets_rwsem);
1028 /*
1029 * Prevent the ID from being re-used and hide the id from xa_for_each.
1030 */
1031 ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL));
1032 WARN_ON(ret);
1033 up_write(&rdma_nets_rwsem);
1034
1035 down_read(&devices_rwsem);
1036 xa_for_each (&devices, index, dev) {
1037 get_device(&dev->dev);
1038 /*
1039 * Release the devices_rwsem so that the potentially blocking
1040 * device_del doesn't hold the devices_rwsem for too long.
1041 */
1042 up_read(&devices_rwsem);
1043
1044 remove_one_compat_dev(dev, rnet->id);
1045
1046 /*
1047 * If the real device is in the NS then move it back to init.
1048 */
1049 rdma_dev_change_netns(dev, net, &init_net);
1050
1051 put_device(&dev->dev);
1052 down_read(&devices_rwsem);
1053 }
1054 up_read(&devices_rwsem);
1055
1056 xa_erase(&rdma_nets, rnet->id);
1057}
1058
1059static __net_init int rdma_dev_init_net(struct net *net)
1060{
1061 struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
1062 unsigned long index;
1063 struct ib_device *dev;
1064 int ret;
1065
1066 /* No need to create any compat devices in default init_net. */
1067 if (net_eq(net, &init_net))
1068 return 0;
1069
1070 write_pnet(&rnet->net, net);
1071
1072 ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
1073 if (ret)
1074 return ret;
1075
1076 down_read(&devices_rwsem);
1077 xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
1078 /* Hold nets_rwsem so that netlink command cannot change
1079 * system configuration for device sharing mode.
1080 */
1081 down_read(&rdma_nets_rwsem);
4e0f7b90 1082 ret = add_one_compat_dev(dev, rnet);
2b34c558 1083 up_read(&rdma_nets_rwsem);
1084 if (ret)
1085 break;
1086 }
1087 up_read(&devices_rwsem);
1088
1089 if (ret)
1090 rdma_dev_exit_net(net);
1091
1092 return ret;
1093}
1094
0df91bb6 1095/*
1096 * Assign the unique string device name and the unique device index. This is
1097 * undone by ib_dealloc_device.
ecc82c53 1098 */
0df91bb6 1099static int assign_name(struct ib_device *device, const char *name)
ecc82c53 1100{
1101 static u32 last_id;
1102 int ret;
ecc82c53 1103
921eab11 1104 down_write(&devices_rwsem);
1105 /* Assign a unique name to the device */
1106 if (strchr(name, '%'))
1107 ret = alloc_name(device, name);
1108 else
1109 ret = dev_set_name(&device->dev, name);
1110 if (ret)
1111 goto out;
1112
1113 if (__ib_device_get_by_name(dev_name(&device->dev))) {
1114 ret = -ENFILE;
1115 goto out;
1116 }
1117 strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
ecc82c53 1118
1119 ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b,
1120 &last_id, GFP_KERNEL);
1121 if (ret > 0)
1122 ret = 0;
921eab11 1123
0df91bb6 1124out:
921eab11 1125 up_write(&devices_rwsem);
1126 return ret;
1127}
1128
548cb4fb 1129static void setup_dma_device(struct ib_device *device)
1da177e4 1130{
99db9494
BVA
1131 struct device *parent = device->dev.parent;
1132
1133 WARN_ON_ONCE(device->dma_device);
1134 if (device->dev.dma_ops) {
1135 /*
1136 * The caller provided custom DMA operations. Copy the
1137 * DMA-related fields that are used by e.g. dma_alloc_coherent()
1138 * into device->dev.
1139 */
1140 device->dma_device = &device->dev;
1141 if (!device->dev.dma_mask) {
1142 if (parent)
1143 device->dev.dma_mask = parent->dma_mask;
1144 else
1145 WARN_ON_ONCE(true);
1146 }
1147 if (!device->dev.coherent_dma_mask) {
1148 if (parent)
1149 device->dev.coherent_dma_mask =
1150 parent->coherent_dma_mask;
1151 else
1152 WARN_ON_ONCE(true);
1153 }
1154 } else {
1155 /*
1156 * The caller did not provide custom DMA operations. Use the
1157 * DMA mapping operations of the parent device.
1158 */
02ee9da3 1159 WARN_ON_ONCE(!parent);
1160 device->dma_device = parent;
1161 }
1162 /* Setup default max segment size for all IB devices */
1163 dma_set_max_seg_size(device->dma_device, SZ_2G);
1164
548cb4fb 1165}
1da177e4 1166
1167/*
1168 * setup_device() allocates memory and sets up data that requires calling the
1169 * device ops; this is the only reason these actions are not done during
1170 * ib_alloc_device. It is undone by ib_dealloc_device().
1171 */
548cb4fb
PP
1172static int setup_device(struct ib_device *device)
1173{
1174 struct ib_udata uhw = {.outlen = 0, .inlen = 0};
1175 int ret;
1da177e4 1176
1177 setup_dma_device(device);
1178
1179 ret = ib_device_check_mandatory(device);
1180 if (ret)
1181 return ret;
1da177e4 1182
8ceb1357 1183 ret = setup_port_data(device);
5eb620c8 1184 if (ret) {
8ceb1357 1185 dev_warn(&device->dev, "Couldn't create per-port data\n");
1186 return ret;
1187 }
1188
1189 memset(&device->attrs, 0, sizeof(device->attrs));
3023a1e9 1190 ret = device->ops.query_device(device, &device->attrs, &uhw);
1191 if (ret) {
1192 dev_warn(&device->dev,
1193 "Couldn't query the device attributes\n");
d45f89d5 1194 return ret;
1195 }
1196
d45f89d5 1197 return 0;
1198}
1199
1200static void disable_device(struct ib_device *device)
1201{
1202 struct ib_client *client;
1203
1204 WARN_ON(!refcount_read(&device->refcount));
1205
1206 down_write(&devices_rwsem);
1207 xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
1208 up_write(&devices_rwsem);
1209
1210 down_read(&clients_rwsem);
1211 list_for_each_entry_reverse(client, &client_list, list)
1212 remove_client_context(device, client->client_id);
1213 up_read(&clients_rwsem);
1214
1215 /* Pairs with refcount_set in enable_device */
1216 ib_device_put(device);
1217 wait_for_completion(&device->unreg_completion);
c2261dd7 1218
1219 /*
1220 * compat devices must be removed after device refcount drops to zero.
1221 * Otherwise init_net() may add more compatdevs after removing compat
1222 * devices and before device is disabled.
1223 */
1224 remove_compat_devs(device);
1225}
1226
1227/*
1228 * An enabled device is visible to all clients and to all the public facing
1229 * APIs that return a device pointer. This always returns with a new get, even
1230 * if it fails.
921eab11 1231 */
d0899892 1232static int enable_device_and_get(struct ib_device *device)
1233{
1234 struct ib_client *client;
1235 unsigned long index;
d0899892 1236 int ret = 0;
921eab11 1237
1238 /*
1239 * One ref belongs to the xa and the other belongs to this
1240 * thread. This is needed to guard against parallel unregistration.
1241 */
1242 refcount_set(&device->refcount, 2);
1243 down_write(&devices_rwsem);
1244 xa_set_mark(&devices, device->index, DEVICE_REGISTERED);
1245
1246 /*
1247 * By using downgrade_write() we ensure that no other thread can clear
1248 * DEVICE_REGISTERED while we are completing the client setup.
1249 */
1250 downgrade_write(&devices_rwsem);
921eab11 1251
1252 if (device->ops.enable_driver) {
1253 ret = device->ops.enable_driver(device);
1254 if (ret)
1255 goto out;
1256 }
1257
1258 down_read(&clients_rwsem);
1259 xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
1260 ret = add_client_context(device, client);
1261 if (ret)
1262 break;
1263 }
1264 up_read(&clients_rwsem);
1265 if (!ret)
1266 ret = add_compat_devs(device);
ca22354b 1267out:
1268 up_read(&devices_rwsem);
1269 return ret;
1270}
1271
1272/**
1273 * ib_register_device - Register an IB device with IB core
1274 * @device:Device to register
1275 *
1276 * Low-level drivers use ib_register_device() to register their
1277 * devices with the IB core. All registered clients will receive a
1278 * callback for each device that is added. @device must be allocated
1279 * with ib_alloc_device().
1280 *
1281 * If the driver uses ops.dealloc_driver and calls any ib_unregister_device()
1282 * asynchronously then the device pointer may become freed as soon as this
1283 * function returns.
548cb4fb 1284 */
ea4baf7f 1285int ib_register_device(struct ib_device *device, const char *name)
1286{
1287 int ret;
548cb4fb 1288
1289 ret = assign_name(device, name);
1290 if (ret)
921eab11 1291 return ret;
1292
1293 ret = setup_device(device);
1294 if (ret)
d0899892 1295 return ret;
03db3a2d 1296
1297 ret = ib_cache_setup_one(device);
1298 if (ret) {
1299 dev_warn(&device->dev,
1300 "Couldn't set up InfiniBand P_Key/GID cache\n");
d0899892 1301 return ret;
1302 }
1303
7527a7b1 1304 ib_device_register_rdmacg(device);
3e153a93 1305
1306 /*
1307 * Ensure that ADD uevent is not fired because it
1308 * is too early and the device is not initialized yet.
1309 */
1310 dev_set_uevent_suppress(&device->dev, true);
1311 ret = device_add(&device->dev);
1312 if (ret)
1313 goto cg_cleanup;
1314
ea4baf7f 1315 ret = ib_device_register_sysfs(device);
1da177e4 1316 if (ret) {
1317 dev_warn(&device->dev,
1318 "Couldn't register device with driver model\n");
5f8f5499 1319 goto dev_cleanup;
1320 }
1321
d0899892 1322 ret = enable_device_and_get(device);
1323 dev_set_uevent_suppress(&device->dev, false);
1324 /* Mark for userspace that device is ready */
1325 kobject_uevent(&device->dev.kobj, KOBJ_ADD);
1326 if (ret) {
1327 void (*dealloc_fn)(struct ib_device *);
1328
1329 /*
1330 * If we hit this error flow then we don't want to
1331 * automatically dealloc the device since the caller is
1332 * expected to call ib_dealloc_device() after
1333 * ib_register_device() fails. This is tricky due to the
1334 * possibility for a parallel unregistration along with this
1335 * error flow. Since we have a refcount here we know any
1336 * parallel flow is stopped in disable_device and will see the
1337 * NULL pointers, causing the responsibility to
1338 * ib_dealloc_device() to revert back to this thread.
1339 */
1340 dealloc_fn = device->ops.dealloc_driver;
1341 device->ops.dealloc_driver = NULL;
1342 ib_device_put(device);
1343 __ib_unregister_device(device);
1344 device->ops.dealloc_driver = dealloc_fn;
1345 return ret;
1346 }
1347 ib_device_put(device);
1da177e4 1348
1349 return 0;
1350
1351dev_cleanup:
1352 device_del(&device->dev);
2fb4f4ea 1353cg_cleanup:
e7a5b4aa 1354 dev_set_uevent_suppress(&device->dev, false);
2fb4f4ea 1355 ib_device_unregister_rdmacg(device);
d45f89d5 1356 ib_cache_cleanup_one(device);
1357 return ret;
1358}
1359EXPORT_SYMBOL(ib_register_device);
1360
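/*
 * Probe-path sketch (illustrative; "mdev" and the "mydrv%d" name string are
 * assumptions, not from this file): after allocating the device a provider
 * typically does
 *
 *	ret = ib_register_device(&mdev->ibdev, "mydrv%d");
 *	if (ret) {
 *		ib_dealloc_device(&mdev->ibdev);
 *		return ret;
 *	}
 *
 * A '%' in the name is expanded by alloc_name() above to the lowest free
 * index, producing names such as "mydrv0", "mydrv1", and so on.
 */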
1361/* Callers must hold a get on the device. */
1362static void __ib_unregister_device(struct ib_device *ib_dev)
1363{
1364 /*
1365 * We have a registration lock so that all the calls to unregister are
1366 * fully fenced, once any unregister returns the device is truly
1367 * unregistered even if multiple callers are unregistering it at the
1368 * same time. This also interacts with the registration flow and
1369 * provides sane semantics if register and unregister are racing.
1370 */
1371 mutex_lock(&ib_dev->unregistration_lock);
1372 if (!refcount_read(&ib_dev->refcount))
1373 goto out;
1374
1375 disable_device(ib_dev);
1376
1377 /* Expedite removing unregistered pointers from the hash table */
1378 free_netdevs(ib_dev);
1379
1380 ib_device_unregister_sysfs(ib_dev);
1381 device_del(&ib_dev->dev);
1382 ib_device_unregister_rdmacg(ib_dev);
1383 ib_cache_cleanup_one(ib_dev);
1384
1385 /*
1386 * Drivers using the new flow may not call ib_dealloc_device except
1387 * in error unwind prior to registration success.
1388 */
1389 if (ib_dev->ops.dealloc_driver) {
1390 WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1);
1391 ib_dealloc_device(ib_dev);
1392 }
1393out:
1394 mutex_unlock(&ib_dev->unregistration_lock);
1395}
1396
1397/**
1398 * ib_unregister_device - Unregister an IB device
1399 * @ib_dev: The device to unregister
1400 *
1401 * Unregister an IB device. All clients will receive a remove callback.
1402 *
1403 * Callers should call this routine only once, and protect against races with
1404 * registration. Typically it should only be called as part of a remove
1405 * callback in an implementation of driver core's struct device_driver and
1406 * related.
1407 *
1408 * If ops.dealloc_driver is used then ib_dev will be freed upon return from
1409 * this function.
1da177e4 1410 */
d0899892 1411void ib_unregister_device(struct ib_device *ib_dev)
1da177e4 1412{
1413 get_device(&ib_dev->dev);
1414 __ib_unregister_device(ib_dev);
1415 put_device(&ib_dev->dev);
1416}
1417EXPORT_SYMBOL(ib_unregister_device);
1418
1419/**
1420 * ib_unregister_device_and_put - Unregister a device while holding a 'get'
1421 * @ib_dev: The device to unregister
1422 *
1423 * This is the same as ib_unregister_device(), except it includes an internal
1424 * ib_device_put() that should match a 'get' obtained by the caller.
1425 *
1426 * It is safe to call this routine concurrently from multiple threads while
1427 * holding the 'get'. When the function returns the device is fully
1428 * unregistered.
1429 *
1430 * Drivers using this flow MUST use the driver_unregister callback to clean up
1431 * their resources associated with the device and dealloc it.
1432 */
1433void ib_unregister_device_and_put(struct ib_device *ib_dev)
1434{
1435 WARN_ON(!ib_dev->ops.dealloc_driver);
1436 get_device(&ib_dev->dev);
1437 ib_device_put(ib_dev);
1438 __ib_unregister_device(ib_dev);
1439 put_device(&ib_dev->dev);
1440}
1441EXPORT_SYMBOL(ib_unregister_device_and_put);
1442
1443/**
1444 * ib_unregister_driver - Unregister all IB devices for a driver
1445 * @driver_id: The driver to unregister
1446 *
1447 * This implements a fence for device unregistration. It only returns once all
1448 * devices associated with the driver_id have fully completed their
1449 * unregistration and returned from ib_unregister_device*().
1450 *
1451 * If devices are not yet unregistered it goes ahead and starts unregistering
1452 * them.
1453 *
1454 * This does not block creation of new devices with the given driver_id, that
1455 * is the responsibility of the caller.
1456 */
1457void ib_unregister_driver(enum rdma_driver_id driver_id)
1458{
1459 struct ib_device *ib_dev;
1460 unsigned long index;
1461
1462 down_read(&devices_rwsem);
1463 xa_for_each (&devices, index, ib_dev) {
1464 if (ib_dev->driver_id != driver_id)
1465 continue;
1466
1467 get_device(&ib_dev->dev);
1468 up_read(&devices_rwsem);
1469
1470 WARN_ON(!ib_dev->ops.dealloc_driver);
1471 __ib_unregister_device(ib_dev);
1472
1473 put_device(&ib_dev->dev);
1474 down_read(&devices_rwsem);
1475 }
1476 up_read(&devices_rwsem);
1477}
1478EXPORT_SYMBOL(ib_unregister_driver);
1479
1480static void ib_unregister_work(struct work_struct *work)
1481{
1482 struct ib_device *ib_dev =
1483 container_of(work, struct ib_device, unregistration_work);
1484
1485 __ib_unregister_device(ib_dev);
1486 put_device(&ib_dev->dev);
1487}
1488
1489/**
1490 * ib_unregister_device_queued - Unregister a device using a work queue
1491 * @ib_dev: The device to unregister
1492 *
1493 * This schedules an asynchronous unregistration using a WQ for the device. A
1494 * driver should use this to avoid holding locks while doing unregistration,
1495 * such as holding the RTNL lock.
1496 *
1497 * Drivers using this API must use ib_unregister_driver before module unload
1498 * to ensure that all scheduled unregistrations have completed.
1499 */
1500void ib_unregister_device_queued(struct ib_device *ib_dev)
1501{
1502 WARN_ON(!refcount_read(&ib_dev->refcount));
1503 WARN_ON(!ib_dev->ops.dealloc_driver);
1504 get_device(&ib_dev->dev);
1505 if (!queue_work(system_unbound_wq, &ib_dev->unregistration_work))
1506 put_device(&ib_dev->dev);
1507}
1508EXPORT_SYMBOL(ib_unregister_device_queued);
1509
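/*
 * Sketch (illustrative; my_dev and RDMA_DRIVER_MY are hypothetical names): a
 * driver that must not block, e.g. inside a netdev notifier, defers the
 * unregister and then fences everything at module unload:
 *
 *	static void my_hot_removal(struct my_dev *mdev)
 *	{
 *		ib_unregister_device_queued(&mdev->ibdev);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		ib_unregister_driver(RDMA_DRIVER_MY);
 *	}
 */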
1510/*
1511 * The caller must pass in a device that has the kref held and the refcount
1512 * released. If the device is in cur_net and still registered then it is moved
1513 * into net.
1514 */
1515static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
1516 struct net *net)
1517{
1518 int ret2 = -EINVAL;
1519 int ret;
1520
1521 mutex_lock(&device->unregistration_lock);
1522
1523 /*
1524 * If a device is not under ib_device_get() or if the unregistration_lock
1525 * is not held, the namespace can be changed, or it can be unregistered.
1526 * Check again under the lock.
1527 */
1528 if (refcount_read(&device->refcount) == 0 ||
1529 !net_eq(cur_net, read_pnet(&device->coredev.rdma_net))) {
1530 ret = -ENODEV;
1531 goto out;
1532 }
1533
1534 kobject_uevent(&device->dev.kobj, KOBJ_REMOVE);
1535 disable_device(device);
1536
1537 /*
1538 * At this point no one can be using the device, so it is safe to
1539 * change the namespace.
1540 */
1541 write_pnet(&device->coredev.rdma_net, net);
1542
2e5b8a01 1543 down_read(&devices_rwsem);
1544 /*
1545 * Currently rdma devices are system wide unique. So the device name
1546 * is guaranteed free in the new namespace. Publish the new namespace
1547 * at the sysfs level.
1548 */
1549 ret = device_rename(&device->dev, dev_name(&device->dev));
1550 up_read(&devices_rwsem);
1551 if (ret) {
1552 dev_warn(&device->dev,
1553 "%s: Couldn't rename device after namespace change\n",
1554 __func__);
1555 /* Try and put things back and re-enable the device */
1556 write_pnet(&device->coredev.rdma_net, cur_net);
1557 }
1558
1559 ret2 = enable_device_and_get(device);
2e5b8a01 1560 if (ret2) {
1561 /*
1562 * This shouldn't really happen, but if it does, let the user
1563 * retry at later point. So don't disable the device.
1564 */
1565 dev_warn(&device->dev,
1566 "%s: Couldn't re-enable device after namespace change\n",
1567 __func__);
2e5b8a01 1568 }
decbc7a6 1569 kobject_uevent(&device->dev.kobj, KOBJ_ADD);
2e5b8a01 1570
1571 ib_device_put(device);
1572out:
1573 mutex_unlock(&device->unregistration_lock);
1574 if (ret)
1575 return ret;
1576 return ret2;
1577}
1578
1579int ib_device_set_netns_put(struct sk_buff *skb,
1580 struct ib_device *dev, u32 ns_fd)
1581{
1582 struct net *net;
1583 int ret;
1584
1585 net = get_net_ns_by_fd(ns_fd);
1586 if (IS_ERR(net)) {
1587 ret = PTR_ERR(net);
1588 goto net_err;
1589 }
1590
1591 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
1592 ret = -EPERM;
1593 goto ns_err;
1594 }
1595
1596 /*
1597 * Currently supported only for those providers which support
1598 * disassociation and don't do port specific sysfs init. Once a
1599 * port_cleanup infrastructure is implemented, this limitation will be
1600 * removed.
1601 */
1602 if (!dev->ops.disassociate_ucontext || dev->ops.init_port ||
1603 ib_devices_shared_netns) {
1604 ret = -EOPNOTSUPP;
1605 goto ns_err;
1606 }
1607
1608 get_device(&dev->dev);
1609 ib_device_put(dev);
1610 ret = rdma_dev_change_netns(dev, current->nsproxy->net_ns, net);
1611 put_device(&dev->dev);
1612
1613 put_net(net);
1614 return ret;
1615
1616ns_err:
1617 put_net(net);
1618net_err:
1619 ib_device_put(dev);
1620 return ret;
1621}
1622
1623static struct pernet_operations rdma_dev_net_ops = {
1624 .init = rdma_dev_init_net,
1625 .exit = rdma_dev_exit_net,
1626 .id = &rdma_dev_net_id,
1627 .size = sizeof(struct rdma_dev_net),
1628};
1629
1630static int assign_client_id(struct ib_client *client)
1631{
1632 int ret;
1633
921eab11 1634 down_write(&clients_rwsem);
1635 /*
1636 * The add/remove callbacks must be called in FIFO/LIFO order. To
1637 * achieve this we assign client_ids so they are sorted in
1638 * registration order, and retain a linked list we can reverse iterate
1639 * to get the LIFO order. The extra linked list can go away if xarray
1640 * learns to reverse iterate.
1641 */
ea295481 1642 if (list_empty(&client_list)) {
e59178d8 1643 client->client_id = 0;
1644 } else {
1645 struct ib_client *last;
1646
1647 last = list_last_entry(&client_list, struct ib_client, list);
1648 client->client_id = last->client_id + 1;
4512acd0 1649 }
ea295481 1650 ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
1651 if (ret)
1652 goto out;
1653
1654 xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
1655 list_add_tail(&client->list, &client_list);
1656
e59178d8 1657out:
921eab11 1658 up_write(&clients_rwsem);
1659 return ret;
1660}
1661
1662/**
1663 * ib_register_client - Register an IB client
1664 * @client:Client to register
1665 *
1666 * Upper level users of the IB drivers can use ib_register_client() to
1667 * register callbacks for IB device addition and removal. When an IB
1668 * device is added, each registered client's add method will be called
1669 * (in the order the clients were registered), and when a device is
1670 * removed, each client's remove method will be called (in the reverse
1671 * order that clients were registered). In addition, when
1672 * ib_register_client() is called, the client will receive an add
1673 * callback for all devices already registered.
1674 */
1675int ib_register_client(struct ib_client *client)
1676{
1677 struct ib_device *device;
0df91bb6 1678 unsigned long index;
e59178d8 1679 int ret;
1da177e4 1680
e59178d8 1681 ret = assign_client_id(client);
921eab11 1682 if (ret)
e59178d8 1683 return ret;
1da177e4 1684
1685 down_read(&devices_rwsem);
1686 xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
1687 ret = add_client_context(device, client);
1688 if (ret) {
1689 up_read(&devices_rwsem);
1690 ib_unregister_client(client);
1691 return ret;
1692 }
1693 }
1694 up_read(&devices_rwsem);
1695 return 0;
1696}
1697EXPORT_SYMBOL(ib_register_client);
1698
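/*
 * Client skeleton (sketch, not part of this file; the my_* names and the
 * assumed per-device "struct my_state" are illustrative only):
 *
 *	static struct ib_client my_client;
 *
 *	static void my_add(struct ib_device *device)
 *	{
 *		struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *		if (!st)
 *			return;
 *		ib_set_client_data(device, &my_client, st);
 *	}
 *
 *	static void my_remove(struct ib_device *device, void *client_data)
 *	{
 *		kfree(client_data);
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name	= "my_client",
 *		.add	= my_add,
 *		.remove	= my_remove,
 *	};
 *
 * ib_register_client(&my_client) is then called from module init and
 * ib_unregister_client(&my_client) from module exit.
 */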
1699/**
1700 * ib_unregister_client - Unregister an IB client
1701 * @client:Client to unregister
1702 *
1703 * Upper level users use ib_unregister_client() to remove their client
1704 * registration. When ib_unregister_client() is called, the client
1705 * will receive a remove callback for each IB device still registered.
1706 *
1707 * This is a full fence, once it returns no client callbacks will be called,
1708 * or are running in another thread.
1709 */
1710void ib_unregister_client(struct ib_client *client)
1711{
1da177e4 1712 struct ib_device *device;
0df91bb6 1713 unsigned long index;
1da177e4 1714
921eab11 1715 down_write(&clients_rwsem);
e59178d8 1716 xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
1717 up_write(&clients_rwsem);
1718 /*
1719 * Every device still known must be serialized to make sure we are
1720 * done with the client callbacks before we return.
1721 */
1722 down_read(&devices_rwsem);
1723 xa_for_each (&devices, index, device)
1724 remove_client_context(device, client->client_id);
1725 up_read(&devices_rwsem);
1da177e4 1726
921eab11 1727 down_write(&clients_rwsem);
1728 list_del(&client->list);
1729 xa_erase(&clients, client->client_id);
921eab11 1730 up_write(&clients_rwsem);
1da177e4
LT
1731}
1732EXPORT_SYMBOL(ib_unregister_client);
1733
1da177e4 1734/**
9cd330d3 1735 * ib_set_client_data - Set IB client context
1736 * @device:Device to set context for
1737 * @client:Client to set context for
1738 * @data:Context to set
1739 *
1740 * ib_set_client_data() sets client context data that can be retrieved with
1741 * ib_get_client_data(). This can only be called while the client is
1742 * registered to the device, once the ib_client remove() callback returns this
1743 * cannot be called.
1744 */
1745void ib_set_client_data(struct ib_device *device, struct ib_client *client,
1746 void *data)
1747{
0df91bb6 1748 void *rc;
1da177e4 1749
1750 if (WARN_ON(IS_ERR(data)))
1751 data = NULL;
1da177e4 1752
1753 rc = xa_store(&device->client_data, client->client_id, data,
1754 GFP_KERNEL);
1755 WARN_ON(xa_is_err(rc));
1756}
1757EXPORT_SYMBOL(ib_set_client_data);
1758
1759/**
1760 * ib_register_event_handler - Register an IB event handler
1761 * @event_handler:Handler to register
1762 *
1763 * ib_register_event_handler() registers an event handler that will be
1764 * called back when asynchronous IB events occur (as defined in
1765 * chapter 11 of the InfiniBand Architecture Specification). This
1766 * callback may occur in interrupt context.
1767 */
dcc9881e 1768void ib_register_event_handler(struct ib_event_handler *event_handler)
1769{
1770 unsigned long flags;
1771
1772 spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
1773 list_add_tail(&event_handler->list,
1774 &event_handler->device->event_handler_list);
1775 spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
1776}
1777EXPORT_SYMBOL(ib_register_event_handler);
1778
1779/**
1780 * ib_unregister_event_handler - Unregister an event handler
1781 * @event_handler:Handler to unregister
1782 *
1783 * Unregister an event handler registered with
1784 * ib_register_event_handler().
1785 */
dcc9881e 1786void ib_unregister_event_handler(struct ib_event_handler *event_handler)
1787{
1788 unsigned long flags;
1789
1790 spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
1791 list_del(&event_handler->list);
1792 spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
1793}
1794EXPORT_SYMBOL(ib_unregister_event_handler);
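
/*
 * Editor's illustration (not part of the original file): registering an
 * asynchronous event handler with INIT_IB_EVENT_HANDLER() from
 * <rdma/ib_verbs.h>.  The handler may run in interrupt context, so it must
 * not sleep.  struct my_state and my_event_handler() are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
#include <rdma/ib_verbs.h>

struct my_state {
	struct ib_event_handler event_handler;
};

static void my_event_handler(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_PORT_ERR)
		pr_info("port %u changed state on %s\n",
			event->element.port_num,
			dev_name(&event->device->dev));
}

static void my_start(struct my_state *st, struct ib_device *device)
{
	INIT_IB_EVENT_HANDLER(&st->event_handler, device, my_event_handler);
	ib_register_event_handler(&st->event_handler);
}

static void my_stop(struct my_state *st)
{
	ib_unregister_event_handler(&st->event_handler);
}
#endif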
1795
1796/**
1797 * ib_dispatch_event - Dispatch an asynchronous event
1798 * @event:Event to dispatch
1799 *
1800 * Low-level drivers must call ib_dispatch_event() to dispatch the
1801 * event to all registered event handlers when an asynchronous event
1802 * occurs.
1803 */
1804void ib_dispatch_event(struct ib_event *event)
1805{
1806 unsigned long flags;
1807 struct ib_event_handler *handler;
1808
1809 spin_lock_irqsave(&event->device->event_handler_lock, flags);
1810
1811 list_for_each_entry(handler, &event->device->event_handler_list, list)
1812 handler->handler(handler, event);
1813
1814 spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
1815}
1816EXPORT_SYMBOL(ib_dispatch_event);
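
/*
 * Editor's illustration (not part of the original file): how a low-level
 * driver would report a port coming up.  The event is delivered synchronously
 * to every handler registered on the device, in the caller's context.
 * struct my_dev and my_report_port_active() are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
#include <rdma/ib_verbs.h>

struct my_dev {
	struct ib_device ibdev;
};

static void my_report_port_active(struct my_dev *mdev, u8 port_num)
{
	struct ib_event ibev = {};

	ibev.device = &mdev->ibdev;
	ibev.element.port_num = port_num;
	ibev.event = IB_EVENT_PORT_ACTIVE;
	ib_dispatch_event(&ibev);
}
#endif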
1817
1818/**
1819 * ib_query_port - Query IB port attributes
1820 * @device:Device to query
1821 * @port_num:Port number to query
1822 * @port_attr:Port attributes
1823 *
1824 * ib_query_port() returns the attributes of a port through the
1825 * @port_attr pointer.
1826 */
1827int ib_query_port(struct ib_device *device,
1828 u8 port_num,
1829 struct ib_port_attr *port_attr)
1830{
1831 union ib_gid gid;
1832 int err;
1833
24dc831b 1834 if (!rdma_is_port_valid(device, port_num))
1835 return -EINVAL;
1836
fad61ad4 1837 memset(port_attr, 0, sizeof(*port_attr));
3023a1e9 1838 err = device->ops.query_port(device, port_num, port_attr);
1839 if (err || port_attr->subnet_prefix)
1840 return err;
1841
1842 if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
1843 return 0;
1844
3023a1e9 1845 err = device->ops.query_gid(device, port_num, 0, &gid);
1846 if (err)
1847 return err;
1848
1849 port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
1850 return 0;
1851}
1852EXPORT_SYMBOL(ib_query_port);
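
/*
 * Editor's illustration (not part of the original file): querying port
 * attributes.  For IB ports the subnet prefix is filled in from GID index 0
 * when the driver does not report it, as implemented above.
 * my_port_is_active() is a hypothetical helper.
 */
#if 0	/* illustrative sketch, not compiled */
#include <rdma/ib_verbs.h>

static bool my_port_is_active(struct ib_device *device, u8 port_num)
{
	struct ib_port_attr attr;

	if (ib_query_port(device, port_num, &attr))
		return false;

	pr_debug("%s port %u: lid 0x%x, state %d\n",
		 dev_name(&device->dev), port_num, attr.lid, attr.state);
	return attr.state == IB_PORT_ACTIVE;
}
#endif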
1853
1854static void add_ndev_hash(struct ib_port_data *pdata)
1855{
1856 unsigned long flags;
1857
1858 might_sleep();
1859
1860 spin_lock_irqsave(&ndev_hash_lock, flags);
1861 if (hash_hashed(&pdata->ndev_hash_link)) {
1862 hash_del_rcu(&pdata->ndev_hash_link);
1863 spin_unlock_irqrestore(&ndev_hash_lock, flags);
1864 /*
1865 * We cannot do hash_add_rcu after a hash_del_rcu until the
 1866 * grace period has elapsed.
1867 */
1868 synchronize_rcu();
1869 spin_lock_irqsave(&ndev_hash_lock, flags);
1870 }
1871 if (pdata->netdev)
1872 hash_add_rcu(ndev_hash, &pdata->ndev_hash_link,
1873 (uintptr_t)pdata->netdev);
1874 spin_unlock_irqrestore(&ndev_hash_lock, flags);
1875}
1876
1877/**
1878 * ib_device_set_netdev - Associate the ib_dev with an underlying net_device
1879 * @ib_dev: Device to modify
1880 * @ndev: net_device to affiliate, may be NULL
1881 * @port: IB port the net_device is connected to
1882 *
1883 * Drivers should use this to link the ib_device to a netdev so the netdev
1884 * shows up in interfaces like ib_enum_roce_netdev. Only one netdev may be
1885 * affiliated with any port.
1886 *
1887 * The caller must ensure that the given ndev is not unregistered or
1888 * unregistering, and that either the ib_device is unregistered or
1889 * ib_device_set_netdev() is called with NULL when the ndev sends a
1890 * NETDEV_UNREGISTER event.
1891 */
1892int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
1893 unsigned int port)
1894{
1895 struct net_device *old_ndev;
1896 struct ib_port_data *pdata;
1897 unsigned long flags;
1898 int ret;
1899
1900 /*
1901 * Drivers wish to call this before ib_register_driver, so we have to
 1902 * set up the port data early.
1903 */
1904 ret = alloc_port_data(ib_dev);
1905 if (ret)
1906 return ret;
1907
1908 if (!rdma_is_port_valid(ib_dev, port))
1909 return -EINVAL;
1910
1911 pdata = &ib_dev->port_data[port];
1912 spin_lock_irqsave(&pdata->netdev_lock, flags);
1913 old_ndev = rcu_dereference_protected(
1914 pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
1915 if (old_ndev == ndev) {
1916 spin_unlock_irqrestore(&pdata->netdev_lock, flags);
1917 return 0;
1918 }
1919
1920 if (ndev)
1921 dev_hold(ndev);
324e227e 1922 rcu_assign_pointer(pdata->netdev, ndev);
1923 spin_unlock_irqrestore(&pdata->netdev_lock, flags);
1924
324e227e 1925 add_ndev_hash(pdata);
1926 if (old_ndev)
1927 dev_put(old_ndev);
1928
1929 return 0;
1930}
1931EXPORT_SYMBOL(ib_device_set_netdev);
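
/*
 * Editor's illustration (not part of the original file): a RoCE driver
 * pairing its netdev with port 1 at probe time and detaching it again from a
 * NETDEV_UNREGISTER notifier, as the kernel-doc above requires.  struct
 * my_dev and both helpers are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
#include <rdma/ib_verbs.h>

struct my_dev {
	struct ib_device ibdev;
	struct net_device *netdev;
};

static int my_probe_netdev(struct my_dev *mdev)
{
	/* may legitimately be called before ib_register_device() */
	return ib_device_set_netdev(&mdev->ibdev, mdev->netdev, 1);
}

static void my_netdev_unregister(struct my_dev *mdev)
{
	/* drop the affiliation so the core releases its netdev reference */
	ib_device_set_netdev(&mdev->ibdev, NULL, 1);
}
#endif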
1932
1933static void free_netdevs(struct ib_device *ib_dev)
1934{
1935 unsigned long flags;
1936 unsigned int port;
1937
1938 rdma_for_each_port (ib_dev, port) {
1939 struct ib_port_data *pdata = &ib_dev->port_data[port];
324e227e 1940 struct net_device *ndev;
1941
1942 spin_lock_irqsave(&pdata->netdev_lock, flags);
1943 ndev = rcu_dereference_protected(
1944 pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
1945 if (ndev) {
1946 spin_lock(&ndev_hash_lock);
1947 hash_del_rcu(&pdata->ndev_hash_link);
1948 spin_unlock(&ndev_hash_lock);
1949
1950 /*
1951 * If this is the last dev_put there is still a
1952 * synchronize_rcu before the netdev is kfreed, so we
1953 * can continue to rely on unlocked pointer
1954 * comparisons after the put
1955 */
1956 rcu_assign_pointer(pdata->netdev, NULL);
1957 dev_put(ndev);
1958 }
1959 spin_unlock_irqrestore(&pdata->netdev_lock, flags);
1960 }
1961}
1962
1963struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
1964 unsigned int port)
1965{
1966 struct ib_port_data *pdata;
1967 struct net_device *res;
1968
1969 if (!rdma_is_port_valid(ib_dev, port))
1970 return NULL;
1971
1972 pdata = &ib_dev->port_data[port];
1973
1974 /*
1975 * New drivers should use ib_device_set_netdev() not the legacy
1976 * get_netdev().
1977 */
1978 if (ib_dev->ops.get_netdev)
1979 res = ib_dev->ops.get_netdev(ib_dev, port);
1980 else {
1981 spin_lock(&pdata->netdev_lock);
1982 res = rcu_dereference_protected(
1983 pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
1984 if (res)
1985 dev_hold(res);
1986 spin_unlock(&pdata->netdev_lock);
1987 }
1988
1989 /*
 1990 * If we are starting to unregister, expedite things by preventing
1991 * propagation of an unregistering netdev.
1992 */
1993 if (res && res->reg_state != NETREG_REGISTERED) {
1994 dev_put(res);
1995 return NULL;
1996 }
1997
1998 return res;
1999}
2000
2001/**
2002 * ib_device_get_by_netdev - Find an IB device associated with a netdev
2003 * @ndev: netdev to locate
2004 * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
2005 *
2006 * Find and hold an ib_device that is associated with a netdev via
2007 * ib_device_set_netdev(). The caller must call ib_device_put() on the
2008 * returned pointer.
2009 */
2010struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
2011 enum rdma_driver_id driver_id)
2012{
2013 struct ib_device *res = NULL;
2014 struct ib_port_data *cur;
2015
2016 rcu_read_lock();
2017 hash_for_each_possible_rcu (ndev_hash, cur, ndev_hash_link,
2018 (uintptr_t)ndev) {
2019 if (rcu_access_pointer(cur->netdev) == ndev &&
2020 (driver_id == RDMA_DRIVER_UNKNOWN ||
2021 cur->ib_dev->driver_id == driver_id) &&
2022 ib_device_try_get(cur->ib_dev)) {
2023 res = cur->ib_dev;
2024 break;
2025 }
2026 }
2027 rcu_read_unlock();
2028
2029 return res;
2030}
2031EXPORT_SYMBOL(ib_device_get_by_netdev);
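
/*
 * Editor's illustration (not part of the original file): resolving a netdev
 * to its RDMA device.  The lookup returns a held reference, so
 * ib_device_put() must be called once the caller is done with the device.
 * my_inspect() is a hypothetical caller.
 */
#if 0	/* illustrative sketch, not compiled */
#include <rdma/ib_verbs.h>

static void my_inspect(struct net_device *ndev)
{
	struct ib_device *ibdev;

	ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
	if (!ibdev)
		return;

	pr_info("%s is backed by %s\n", ndev->name, dev_name(&ibdev->dev));
	ib_device_put(ibdev);
}
#endif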
2032
2033/**
2034 * ib_enum_roce_netdev - enumerate all RoCE ports
 2035 * @ib_dev: IB device we want to query
2036 * @filter: Should we call the callback?
2037 * @filter_cookie: Cookie passed to filter
 2038 * @cb: Callback to call for each RoCE port found
2039 * @cookie: Cookie passed back to the callback
2040 *
 2041 * Enumerates all of the physical RoCE ports of ib_dev
 2042 * that are related to a netdevice and calls callback() on each
 2043 * port for which the filter() function returns non zero.
2044 */
2045void ib_enum_roce_netdev(struct ib_device *ib_dev,
2046 roce_netdev_filter filter,
2047 void *filter_cookie,
2048 roce_netdev_callback cb,
2049 void *cookie)
2050{
ea1075ed 2051 unsigned int port;
03db3a2d 2052
ea1075ed 2053 rdma_for_each_port (ib_dev, port)
03db3a2d 2054 if (rdma_protocol_roce(ib_dev, port)) {
2055 struct net_device *idev =
2056 ib_device_get_netdev(ib_dev, port);
2057
2058 if (filter(ib_dev, port, idev, filter_cookie))
2059 cb(ib_dev, port, idev, cookie);
2060
2061 if (idev)
2062 dev_put(idev);
2063 }
2064}
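
/*
 * Editor's illustration (not part of the original file): a filter/callback
 * pair for the enumeration helper above, loosely modelled on how the
 * in-kernel RoCE GID management uses it.  The filter decides per port (here:
 * only ports whose affiliated netdev matches the cookie); the callback then
 * runs for those ports.  Both function names are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static int my_filter(struct ib_device *device, u8 port,
		     struct net_device *rdma_ndev, void *cookie)
{
	return rdma_ndev == cookie;
}

static void my_cb(struct ib_device *device, u8 port,
		  struct net_device *rdma_ndev, void *cookie)
{
	pr_info("%s port %u is bound to %s\n",
		dev_name(&device->dev), port, rdma_ndev->name);
}

/* ib_enum_roce_netdev(ib_dev, my_filter, target_ndev, my_cb, NULL); */
#endif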
2065
2066/**
2067 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
2068 * @filter: Should we call the callback?
2069 * @filter_cookie: Cookie passed to filter
 2070 * @cb: Callback to call for each RoCE port found
2071 * @cookie: Cookie passed back to the callback
2072 *
 2073 * Enumerates the physical ports of all RoCE devices that are related
 2074 * to netdevices and calls callback() on each port for which the
 2075 * filter() function returns non zero.
2076 */
2077void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
2078 void *filter_cookie,
2079 roce_netdev_callback cb,
2080 void *cookie)
2081{
2082 struct ib_device *dev;
0df91bb6 2083 unsigned long index;
03db3a2d 2084
921eab11 2085 down_read(&devices_rwsem);
0df91bb6 2086 xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED)
03db3a2d 2087 ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
921eab11 2088 up_read(&devices_rwsem);
2089}
2090
2091/**
2092 * ib_enum_all_devs - enumerate all ib_devices
2093 * @cb: Callback to call for each found ib_device
2094 *
2095 * Enumerates all ib_devices and calls callback() on each device.
2096 */
2097int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
2098 struct netlink_callback *cb)
2099{
0df91bb6 2100 unsigned long index;
2101 struct ib_device *dev;
2102 unsigned int idx = 0;
2103 int ret = 0;
2104
921eab11 2105 down_read(&devices_rwsem);
0df91bb6 2106 xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
2107 if (!rdma_dev_access_netns(dev, sock_net(skb->sk)))
2108 continue;
2109
2110 ret = nldev_cb(dev, skb, cb, idx);
2111 if (ret)
2112 break;
2113 idx++;
2114 }
921eab11 2115 up_read(&devices_rwsem);
8030c835 2116 return ret;
2117}
2118
2119/**
2120 * ib_query_pkey - Get P_Key table entry
2121 * @device:Device to query
2122 * @port_num:Port number to query
2123 * @index:P_Key table index to query
2124 * @pkey:Returned P_Key
2125 *
2126 * ib_query_pkey() fetches the specified P_Key table entry.
2127 */
2128int ib_query_pkey(struct ib_device *device,
2129 u8 port_num, u16 index, u16 *pkey)
2130{
2131 if (!rdma_is_port_valid(device, port_num))
2132 return -EINVAL;
2133
3023a1e9 2134 return device->ops.query_pkey(device, port_num, index, pkey);
2135}
2136EXPORT_SYMBOL(ib_query_pkey);
2137
2138/**
2139 * ib_modify_device - Change IB device attributes
2140 * @device:Device to modify
2141 * @device_modify_mask:Mask of attributes to change
2142 * @device_modify:New attribute values
2143 *
2144 * ib_modify_device() changes a device's attributes as specified by
2145 * the @device_modify_mask and @device_modify structure.
2146 */
2147int ib_modify_device(struct ib_device *device,
2148 int device_modify_mask,
2149 struct ib_device_modify *device_modify)
2150{
3023a1e9 2151 if (!device->ops.modify_device)
2152 return -ENOSYS;
2153
2154 return device->ops.modify_device(device, device_modify_mask,
2155 device_modify);
2156}
2157EXPORT_SYMBOL(ib_modify_device);
2158
2159/**
2160 * ib_modify_port - Modifies the attributes for the specified port.
2161 * @device: The device to modify.
2162 * @port_num: The number of the port to modify.
2163 * @port_modify_mask: Mask used to specify which attributes of the port
2164 * to change.
2165 * @port_modify: New attribute values for the port.
2166 *
2167 * ib_modify_port() changes a port's attributes as specified by the
2168 * @port_modify_mask and @port_modify structure.
2169 */
2170int ib_modify_port(struct ib_device *device,
2171 u8 port_num, int port_modify_mask,
2172 struct ib_port_modify *port_modify)
2173{
61e0962d 2174 int rc;
10e1b54b 2175
24dc831b 2176 if (!rdma_is_port_valid(device, port_num))
2177 return -EINVAL;
2178
2179 if (device->ops.modify_port)
2180 rc = device->ops.modify_port(device, port_num,
2181 port_modify_mask,
2182 port_modify);
2183 else
2184 rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
2185 return rc;
2186}
2187EXPORT_SYMBOL(ib_modify_port);
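
/*
 * Editor's illustration (not part of the original file): advertising a
 * capability bit in the port's capability mask, similar to what the in-tree
 * CM does when it binds to a port.  Passing 0 as port_modify_mask is the
 * common pattern when only the capability mask fields are used.
 * my_advertise_cm() is a hypothetical helper.
 */
#if 0	/* illustrative sketch, not compiled */
#include <rdma/ib_verbs.h>

static int my_advertise_cm(struct ib_device *device, u8 port_num)
{
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP,
	};

	return ib_modify_port(device, port_num, 0, &port_modify);
}
#endif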
2188
2189/**
2190 * ib_find_gid - Returns the port number and GID table index where
dbb12562 2191 * a specified GID value occurs. It searches only for the IB link layer.
2192 * @device: The device to query.
2193 * @gid: The GID value to search for.
2194 * @port_num: The port number of the device where the GID value was found.
2195 * @index: The index into the GID table where the GID was found. This
2196 * parameter may be NULL.
2197 */
2198int ib_find_gid(struct ib_device *device, union ib_gid *gid,
b26c4a11 2199 u8 *port_num, u16 *index)
2200{
2201 union ib_gid tmp_gid;
2202 unsigned int port;
2203 int ret, i;
5eb620c8 2204
ea1075ed 2205 rdma_for_each_port (device, port) {
22d24f75 2206 if (!rdma_protocol_ib(device, port))
2207 continue;
2208
2209 for (i = 0; i < device->port_data[port].immutable.gid_tbl_len;
2210 ++i) {
1dfce294 2211 ret = rdma_query_gid(device, port, i, &tmp_gid);
2212 if (ret)
2213 return ret;
2214 if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
2215 *port_num = port;
2216 if (index)
2217 *index = i;
2218 return 0;
2219 }
2220 }
2221 }
2222
2223 return -ENOENT;
2224}
2225EXPORT_SYMBOL(ib_find_gid);
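
/*
 * Editor's illustration (not part of the original file): locating which IB
 * port owns a GID.  Only IB link-layer ports are searched, as noted in the
 * kernel-doc above.  my_locate_gid() is a hypothetical helper.
 */
#if 0	/* illustrative sketch, not compiled */
#include <rdma/ib_verbs.h>

static int my_locate_gid(struct ib_device *device, const union ib_gid *gid)
{
	union ib_gid tmp = *gid;
	u8 port_num;
	u16 index;
	int ret;

	ret = ib_find_gid(device, &tmp, &port_num, &index);
	if (ret)
		return ret;	/* -ENOENT if the GID is not on any IB port */

	pr_info("GID found on port %u at index %u\n", port_num, index);
	return 0;
}
#endif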
2226
2227/**
2228 * ib_find_pkey - Returns the PKey table index where a specified
2229 * PKey value occurs.
2230 * @device: The device to query.
2231 * @port_num: The port number of the device to search for the PKey.
2232 * @pkey: The PKey value to search for.
2233 * @index: The index into the PKey table where the PKey was found.
2234 */
2235int ib_find_pkey(struct ib_device *device,
2236 u8 port_num, u16 pkey, u16 *index)
2237{
2238 int ret, i;
2239 u16 tmp_pkey;
ff7166c4 2240 int partial_ix = -1;
5eb620c8 2241
2242 for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len;
2243 ++i) {
2244 ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
2245 if (ret)
2246 return ret;
36026ecc 2247 if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
 2248 			/* If there is a full-member pkey, take it. */
2249 if (tmp_pkey & 0x8000) {
2250 *index = i;
2251 return 0;
2252 }
2253 if (partial_ix < 0)
2254 partial_ix = i;
2255 }
2256 }
2257
 2258 	/* No full-member pkey found; if a limited-member one exists, take it. */
2259 if (partial_ix >= 0) {
2260 *index = partial_ix;
2261 return 0;
2262 }
2263 return -ENOENT;
2264}
2265EXPORT_SYMBOL(ib_find_pkey);
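
/*
 * Editor's illustration (not part of the original file): resolving the
 * default P_Key to a table index.  As implemented above, a full-member entry
 * (bit 15 set) is preferred and a limited-member match is returned only as a
 * fallback.  my_default_pkey_index() is a hypothetical helper.
 */
#if 0	/* illustrative sketch, not compiled */
#include <rdma/ib_verbs.h>

static int my_default_pkey_index(struct ib_device *device, u8 port_num,
				 u16 *index)
{
	return ib_find_pkey(device, port_num, 0xffff /* default P_Key */,
			    index);
}
#endif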
2266
2267/**
2268 * ib_get_net_dev_by_params() - Return the appropriate net_dev
2269 * for a received CM request
2270 * @dev: An RDMA device on which the request has been received.
2271 * @port: Port number on the RDMA device.
2272 * @pkey: The Pkey the request came on.
2273 * @gid: A GID that the net_dev uses to communicate.
2274 * @addr: Contains the IP address that the request specified as its
2275 * destination.
921eab11 2276 *
2277 */
2278struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
2279 u8 port,
2280 u16 pkey,
2281 const union ib_gid *gid,
2282 const struct sockaddr *addr)
2283{
2284 struct net_device *net_dev = NULL;
2285 unsigned long index;
2286 void *client_data;
2287
2288 if (!rdma_protocol_ib(dev, port))
2289 return NULL;
2290
2291 /*
2292 * Holding the read side guarantees that the client will not become
2293 * unregistered while we are calling get_net_dev_by_params()
2294 */
2295 down_read(&dev->client_data_rwsem);
2296 xan_for_each_marked (&dev->client_data, index, client_data,
2297 CLIENT_DATA_REGISTERED) {
2298 struct ib_client *client = xa_load(&clients, index);
9268f72d 2299
0df91bb6 2300 if (!client || !client->get_net_dev_by_params)
2301 continue;
2302
2303 net_dev = client->get_net_dev_by_params(dev, port, pkey, gid,
2304 addr, client_data);
2305 if (net_dev)
2306 break;
9268f72d 2307 }
921eab11 2308 up_read(&dev->client_data_rwsem);
2309
2310 return net_dev;
2311}
2312EXPORT_SYMBOL(ib_get_net_dev_by_params);
2313
2314void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
2315{
3023a1e9 2316 struct ib_device_ops *dev_ops = &dev->ops;
2317#define SET_DEVICE_OP(ptr, name) \
2318 do { \
2319 if (ops->name) \
2320 if (!((ptr)->name)) \
2321 (ptr)->name = ops->name; \
2322 } while (0)
2323
2324#define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name)
2325
3023a1e9 2326 SET_DEVICE_OP(dev_ops, add_gid);
2f1927b0 2327 SET_DEVICE_OP(dev_ops, advise_mr);
2328 SET_DEVICE_OP(dev_ops, alloc_dm);
2329 SET_DEVICE_OP(dev_ops, alloc_fmr);
2330 SET_DEVICE_OP(dev_ops, alloc_hw_stats);
2331 SET_DEVICE_OP(dev_ops, alloc_mr);
2332 SET_DEVICE_OP(dev_ops, alloc_mw);
2333 SET_DEVICE_OP(dev_ops, alloc_pd);
2334 SET_DEVICE_OP(dev_ops, alloc_rdma_netdev);
2335 SET_DEVICE_OP(dev_ops, alloc_ucontext);
2336 SET_DEVICE_OP(dev_ops, alloc_xrcd);
2337 SET_DEVICE_OP(dev_ops, attach_mcast);
2338 SET_DEVICE_OP(dev_ops, check_mr_status);
2339 SET_DEVICE_OP(dev_ops, create_ah);
2340 SET_DEVICE_OP(dev_ops, create_counters);
2341 SET_DEVICE_OP(dev_ops, create_cq);
2342 SET_DEVICE_OP(dev_ops, create_flow);
2343 SET_DEVICE_OP(dev_ops, create_flow_action_esp);
2344 SET_DEVICE_OP(dev_ops, create_qp);
2345 SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
2346 SET_DEVICE_OP(dev_ops, create_srq);
2347 SET_DEVICE_OP(dev_ops, create_wq);
2348 SET_DEVICE_OP(dev_ops, dealloc_dm);
d0899892 2349 SET_DEVICE_OP(dev_ops, dealloc_driver);
2350 SET_DEVICE_OP(dev_ops, dealloc_fmr);
2351 SET_DEVICE_OP(dev_ops, dealloc_mw);
2352 SET_DEVICE_OP(dev_ops, dealloc_pd);
2353 SET_DEVICE_OP(dev_ops, dealloc_ucontext);
2354 SET_DEVICE_OP(dev_ops, dealloc_xrcd);
2355 SET_DEVICE_OP(dev_ops, del_gid);
2356 SET_DEVICE_OP(dev_ops, dereg_mr);
2357 SET_DEVICE_OP(dev_ops, destroy_ah);
2358 SET_DEVICE_OP(dev_ops, destroy_counters);
2359 SET_DEVICE_OP(dev_ops, destroy_cq);
2360 SET_DEVICE_OP(dev_ops, destroy_flow);
2361 SET_DEVICE_OP(dev_ops, destroy_flow_action);
2362 SET_DEVICE_OP(dev_ops, destroy_qp);
2363 SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table);
2364 SET_DEVICE_OP(dev_ops, destroy_srq);
2365 SET_DEVICE_OP(dev_ops, destroy_wq);
2366 SET_DEVICE_OP(dev_ops, detach_mcast);
2367 SET_DEVICE_OP(dev_ops, disassociate_ucontext);
2368 SET_DEVICE_OP(dev_ops, drain_rq);
2369 SET_DEVICE_OP(dev_ops, drain_sq);
ca22354b 2370 SET_DEVICE_OP(dev_ops, enable_driver);
02da3750 2371 SET_DEVICE_OP(dev_ops, fill_res_entry);
2372 SET_DEVICE_OP(dev_ops, get_dev_fw_str);
2373 SET_DEVICE_OP(dev_ops, get_dma_mr);
2374 SET_DEVICE_OP(dev_ops, get_hw_stats);
2375 SET_DEVICE_OP(dev_ops, get_link_layer);
2376 SET_DEVICE_OP(dev_ops, get_netdev);
2377 SET_DEVICE_OP(dev_ops, get_port_immutable);
2378 SET_DEVICE_OP(dev_ops, get_vector_affinity);
2379 SET_DEVICE_OP(dev_ops, get_vf_config);
2380 SET_DEVICE_OP(dev_ops, get_vf_stats);
ea4baf7f 2381 SET_DEVICE_OP(dev_ops, init_port);
2382 SET_DEVICE_OP(dev_ops, iw_accept);
2383 SET_DEVICE_OP(dev_ops, iw_add_ref);
2384 SET_DEVICE_OP(dev_ops, iw_connect);
2385 SET_DEVICE_OP(dev_ops, iw_create_listen);
2386 SET_DEVICE_OP(dev_ops, iw_destroy_listen);
2387 SET_DEVICE_OP(dev_ops, iw_get_qp);
2388 SET_DEVICE_OP(dev_ops, iw_reject);
2389 SET_DEVICE_OP(dev_ops, iw_rem_ref);
2390 SET_DEVICE_OP(dev_ops, map_mr_sg);
2391 SET_DEVICE_OP(dev_ops, map_phys_fmr);
2392 SET_DEVICE_OP(dev_ops, mmap);
2393 SET_DEVICE_OP(dev_ops, modify_ah);
2394 SET_DEVICE_OP(dev_ops, modify_cq);
2395 SET_DEVICE_OP(dev_ops, modify_device);
2396 SET_DEVICE_OP(dev_ops, modify_flow_action_esp);
2397 SET_DEVICE_OP(dev_ops, modify_port);
2398 SET_DEVICE_OP(dev_ops, modify_qp);
2399 SET_DEVICE_OP(dev_ops, modify_srq);
2400 SET_DEVICE_OP(dev_ops, modify_wq);
2401 SET_DEVICE_OP(dev_ops, peek_cq);
2402 SET_DEVICE_OP(dev_ops, poll_cq);
2403 SET_DEVICE_OP(dev_ops, post_recv);
2404 SET_DEVICE_OP(dev_ops, post_send);
2405 SET_DEVICE_OP(dev_ops, post_srq_recv);
2406 SET_DEVICE_OP(dev_ops, process_mad);
2407 SET_DEVICE_OP(dev_ops, query_ah);
2408 SET_DEVICE_OP(dev_ops, query_device);
2409 SET_DEVICE_OP(dev_ops, query_gid);
2410 SET_DEVICE_OP(dev_ops, query_pkey);
2411 SET_DEVICE_OP(dev_ops, query_port);
2412 SET_DEVICE_OP(dev_ops, query_qp);
2413 SET_DEVICE_OP(dev_ops, query_srq);
2414 SET_DEVICE_OP(dev_ops, rdma_netdev_get_params);
2415 SET_DEVICE_OP(dev_ops, read_counters);
2416 SET_DEVICE_OP(dev_ops, reg_dm_mr);
2417 SET_DEVICE_OP(dev_ops, reg_user_mr);
2418 SET_DEVICE_OP(dev_ops, req_ncomp_notif);
2419 SET_DEVICE_OP(dev_ops, req_notify_cq);
2420 SET_DEVICE_OP(dev_ops, rereg_user_mr);
2421 SET_DEVICE_OP(dev_ops, resize_cq);
2422 SET_DEVICE_OP(dev_ops, set_vf_guid);
2423 SET_DEVICE_OP(dev_ops, set_vf_link_state);
2424 SET_DEVICE_OP(dev_ops, unmap_fmr);
21a428a0 2425
d3456914 2426 SET_OBJ_SIZE(dev_ops, ib_ah);
21a428a0 2427 SET_OBJ_SIZE(dev_ops, ib_pd);
68e326de 2428 SET_OBJ_SIZE(dev_ops, ib_srq);
a2a074ef 2429 SET_OBJ_SIZE(dev_ops, ib_ucontext);
2430}
2431EXPORT_SYMBOL(ib_set_device_ops);
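
/*
 * Editor's illustration (not part of the original file): how a driver fills
 * in its ops.  SET_DEVICE_OP() above only copies a pointer when the driver
 * provides one and the slot is still empty, so ib_set_device_ops() may be
 * called more than once with different partial ops structures.  The driver
 * structures and callbacks named here are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
#include <rdma/ib_verbs.h>

struct my_pd {
	struct ib_pd ibpd;
};

static const struct ib_device_ops my_dev_ops = {
	.alloc_pd	= my_alloc_pd,
	.dealloc_pd	= my_dealloc_pd,
	.query_device	= my_query_device,
	.query_port	= my_query_port,
	.query_pkey	= my_query_pkey,

	/* lets the core allocate the driver's PD together with the ib_pd */
	INIT_RDMA_OBJ_SIZE(ib_pd, my_pd, ibpd),
};

static void my_init_ops(struct ib_device *ibdev)
{
	ib_set_device_ops(ibdev, &my_dev_ops);
}
#endif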
2432
d0e312fe 2433static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
735c631a 2434 [RDMA_NL_LS_OP_RESOLVE] = {
647c75ac 2435 .doit = ib_nl_handle_resolve_resp,
2436 .flags = RDMA_NL_ADMIN_PERM,
2437 },
735c631a 2438 [RDMA_NL_LS_OP_SET_TIMEOUT] = {
647c75ac 2439 .doit = ib_nl_handle_set_timeout,
2440 .flags = RDMA_NL_ADMIN_PERM,
2441 },
ae43f828 2442 [RDMA_NL_LS_OP_IP_RESOLVE] = {
647c75ac 2443 .doit = ib_nl_handle_ip_res_resp,
2444 .flags = RDMA_NL_ADMIN_PERM,
2445 },
2446};
2447
2448static int __init ib_core_init(void)
2449{
2450 int ret;
2451
2452 ib_wq = alloc_workqueue("infiniband", 0, 0);
2453 if (!ib_wq)
2454 return -ENOMEM;
2455
14d3a3b2 2456 ib_comp_wq = alloc_workqueue("ib-comp-wq",
b7363e67 2457 WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
2458 if (!ib_comp_wq) {
2459 ret = -ENOMEM;
2460 goto err;
2461 }
2462
2463 ib_comp_unbound_wq =
2464 alloc_workqueue("ib-comp-unb-wq",
2465 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
2466 WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
2467 if (!ib_comp_unbound_wq) {
2468 ret = -ENOMEM;
2469 goto err_comp;
2470 }
2471
55aeed06 2472 ret = class_register(&ib_class);
fd75c789 2473 if (ret) {
aba25a3e 2474 pr_warn("Couldn't create InfiniBand device class\n");
f794809a 2475 goto err_comp_unbound;
fd75c789 2476 }
1da177e4 2477
c9901724 2478 ret = rdma_nl_init();
b2cbae2c 2479 if (ret) {
c9901724 2480 pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
2481 goto err_sysfs;
2482 }
2483
2484 ret = addr_init();
2485 if (ret) {
 2486 		pr_warn("Couldn't init IB address resolution\n");
2487 goto err_ibnl;
2488 }
2489
2490 ret = ib_mad_init();
2491 if (ret) {
2492 pr_warn("Couldn't init IB MAD\n");
2493 goto err_addr;
2494 }
2495
2496 ret = ib_sa_init();
2497 if (ret) {
2498 pr_warn("Couldn't init SA\n");
2499 goto err_mad;
2500 }
2501
2502 ret = register_lsm_notifier(&ibdev_lsm_nb);
2503 if (ret) {
2504 pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
c9901724 2505 goto err_sa;
2506 }
2507
2508 ret = register_pernet_device(&rdma_dev_net_ops);
2509 if (ret) {
2510 pr_warn("Couldn't init compat dev. ret %d\n", ret);
2511 goto err_compat;
2512 }
2513
6c80b41a 2514 nldev_init();
c9901724 2515 rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
5ef8c0c1 2516 roce_gid_mgmt_init();
1da177e4 2517
2518 return 0;
2519
2520err_compat:
2521 unregister_lsm_notifier(&ibdev_lsm_nb);
2522err_sa:
2523 ib_sa_cleanup();
2524err_mad:
2525 ib_mad_cleanup();
2526err_addr:
2527 addr_cleanup();
e3f20f02 2528err_ibnl:
c9901724 2529 rdma_nl_exit();
fd75c789 2530err_sysfs:
55aeed06 2531 class_unregister(&ib_class);
2532err_comp_unbound:
2533 destroy_workqueue(ib_comp_unbound_wq);
2534err_comp:
2535 destroy_workqueue(ib_comp_wq);
2536err:
2537 destroy_workqueue(ib_wq);
2538 return ret;
2539}
2540
2541static void __exit ib_core_cleanup(void)
2542{
5ef8c0c1 2543 roce_gid_mgmt_cleanup();
6c80b41a 2544 nldev_exit();
c9901724 2545 rdma_nl_unregister(RDMA_NL_LS);
4e0f7b90 2546 unregister_pernet_device(&rdma_dev_net_ops);
c9901724 2547 unregister_lsm_notifier(&ibdev_lsm_nb);
c2e49c92 2548 ib_sa_cleanup();
4c2cb422 2549 ib_mad_cleanup();
e3f20f02 2550 addr_cleanup();
c9901724 2551 rdma_nl_exit();
55aeed06 2552 class_unregister(&ib_class);
f794809a 2553 destroy_workqueue(ib_comp_unbound_wq);
14d3a3b2 2554 destroy_workqueue(ib_comp_wq);
f7c6a7b5 2555 /* Make sure that any pending umem accounting work is done. */
f0626710 2556 destroy_workqueue(ib_wq);
d0899892 2557 flush_workqueue(system_unbound_wq);
e59178d8 2558 WARN_ON(!xa_empty(&clients));
0df91bb6 2559 WARN_ON(!xa_empty(&devices));
2560}
2561
2562MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
2563
2564/* ib core relies on netdev stack to first register net_ns_type_operations
2565 * ns kobject type before ib_core initialization.
2566 */
2567fs_initcall(ib_core_init);
1da177e4 2568module_exit(ib_core_cleanup);