Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. | |
2a1d9b7f | 3 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. |
1da177e4 LT |
4 | * |
5 | * This software is available to you under a choice of one of two | |
6 | * licenses. You may choose to be licensed under the terms of the GNU | |
7 | * General Public License (GPL) Version 2, available from the file | |
8 | * COPYING in the main directory of this source tree, or the | |
9 | * OpenIB.org BSD license below: | |
10 | * | |
11 | * Redistribution and use in source and binary forms, with or | |
12 | * without modification, are permitted provided that the following | |
13 | * conditions are met: | |
14 | * | |
15 | * - Redistributions of source code must retain the above | |
16 | * copyright notice, this list of conditions and the following | |
17 | * disclaimer. | |
18 | * | |
19 | * - Redistributions in binary form must reproduce the above | |
20 | * copyright notice, this list of conditions and the following | |
21 | * disclaimer in the documentation and/or other materials | |
22 | * provided with the distribution. | |
23 | * | |
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
31 | * SOFTWARE. | |
1da177e4 LT |
32 | */ |
33 | ||
34 | #include <linux/module.h> | |
35 | #include <linux/string.h> | |
36 | #include <linux/errno.h> | |
9a6b090c | 37 | #include <linux/kernel.h> |
1da177e4 LT |
38 | #include <linux/slab.h> |
39 | #include <linux/init.h> | |
9268f72d | 40 | #include <linux/netdevice.h> |
4e0f7b90 | 41 | #include <net/net_namespace.h> |
8f408ab6 DJ |
42 | #include <linux/security.h> |
43 | #include <linux/notifier.h> | |
324e227e | 44 | #include <linux/hashtable.h> |
b2cbae2c | 45 | #include <rdma/rdma_netlink.h> |
03db3a2d MB |
46 | #include <rdma/ib_addr.h> |
47 | #include <rdma/ib_cache.h> | |
413d3347 | 48 | #include <rdma/rdma_counter.h> |
1da177e4 LT |
49 | |
50 | #include "core_priv.h" | |
41eda65c | 51 | #include "restrack.h" |
1da177e4 LT |
52 | |
53 | MODULE_AUTHOR("Roland Dreier"); | |
54 | MODULE_DESCRIPTION("core kernel InfiniBand API"); | |
55 | MODULE_LICENSE("Dual BSD/GPL"); | |
56 | ||
14d3a3b2 | 57 | struct workqueue_struct *ib_comp_wq; |
f794809a | 58 | struct workqueue_struct *ib_comp_unbound_wq; |
f0626710 TH |
59 | struct workqueue_struct *ib_wq; |
60 | EXPORT_SYMBOL_GPL(ib_wq); | |
61 | ||
921eab11 JG |
62 | /* |
63 | * Each of the three rwsem locks (devices, clients, client_data) protects the | |
64 | * xarray of the same name. Specifically it allows the caller to assert that | |
65 | * the MARK will/will not be changing under the lock, and for devices and | |
66 | * clients, that the value in the xarray is still a valid pointer. Change of | |
67 | * the MARK is linked to the object state, so holding the lock and testing the | |
68 | * MARK also asserts that the contained object is in a certain state. | |
69 | * | |
70 | * This is used to build a two stage register/unregister flow where objects | |
71 | * can continue to be in the xarray even though they are still in progress to | |
72 | * register/unregister. | |
73 | * | |
74 | * The xarray itself provides additional locking, and restartable iteration, | |
75 | * which is also relied on. | |
76 | * | |
77 | * Locks should not be nested, with the exception of client_data, which is | |
78 | * allowed to nest under the read side of the other two locks. | |
79 | * | |
80 | * The devices_rwsem also protects the device name list, any change or | |
81 | * assignment of device name must also hold the write side to guarantee unique | |
82 | * names. | |
83 | */ | |
84 | ||
0df91bb6 JG |
85 | /* |
86 | * devices contains devices that have had their names assigned. The | |
87 | * devices may not be registered. Users that care about the registration | |
88 | * status need to call ib_device_try_get() on the device to ensure it is | |
89 | * registered, and keep it registered, for the required duration. | |
90 | * | |
91 | */ | |
92 | static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC); | |
921eab11 | 93 | static DECLARE_RWSEM(devices_rwsem); |
0df91bb6 JG |
94 | #define DEVICE_REGISTERED XA_MARK_1 |
95 | ||
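/*
 * Editor's note: a minimal sketch of the locking pattern described in the
 * comment above (sketch_load_registered is hypothetical and not in the
 * kernel tree). Holding devices_rwsem for read guarantees the MARK, and
 * hence the registration state, cannot change while it is tested.
 */
static struct ib_device *sketch_load_registered(unsigned long index)
{
	struct ib_device *device = NULL;

	down_read(&devices_rwsem);
	if (xa_get_mark(&devices, index, DEVICE_REGISTERED))
		device = xa_load(&devices, index);
	up_read(&devices_rwsem);
	return device;
}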
9cd58817 | 96 | static u32 highest_client_id; |
e59178d8 JG |
97 | #define CLIENT_REGISTERED XA_MARK_1 |
98 | static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC); | |
921eab11 | 99 | static DECLARE_RWSEM(clients_rwsem); |
1da177e4 | 100 | |
621e55ff JG |
101 | static void ib_client_put(struct ib_client *client) |
102 | { | |
103 | if (refcount_dec_and_test(&client->uses)) | |
104 | complete(&client->uses_zero); | |
105 | } | |
106 | ||
1da177e4 | 107 | /* |
0df91bb6 JG |
108 | * If client_data is registered then the corresponding client must also still |
109 | * be registered. | |
110 | */ | |
111 | #define CLIENT_DATA_REGISTERED XA_MARK_1 | |
4e0f7b90 | 112 | |
1d2fedd8 | 113 | unsigned int rdma_dev_net_id; |
4e0f7b90 PP |
114 | |
115 | /* | |
116 | * A list of net namespaces is maintained in an xarray. This is necessary | |
117 | * because we can't get the locking right using the existing net ns list. We | |
118 | * would require an init_net callback after the list is updated.
119 | */ | |
120 | static DEFINE_XARRAY_FLAGS(rdma_nets, XA_FLAGS_ALLOC); | |
121 | /* | |
122 | * rwsem to protect accessing the rdma_nets xarray entries. | |
123 | */ | |
124 | static DECLARE_RWSEM(rdma_nets_rwsem); | |
125 | ||
cb7e0e13 | 126 | bool ib_devices_shared_netns = true; |
a56bc45b PP |
127 | module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444); |
128 | MODULE_PARM_DESC(netns_mode, | |
129 | "Share device among net namespaces; default=1 (shared)"); | |
41c61401 | 130 | /** |
d6537c1a | 131 | * rdma_dev_access_netns() - Return whether an rdma device can be accessed |
41c61401 | 132 | * from a specified net namespace or not. |
d6537c1a | 133 | * @dev: Pointer to rdma device which needs to be checked |
41c61401 PP |
134 | * @net: Pointer to net namespace for which access is to be checked
135 | * | |
d6537c1a | 136 | * When the rdma device is in shared mode, it ignores the net namespace. |
137 | * When the rdma device is exclusive to a net namespace, the device's net
138 | * namespace is checked against the specified one. | |
41c61401 PP |
139 | */ |
140 | bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net) | |
141 | { | |
142 | return (ib_devices_shared_netns || | |
143 | net_eq(read_pnet(&dev->coredev.rdma_net), net)); | |
144 | } | |
145 | EXPORT_SYMBOL(rdma_dev_access_netns); | |
146 | ||
0df91bb6 JG |
147 | /* |
148 | * xarray has this behavior where it won't iterate over NULL values stored in | |
149 | * allocated arrays. So we need our own iterator to see all values stored in | |
150 | * the array. This does the same thing as xa_for_each except that it also | |
151 | * returns NULL valued entries if the array is allocating. Simplified to only | |
152 | * work on simple xarrays. | |
153 | */ | |
154 | static void *xan_find_marked(struct xarray *xa, unsigned long *indexp, | |
155 | xa_mark_t filter) | |
156 | { | |
157 | XA_STATE(xas, xa, *indexp); | |
158 | void *entry; | |
159 | ||
160 | rcu_read_lock(); | |
161 | do { | |
162 | entry = xas_find_marked(&xas, ULONG_MAX, filter); | |
163 | if (xa_is_zero(entry)) | |
164 | break; | |
165 | } while (xas_retry(&xas, entry)); | |
166 | rcu_read_unlock(); | |
167 | ||
168 | if (entry) { | |
169 | *indexp = xas.xa_index; | |
170 | if (xa_is_zero(entry)) | |
171 | return NULL; | |
172 | return entry; | |
173 | } | |
174 | return XA_ERROR(-ENOENT); | |
175 | } | |
176 | #define xan_for_each_marked(xa, index, entry, filter) \ | |
177 | for (index = 0, entry = xan_find_marked(xa, &(index), filter); \ | |
178 | !xa_is_err(entry); \ | |
179 | (index)++, entry = xan_find_marked(xa, &(index), filter)) | |
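/*
 * Editor's note: hypothetical usage of the iterator above (the walker
 * function is illustrative, not in the kernel tree). Unlike xa_for_each,
 * this also visits allocated slots whose stored value is NULL, which
 * matters because add_client_context() may store NULL as client data.
 */
static void sketch_walk_client_data(struct ib_device *device)
{
	unsigned long index;
	void *entry;

	xan_for_each_marked(&device->client_data, index, entry,
			    CLIENT_DATA_REGISTERED) {
		/* entry may legitimately be NULL for a registered client */
	}
}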
180 | ||
324e227e JG |
181 | /* RCU hash table mapping netdevice pointers to struct ib_port_data */ |
182 | static DEFINE_SPINLOCK(ndev_hash_lock); | |
183 | static DECLARE_HASHTABLE(ndev_hash, 5); | |
184 | ||
c2261dd7 | 185 | static void free_netdevs(struct ib_device *ib_dev); |
d0899892 JG |
186 | static void ib_unregister_work(struct work_struct *work); |
187 | static void __ib_unregister_device(struct ib_device *device); | |
8f408ab6 DJ |
188 | static int ib_security_change(struct notifier_block *nb, unsigned long event, |
189 | void *lsm_data); | |
190 | static void ib_policy_change_task(struct work_struct *work); | |
191 | static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task); | |
192 | ||
923abb9d GP |
193 | static void __ibdev_printk(const char *level, const struct ib_device *ibdev, |
194 | struct va_format *vaf) | |
195 | { | |
196 | if (ibdev && ibdev->dev.parent) | |
197 | dev_printk_emit(level[1] - '0', | |
198 | ibdev->dev.parent, | |
199 | "%s %s %s: %pV", | |
200 | dev_driver_string(ibdev->dev.parent), | |
201 | dev_name(ibdev->dev.parent), | |
202 | dev_name(&ibdev->dev), | |
203 | vaf); | |
204 | else if (ibdev) | |
205 | printk("%s%s: %pV", | |
206 | level, dev_name(&ibdev->dev), vaf); | |
207 | else | |
208 | printk("%s(NULL ib_device): %pV", level, vaf); | |
209 | } | |
210 | ||
211 | void ibdev_printk(const char *level, const struct ib_device *ibdev, | |
212 | const char *format, ...) | |
213 | { | |
214 | struct va_format vaf; | |
215 | va_list args; | |
216 | ||
217 | va_start(args, format); | |
218 | ||
219 | vaf.fmt = format; | |
220 | vaf.va = &args; | |
221 | ||
222 | __ibdev_printk(level, ibdev, &vaf); | |
223 | ||
224 | va_end(args); | |
225 | } | |
226 | EXPORT_SYMBOL(ibdev_printk); | |
227 | ||
228 | #define define_ibdev_printk_level(func, level) \ | |
229 | void func(const struct ib_device *ibdev, const char *fmt, ...) \ | |
230 | { \ | |
231 | struct va_format vaf; \ | |
232 | va_list args; \ | |
233 | \ | |
234 | va_start(args, fmt); \ | |
235 | \ | |
236 | vaf.fmt = fmt; \ | |
237 | vaf.va = &args; \ | |
238 | \ | |
239 | __ibdev_printk(level, ibdev, &vaf); \ | |
240 | \ | |
241 | va_end(args); \ | |
242 | } \ | |
243 | EXPORT_SYMBOL(func); | |
244 | ||
245 | define_ibdev_printk_level(ibdev_emerg, KERN_EMERG); | |
246 | define_ibdev_printk_level(ibdev_alert, KERN_ALERT); | |
247 | define_ibdev_printk_level(ibdev_crit, KERN_CRIT); | |
248 | define_ibdev_printk_level(ibdev_err, KERN_ERR); | |
249 | define_ibdev_printk_level(ibdev_warn, KERN_WARNING); | |
250 | define_ibdev_printk_level(ibdev_notice, KERN_NOTICE); | |
251 | define_ibdev_printk_level(ibdev_info, KERN_INFO); | |
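/*
 * Editor's note: an illustrative call (message text and values are
 * hypothetical):
 *
 *	ibdev_warn(ibdev, "QP %u: retry count exceeded\n", qp_num);
 *
 * With a parent device set, __ibdev_printk() above prints, for example,
 * "mlx5_core 0000:03:00.0 mlx5_0: QP 42: retry count exceeded";
 * without one it prints only the ib device name as the prefix.
 */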
252 | ||
8f408ab6 DJ |
253 | static struct notifier_block ibdev_lsm_nb = { |
254 | .notifier_call = ib_security_change, | |
255 | }; | |
1da177e4 | 256 | |
decbc7a6 PP |
257 | static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net, |
258 | struct net *net); | |
259 | ||
324e227e JG |
260 | /* Pointer to the RCU head at the start of the ib_port_data array */ |
261 | struct ib_port_data_rcu { | |
262 | struct rcu_head rcu_head; | |
263 | struct ib_port_data pdata[]; | |
264 | }; | |
265 | ||
deee3c7e | 266 | static void ib_device_check_mandatory(struct ib_device *device) |
1da177e4 | 267 | { |
3023a1e9 | 268 | #define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x } |
1da177e4 LT |
269 | static const struct { |
270 | size_t offset; | |
271 | char *name; | |
272 | } mandatory_table[] = { | |
273 | IB_MANDATORY_FUNC(query_device), | |
274 | IB_MANDATORY_FUNC(query_port), | |
275 | IB_MANDATORY_FUNC(query_pkey), | |
1da177e4 LT |
276 | IB_MANDATORY_FUNC(alloc_pd), |
277 | IB_MANDATORY_FUNC(dealloc_pd), | |
1da177e4 LT |
278 | IB_MANDATORY_FUNC(create_qp), |
279 | IB_MANDATORY_FUNC(modify_qp), | |
280 | IB_MANDATORY_FUNC(destroy_qp), | |
281 | IB_MANDATORY_FUNC(post_send), | |
282 | IB_MANDATORY_FUNC(post_recv), | |
283 | IB_MANDATORY_FUNC(create_cq), | |
284 | IB_MANDATORY_FUNC(destroy_cq), | |
285 | IB_MANDATORY_FUNC(poll_cq), | |
286 | IB_MANDATORY_FUNC(req_notify_cq), | |
287 | IB_MANDATORY_FUNC(get_dma_mr), | |
7738613e IW |
288 | IB_MANDATORY_FUNC(dereg_mr), |
289 | IB_MANDATORY_FUNC(get_port_immutable) | |
1da177e4 LT |
290 | }; |
291 | int i; | |
292 | ||
6780c4fa | 293 | device->kverbs_provider = true; |
9a6b090c | 294 | for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) { |
3023a1e9 KH |
295 | if (!*(void **) ((void *) &device->ops + |
296 | mandatory_table[i].offset)) { | |
6780c4fa GP |
297 | device->kverbs_provider = false; |
298 | break; | |
1da177e4 LT |
299 | } |
300 | } | |
1da177e4 LT |
301 | } |
302 | ||
f8978bd9 | 303 | /* |
01b67117 PP |
304 | * Caller must perform ib_device_put() to return the device reference count |
305 | * when ib_device_get_by_index() returns valid device pointer. | |
f8978bd9 | 306 | */ |
37eeab55 | 307 | struct ib_device *ib_device_get_by_index(const struct net *net, u32 index) |
f8978bd9 LR |
308 | { |
309 | struct ib_device *device; | |
310 | ||
921eab11 | 311 | down_read(&devices_rwsem); |
0df91bb6 | 312 | device = xa_load(&devices, index); |
01b67117 | 313 | if (device) { |
37eeab55 PP |
314 | if (!rdma_dev_access_netns(device, net)) { |
315 | device = NULL; | |
316 | goto out; | |
317 | } | |
318 | ||
d79af724 | 319 | if (!ib_device_try_get(device)) |
01b67117 PP |
320 | device = NULL; |
321 | } | |
37eeab55 | 322 | out: |
921eab11 | 323 | up_read(&devices_rwsem); |
f8978bd9 LR |
324 | return device; |
325 | } | |
326 | ||
d79af724 JG |
327 | /** |
328 | * ib_device_put - Release IB device reference | |
329 | * @device: device whose reference is to be released
330 | *
331 | * ib_device_put() releases a reference to the IB device to allow it to be
332 | * unregistered and eventually freed.
333 | */ | |
01b67117 PP |
334 | void ib_device_put(struct ib_device *device) |
335 | { | |
336 | if (refcount_dec_and_test(&device->refcount)) | |
337 | complete(&device->unreg_completion); | |
338 | } | |
d79af724 | 339 | EXPORT_SYMBOL(ib_device_put); |
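/*
 * Editor's note: an illustrative get/put pairing (the function name and
 * index value are hypothetical). The device cannot complete
 * unregistration while the reference returned by the 'get' is held.
 */
static void sketch_use_device(struct net *net)
{
	struct ib_device *device = ib_device_get_by_index(net, 0);

	if (!device)
		return;
	/* device is registered and stays registered until the put */
	ib_device_put(device);
}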
01b67117 | 340 | |
1da177e4 LT |
341 | static struct ib_device *__ib_device_get_by_name(const char *name) |
342 | { | |
343 | struct ib_device *device; | |
0df91bb6 | 344 | unsigned long index; |
1da177e4 | 345 | |
0df91bb6 | 346 | xa_for_each (&devices, index, device) |
896de009 | 347 | if (!strcmp(name, dev_name(&device->dev))) |
1da177e4 LT |
348 | return device; |
349 | ||
350 | return NULL; | |
351 | } | |
352 | ||
6cc2c8e5 JG |
353 | /** |
354 | * ib_device_get_by_name - Find an IB device by name | |
355 | * @name: The name to look for | |
356 | * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all) | |
357 | * | |
358 | * Find and hold an ib_device by its name. The caller must call | |
359 | * ib_device_put() on the returned pointer. | |
360 | */ | |
361 | struct ib_device *ib_device_get_by_name(const char *name, | |
362 | enum rdma_driver_id driver_id) | |
363 | { | |
364 | struct ib_device *device; | |
365 | ||
366 | down_read(&devices_rwsem); | |
367 | device = __ib_device_get_by_name(name); | |
368 | if (device && driver_id != RDMA_DRIVER_UNKNOWN && | |
b9560a41 | 369 | device->ops.driver_id != driver_id) |
6cc2c8e5 JG |
370 | device = NULL; |
371 | ||
372 | if (device) { | |
373 | if (!ib_device_try_get(device)) | |
374 | device = NULL; | |
375 | } | |
376 | up_read(&devices_rwsem); | |
377 | return device; | |
378 | } | |
379 | EXPORT_SYMBOL(ib_device_get_by_name); | |
380 | ||
4e0f7b90 PP |
381 | static int rename_compat_devs(struct ib_device *device) |
382 | { | |
383 | struct ib_core_device *cdev; | |
384 | unsigned long index; | |
385 | int ret = 0; | |
386 | ||
387 | mutex_lock(&device->compat_devs_mutex); | |
388 | xa_for_each (&device->compat_devs, index, cdev) { | |
389 | ret = device_rename(&cdev->dev, dev_name(&device->dev)); | |
390 | if (ret) { | |
391 | dev_warn(&cdev->dev, | |
392 | "Fail to rename compatdev to new name %s\n", | |
393 | dev_name(&device->dev)); | |
394 | break; | |
395 | } | |
396 | } | |
397 | mutex_unlock(&device->compat_devs_mutex); | |
398 | return ret; | |
399 | } | |
400 | ||
d21943dd LR |
401 | int ib_device_rename(struct ib_device *ibdev, const char *name) |
402 | { | |
dc1435c0 LR |
403 | unsigned long index; |
404 | void *client_data; | |
e3593b56 | 405 | int ret; |
d21943dd | 406 | |
921eab11 | 407 | down_write(&devices_rwsem); |
e3593b56 | 408 | if (!strcmp(name, dev_name(&ibdev->dev))) { |
dc1435c0 LR |
409 | up_write(&devices_rwsem); |
410 | return 0; | |
e3593b56 JG |
411 | } |
412 | ||
344684e6 | 413 | if (__ib_device_get_by_name(name)) { |
dc1435c0 LR |
414 | up_write(&devices_rwsem); |
415 | return -EEXIST; | |
d21943dd LR |
416 | } |
417 | ||
418 | ret = device_rename(&ibdev->dev, name); | |
dc1435c0 LR |
419 | if (ret) { |
420 | up_write(&devices_rwsem); | |
421 | return ret; | |
422 | } | |
423 | ||
d21943dd | 424 | strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX); |
4e0f7b90 | 425 | ret = rename_compat_devs(ibdev); |
dc1435c0 LR |
426 | |
427 | downgrade_write(&devices_rwsem); | |
428 | down_read(&ibdev->client_data_rwsem); | |
429 | xan_for_each_marked(&ibdev->client_data, index, client_data, | |
430 | CLIENT_DATA_REGISTERED) { | |
431 | struct ib_client *client = xa_load(&clients, index); | |
432 | ||
433 | if (!client || !client->rename) | |
434 | continue; | |
435 | ||
436 | client->rename(ibdev, client_data); | |
437 | } | |
438 | up_read(&ibdev->client_data_rwsem); | |
439 | up_read(&devices_rwsem); | |
440 | return 0; | |
d21943dd LR |
441 | } |
442 | ||
f8fc8cd9 YF |
443 | int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim) |
444 | { | |
445 | if (use_dim > 1) | |
446 | return -EINVAL; | |
447 | ibdev->use_cq_dim = use_dim; | |
448 | ||
449 | return 0; | |
450 | } | |
451 | ||
e349f858 | 452 | static int alloc_name(struct ib_device *ibdev, const char *name) |
1da177e4 | 453 | { |
1da177e4 | 454 | struct ib_device *device; |
0df91bb6 | 455 | unsigned long index; |
3b88afd3 JG |
456 | struct ida inuse; |
457 | int rc; | |
1da177e4 LT |
458 | int i; |
459 | ||
9ffbe8ac | 460 | lockdep_assert_held_write(&devices_rwsem); |
3b88afd3 | 461 | ida_init(&inuse); |
0df91bb6 | 462 | xa_for_each (&devices, index, device) { |
e349f858 JG |
463 | char buf[IB_DEVICE_NAME_MAX]; |
464 | ||
896de009 | 465 | if (sscanf(dev_name(&device->dev), name, &i) != 1) |
1da177e4 | 466 | continue; |
3b88afd3 | 467 | if (i < 0 || i >= INT_MAX) |
1da177e4 LT |
468 | continue; |
469 | snprintf(buf, sizeof buf, name, i); | |
3b88afd3 JG |
470 | if (strcmp(buf, dev_name(&device->dev)) != 0) |
471 | continue; | |
472 | ||
473 | rc = ida_alloc_range(&inuse, i, i, GFP_KERNEL); | |
474 | if (rc < 0) | |
475 | goto out; | |
1da177e4 LT |
476 | } |
477 | ||
3b88afd3 JG |
478 | rc = ida_alloc(&inuse, GFP_KERNEL); |
479 | if (rc < 0) | |
480 | goto out; | |
1da177e4 | 481 | |
3b88afd3 JG |
482 | rc = dev_set_name(&ibdev->dev, name, rc); |
483 | out: | |
484 | ida_destroy(&inuse); | |
485 | return rc; | |
1da177e4 LT |
486 | } |
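/*
 * Editor's note: illustrative expansion (names hypothetical). Given the
 * template "rxe%d", alloc_name() records every index already taken by an
 * existing "rxe<N>" device in the ida, then ida_alloc() returns the
 * lowest free index, so successive registrations yield "rxe0", "rxe1",
 * and so on.
 */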
487 | ||
55aeed06 JG |
488 | static void ib_device_release(struct device *device) |
489 | { | |
490 | struct ib_device *dev = container_of(device, struct ib_device, dev); | |
491 | ||
c2261dd7 | 492 | free_netdevs(dev); |
652432f3 | 493 | WARN_ON(refcount_read(&dev->refcount)); |
46bdf370 KH |
494 | if (dev->port_data) { |
495 | ib_cache_release_one(dev); | |
496 | ib_security_release_port_pkey_list(dev); | |
413d3347 | 497 | rdma_counter_release(dev); |
324e227e JG |
498 | kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu, |
499 | pdata[0]), | |
500 | rcu_head); | |
46bdf370 | 501 | } |
413d3347 | 502 | |
56594ae1 PP |
503 | mutex_destroy(&dev->unregistration_lock); |
504 | mutex_destroy(&dev->compat_devs_mutex); | |
505 | ||
46bdf370 KH |
506 | xa_destroy(&dev->compat_devs); |
507 | xa_destroy(&dev->client_data); | |
324e227e | 508 | kfree_rcu(dev, rcu_head); |
55aeed06 JG |
509 | } |
510 | ||
511 | static int ib_device_uevent(struct device *device, | |
512 | struct kobj_uevent_env *env) | |
513 | { | |
896de009 | 514 | if (add_uevent_var(env, "NAME=%s", dev_name(device))) |
55aeed06 JG |
515 | return -ENOMEM; |
516 | ||
517 | /* | |
518 | * It would be nice to pass the node GUID with the event... | |
519 | */ | |
520 | ||
521 | return 0; | |
522 | } | |
523 | ||
62dfa795 PP |
524 | static const void *net_namespace(struct device *d) |
525 | { | |
4e0f7b90 PP |
526 | struct ib_core_device *coredev = |
527 | container_of(d, struct ib_core_device, dev); | |
528 | ||
529 | return read_pnet(&coredev->rdma_net); | |
62dfa795 PP |
530 | } |
531 | ||
55aeed06 JG |
532 | static struct class ib_class = { |
533 | .name = "infiniband", | |
534 | .dev_release = ib_device_release, | |
535 | .dev_uevent = ib_device_uevent, | |
62dfa795 PP |
536 | .ns_type = &net_ns_type_operations, |
537 | .namespace = net_namespace, | |
55aeed06 JG |
538 | }; |
539 | ||
cebe556b | 540 | static void rdma_init_coredev(struct ib_core_device *coredev, |
4e0f7b90 | 541 | struct ib_device *dev, struct net *net) |
cebe556b PP |
542 | { |
543 | /* This BUILD_BUG_ON is intended to catch a layout change
544 | * of the union of ib_core_device and device.
545 | * dev must be the first element as ib_core and provider
546 | * drivers use it. Adding anything in ib_core_device before
547 | * device will break this assumption. | |
548 | */ | |
549 | BUILD_BUG_ON(offsetof(struct ib_device, coredev.dev) != | |
550 | offsetof(struct ib_device, dev)); | |
551 | ||
552 | coredev->dev.class = &ib_class; | |
553 | coredev->dev.groups = dev->groups; | |
554 | device_initialize(&coredev->dev); | |
555 | coredev->owner = dev; | |
556 | INIT_LIST_HEAD(&coredev->port_list); | |
4e0f7b90 | 557 | write_pnet(&coredev->rdma_net, net); |
cebe556b PP |
558 | } |
559 | ||
1da177e4 | 560 | /** |
459cc69f | 561 | * _ib_alloc_device - allocate an IB device struct |
1da177e4 LT |
562 | * @size:size of structure to allocate |
563 | * | |
564 | * Low-level drivers should use ib_alloc_device() to allocate &struct | |
565 | * ib_device. @size is the size of the structure to be allocated, | |
566 | * including any private data used by the low-level driver. | |
567 | * ib_dealloc_device() must be used to free structures allocated with | |
568 | * ib_alloc_device(). | |
569 | */ | |
459cc69f | 570 | struct ib_device *_ib_alloc_device(size_t size) |
1da177e4 | 571 | { |
55aeed06 JG |
572 | struct ib_device *device; |
573 | ||
574 | if (WARN_ON(size < sizeof(struct ib_device))) | |
575 | return NULL; | |
576 | ||
577 | device = kzalloc(size, GFP_KERNEL); | |
578 | if (!device) | |
579 | return NULL; | |
580 | ||
41eda65c LR |
581 | if (rdma_restrack_init(device)) { |
582 | kfree(device); | |
583 | return NULL; | |
584 | } | |
02d8883f | 585 | |
5f8f5499 | 586 | device->groups[0] = &ib_dev_attr_group; |
4e0f7b90 | 587 | rdma_init_coredev(&device->coredev, device, &init_net); |
55aeed06 | 588 | |
55aeed06 | 589 | INIT_LIST_HEAD(&device->event_handler_list); |
40adf686 | 590 | spin_lock_init(&device->qp_open_list_lock); |
6b57cea9 | 591 | init_rwsem(&device->event_handler_rwsem); |
d0899892 | 592 | mutex_init(&device->unregistration_lock); |
0df91bb6 JG |
593 | /* |
594 | * client_data needs to be an allocating xarray because we don't want
595 | * our mark to be destroyed if the user stores NULL in the client data.
596 | */ | |
597 | xa_init_flags(&device->client_data, XA_FLAGS_ALLOC); | |
921eab11 | 598 | init_rwsem(&device->client_data_rwsem); |
4e0f7b90 PP |
599 | xa_init_flags(&device->compat_devs, XA_FLAGS_ALLOC); |
600 | mutex_init(&device->compat_devs_mutex); | |
01b67117 | 601 | init_completion(&device->unreg_completion); |
d0899892 | 602 | INIT_WORK(&device->unregistration_work, ib_unregister_work); |
1da177e4 | 603 | |
55aeed06 | 604 | return device; |
1da177e4 | 605 | } |
459cc69f | 606 | EXPORT_SYMBOL(_ib_alloc_device); |
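/*
 * Editor's note: illustrative driver-side allocation (struct sketch_dev
 * is hypothetical). Drivers embed struct ib_device as the first member
 * and use the ib_alloc_device() macro, which wraps _ib_alloc_device()
 * above and returns the containing structure.
 */
struct sketch_dev {
	struct ib_device ibdev;
	u32 private_state;
};

static struct sketch_dev *sketch_alloc(void)
{
	return ib_alloc_device(sketch_dev, ibdev);
}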
1da177e4 LT |
607 | |
608 | /** | |
609 | * ib_dealloc_device - free an IB device struct | |
610 | * @device:structure to free | |
611 | * | |
612 | * Free a structure allocated with ib_alloc_device(). | |
613 | */ | |
614 | void ib_dealloc_device(struct ib_device *device) | |
615 | { | |
d0899892 JG |
616 | if (device->ops.dealloc_driver) |
617 | device->ops.dealloc_driver(device); | |
618 | ||
619 | /* | |
620 | * ib_unregister_driver() requires all devices to remain in the xarray | |
621 | * while their ops are callable. The last op we call is dealloc_driver | |
622 | * above. This is needed to create a fence on op callbacks prior to | |
623 | * allowing the driver module to unload. | |
624 | */ | |
625 | down_write(&devices_rwsem); | |
626 | if (xa_load(&devices, device->index) == device) | |
627 | xa_erase(&devices, device->index); | |
628 | up_write(&devices_rwsem); | |
629 | ||
c2261dd7 JG |
630 | /* Expedite releasing netdev references */ |
631 | free_netdevs(device); | |
632 | ||
4e0f7b90 | 633 | WARN_ON(!xa_empty(&device->compat_devs)); |
0df91bb6 | 634 | WARN_ON(!xa_empty(&device->client_data)); |
652432f3 | 635 | WARN_ON(refcount_read(&device->refcount)); |
0ad699c0 | 636 | rdma_restrack_clean(device); |
e155755e | 637 | /* Balances with device_initialize */ |
924b8900 | 638 | put_device(&device->dev); |
1da177e4 LT |
639 | } |
640 | EXPORT_SYMBOL(ib_dealloc_device); | |
641 | ||
921eab11 JG |
642 | /* |
643 | * add_client_context() and remove_client_context() must be safe against | |
644 | * parallel calls on the same device - registration/unregistration of both the | |
645 | * device and client can be occurring in parallel. | |
646 | * | |
647 | * The routines need to be a fence; any caller must not return until the add
648 | * or remove is fully completed. | |
649 | */ | |
650 | static int add_client_context(struct ib_device *device, | |
651 | struct ib_client *client) | |
1da177e4 | 652 | { |
921eab11 | 653 | int ret = 0; |
1da177e4 | 654 | |
6780c4fa | 655 | if (!device->kverbs_provider && !client->no_kverbs_req) |
921eab11 JG |
656 | return 0; |
657 | ||
658 | down_write(&device->client_data_rwsem); | |
621e55ff JG |
659 | /* |
660 | * So long as the client is registered hold both the client and device | |
661 | * unregistration locks. | |
662 | */ | |
663 | if (!refcount_inc_not_zero(&client->uses)) | |
664 | goto out_unlock; | |
665 | refcount_inc(&device->refcount); | |
666 | ||
921eab11 JG |
667 | /* |
668 | * Another caller to add_client_context got here first and has already | |
669 | * completely initialized the context.
670 | */ | |
671 | if (xa_get_mark(&device->client_data, client->client_id, | |
672 | CLIENT_DATA_REGISTERED)) | |
673 | goto out; | |
674 | ||
675 | ret = xa_err(xa_store(&device->client_data, client->client_id, NULL, | |
676 | GFP_KERNEL)); | |
677 | if (ret) | |
678 | goto out; | |
679 | downgrade_write(&device->client_data_rwsem); | |
680 | if (client->add) | |
681 | client->add(device); | |
682 | ||
683 | /* Readers shall not see a client until add has been completed */ | |
684 | xa_set_mark(&device->client_data, client->client_id, | |
685 | CLIENT_DATA_REGISTERED); | |
686 | up_read(&device->client_data_rwsem); | |
687 | return 0; | |
688 | ||
689 | out: | |
621e55ff JG |
690 | ib_device_put(device); |
691 | ib_client_put(client); | |
692 | out_unlock: | |
921eab11 JG |
693 | up_write(&device->client_data_rwsem); |
694 | return ret; | |
695 | } | |
696 | ||
697 | static void remove_client_context(struct ib_device *device, | |
698 | unsigned int client_id) | |
699 | { | |
700 | struct ib_client *client; | |
701 | void *client_data; | |
6780c4fa | 702 | |
921eab11 JG |
703 | down_write(&device->client_data_rwsem); |
704 | if (!xa_get_mark(&device->client_data, client_id, | |
705 | CLIENT_DATA_REGISTERED)) { | |
706 | up_write(&device->client_data_rwsem); | |
707 | return; | |
708 | } | |
709 | client_data = xa_load(&device->client_data, client_id); | |
710 | xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED); | |
711 | client = xa_load(&clients, client_id); | |
621e55ff | 712 | up_write(&device->client_data_rwsem); |
1da177e4 | 713 | |
921eab11 JG |
714 | /* |
715 | * Notice we cannot be holding any exclusive locks when calling the | |
716 | * remove callback as the remove callback can recurse back into any | |
717 | * public functions in this module and thus try for any locks those | |
718 | * functions take. | |
719 | * | |
720 | * For this reason clients and drivers should not call the | |
721 | * unregistration functions while holding any locks.
921eab11 JG |
722 | */ |
723 | if (client->remove) | |
724 | client->remove(device, client_data); | |
725 | ||
726 | xa_erase(&device->client_data, client_id); | |
621e55ff JG |
727 | ib_device_put(device); |
728 | ib_client_put(client); | |
1da177e4 LT |
729 | } |
730 | ||
c2261dd7 | 731 | static int alloc_port_data(struct ib_device *device) |
5eb620c8 | 732 | { |
324e227e | 733 | struct ib_port_data_rcu *pdata_rcu; |
ea1075ed | 734 | unsigned int port; |
c2261dd7 JG |
735 | |
736 | if (device->port_data) | |
737 | return 0; | |
738 | ||
739 | /* This can only be called once the physical port range is defined */ | |
740 | if (WARN_ON(!device->phys_port_cnt)) | |
741 | return -EINVAL; | |
7738613e | 742 | |
8ceb1357 JG |
743 | /* |
744 | * device->port_data is indexed directly by the port number to make | |
7738613e IW |
745 | * access to this data as efficient as possible. |
746 | * | |
8ceb1357 JG |
747 | * Therefore port_data is declared as a 1-based array with potential
748 | * empty slots at the beginning. | |
7738613e | 749 | */ |
324e227e JG |
750 | pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata, |
751 | rdma_end_port(device) + 1), | |
752 | GFP_KERNEL); | |
753 | if (!pdata_rcu) | |
55aeed06 | 754 | return -ENOMEM; |
324e227e JG |
755 | /* |
756 | * The rcu_head is put in front of the port data array and the stored | |
757 | * pointer is adjusted since we never need to see that member until | |
758 | * kfree_rcu. | |
759 | */ | |
760 | device->port_data = pdata_rcu->pdata; | |
5eb620c8 | 761 | |
ea1075ed | 762 | rdma_for_each_port (device, port) { |
8ceb1357 JG |
763 | struct ib_port_data *pdata = &device->port_data[port]; |
764 | ||
324e227e | 765 | pdata->ib_dev = device; |
8ceb1357 JG |
766 | spin_lock_init(&pdata->pkey_list_lock); |
767 | INIT_LIST_HEAD(&pdata->pkey_list); | |
c2261dd7 | 768 | spin_lock_init(&pdata->netdev_lock); |
324e227e | 769 | INIT_HLIST_NODE(&pdata->ndev_hash_link); |
c2261dd7 JG |
770 | } |
771 | return 0; | |
772 | } | |
773 | ||
774 | static int verify_immutable(const struct ib_device *dev, u8 port) | |
775 | { | |
776 | return WARN_ON(!rdma_cap_ib_mad(dev, port) && | |
777 | rdma_max_mad_size(dev, port) != 0); | |
778 | } | |
779 | ||
780 | static int setup_port_data(struct ib_device *device) | |
781 | { | |
782 | unsigned int port; | |
783 | int ret; | |
784 | ||
785 | ret = alloc_port_data(device); | |
786 | if (ret) | |
787 | return ret; | |
788 | ||
789 | rdma_for_each_port (device, port) { | |
790 | struct ib_port_data *pdata = &device->port_data[port]; | |
8ceb1357 JG |
791 | |
792 | ret = device->ops.get_port_immutable(device, port, | |
793 | &pdata->immutable); | |
5eb620c8 | 794 | if (ret) |
55aeed06 | 795 | return ret; |
337877a4 | 796 | |
55aeed06 JG |
797 | if (verify_immutable(device, port)) |
798 | return -EINVAL; | |
5eb620c8 | 799 | } |
55aeed06 | 800 | return 0; |
5eb620c8 YE |
801 | } |
802 | ||
9abb0d1b | 803 | void ib_get_device_fw_str(struct ib_device *dev, char *str) |
5fa76c20 | 804 | { |
3023a1e9 KH |
805 | if (dev->ops.get_dev_fw_str) |
806 | dev->ops.get_dev_fw_str(dev, str); | |
5fa76c20 IW |
807 | else |
808 | str[0] = '\0'; | |
809 | } | |
810 | EXPORT_SYMBOL(ib_get_device_fw_str); | |
811 | ||
8f408ab6 DJ |
812 | static void ib_policy_change_task(struct work_struct *work) |
813 | { | |
814 | struct ib_device *dev; | |
0df91bb6 | 815 | unsigned long index; |
8f408ab6 | 816 | |
921eab11 | 817 | down_read(&devices_rwsem); |
0df91bb6 | 818 | xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { |
ea1075ed | 819 | unsigned int i; |
8f408ab6 | 820 | |
ea1075ed | 821 | rdma_for_each_port (dev, i) { |
8f408ab6 DJ |
822 | u64 sp; |
823 | int ret = ib_get_cached_subnet_prefix(dev, | |
824 | i, | |
825 | &sp); | |
826 | ||
827 | WARN_ONCE(ret, | |
828 | "ib_get_cached_subnet_prefix err: %d, this should never happen here\n", | |
829 | ret); | |
a750cfde DJ |
830 | if (!ret) |
831 | ib_security_cache_change(dev, i, sp); | |
8f408ab6 DJ |
832 | } |
833 | } | |
921eab11 | 834 | up_read(&devices_rwsem); |
8f408ab6 DJ |
835 | } |
836 | ||
837 | static int ib_security_change(struct notifier_block *nb, unsigned long event, | |
838 | void *lsm_data) | |
839 | { | |
840 | if (event != LSM_POLICY_CHANGE) | |
841 | return NOTIFY_DONE; | |
842 | ||
843 | schedule_work(&ib_policy_change_work); | |
c66f6741 | 844 | ib_mad_agent_security_change(); |
8f408ab6 DJ |
845 | |
846 | return NOTIFY_OK; | |
847 | } | |
848 | ||
4e0f7b90 PP |
849 | static void compatdev_release(struct device *dev) |
850 | { | |
851 | struct ib_core_device *cdev = | |
852 | container_of(dev, struct ib_core_device, dev); | |
853 | ||
854 | kfree(cdev); | |
855 | } | |
856 | ||
857 | static int add_one_compat_dev(struct ib_device *device, | |
858 | struct rdma_dev_net *rnet) | |
859 | { | |
860 | struct ib_core_device *cdev; | |
861 | int ret; | |
862 | ||
2b34c558 | 863 | lockdep_assert_held(&rdma_nets_rwsem); |
a56bc45b PP |
864 | if (!ib_devices_shared_netns) |
865 | return 0; | |
866 | ||
4e0f7b90 PP |
867 | /* |
868 | * Create and add compat device in all namespaces other than where it | |
869 | * is currently bound to. | |
870 | */ | |
871 | if (net_eq(read_pnet(&rnet->net), | |
872 | read_pnet(&device->coredev.rdma_net))) | |
873 | return 0; | |
874 | ||
875 | /* | |
876 | * The first of init_net() or ib_register_device() to take the | |
877 | * compat_devs_mutex wins and gets to add the device. Others will wait | |
878 | * for completion here. | |
879 | */ | |
880 | mutex_lock(&device->compat_devs_mutex); | |
881 | cdev = xa_load(&device->compat_devs, rnet->id); | |
882 | if (cdev) { | |
883 | ret = 0; | |
884 | goto done; | |
885 | } | |
886 | ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL); | |
887 | if (ret) | |
888 | goto done; | |
889 | ||
890 | cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); | |
891 | if (!cdev) { | |
892 | ret = -ENOMEM; | |
893 | goto cdev_err; | |
894 | } | |
895 | ||
896 | cdev->dev.parent = device->dev.parent; | |
897 | rdma_init_coredev(cdev, device, read_pnet(&rnet->net)); | |
898 | cdev->dev.release = compatdev_release; | |
f2f2b3bb JG |
899 | ret = dev_set_name(&cdev->dev, "%s", dev_name(&device->dev)); |
900 | if (ret) | |
901 | goto add_err; | |
4e0f7b90 PP |
902 | |
903 | ret = device_add(&cdev->dev); | |
904 | if (ret) | |
905 | goto add_err; | |
eb15c78b | 906 | ret = ib_setup_port_attrs(cdev); |
5417783e PP |
907 | if (ret) |
908 | goto port_err; | |
4e0f7b90 PP |
909 | |
910 | ret = xa_err(xa_store(&device->compat_devs, rnet->id, | |
911 | cdev, GFP_KERNEL)); | |
912 | if (ret) | |
913 | goto insert_err; | |
914 | ||
915 | mutex_unlock(&device->compat_devs_mutex); | |
916 | return 0; | |
917 | ||
918 | insert_err: | |
5417783e PP |
919 | ib_free_port_attrs(cdev); |
920 | port_err: | |
4e0f7b90 PP |
921 | device_del(&cdev->dev); |
922 | add_err: | |
923 | put_device(&cdev->dev); | |
924 | cdev_err: | |
925 | xa_release(&device->compat_devs, rnet->id); | |
926 | done: | |
927 | mutex_unlock(&device->compat_devs_mutex); | |
928 | return ret; | |
929 | } | |
930 | ||
931 | static void remove_one_compat_dev(struct ib_device *device, u32 id) | |
932 | { | |
933 | struct ib_core_device *cdev; | |
934 | ||
935 | mutex_lock(&device->compat_devs_mutex); | |
936 | cdev = xa_erase(&device->compat_devs, id); | |
937 | mutex_unlock(&device->compat_devs_mutex); | |
938 | if (cdev) { | |
5417783e | 939 | ib_free_port_attrs(cdev); |
4e0f7b90 PP |
940 | device_del(&cdev->dev); |
941 | put_device(&cdev->dev); | |
942 | } | |
943 | } | |
944 | ||
945 | static void remove_compat_devs(struct ib_device *device) | |
946 | { | |
947 | struct ib_core_device *cdev; | |
948 | unsigned long index; | |
949 | ||
950 | xa_for_each (&device->compat_devs, index, cdev) | |
951 | remove_one_compat_dev(device, index); | |
952 | } | |
953 | ||
954 | static int add_compat_devs(struct ib_device *device) | |
955 | { | |
956 | struct rdma_dev_net *rnet; | |
957 | unsigned long index; | |
958 | int ret = 0; | |
959 | ||
decbc7a6 PP |
960 | lockdep_assert_held(&devices_rwsem); |
961 | ||
4e0f7b90 PP |
962 | down_read(&rdma_nets_rwsem); |
963 | xa_for_each (&rdma_nets, index, rnet) { | |
964 | ret = add_one_compat_dev(device, rnet); | |
965 | if (ret) | |
966 | break; | |
967 | } | |
968 | up_read(&rdma_nets_rwsem); | |
969 | return ret; | |
970 | } | |
971 | ||
2b34c558 PP |
972 | static void remove_all_compat_devs(void) |
973 | { | |
974 | struct ib_compat_device *cdev; | |
975 | struct ib_device *dev; | |
976 | unsigned long index; | |
977 | ||
978 | down_read(&devices_rwsem); | |
979 | xa_for_each (&devices, index, dev) { | |
980 | unsigned long c_index = 0; | |
981 | ||
982 | /* Hold nets_rwsem so that any other thread modifying this | |
983 | * system param can sync with this thread. | |
984 | */ | |
985 | down_read(&rdma_nets_rwsem); | |
986 | xa_for_each (&dev->compat_devs, c_index, cdev) | |
987 | remove_one_compat_dev(dev, c_index); | |
988 | up_read(&rdma_nets_rwsem); | |
989 | } | |
990 | up_read(&devices_rwsem); | |
991 | } | |
992 | ||
993 | static int add_all_compat_devs(void) | |
994 | { | |
995 | struct rdma_dev_net *rnet; | |
996 | struct ib_device *dev; | |
997 | unsigned long index; | |
998 | int ret = 0; | |
999 | ||
1000 | down_read(&devices_rwsem); | |
1001 | xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { | |
1002 | unsigned long net_index = 0; | |
1003 | ||
1004 | /* Hold nets_rwsem so that any other thread modifying this | |
1005 | * system param can sync with this thread. | |
1006 | */ | |
1007 | down_read(&rdma_nets_rwsem); | |
1008 | xa_for_each (&rdma_nets, net_index, rnet) { | |
1009 | ret = add_one_compat_dev(dev, rnet); | |
1010 | if (ret) | |
1011 | break; | |
1012 | } | |
1013 | up_read(&rdma_nets_rwsem); | |
1014 | } | |
1015 | up_read(&devices_rwsem); | |
1016 | if (ret) | |
1017 | remove_all_compat_devs(); | |
1018 | return ret; | |
1019 | } | |
1020 | ||
1021 | int rdma_compatdev_set(u8 enable) | |
1022 | { | |
1023 | struct rdma_dev_net *rnet; | |
1024 | unsigned long index; | |
1025 | int ret = 0; | |
1026 | ||
1027 | down_write(&rdma_nets_rwsem); | |
1028 | if (ib_devices_shared_netns == enable) { | |
1029 | up_write(&rdma_nets_rwsem); | |
1030 | return 0; | |
1031 | } | |
1032 | ||
1033 | /* enable/disable of compat devices is not supported | |
1034 | * when more than the default init_net exists.
1035 | */ | |
1036 | xa_for_each (&rdma_nets, index, rnet) { | |
1037 | ret++; | |
1038 | break; | |
1039 | } | |
1040 | if (!ret) | |
1041 | ib_devices_shared_netns = enable; | |
1042 | up_write(&rdma_nets_rwsem); | |
1043 | if (ret) | |
1044 | return -EBUSY; | |
1045 | ||
1046 | if (enable) | |
1047 | ret = add_all_compat_devs(); | |
1048 | else | |
1049 | remove_all_compat_devs(); | |
1050 | return ret; | |
1051 | } | |
1052 | ||
4e0f7b90 PP |
1053 | static void rdma_dev_exit_net(struct net *net) |
1054 | { | |
1d2fedd8 | 1055 | struct rdma_dev_net *rnet = rdma_net_to_dev_net(net); |
4e0f7b90 PP |
1056 | struct ib_device *dev; |
1057 | unsigned long index; | |
1058 | int ret; | |
1059 | ||
1060 | down_write(&rdma_nets_rwsem); | |
1061 | /* | |
1062 | * Prevent the ID from being re-used and hide the id from xa_for_each. | |
1063 | */ | |
1064 | ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL)); | |
1065 | WARN_ON(ret); | |
1066 | up_write(&rdma_nets_rwsem); | |
1067 | ||
1068 | down_read(&devices_rwsem); | |
1069 | xa_for_each (&devices, index, dev) { | |
1070 | get_device(&dev->dev); | |
1071 | /* | |
1072 | * Release the devices_rwsem so that the potentially blocking
1073 | * device_del doesn't hold the devices_rwsem for too long.
1074 | */ | |
1075 | up_read(&devices_rwsem); | |
1076 | ||
1077 | remove_one_compat_dev(dev, rnet->id); | |
1078 | ||
decbc7a6 PP |
1079 | /* |
1080 | * If the real device is in the NS then move it back to init_net.
1081 | */ | |
1082 | rdma_dev_change_netns(dev, net, &init_net); | |
1083 | ||
4e0f7b90 PP |
1084 | put_device(&dev->dev); |
1085 | down_read(&devices_rwsem); | |
1086 | } | |
1087 | up_read(&devices_rwsem); | |
1088 | ||
1d2fedd8 | 1089 | rdma_nl_net_exit(rnet); |
4e0f7b90 PP |
1090 | xa_erase(&rdma_nets, rnet->id); |
1091 | } | |
1092 | ||
1093 | static __net_init int rdma_dev_init_net(struct net *net) | |
1094 | { | |
1d2fedd8 | 1095 | struct rdma_dev_net *rnet = rdma_net_to_dev_net(net); |
4e0f7b90 PP |
1096 | unsigned long index; |
1097 | struct ib_device *dev; | |
1098 | int ret; | |
1099 | ||
1d2fedd8 PP |
1100 | write_pnet(&rnet->net, net); |
1101 | ||
1102 | ret = rdma_nl_net_init(rnet); | |
1103 | if (ret) | |
1104 | return ret; | |
1105 | ||
4e0f7b90 PP |
1106 | /* No need to create any compat devices in default init_net. */ |
1107 | if (net_eq(net, &init_net)) | |
1108 | return 0; | |
1109 | ||
4e0f7b90 | 1110 | ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL); |
1d2fedd8 PP |
1111 | if (ret) { |
1112 | rdma_nl_net_exit(rnet); | |
4e0f7b90 | 1113 | return ret; |
1d2fedd8 | 1114 | } |
4e0f7b90 PP |
1115 | |
1116 | down_read(&devices_rwsem); | |
1117 | xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { | |
2b34c558 PP |
1118 | /* Hold nets_rwsem so that netlink command cannot change |
1119 | * system configuration for device sharing mode. | |
1120 | */ | |
1121 | down_read(&rdma_nets_rwsem); | |
4e0f7b90 | 1122 | ret = add_one_compat_dev(dev, rnet); |
2b34c558 | 1123 | up_read(&rdma_nets_rwsem); |
4e0f7b90 PP |
1124 | if (ret) |
1125 | break; | |
1126 | } | |
1127 | up_read(&devices_rwsem); | |
1128 | ||
1129 | if (ret) | |
1130 | rdma_dev_exit_net(net); | |
1131 | ||
1132 | return ret; | |
1133 | } | |
1134 | ||
0df91bb6 | 1135 | /* |
d0899892 JG |
1136 | * Assign the unique string device name and the unique device index. This is |
1137 | * undone by ib_dealloc_device. | |
ecc82c53 | 1138 | */ |
0df91bb6 | 1139 | static int assign_name(struct ib_device *device, const char *name) |
ecc82c53 | 1140 | { |
0df91bb6 JG |
1141 | static u32 last_id; |
1142 | int ret; | |
ecc82c53 | 1143 | |
921eab11 | 1144 | down_write(&devices_rwsem); |
0df91bb6 JG |
1145 | /* Assign a unique name to the device */ |
1146 | if (strchr(name, '%')) | |
1147 | ret = alloc_name(device, name); | |
1148 | else | |
1149 | ret = dev_set_name(&device->dev, name); | |
1150 | if (ret) | |
1151 | goto out; | |
1152 | ||
1153 | if (__ib_device_get_by_name(dev_name(&device->dev))) { | |
1154 | ret = -ENFILE; | |
1155 | goto out; | |
1156 | } | |
1157 | strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX); | |
ecc82c53 | 1158 | |
ea295481 LT |
1159 | ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b, |
1160 | &last_id, GFP_KERNEL); | |
1161 | if (ret > 0) | |
1162 | ret = 0; | |
921eab11 | 1163 | |
0df91bb6 | 1164 | out: |
921eab11 | 1165 | up_write(&devices_rwsem); |
0df91bb6 JG |
1166 | return ret; |
1167 | } | |
1168 | ||
548cb4fb | 1169 | static void setup_dma_device(struct ib_device *device) |
1da177e4 | 1170 | { |
99db9494 BVA |
1171 | struct device *parent = device->dev.parent; |
1172 | ||
0957c29f BVA |
1173 | WARN_ON_ONCE(device->dma_device); |
1174 | if (device->dev.dma_ops) { | |
1175 | /* | |
1176 | * The caller provided custom DMA operations. Copy the | |
1177 | * DMA-related fields that are used by e.g. dma_alloc_coherent() | |
1178 | * into device->dev. | |
1179 | */ | |
1180 | device->dma_device = &device->dev; | |
02ee9da3 BVA |
1181 | if (!device->dev.dma_mask) { |
1182 | if (parent) | |
1183 | device->dev.dma_mask = parent->dma_mask; | |
1184 | else | |
1185 | WARN_ON_ONCE(true); | |
1186 | } | |
1187 | if (!device->dev.coherent_dma_mask) { | |
1188 | if (parent) | |
1189 | device->dev.coherent_dma_mask = | |
1190 | parent->coherent_dma_mask; | |
1191 | else | |
1192 | WARN_ON_ONCE(true); | |
1193 | } | |
0957c29f BVA |
1194 | } else { |
1195 | /* | |
1196 | * The caller did not provide custom DMA operations. Use the | |
1197 | * DMA mapping operations of the parent device. | |
1198 | */ | |
02ee9da3 | 1199 | WARN_ON_ONCE(!parent); |
0957c29f BVA |
1200 | device->dma_device = parent; |
1201 | } | |
d10bcf94 | 1202 | |
c9121262 BVA |
1203 | if (!device->dev.dma_parms) { |
1204 | if (parent) { | |
1205 | /* | |
1206 | * The caller did not provide DMA parameters, so | |
1207 | * 'parent' probably represents a PCI device. The PCI | |
1208 | * core sets the maximum segment size to 64 | |
1209 | * KB. Increase this parameter to 2 GB. | |
1210 | */ | |
1211 | device->dev.dma_parms = parent->dma_parms; | |
1212 | dma_set_max_seg_size(device->dma_device, SZ_2G); | |
1213 | } else { | |
1214 | WARN_ON_ONCE(true); | |
1215 | } | |
1216 | } | |
548cb4fb | 1217 | } |
1da177e4 | 1218 | |
921eab11 JG |
1219 | /* |
1220 | * setup_device() allocates memory and sets up data that requires calling the | |
1221 | * device ops, this is the only reason these actions are not done during | |
1222 | * ib_alloc_device. It is undone by ib_dealloc_device(). | |
1223 | */ | |
548cb4fb PP |
1224 | static int setup_device(struct ib_device *device) |
1225 | { | |
1226 | struct ib_udata uhw = {.outlen = 0, .inlen = 0}; | |
1227 | int ret; | |
1da177e4 | 1228 | |
921eab11 | 1229 | setup_dma_device(device); |
deee3c7e | 1230 | ib_device_check_mandatory(device); |
1da177e4 | 1231 | |
8ceb1357 | 1232 | ret = setup_port_data(device); |
5eb620c8 | 1233 | if (ret) { |
8ceb1357 | 1234 | dev_warn(&device->dev, "Couldn't create per-port data\n"); |
548cb4fb PP |
1235 | return ret; |
1236 | } | |
1237 | ||
1238 | memset(&device->attrs, 0, sizeof(device->attrs)); | |
3023a1e9 | 1239 | ret = device->ops.query_device(device, &device->attrs, &uhw); |
548cb4fb PP |
1240 | if (ret) { |
1241 | dev_warn(&device->dev, | |
1242 | "Couldn't query the device attributes\n"); | |
d45f89d5 | 1243 | return ret; |
5eb620c8 YE |
1244 | } |
1245 | ||
d45f89d5 | 1246 | return 0; |
548cb4fb PP |
1247 | } |
1248 | ||
921eab11 JG |
1249 | static void disable_device(struct ib_device *device) |
1250 | { | |
9cd58817 | 1251 | u32 cid; |
921eab11 JG |
1252 | |
1253 | WARN_ON(!refcount_read(&device->refcount)); | |
1254 | ||
1255 | down_write(&devices_rwsem); | |
1256 | xa_clear_mark(&devices, device->index, DEVICE_REGISTERED); | |
1257 | up_write(&devices_rwsem); | |
1258 | ||
9cd58817 JG |
1259 | /* |
1260 | * Remove clients in LIFO order, see assign_client_id. This could be | |
1261 | * more efficient if xarray learns to reverse iterate. Since no new | |
1262 | * clients can be added to this ib_device past this point we only need | |
1263 | * the maximum possible client_id value here. | |
1264 | */ | |
921eab11 | 1265 | down_read(&clients_rwsem); |
9cd58817 | 1266 | cid = highest_client_id; |
921eab11 | 1267 | up_read(&clients_rwsem); |
9cd58817 JG |
1268 | while (cid) { |
1269 | cid--; | |
1270 | remove_client_context(device, cid); | |
1271 | } | |
921eab11 JG |
1272 | |
1273 | /* Pairs with refcount_set in enable_device */ | |
1274 | ib_device_put(device); | |
1275 | wait_for_completion(&device->unreg_completion); | |
c2261dd7 | 1276 | |
4e0f7b90 PP |
1277 | /* |
1278 | * Compat devices must be removed after the device refcount drops to zero.
1279 | * Otherwise init_net() may add more compatdevs after removing compat
1280 | * devices and before the device is disabled.
1281 | */ | |
1282 | remove_compat_devs(device); | |
921eab11 JG |
1283 | } |
1284 | ||
1285 | /* | |
1286 | * An enabled device is visible to all clients and to all the public facing | |
d0899892 JG |
1287 | * APIs that return a device pointer. This always returns with a new get, even |
1288 | * if it fails. | |
921eab11 | 1289 | */ |
d0899892 | 1290 | static int enable_device_and_get(struct ib_device *device) |
921eab11 JG |
1291 | { |
1292 | struct ib_client *client; | |
1293 | unsigned long index; | |
d0899892 | 1294 | int ret = 0; |
921eab11 | 1295 | |
d0899892 JG |
1296 | /* |
1297 | * One ref belongs to the xa and the other belongs to this | |
1298 | * thread. This is needed to guard against parallel unregistration. | |
1299 | */ | |
1300 | refcount_set(&device->refcount, 2); | |
921eab11 JG |
1301 | down_write(&devices_rwsem); |
1302 | xa_set_mark(&devices, device->index, DEVICE_REGISTERED); | |
d0899892 JG |
1303 | |
1304 | /* | |
1305 | * By using downgrade_write() we ensure that no other thread can clear | |
1306 | * DEVICE_REGISTERED while we are completing the client setup. | |
1307 | */ | |
1308 | downgrade_write(&devices_rwsem); | |
921eab11 | 1309 | |
ca22354b JG |
1310 | if (device->ops.enable_driver) { |
1311 | ret = device->ops.enable_driver(device); | |
1312 | if (ret) | |
1313 | goto out; | |
1314 | } | |
1315 | ||
921eab11 JG |
1316 | down_read(&clients_rwsem); |
1317 | xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) { | |
1318 | ret = add_client_context(device, client); | |
d0899892 JG |
1319 | if (ret) |
1320 | break; | |
921eab11 JG |
1321 | } |
1322 | up_read(&clients_rwsem); | |
4e0f7b90 PP |
1323 | if (!ret) |
1324 | ret = add_compat_devs(device); | |
ca22354b | 1325 | out: |
d0899892 JG |
1326 | up_read(&devices_rwsem); |
1327 | return ret; | |
921eab11 JG |
1328 | } |
1329 | ||
548cb4fb PP |
1330 | /** |
1331 | * ib_register_device - Register an IB device with IB core | |
d6537c1a | 1332 | * @device: Device to register |
1333 | * @name: unique string device name. This may include a '%' which will | |
1334 | * cause a unique index to be added to the passed device name. | |
548cb4fb PP |
1335 | * |
1336 | * Low-level drivers use ib_register_device() to register their | |
1337 | * devices with the IB core. All registered clients will receive a | |
1338 | * callback for each device that is added. @device must be allocated | |
1339 | * with ib_alloc_device(). | |
d0899892 JG |
1340 | * |
1341 | * If the driver uses ops.dealloc_driver and calls any ib_unregister_device() | |
1342 | * asynchronously then the device pointer may become freed as soon as this | |
1343 | * function returns. | |
548cb4fb | 1344 | */ |
ea4baf7f | 1345 | int ib_register_device(struct ib_device *device, const char *name) |
548cb4fb PP |
1346 | { |
1347 | int ret; | |
548cb4fb | 1348 | |
0df91bb6 JG |
1349 | ret = assign_name(device, name); |
1350 | if (ret) | |
921eab11 | 1351 | return ret; |
548cb4fb PP |
1352 | |
1353 | ret = setup_device(device); | |
1354 | if (ret) | |
d0899892 | 1355 | return ret; |
03db3a2d | 1356 | |
d45f89d5 JG |
1357 | ret = ib_cache_setup_one(device); |
1358 | if (ret) { | |
1359 | dev_warn(&device->dev, | |
1360 | "Couldn't set up InfiniBand P_Key/GID cache\n"); | |
d0899892 | 1361 | return ret; |
d45f89d5 JG |
1362 | } |
1363 | ||
7527a7b1 | 1364 | ib_device_register_rdmacg(device); |
3e153a93 | 1365 | |
413d3347 MZ |
1366 | rdma_counter_init(device); |
1367 | ||
e7a5b4aa LR |
1368 | /* |
1369 | * Ensure that ADD uevent is not fired because it | |
1370 | * is too early and the device is not initialized yet.
1371 | */ | |
1372 | dev_set_uevent_suppress(&device->dev, true); | |
5f8f5499 PP |
1373 | ret = device_add(&device->dev); |
1374 | if (ret) | |
1375 | goto cg_cleanup; | |
1376 | ||
ea4baf7f | 1377 | ret = ib_device_register_sysfs(device); |
1da177e4 | 1378 | if (ret) { |
43c7c851 JG |
1379 | dev_warn(&device->dev, |
1380 | "Couldn't register device with driver model\n"); | |
5f8f5499 | 1381 | goto dev_cleanup; |
1da177e4 LT |
1382 | } |
1383 | ||
d0899892 | 1384 | ret = enable_device_and_get(device); |
e7a5b4aa LR |
1385 | dev_set_uevent_suppress(&device->dev, false); |
1386 | /* Mark for userspace that device is ready */ | |
1387 | kobject_uevent(&device->dev.kobj, KOBJ_ADD); | |
d0899892 JG |
1388 | if (ret) { |
1389 | void (*dealloc_fn)(struct ib_device *); | |
1390 | ||
1391 | /* | |
1392 | * If we hit this error flow then we don't want to | |
1393 | * automatically dealloc the device since the caller is | |
1394 | * expected to call ib_dealloc_device() after | |
1395 | * ib_register_device() fails. This is tricky due to the | |
1396 | * possibility for a parallel unregistration along with this | |
1397 | * error flow. Since we have a refcount here we know any | |
1398 | * parallel flow is stopped in disable_device and will see the | |
1399 | * NULL pointers, causing the responsibility to | |
1400 | * ib_dealloc_device() to revert back to this thread. | |
1401 | */ | |
1402 | dealloc_fn = device->ops.dealloc_driver; | |
1403 | device->ops.dealloc_driver = NULL; | |
1404 | ib_device_put(device); | |
1405 | __ib_unregister_device(device); | |
1406 | device->ops.dealloc_driver = dealloc_fn; | |
1407 | return ret; | |
1408 | } | |
1409 | ib_device_put(device); | |
1da177e4 | 1410 | |
4be3a4fa PP |
1411 | return 0; |
1412 | ||
5f8f5499 PP |
1413 | dev_cleanup: |
1414 | device_del(&device->dev); | |
2fb4f4ea | 1415 | cg_cleanup: |
e7a5b4aa | 1416 | dev_set_uevent_suppress(&device->dev, false); |
2fb4f4ea | 1417 | ib_device_unregister_rdmacg(device); |
d45f89d5 | 1418 | ib_cache_cleanup_one(device); |
1da177e4 LT |
1419 | return ret; |
1420 | } | |
1421 | EXPORT_SYMBOL(ib_register_device); | |
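/*
 * Editor's note: an illustrative registration flow using the sketch_dev
 * type from the earlier sketch (all names hypothetical). On failure the
 * caller still owns the struct and must call ib_dealloc_device(), per
 * the error-flow comment above.
 */
static int sketch_probe(struct sketch_dev *sdev)
{
	int ret;

	ret = ib_register_device(&sdev->ibdev, "sketch%d");
	if (ret)
		ib_dealloc_device(&sdev->ibdev);
	return ret;
}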
1422 | ||
d0899892 JG |
1423 | /* Callers must hold a get on the device. */ |
1424 | static void __ib_unregister_device(struct ib_device *ib_dev) | |
1425 | { | |
1426 | /* | |
1427 | * We have a registration lock so that all the calls to unregister are | |
1428 | * fully fenced; once any unregister returns, the device is truly
1429 | * unregistered even if multiple callers are unregistering it at the | |
1430 | * same time. This also interacts with the registration flow and | |
1431 | * provides sane semantics if register and unregister are racing. | |
1432 | */ | |
1433 | mutex_lock(&ib_dev->unregistration_lock); | |
1434 | if (!refcount_read(&ib_dev->refcount)) | |
1435 | goto out; | |
1436 | ||
1437 | disable_device(ib_dev); | |
3042492b PP |
1438 | |
1439 | /* Expedite removing unregistered pointers from the hash table */ | |
1440 | free_netdevs(ib_dev); | |
1441 | ||
d0899892 JG |
1442 | ib_device_unregister_sysfs(ib_dev); |
1443 | device_del(&ib_dev->dev); | |
1444 | ib_device_unregister_rdmacg(ib_dev); | |
1445 | ib_cache_cleanup_one(ib_dev); | |
1446 | ||
1447 | /* | |
1448 | * Drivers using the new flow may not call ib_dealloc_device except | |
1449 | * in error unwind prior to registration success. | |
1450 | */ | |
1451 | if (ib_dev->ops.dealloc_driver) { | |
1452 | WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1); | |
1453 | ib_dealloc_device(ib_dev); | |
1454 | } | |
1455 | out: | |
1456 | mutex_unlock(&ib_dev->unregistration_lock); | |
1457 | } | |
1458 | ||
1da177e4 LT |
1459 | /** |
1460 | * ib_unregister_device - Unregister an IB device | |
d6537c1a | 1461 | * @ib_dev: The device to unregister |
1da177e4 LT |
1462 | * |
1463 | * Unregister an IB device. All clients will receive a remove callback. | |
d0899892 JG |
1464 | * |
1465 | * Callers should call this routine only once, and protect against races with | |
1466 | * registration. Typically it should only be called as part of a remove | |
1467 | * callback in an implementation of driver core's struct device_driver and | |
1468 | * related. | |
1469 | * | |
1470 | * If ops.dealloc_driver is used then ib_dev will be freed upon return from | |
1471 | * this function. | |
1da177e4 | 1472 | */ |
d0899892 | 1473 | void ib_unregister_device(struct ib_device *ib_dev) |
1da177e4 | 1474 | { |
d0899892 JG |
1475 | get_device(&ib_dev->dev); |
1476 | __ib_unregister_device(ib_dev); | |
1477 | put_device(&ib_dev->dev); | |
1da177e4 LT |
1478 | } |
1479 | EXPORT_SYMBOL(ib_unregister_device); | |
1480 | ||
d0899892 JG |
1481 | /** |
1482 | * ib_unregister_device_and_put - Unregister a device while holding a 'get' | |
d6537c1a | 1483 | * @ib_dev: The device to unregister |
d0899892 JG |
1484 | * |
1485 | * This is the same as ib_unregister_device(), except it includes an internal | |
1486 | * ib_device_put() that should match a 'get' obtained by the caller. | |
1487 | * | |
1488 | * It is safe to call this routine concurrently from multiple threads while | |
1489 | * holding the 'get'. When the function returns the device is fully | |
1490 | * unregistered. | |
1491 | * | |
1492 | * Drivers using this flow MUST use the driver_unregister callback to clean up | |
1493 | * their resources associated with the device and dealloc it. | |
1494 | */ | |
1495 | void ib_unregister_device_and_put(struct ib_device *ib_dev) | |
1496 | { | |
1497 | WARN_ON(!ib_dev->ops.dealloc_driver); | |
1498 | get_device(&ib_dev->dev); | |
1499 | ib_device_put(ib_dev); | |
1500 | __ib_unregister_device(ib_dev); | |
1501 | put_device(&ib_dev->dev); | |
1502 | } | |
1503 | EXPORT_SYMBOL(ib_unregister_device_and_put); | |
1504 | ||
1505 | /** | |
1506 | * ib_unregister_driver - Unregister all IB devices for a driver | |
1507 | * @driver_id: The driver to unregister | |
1508 | * | |
1509 | * This implements a fence for device unregistration. It only returns once all | |
1510 | * devices associated with the driver_id have fully completed their | |
1511 | * unregistration and returned from ib_unregister_device*(). | |
1512 | * | |
1513 | * If devices are not yet unregistered, it goes ahead and starts | |
1514 | * unregistering them. | |
1515 | * | |
1516 | * This does not block creation of new devices with the given driver_id; that | |
1517 | * is the responsibility of the caller. | |
1518 | */ | |
1519 | void ib_unregister_driver(enum rdma_driver_id driver_id) | |
1520 | { | |
1521 | struct ib_device *ib_dev; | |
1522 | unsigned long index; | |
1523 | ||
1524 | down_read(&devices_rwsem); | |
1525 | xa_for_each (&devices, index, ib_dev) { | |
b9560a41 | 1526 | if (ib_dev->ops.driver_id != driver_id) |
d0899892 JG |
1527 | continue; |
1528 | ||
1529 | get_device(&ib_dev->dev); | |
1530 | up_read(&devices_rwsem); | |
1531 | ||
1532 | WARN_ON(!ib_dev->ops.dealloc_driver); | |
1533 | __ib_unregister_device(ib_dev); | |
1534 | ||
1535 | put_device(&ib_dev->dev); | |
1536 | down_read(&devices_rwsem); | |
1537 | } | |
1538 | up_read(&devices_rwsem); | |
1539 | } | |
1540 | EXPORT_SYMBOL(ib_unregister_driver); | |
1541 | ||
1542 | static void ib_unregister_work(struct work_struct *work) | |
1543 | { | |
1544 | struct ib_device *ib_dev = | |
1545 | container_of(work, struct ib_device, unregistration_work); | |
1546 | ||
1547 | __ib_unregister_device(ib_dev); | |
1548 | put_device(&ib_dev->dev); | |
1549 | } | |
1550 | ||
1551 | /** | |
1552 | * ib_unregister_device_queued - Unregister a device using a work queue | |
d6537c1a | 1553 | * @ib_dev: The device to unregister |
d0899892 JG |
1554 | * |
1555 | * This schedules an asynchronous unregistration using a WQ for the device. A | |
1556 | * driver should use this to avoid holding locks while unregistering, such | |
1557 | * as the RTNL lock. | |
1558 | * | |
1559 | * Drivers using this API must use ib_unregister_driver before module unload | |
1560 | * to ensure that all scheduled unregistrations have completed. | |
1561 | */ | |
1562 | void ib_unregister_device_queued(struct ib_device *ib_dev) | |
1563 | { | |
1564 | WARN_ON(!refcount_read(&ib_dev->refcount)); | |
1565 | WARN_ON(!ib_dev->ops.dealloc_driver); | |
1566 | get_device(&ib_dev->dev); | |
1567 | if (!queue_work(system_unbound_wq, &ib_dev->unregistration_work)) | |
1568 | put_device(&ib_dev->dev); | |
1569 | } | |
1570 | EXPORT_SYMBOL(ib_unregister_device_queued); | |
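/*
 * Hypothetical sketch mirroring how a soft driver (e.g. siw) uses the
 * queued flow: unregistration is kicked from a netdev notifier while the
 * RTNL lock is held, and module unload fences everything with
 * ib_unregister_driver(). "my_" names are illustrative; a real driver
 * passes its own driver id instead of RDMA_DRIVER_UNKNOWN.
 */
static int my_netdev_event(struct notifier_block *nb, unsigned long event,
			   void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ib_device *ibdev;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
	if (!ibdev)
		return NOTIFY_DONE;

	/* Cannot sleep for the unregister fence under RTNL; the WQ does it. */
	ib_unregister_device_queued(ibdev);
	ib_device_put(ibdev);
	return NOTIFY_DONE;
}

static void __exit my_exit(void)
{
	/* Returns only after all queued unregistrations fully completed. */
	ib_unregister_driver(RDMA_DRIVER_UNKNOWN);
}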
1571 | ||
decbc7a6 PP |
1572 | /* |
1573 | * The caller must pass in a device with the kref held (get_device()) and | |
1574 | * the refcount released (ib_device_put()). If the device is in cur_net and | |
1575 | * still registered then it is moved into net. | |
1576 | */ | |
1577 | static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net, | |
1578 | struct net *net) | |
1579 | { | |
1580 | int ret2 = -EINVAL; | |
1581 | int ret; | |
1582 | ||
1583 | mutex_lock(&device->unregistration_lock); | |
1584 | ||
1585 | /* | |
2e5b8a01 PP |
1586 | * If a device is not protected by ib_device_get() and the unregistration_lock |
1587 | * is not held, its namespace can change or it can be unregistered. |
1588 | * Check again under the lock. | |
decbc7a6 PP |
1589 | */ |
1590 | if (refcount_read(&device->refcount) == 0 || | |
1591 | !net_eq(cur_net, read_pnet(&device->coredev.rdma_net))) { | |
1592 | ret = -ENODEV; | |
1593 | goto out; | |
1594 | } | |
1595 | ||
1596 | kobject_uevent(&device->dev.kobj, KOBJ_REMOVE); | |
1597 | disable_device(device); | |
1598 | ||
1599 | /* | |
1600 | * At this point no one can be using the device, so it is safe to | |
1601 | * change the namespace. | |
1602 | */ | |
1603 | write_pnet(&device->coredev.rdma_net, net); | |
1604 | ||
2e5b8a01 | 1605 | down_read(&devices_rwsem); |
decbc7a6 PP |
1606 | /* |
1607 | * Currently rdma devices are system-wide unique, so the device name | |
1608 | * is guaranteed to be free in the new namespace. Publish the new namespace | |
1609 | * at the sysfs level. | |
1610 | */ | |
decbc7a6 PP |
1611 | ret = device_rename(&device->dev, dev_name(&device->dev)); |
1612 | up_read(&devices_rwsem); | |
1613 | if (ret) { | |
1614 | dev_warn(&device->dev, | |
1615 | "%s: Couldn't rename device after namespace change\n", | |
1616 | __func__); | |
1617 | /* Try and put things back and re-enable the device */ | |
1618 | write_pnet(&device->coredev.rdma_net, cur_net); | |
1619 | } | |
1620 | ||
1621 | ret2 = enable_device_and_get(device); | |
2e5b8a01 | 1622 | if (ret2) { |
decbc7a6 PP |
1623 | /* |
1624 | * This shouldn't really happen, but if it does, let the user | |
1625 | * retry at a later point. So don't disable the device. | |
1626 | */ | |
1627 | dev_warn(&device->dev, | |
1628 | "%s: Couldn't re-enable device after namespace change\n", | |
1629 | __func__); | |
2e5b8a01 | 1630 | } |
decbc7a6 | 1631 | kobject_uevent(&device->dev.kobj, KOBJ_ADD); |
2e5b8a01 | 1632 | |
decbc7a6 PP |
1633 | ib_device_put(device); |
1634 | out: | |
1635 | mutex_unlock(&device->unregistration_lock); | |
1636 | if (ret) | |
1637 | return ret; | |
1638 | return ret2; | |
1639 | } | |
1640 | ||
2e5b8a01 PP |
1641 | int ib_device_set_netns_put(struct sk_buff *skb, |
1642 | struct ib_device *dev, u32 ns_fd) | |
1643 | { | |
1644 | struct net *net; | |
1645 | int ret; | |
1646 | ||
1647 | net = get_net_ns_by_fd(ns_fd); | |
1648 | if (IS_ERR(net)) { | |
1649 | ret = PTR_ERR(net); | |
1650 | goto net_err; | |
1651 | } | |
1652 | ||
1653 | if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) { | |
1654 | ret = -EPERM; | |
1655 | goto ns_err; | |
1656 | } | |
1657 | ||
1658 | /* | |
1659 | * Currently supported only for those providers which support | |
1660 | * disassociation and don't do port-specific sysfs init. Once a | |
1661 | * port_cleanup infrastructure is implemented, this limitation will be | |
1662 | * removed. | |
1663 | */ | |
1664 | if (!dev->ops.disassociate_ucontext || dev->ops.init_port || | |
1665 | ib_devices_shared_netns) { | |
1666 | ret = -EOPNOTSUPP; | |
1667 | goto ns_err; | |
1668 | } | |
1669 | ||
1670 | get_device(&dev->dev); | |
1671 | ib_device_put(dev); | |
1672 | ret = rdma_dev_change_netns(dev, current->nsproxy->net_ns, net); | |
1673 | put_device(&dev->dev); | |
1674 | ||
1675 | put_net(net); | |
1676 | return ret; | |
1677 | ||
1678 | ns_err: | |
1679 | put_net(net); | |
1680 | net_err: | |
1681 | ib_device_put(dev); | |
1682 | return ret; | |
1683 | } | |
1684 | ||
4e0f7b90 PP |
1685 | static struct pernet_operations rdma_dev_net_ops = { |
1686 | .init = rdma_dev_init_net, | |
1687 | .exit = rdma_dev_exit_net, | |
1688 | .id = &rdma_dev_net_id, | |
1689 | .size = sizeof(struct rdma_dev_net), | |
1690 | }; | |
1691 | ||
e59178d8 JG |
1692 | static int assign_client_id(struct ib_client *client) |
1693 | { | |
1694 | int ret; | |
1695 | ||
921eab11 | 1696 | down_write(&clients_rwsem); |
e59178d8 JG |
1697 | /* |
1698 | * The add/remove callbacks must be called in FIFO/LIFO order. To | |
1699 | * achieve this we assign client_ids so they are sorted in | |
9cd58817 | 1700 | * registration order. |
e59178d8 | 1701 | */ |
9cd58817 | 1702 | client->client_id = highest_client_id; |
ea295481 | 1703 | ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL); |
e59178d8 JG |
1704 | if (ret) |
1705 | goto out; | |
1706 | ||
9cd58817 | 1707 | highest_client_id++; |
921eab11 | 1708 | xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED); |
921eab11 | 1709 | |
e59178d8 | 1710 | out: |
921eab11 | 1711 | up_write(&clients_rwsem); |
e59178d8 JG |
1712 | return ret; |
1713 | } | |
1714 | ||
9cd58817 JG |
1715 | static void remove_client_id(struct ib_client *client) |
1716 | { | |
1717 | down_write(&clients_rwsem); | |
1718 | xa_erase(&clients, client->client_id); | |
1719 | for (; highest_client_id; highest_client_id--) | |
1720 | if (xa_load(&clients, highest_client_id - 1)) | |
1721 | break; | |
1722 | up_write(&clients_rwsem); | |
1723 | } | |
1724 | ||
1da177e4 LT |
1725 | /** |
1726 | * ib_register_client - Register an IB client | |
1727 | * @client:Client to register | |
1728 | * | |
1729 | * Upper level users of the IB drivers can use ib_register_client() to | |
1730 | * register callbacks for IB device addition and removal. When an IB | |
1731 | * device is added, each registered client's add method will be called | |
1732 | * (in the order the clients were registered), and when a device is | |
1733 | * removed, each client's remove method will be called (in the reverse | |
1734 | * order that clients were registered). In addition, when | |
1735 | * ib_register_client() is called, the client will receive an add | |
1736 | * callback for all devices already registered. | |
1737 | */ | |
1738 | int ib_register_client(struct ib_client *client) | |
1739 | { | |
1740 | struct ib_device *device; | |
0df91bb6 | 1741 | unsigned long index; |
e59178d8 | 1742 | int ret; |
1da177e4 | 1743 | |
621e55ff JG |
1744 | refcount_set(&client->uses, 1); |
1745 | init_completion(&client->uses_zero); | |
e59178d8 | 1746 | ret = assign_client_id(client); |
921eab11 | 1747 | if (ret) |
e59178d8 | 1748 | return ret; |
1da177e4 | 1749 | |
921eab11 JG |
1750 | down_read(&devices_rwsem); |
1751 | xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) { | |
1752 | ret = add_client_context(device, client); | |
1753 | if (ret) { | |
1754 | up_read(&devices_rwsem); | |
1755 | ib_unregister_client(client); | |
1756 | return ret; | |
1757 | } | |
1758 | } | |
1759 | up_read(&devices_rwsem); | |
1da177e4 LT |
1760 | return 0; |
1761 | } | |
1762 | EXPORT_SYMBOL(ib_register_client); | |
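/*
 * Hypothetical sketch, not part of this file: a minimal ULP registering as
 * an ib_client (needs <rdma/ib_verbs.h> and <linux/slab.h>). In this tree
 * add() returns void and remove() receives the stored client data. All
 * "my_" names are illustrative assumptions.
 */
static struct ib_client my_client;

struct my_ctx {
	struct ib_device *ibdev;
};

static void my_add_one(struct ib_device *ibdev)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return;
	ctx->ibdev = ibdev;
	/* Only legal between add() and the return of remove(). */
	ib_set_client_data(ibdev, &my_client, ctx);
}

static void my_remove_one(struct ib_device *ibdev, void *client_data)
{
	kfree(client_data);
}

static struct ib_client my_client = {
	.name	= "my_client",
	.add	= my_add_one,
	.remove	= my_remove_one,
};

/*
 * Module init would call ib_register_client(&my_client), which replays
 * add() for every already-registered device; module exit calls
 * ib_unregister_client(&my_client), a full fence as documented below.
 */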
1763 | ||
1764 | /** | |
1765 | * ib_unregister_client - Unregister an IB client | |
1766 | * @client:Client to unregister | |
1767 | * | |
1768 | * Upper level users use ib_unregister_client() to remove their client | |
1769 | * registration. When ib_unregister_client() is called, the client | |
1770 | * will receive a remove callback for each IB device still registered. | |
921eab11 JG |
1771 | * |
1772 | * This is a full fence: once it returns, no client callbacks will be called | |
1773 | * or still be running in another thread. | |
1da177e4 LT |
1774 | */ |
1775 | void ib_unregister_client(struct ib_client *client) | |
1776 | { | |
1da177e4 | 1777 | struct ib_device *device; |
0df91bb6 | 1778 | unsigned long index; |
1da177e4 | 1779 | |
921eab11 | 1780 | down_write(&clients_rwsem); |
621e55ff | 1781 | ib_client_put(client); |
e59178d8 | 1782 | xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED); |
921eab11 | 1783 | up_write(&clients_rwsem); |
621e55ff JG |
1784 | |
1785 | /* We do not want to hold locks while calling client->remove() */ | |
1786 | rcu_read_lock(); | |
1787 | xa_for_each (&devices, index, device) { | |
1788 | if (!ib_device_try_get(device)) | |
1789 | continue; | |
1790 | rcu_read_unlock(); | |
1791 | ||
921eab11 | 1792 | remove_client_context(device, client->client_id); |
1da177e4 | 1793 | |
621e55ff JG |
1794 | ib_device_put(device); |
1795 | rcu_read_lock(); | |
1796 | } | |
1797 | rcu_read_unlock(); | |
1798 | ||
921eab11 | 1799 | /* |
621e55ff JG |
1800 | * remove_client_context() is not a fence, it can return even though a |
1801 | * removal is ongoing. Wait until all removals are completed. | |
921eab11 | 1802 | */ |
621e55ff | 1803 | wait_for_completion(&client->uses_zero); |
9cd58817 | 1804 | remove_client_id(client); |
1da177e4 LT |
1805 | } |
1806 | EXPORT_SYMBOL(ib_unregister_client); | |
1807 | ||
0e2d00eb JG |
1808 | static int __ib_get_global_client_nl_info(const char *client_name, |
1809 | struct ib_client_nl_info *res) | |
1810 | { | |
1811 | struct ib_client *client; | |
1812 | unsigned long index; | |
1813 | int ret = -ENOENT; | |
1814 | ||
1815 | down_read(&clients_rwsem); | |
1816 | xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) { | |
1817 | if (strcmp(client->name, client_name) != 0) | |
1818 | continue; | |
1819 | if (!client->get_global_nl_info) { | |
1820 | ret = -EOPNOTSUPP; | |
1821 | break; | |
1822 | } | |
1823 | ret = client->get_global_nl_info(res); | |
1824 | if (WARN_ON(ret == -ENOENT)) | |
1825 | ret = -EINVAL; | |
1826 | if (!ret && res->cdev) | |
1827 | get_device(res->cdev); | |
1828 | break; | |
1829 | } | |
1830 | up_read(&clients_rwsem); | |
1831 | return ret; | |
1832 | } | |
1833 | ||
1834 | static int __ib_get_client_nl_info(struct ib_device *ibdev, | |
1835 | const char *client_name, | |
1836 | struct ib_client_nl_info *res) | |
1837 | { | |
1838 | unsigned long index; | |
1839 | void *client_data; | |
1840 | int ret = -ENOENT; | |
1841 | ||
1842 | down_read(&ibdev->client_data_rwsem); | |
1843 | xan_for_each_marked (&ibdev->client_data, index, client_data, | |
1844 | CLIENT_DATA_REGISTERED) { | |
1845 | struct ib_client *client = xa_load(&clients, index); | |
1846 | ||
1847 | if (!client || strcmp(client->name, client_name) != 0) | |
1848 | continue; | |
1849 | if (!client->get_nl_info) { | |
1850 | ret = -EOPNOTSUPP; | |
1851 | break; | |
1852 | } | |
1853 | ret = client->get_nl_info(ibdev, client_data, res); | |
1854 | if (WARN_ON(ret == -ENOENT)) | |
1855 | ret = -EINVAL; | |
1856 | ||
1857 | /* | |
1858 | * The cdev is guaranteed valid as long as we are inside the | |
1859 | * client_data_rwsem as remove_one can't be called. Keep it | |
1860 | * valid for the caller. | |
1861 | */ | |
1862 | if (!ret && res->cdev) | |
1863 | get_device(res->cdev); | |
1864 | break; | |
1865 | } | |
1866 | up_read(&ibdev->client_data_rwsem); | |
1867 | ||
1868 | return ret; | |
1869 | } | |
1870 | ||
1871 | /** | |
1872 | * ib_get_client_nl_info - Fetch the nl_info from a client | |
1873 | * @ibdev: IB device to query, or NULL for a global client lookup | |
1874 | * @client_name: Name of the client | |
1875 | * @res: Result of the query | |
1876 | */ | |
1877 | int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name, | |
1878 | struct ib_client_nl_info *res) | |
1879 | { | |
1880 | int ret; | |
1881 | ||
1882 | if (ibdev) | |
1883 | ret = __ib_get_client_nl_info(ibdev, client_name, res); | |
1884 | else | |
1885 | ret = __ib_get_global_client_nl_info(client_name, res); | |
1886 | #ifdef CONFIG_MODULES | |
1887 | if (ret == -ENOENT) { | |
1888 | request_module("rdma-client-%s", client_name); | |
1889 | if (ibdev) | |
1890 | ret = __ib_get_client_nl_info(ibdev, client_name, res); | |
1891 | else | |
1892 | ret = __ib_get_global_client_nl_info(client_name, res); | |
1893 | } | |
1894 | #endif | |
1895 | if (ret) { | |
1896 | if (ret == -ENOENT) | |
1897 | return -EOPNOTSUPP; | |
1898 | return ret; | |
1899 | } | |
1900 | ||
1901 | if (WARN_ON(!res->cdev)) | |
1902 | return -EINVAL; | |
1903 | return 0; | |
1904 | } | |
1905 | ||
1da177e4 | 1906 | /** |
9cd330d3 | 1907 | * ib_set_client_data - Set IB client context |
1da177e4 LT |
1908 | * @device:Device to set context for |
1909 | * @client:Client to set context for | |
1910 | * @data:Context to set | |
1911 | * | |
0df91bb6 JG |
1912 | * ib_set_client_data() sets client context data that can be retrieved with |
1913 | * ib_get_client_data(). This can only be called while the client is | |
1914 | * registered to the device, once the ib_client remove() callback returns this | |
1915 | * cannot be called. | |
1da177e4 LT |
1916 | */ |
1917 | void ib_set_client_data(struct ib_device *device, struct ib_client *client, | |
1918 | void *data) | |
1919 | { | |
0df91bb6 | 1920 | void *rc; |
1da177e4 | 1921 | |
0df91bb6 JG |
1922 | if (WARN_ON(IS_ERR(data))) |
1923 | data = NULL; | |
1da177e4 | 1924 | |
0df91bb6 JG |
1925 | rc = xa_store(&device->client_data, client->client_id, data, |
1926 | GFP_KERNEL); | |
1927 | WARN_ON(xa_is_err(rc)); | |
1da177e4 LT |
1928 | } |
1929 | EXPORT_SYMBOL(ib_set_client_data); | |
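/*
 * Hypothetical sketch: reading back the pointer stored with
 * ib_set_client_data(). Reuses my_client/my_ctx from the client sketch
 * above; the result is only valid while the client is still attached.
 */
static struct my_ctx *my_lookup_ctx(struct ib_device *ibdev)
{
	return ib_get_client_data(ibdev, &my_client);
}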
1930 | ||
1931 | /** | |
1932 | * ib_register_event_handler - Register an IB event handler | |
1933 | * @event_handler:Handler to register | |
1934 | * | |
1935 | * ib_register_event_handler() registers an event handler that will be | |
1936 | * called back when asynchronous IB events occur (as defined in | |
6b57cea9 PP |
1937 | * chapter 11 of the InfiniBand Architecture Specification). This |
1938 | * callback occurs in workqueue context. | |
1da177e4 | 1939 | */ |
dcc9881e | 1940 | void ib_register_event_handler(struct ib_event_handler *event_handler) |
1da177e4 | 1941 | { |
6b57cea9 | 1942 | down_write(&event_handler->device->event_handler_rwsem); |
1da177e4 LT |
1943 | list_add_tail(&event_handler->list, |
1944 | &event_handler->device->event_handler_list); | |
6b57cea9 | 1945 | up_write(&event_handler->device->event_handler_rwsem); |
1da177e4 LT |
1946 | } |
1947 | EXPORT_SYMBOL(ib_register_event_handler); | |
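/*
 * Hypothetical sketch: registering an async event handler via the
 * INIT_IB_EVENT_HANDLER() helper from <rdma/ib_verbs.h>. The handler runs
 * in workqueue context, as noted above. "my_" names are illustrative.
 */
static void my_event_handler(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR)
		pr_info("%s: port %u went down\n",
			dev_name(&event->device->dev),
			event->element.port_num);
}

static struct ib_event_handler my_handler;

static void my_watch_device(struct ib_device *ibdev)
{
	INIT_IB_EVENT_HANDLER(&my_handler, ibdev, my_event_handler);
	ib_register_event_handler(&my_handler);
}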
1948 | ||
1949 | /** | |
1950 | * ib_unregister_event_handler - Unregister an event handler | |
1951 | * @event_handler:Handler to unregister | |
1952 | * | |
1953 | * Unregister an event handler registered with | |
1954 | * ib_register_event_handler(). | |
1955 | */ | |
dcc9881e | 1956 | void ib_unregister_event_handler(struct ib_event_handler *event_handler) |
1da177e4 | 1957 | { |
6b57cea9 | 1958 | down_write(&event_handler->device->event_handler_rwsem); |
1da177e4 | 1959 | list_del(&event_handler->list); |
6b57cea9 | 1960 | up_write(&event_handler->device->event_handler_rwsem); |
1da177e4 LT |
1961 | } |
1962 | EXPORT_SYMBOL(ib_unregister_event_handler); | |
1963 | ||
6b57cea9 | 1964 | void ib_dispatch_event_clients(struct ib_event *event) |
1da177e4 | 1965 | { |
1da177e4 LT |
1966 | struct ib_event_handler *handler; |
1967 | ||
6b57cea9 | 1968 | down_read(&event->device->event_handler_rwsem); |
1da177e4 LT |
1969 | |
1970 | list_for_each_entry(handler, &event->device->event_handler_list, list) | |
1971 | handler->handler(handler, event); | |
1972 | ||
6b57cea9 | 1973 | up_read(&event->device->event_handler_rwsem); |
1da177e4 | 1974 | } |
1da177e4 | 1975 | |
4929116b KH |
1976 | static int iw_query_port(struct ib_device *device, |
1977 | u8 port_num, | |
1978 | struct ib_port_attr *port_attr) | |
1da177e4 | 1979 | { |
4929116b KH |
1980 | struct in_device *inetdev; |
1981 | struct net_device *netdev; | |
fad61ad4 | 1982 | |
4929116b KH |
1983 | memset(port_attr, 0, sizeof(*port_attr)); |
1984 | ||
1985 | netdev = ib_device_get_netdev(device, port_num); | |
1986 | if (!netdev) | |
1987 | return -ENODEV; | |
1988 | ||
4929116b KH |
1989 | port_attr->max_mtu = IB_MTU_4096; |
1990 | port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu); | |
1991 | ||
1992 | if (!netif_carrier_ok(netdev)) { | |
1993 | port_attr->state = IB_PORT_DOWN; | |
1994 | port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED; | |
1995 | } else { | |
390d3fdc MK |
1996 | rcu_read_lock(); |
1997 | inetdev = __in_dev_get_rcu(netdev); | |
4929116b KH |
1998 | |
1999 | if (inetdev && inetdev->ifa_list) { | |
2000 | port_attr->state = IB_PORT_ACTIVE; | |
2001 | port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP; | |
4929116b KH |
2002 | } else { |
2003 | port_attr->state = IB_PORT_INIT; | |
2004 | port_attr->phys_state = | |
2005 | IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING; | |
2006 | } | |
390d3fdc MK |
2007 | |
2008 | rcu_read_unlock(); | |
4929116b KH |
2009 | } |
2010 | ||
390d3fdc | 2011 | dev_put(netdev); |
1e123d96 | 2012 | return device->ops.query_port(device, port_num, port_attr); |
4929116b KH |
2013 | } |
2014 | ||
2015 | static int __ib_query_port(struct ib_device *device, | |
2016 | u8 port_num, | |
2017 | struct ib_port_attr *port_attr) | |
2018 | { | |
2019 | union ib_gid gid = {}; | |
2020 | int err; | |
116c0074 | 2021 | |
fad61ad4 | 2022 | memset(port_attr, 0, sizeof(*port_attr)); |
4929116b | 2023 | |
3023a1e9 | 2024 | err = device->ops.query_port(device, port_num, port_attr); |
fad61ad4 EC |
2025 | if (err || port_attr->subnet_prefix) |
2026 | return err; | |
2027 | ||
4929116b KH |
2028 | if (rdma_port_get_link_layer(device, port_num) != |
2029 | IB_LINK_LAYER_INFINIBAND) | |
d7012467 EC |
2030 | return 0; |
2031 | ||
3023a1e9 | 2032 | err = device->ops.query_gid(device, port_num, 0, &gid); |
fad61ad4 EC |
2033 | if (err) |
2034 | return err; | |
2035 | ||
2036 | port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix); | |
2037 | return 0; | |
1da177e4 | 2038 | } |
4929116b KH |
2039 | |
2040 | /** | |
2041 | * ib_query_port - Query IB port attributes | |
2042 | * @device:Device to query | |
2043 | * @port_num:Port number to query | |
2044 | * @port_attr:Port attributes | |
2045 | * | |
2046 | * ib_query_port() returns the attributes of a port through the | |
2047 | * @port_attr pointer. | |
2048 | */ | |
2049 | int ib_query_port(struct ib_device *device, | |
2050 | u8 port_num, | |
2051 | struct ib_port_attr *port_attr) | |
2052 | { | |
2053 | if (!rdma_is_port_valid(device, port_num)) | |
2054 | return -EINVAL; | |
2055 | ||
2056 | if (rdma_protocol_iwarp(device, port_num)) | |
2057 | return iw_query_port(device, port_num, port_attr); | |
2058 | else | |
2059 | return __ib_query_port(device, port_num, port_attr); | |
2060 | } | |
1da177e4 LT |
2061 | EXPORT_SYMBOL(ib_query_port); |
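/*
 * Hypothetical sketch: callers get one uniform ib_port_attr whether the
 * port is IB, RoCE or iWarp (for iWarp the state/MTU come from the bound
 * netdev via iw_query_port() above). "my_" name is illustrative.
 */
static bool my_port_is_active(struct ib_device *ibdev, u8 port_num)
{
	struct ib_port_attr attr;

	if (ib_query_port(ibdev, port_num, &attr))
		return false;
	return attr.state == IB_PORT_ACTIVE;
}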
2062 | ||
324e227e JG |
2063 | static void add_ndev_hash(struct ib_port_data *pdata) |
2064 | { | |
2065 | unsigned long flags; | |
2066 | ||
2067 | might_sleep(); | |
2068 | ||
2069 | spin_lock_irqsave(&ndev_hash_lock, flags); | |
2070 | if (hash_hashed(&pdata->ndev_hash_link)) { | |
2071 | hash_del_rcu(&pdata->ndev_hash_link); | |
2072 | spin_unlock_irqrestore(&ndev_hash_lock, flags); | |
2073 | /* | |
2074 | * We cannot do hash_add_rcu after a hash_del_rcu until the | |
2075 | * grace period has elapsed. | |
2076 | */ | |
2077 | synchronize_rcu(); | |
2078 | spin_lock_irqsave(&ndev_hash_lock, flags); | |
2079 | } | |
2080 | if (pdata->netdev) | |
2081 | hash_add_rcu(ndev_hash, &pdata->ndev_hash_link, | |
2082 | (uintptr_t)pdata->netdev); | |
2083 | spin_unlock_irqrestore(&ndev_hash_lock, flags); | |
2084 | } | |
2085 | ||
c2261dd7 JG |
2086 | /** |
2087 | * ib_device_set_netdev - Associate the ib_dev with an underlying net_device | |
2088 | * @ib_dev: Device to modify | |
2089 | * @ndev: net_device to affiliate, may be NULL | |
2090 | * @port: IB port the net_device is connected to | |
2091 | * | |
2092 | * Drivers should use this to link the ib_device to a netdev so the netdev | |
2093 | * shows up in interfaces like ib_enum_roce_netdev. Only one netdev may be | |
2094 | * affiliated with any port. | |
2095 | * | |
2096 | * The caller must ensure that the given ndev is not unregistered or | |
2097 | * unregistering, and that either the ib_device is unregistered or | |
2098 | * ib_device_set_netdev() is called with NULL when the ndev sends a | |
2099 | * NETDEV_UNREGISTER event. | |
2100 | */ | |
2101 | int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, | |
2102 | unsigned int port) | |
2103 | { | |
2104 | struct net_device *old_ndev; | |
2105 | struct ib_port_data *pdata; | |
2106 | unsigned long flags; | |
2107 | int ret; | |
2108 | ||
2109 | /* | |
2110 | * Drivers wish to call this before ib_register_device, so we have to | |
2111 | * set up the port data early. | |
2112 | */ | |
2113 | ret = alloc_port_data(ib_dev); | |
2114 | if (ret) | |
2115 | return ret; | |
2116 | ||
2117 | if (!rdma_is_port_valid(ib_dev, port)) | |
2118 | return -EINVAL; | |
2119 | ||
2120 | pdata = &ib_dev->port_data[port]; | |
2121 | spin_lock_irqsave(&pdata->netdev_lock, flags); | |
324e227e JG |
2122 | old_ndev = rcu_dereference_protected( |
2123 | pdata->netdev, lockdep_is_held(&pdata->netdev_lock)); | |
2124 | if (old_ndev == ndev) { | |
c2261dd7 JG |
2125 | spin_unlock_irqrestore(&pdata->netdev_lock, flags); |
2126 | return 0; | |
2127 | } | |
c2261dd7 JG |
2128 | |
2129 | if (ndev) | |
2130 | dev_hold(ndev); | |
324e227e | 2131 | rcu_assign_pointer(pdata->netdev, ndev); |
c2261dd7 JG |
2132 | spin_unlock_irqrestore(&pdata->netdev_lock, flags); |
2133 | ||
324e227e | 2134 | add_ndev_hash(pdata); |
c2261dd7 JG |
2135 | if (old_ndev) |
2136 | dev_put(old_ndev); | |
2137 | ||
2138 | return 0; | |
2139 | } | |
2140 | EXPORT_SYMBOL(ib_device_set_netdev); | |
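/*
 * Hypothetical sketch: how a RoCE-style driver might bind and unbind its
 * netdev per the kernel-doc above. Port number 1 and the "my_" names are
 * illustrative assumptions.
 */
static int my_bind_netdev(struct ib_device *ibdev, struct net_device *ndev)
{
	/* The core takes its own dev_hold(); replaces any previous ndev. */
	return ib_device_set_netdev(ibdev, ndev, 1);
}

static void my_netdev_going_away(struct ib_device *ibdev)
{
	/* On NETDEV_UNREGISTER: NULL drops the reference and hash entry. */
	ib_device_set_netdev(ibdev, NULL, 1);
}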
2141 | ||
2142 | static void free_netdevs(struct ib_device *ib_dev) | |
2143 | { | |
2144 | unsigned long flags; | |
2145 | unsigned int port; | |
2146 | ||
46bdf370 KH |
2147 | if (!ib_dev->port_data) |
2148 | return; | |
2149 | ||
c2261dd7 JG |
2150 | rdma_for_each_port (ib_dev, port) { |
2151 | struct ib_port_data *pdata = &ib_dev->port_data[port]; | |
324e227e | 2152 | struct net_device *ndev; |
c2261dd7 JG |
2153 | |
2154 | spin_lock_irqsave(&pdata->netdev_lock, flags); | |
324e227e JG |
2155 | ndev = rcu_dereference_protected( |
2156 | pdata->netdev, lockdep_is_held(&pdata->netdev_lock)); | |
2157 | if (ndev) { | |
2158 | spin_lock(&ndev_hash_lock); | |
2159 | hash_del_rcu(&pdata->ndev_hash_link); | |
2160 | spin_unlock(&ndev_hash_lock); | |
2161 | ||
2162 | /* | |
2163 | * If this is the last dev_put there is still a | |
2164 | * synchronize_rcu before the netdev is kfreed, so we | |
2165 | * can continue to rely on unlocked pointer | |
2166 | * comparisons after the put | |
2167 | */ | |
2168 | rcu_assign_pointer(pdata->netdev, NULL); | |
2169 | dev_put(ndev); | |
c2261dd7 JG |
2170 | } |
2171 | spin_unlock_irqrestore(&pdata->netdev_lock, flags); | |
2172 | } | |
2173 | } | |
2174 | ||
2175 | struct net_device *ib_device_get_netdev(struct ib_device *ib_dev, | |
2176 | unsigned int port) | |
2177 | { | |
2178 | struct ib_port_data *pdata; | |
2179 | struct net_device *res; | |
2180 | ||
2181 | if (!rdma_is_port_valid(ib_dev, port)) | |
2182 | return NULL; | |
2183 | ||
2184 | pdata = &ib_dev->port_data[port]; | |
2185 | ||
2186 | /* | |
2187 | * New drivers should use ib_device_set_netdev() not the legacy | |
2188 | * get_netdev(). | |
2189 | */ | |
2190 | if (ib_dev->ops.get_netdev) | |
2191 | res = ib_dev->ops.get_netdev(ib_dev, port); | |
2192 | else { | |
2193 | spin_lock(&pdata->netdev_lock); | |
324e227e JG |
2194 | res = rcu_dereference_protected( |
2195 | pdata->netdev, lockdep_is_held(&pdata->netdev_lock)); | |
c2261dd7 JG |
2196 | if (res) |
2197 | dev_hold(res); | |
2198 | spin_unlock(&pdata->netdev_lock); | |
2199 | } | |
2200 | ||
2201 | /* | |
2202 | * If we are starting to unregister, expedite things by preventing | |
2203 | * propagation of an unregistering netdev. | |
2204 | */ | |
2205 | if (res && res->reg_state != NETREG_REGISTERED) { | |
2206 | dev_put(res); | |
2207 | return NULL; | |
2208 | } | |
2209 | ||
2210 | return res; | |
2211 | } | |
2212 | ||
324e227e JG |
2213 | /** |
2214 | * ib_device_get_by_netdev - Find an IB device associated with a netdev | |
2215 | * @ndev: netdev to locate | |
2216 | * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all) | |
2217 | * | |
2218 | * Find and hold an ib_device that is associated with a netdev via | |
2219 | * ib_device_set_netdev(). The caller must call ib_device_put() on the | |
2220 | * returned pointer. | |
2221 | */ | |
2222 | struct ib_device *ib_device_get_by_netdev(struct net_device *ndev, | |
2223 | enum rdma_driver_id driver_id) | |
2224 | { | |
2225 | struct ib_device *res = NULL; | |
2226 | struct ib_port_data *cur; | |
2227 | ||
2228 | rcu_read_lock(); | |
2229 | hash_for_each_possible_rcu (ndev_hash, cur, ndev_hash_link, | |
2230 | (uintptr_t)ndev) { | |
2231 | if (rcu_access_pointer(cur->netdev) == ndev && | |
2232 | (driver_id == RDMA_DRIVER_UNKNOWN || | |
b9560a41 | 2233 | cur->ib_dev->ops.driver_id == driver_id) && |
324e227e JG |
2234 | ib_device_try_get(cur->ib_dev)) { |
2235 | res = cur->ib_dev; | |
2236 | break; | |
2237 | } | |
2238 | } | |
2239 | rcu_read_unlock(); | |
2240 | ||
2241 | return res; | |
2242 | } | |
2243 | EXPORT_SYMBOL(ib_device_get_by_netdev); | |
2244 | ||
03db3a2d MB |
2245 | /** |
2246 | * ib_enum_roce_netdev - enumerate all RoCE ports | |
2247 | * @ib_dev: IB device we want to query | |
2248 | * @filter: Should we call the callback? | |
2249 | * @filter_cookie: Cookie passed to filter | |
2250 | * @cb: Callback to call for each found RoCE port | |
2251 | * @cookie: Cookie passed back to the callback | |
2252 | * | |
2253 | * Enumerates all of the physical RoCE ports of ib_dev | |
2254 | * which are related to a netdevice and calls cb() on each | |
2255 | * port for which the filter() function returns non-zero. | |
2256 | */ | |
2257 | void ib_enum_roce_netdev(struct ib_device *ib_dev, | |
2258 | roce_netdev_filter filter, | |
2259 | void *filter_cookie, | |
2260 | roce_netdev_callback cb, | |
2261 | void *cookie) | |
2262 | { | |
ea1075ed | 2263 | unsigned int port; |
03db3a2d | 2264 | |
ea1075ed | 2265 | rdma_for_each_port (ib_dev, port) |
03db3a2d | 2266 | if (rdma_protocol_roce(ib_dev, port)) { |
c2261dd7 JG |
2267 | struct net_device *idev = |
2268 | ib_device_get_netdev(ib_dev, port); | |
03db3a2d MB |
2269 | |
2270 | if (filter(ib_dev, port, idev, filter_cookie)) | |
2271 | cb(ib_dev, port, idev, cookie); | |
2272 | ||
2273 | if (idev) | |
2274 | dev_put(idev); | |
2275 | } | |
2276 | } | |
2277 | ||
2278 | /** | |
2279 | * ib_enum_all_roce_netdevs - enumerate all RoCE devices | |
2280 | * @filter: Should we call the callback? | |
2281 | * @filter_cookie: Cookie passed to filter | |
2282 | * @cb: Callback to call for each found RoCE port | |
2283 | * @cookie: Cookie passed back to the callback | |
2284 | * | |
2285 | * Enumerates all RoCE devices' physical ports which are related | |
2286 | * to netdevices and calls cb() on each port for which the | |
2287 | * filter() function returns non-zero. | |
2288 | */ | |
2289 | void ib_enum_all_roce_netdevs(roce_netdev_filter filter, | |
2290 | void *filter_cookie, | |
2291 | roce_netdev_callback cb, | |
2292 | void *cookie) | |
2293 | { | |
2294 | struct ib_device *dev; | |
0df91bb6 | 2295 | unsigned long index; |
03db3a2d | 2296 | |
921eab11 | 2297 | down_read(&devices_rwsem); |
0df91bb6 | 2298 | xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) |
03db3a2d | 2299 | ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie); |
921eab11 | 2300 | up_read(&devices_rwsem); |
8030c835 LR |
2301 | } |
2302 | ||
2303 | /** | |
2304 | * ib_enum_all_devs - enumerate all ib_devices | |
2305 | * @nldev_cb: Callback to call for each found ib_device | |
2306 | * | |
2307 | * Enumerates all ib_devices and calls callback() on each device. | |
2308 | */ | |
2309 | int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb, | |
2310 | struct netlink_callback *cb) | |
2311 | { | |
0df91bb6 | 2312 | unsigned long index; |
8030c835 LR |
2313 | struct ib_device *dev; |
2314 | unsigned int idx = 0; | |
2315 | int ret = 0; | |
2316 | ||
921eab11 | 2317 | down_read(&devices_rwsem); |
0df91bb6 | 2318 | xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { |
37eeab55 PP |
2319 | if (!rdma_dev_access_netns(dev, sock_net(skb->sk))) |
2320 | continue; | |
2321 | ||
8030c835 LR |
2322 | ret = nldev_cb(dev, skb, cb, idx); |
2323 | if (ret) | |
2324 | break; | |
2325 | idx++; | |
2326 | } | |
921eab11 | 2327 | up_read(&devices_rwsem); |
8030c835 | 2328 | return ret; |
03db3a2d MB |
2329 | } |
2330 | ||
1da177e4 LT |
2331 | /** |
2332 | * ib_query_pkey - Get P_Key table entry | |
2333 | * @device:Device to query | |
2334 | * @port_num:Port number to query | |
2335 | * @index:P_Key table index to query | |
2336 | * @pkey:Returned P_Key | |
2337 | * | |
2338 | * ib_query_pkey() fetches the specified P_Key table entry. | |
2339 | */ | |
2340 | int ib_query_pkey(struct ib_device *device, | |
2341 | u8 port_num, u16 index, u16 *pkey) | |
2342 | { | |
9af3f5cf YS |
2343 | if (!rdma_is_port_valid(device, port_num)) |
2344 | return -EINVAL; | |
2345 | ||
3023a1e9 | 2346 | return device->ops.query_pkey(device, port_num, index, pkey); |
1da177e4 LT |
2347 | } |
2348 | EXPORT_SYMBOL(ib_query_pkey); | |
2349 | ||
2350 | /** | |
2351 | * ib_modify_device - Change IB device attributes | |
2352 | * @device:Device to modify | |
2353 | * @device_modify_mask:Mask of attributes to change | |
2354 | * @device_modify:New attribute values | |
2355 | * | |
2356 | * ib_modify_device() changes a device's attributes as specified by | |
2357 | * the @device_modify_mask and @device_modify structure. | |
2358 | */ | |
2359 | int ib_modify_device(struct ib_device *device, | |
2360 | int device_modify_mask, | |
2361 | struct ib_device_modify *device_modify) | |
2362 | { | |
3023a1e9 | 2363 | if (!device->ops.modify_device) |
d0f3ef36 | 2364 | return -EOPNOTSUPP; |
10e1b54b | 2365 | |
3023a1e9 KH |
2366 | return device->ops.modify_device(device, device_modify_mask, |
2367 | device_modify); | |
1da177e4 LT |
2368 | } |
2369 | EXPORT_SYMBOL(ib_modify_device); | |
2370 | ||
2371 | /** | |
2372 | * ib_modify_port - Modifies the attributes for the specified port. | |
2373 | * @device: The device to modify. | |
2374 | * @port_num: The number of the port to modify. | |
2375 | * @port_modify_mask: Mask used to specify which attributes of the port | |
2376 | * to change. | |
2377 | * @port_modify: New attribute values for the port. | |
2378 | * | |
2379 | * ib_modify_port() changes a port's attributes as specified by the | |
2380 | * @port_modify_mask and @port_modify structure. | |
2381 | */ | |
2382 | int ib_modify_port(struct ib_device *device, | |
2383 | u8 port_num, int port_modify_mask, | |
2384 | struct ib_port_modify *port_modify) | |
2385 | { | |
61e0962d | 2386 | int rc; |
10e1b54b | 2387 | |
24dc831b | 2388 | if (!rdma_is_port_valid(device, port_num)) |
116c0074 RD |
2389 | return -EINVAL; |
2390 | ||
3023a1e9 KH |
2391 | if (device->ops.modify_port) |
2392 | rc = device->ops.modify_port(device, port_num, | |
2393 | port_modify_mask, | |
2394 | port_modify); | |
55bfe905 KH |
2395 | else if (rdma_protocol_roce(device, port_num) && |
2396 | ((port_modify->set_port_cap_mask & ~IB_PORT_CM_SUP) == 0 || | |
2397 | (port_modify->clr_port_cap_mask & ~IB_PORT_CM_SUP) == 0)) | |
2398 | rc = 0; | |
61e0962d | 2399 | else |
55bfe905 | 2400 | rc = -EOPNOTSUPP; |
61e0962d | 2401 | return rc; |
1da177e4 LT |
2402 | } |
2403 | EXPORT_SYMBOL(ib_modify_port); | |
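/*
 * Hypothetical sketch: how a CM-style ULP advertises the CM capability
 * bit. On RoCE ports a pure IB_PORT_CM_SUP update succeeds as a no-op,
 * per the branch above. "my_" name is illustrative.
 */
static int my_advertise_cm(struct ib_device *ibdev, u8 port_num)
{
	struct ib_port_modify pm = {
		.set_port_cap_mask = IB_PORT_CM_SUP,
	};

	return ib_modify_port(ibdev, port_num, 0, &pm);
}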
2404 | ||
5eb620c8 YE |
2405 | /** |
2406 | * ib_find_gid - Returns the port number and GID table index where | |
dbb12562 | 2407 | * a specified GID value occurs. It searches only the IB link layer. |
5eb620c8 YE |
2408 | * @device: The device to query. |
2409 | * @gid: The GID value to search for. | |
2410 | * @port_num: The port number of the device where the GID value was found. | |
2411 | * @index: The index into the GID table where the GID was found. This | |
2412 | * parameter may be NULL. | |
2413 | */ | |
2414 | int ib_find_gid(struct ib_device *device, union ib_gid *gid, | |
b26c4a11 | 2415 | u8 *port_num, u16 *index) |
5eb620c8 YE |
2416 | { |
2417 | union ib_gid tmp_gid; | |
ea1075ed JG |
2418 | unsigned int port; |
2419 | int ret, i; | |
5eb620c8 | 2420 | |
ea1075ed | 2421 | rdma_for_each_port (device, port) { |
22d24f75 | 2422 | if (!rdma_protocol_ib(device, port)) |
b39ffa1d MB |
2423 | continue; |
2424 | ||
8ceb1357 JG |
2425 | for (i = 0; i < device->port_data[port].immutable.gid_tbl_len; |
2426 | ++i) { | |
1dfce294 | 2427 | ret = rdma_query_gid(device, port, i, &tmp_gid); |
5eb620c8 YE |
2428 | if (ret) |
2429 | return ret; | |
2430 | if (!memcmp(&tmp_gid, gid, sizeof *gid)) { | |
2431 | *port_num = port; | |
2432 | if (index) | |
2433 | *index = i; | |
2434 | return 0; | |
2435 | } | |
2436 | } | |
2437 | } | |
2438 | ||
2439 | return -ENOENT; | |
2440 | } | |
2441 | EXPORT_SYMBOL(ib_find_gid); | |
2442 | ||
2443 | /** | |
2444 | * ib_find_pkey - Returns the PKey table index where a specified | |
2445 | * PKey value occurs. | |
2446 | * @device: The device to query. | |
2447 | * @port_num: The port number of the device to search for the PKey. | |
2448 | * @pkey: The PKey value to search for. | |
2449 | * @index: The index into the PKey table where the PKey was found. | |
2450 | */ | |
2451 | int ib_find_pkey(struct ib_device *device, | |
2452 | u8 port_num, u16 pkey, u16 *index) | |
2453 | { | |
2454 | int ret, i; | |
2455 | u16 tmp_pkey; | |
ff7166c4 | 2456 | int partial_ix = -1; |
5eb620c8 | 2457 | |
8ceb1357 JG |
2458 | for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len; |
2459 | ++i) { | |
5eb620c8 YE |
2460 | ret = ib_query_pkey(device, port_num, i, &tmp_pkey); |
2461 | if (ret) | |
2462 | return ret; | |
36026ecc | 2463 | if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) { |
ff7166c4 JM |
2464 | /* If there is a full-member pkey, take it. */ | |
2465 | if (tmp_pkey & 0x8000) { | |
2466 | *index = i; | |
2467 | return 0; | |
2468 | } | |
2469 | if (partial_ix < 0) | |
2470 | partial_ix = i; | |
5eb620c8 YE |
2471 | } |
2472 | } | |
2473 | ||
ff7166c4 JM |
2474 | /* No full member; if a limited member exists, take it. */ | |
2475 | if (partial_ix >= 0) { | |
2476 | *index = partial_ix; | |
2477 | return 0; | |
2478 | } | |
5eb620c8 YE |
2479 | return -ENOENT; |
2480 | } | |
2481 | EXPORT_SYMBOL(ib_find_pkey); | |
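/*
 * Hypothetical sketch: finding the table index of the default P_Key. The
 * search above prefers a full-member entry (bit 15 set) over a limited
 * one. "my_" name is illustrative.
 */
static int my_default_pkey_index(struct ib_device *ibdev, u8 port_num,
				 u16 *index)
{
	return ib_find_pkey(ibdev, port_num, 0xffff /* default P_Key */, index);
}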
2482 | ||
9268f72d YK |
2483 | /** |
2484 | * ib_get_net_dev_by_params() - Return the appropriate net_dev | |
2485 | * for a received CM request | |
2486 | * @dev: An RDMA device on which the request has been received. | |
2487 | * @port: Port number on the RDMA device. | |
2488 | * @pkey: The Pkey the request came on. | |
2489 | * @gid: A GID that the net_dev uses to communicate. | |
2490 | * @addr: Contains the IP address that the request specified as its | |
2491 | * destination. | |
921eab11 | 2492 | * |
9268f72d YK |
2493 | */ |
2494 | struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, | |
2495 | u8 port, | |
2496 | u16 pkey, | |
2497 | const union ib_gid *gid, | |
2498 | const struct sockaddr *addr) | |
2499 | { | |
2500 | struct net_device *net_dev = NULL; | |
0df91bb6 JG |
2501 | unsigned long index; |
2502 | void *client_data; | |
9268f72d YK |
2503 | |
2504 | if (!rdma_protocol_ib(dev, port)) | |
2505 | return NULL; | |
2506 | ||
921eab11 JG |
2507 | /* |
2508 | * Holding the read side guarantees that the client will not become | |
2509 | * unregistered while we are calling get_net_dev_by_params() | |
2510 | */ | |
2511 | down_read(&dev->client_data_rwsem); | |
0df91bb6 JG |
2512 | xan_for_each_marked (&dev->client_data, index, client_data, |
2513 | CLIENT_DATA_REGISTERED) { | |
2514 | struct ib_client *client = xa_load(&clients, index); | |
9268f72d | 2515 | |
0df91bb6 | 2516 | if (!client || !client->get_net_dev_by_params) |
9268f72d YK |
2517 | continue; |
2518 | ||
0df91bb6 JG |
2519 | net_dev = client->get_net_dev_by_params(dev, port, pkey, gid, |
2520 | addr, client_data); | |
2521 | if (net_dev) | |
2522 | break; | |
9268f72d | 2523 | } |
921eab11 | 2524 | up_read(&dev->client_data_rwsem); |
9268f72d YK |
2525 | |
2526 | return net_dev; | |
2527 | } | |
2528 | EXPORT_SYMBOL(ib_get_net_dev_by_params); | |
2529 | ||
521ed0d9 KH |
2530 | void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) |
2531 | { | |
3023a1e9 | 2532 | struct ib_device_ops *dev_ops = &dev->ops; |
521ed0d9 KH |
2533 | #define SET_DEVICE_OP(ptr, name) \ |
2534 | do { \ | |
2535 | if (ops->name) \ | |
2536 | if (!((ptr)->name)) \ | |
2537 | (ptr)->name = ops->name; \ | |
2538 | } while (0) | |
2539 | ||
30471d4b LR |
2540 | #define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name) |
2541 | ||
b9560a41 JG |
2542 | if (ops->driver_id != RDMA_DRIVER_UNKNOWN) { |
2543 | WARN_ON(dev_ops->driver_id != RDMA_DRIVER_UNKNOWN && | |
2544 | dev_ops->driver_id != ops->driver_id); | |
2545 | dev_ops->driver_id = ops->driver_id; | |
2546 | } | |
7a154142 JG |
2547 | if (ops->owner) { |
2548 | WARN_ON(dev_ops->owner && dev_ops->owner != ops->owner); | |
2549 | dev_ops->owner = ops->owner; | |
2550 | } | |
72c6ec18 JG |
2551 | if (ops->uverbs_abi_ver) |
2552 | dev_ops->uverbs_abi_ver = ops->uverbs_abi_ver; | |
b9560a41 | 2553 | |
8f71bb00 JG |
2554 | dev_ops->uverbs_no_driver_id_binding |= |
2555 | ops->uverbs_no_driver_id_binding; | |
2556 | ||
3023a1e9 | 2557 | SET_DEVICE_OP(dev_ops, add_gid); |
2f1927b0 | 2558 | SET_DEVICE_OP(dev_ops, advise_mr); |
3023a1e9 KH |
2559 | SET_DEVICE_OP(dev_ops, alloc_dm); |
2560 | SET_DEVICE_OP(dev_ops, alloc_fmr); | |
2561 | SET_DEVICE_OP(dev_ops, alloc_hw_stats); | |
2562 | SET_DEVICE_OP(dev_ops, alloc_mr); | |
26bc7eae | 2563 | SET_DEVICE_OP(dev_ops, alloc_mr_integrity); |
3023a1e9 KH |
2564 | SET_DEVICE_OP(dev_ops, alloc_mw); |
2565 | SET_DEVICE_OP(dev_ops, alloc_pd); | |
2566 | SET_DEVICE_OP(dev_ops, alloc_rdma_netdev); | |
2567 | SET_DEVICE_OP(dev_ops, alloc_ucontext); | |
2568 | SET_DEVICE_OP(dev_ops, alloc_xrcd); | |
2569 | SET_DEVICE_OP(dev_ops, attach_mcast); | |
2570 | SET_DEVICE_OP(dev_ops, check_mr_status); | |
c4ffee7c | 2571 | SET_DEVICE_OP(dev_ops, counter_alloc_stats); |
99fa331d MZ |
2572 | SET_DEVICE_OP(dev_ops, counter_bind_qp); |
2573 | SET_DEVICE_OP(dev_ops, counter_dealloc); | |
2574 | SET_DEVICE_OP(dev_ops, counter_unbind_qp); | |
c4ffee7c | 2575 | SET_DEVICE_OP(dev_ops, counter_update_stats); |
3023a1e9 KH |
2576 | SET_DEVICE_OP(dev_ops, create_ah); |
2577 | SET_DEVICE_OP(dev_ops, create_counters); | |
2578 | SET_DEVICE_OP(dev_ops, create_cq); | |
2579 | SET_DEVICE_OP(dev_ops, create_flow); | |
2580 | SET_DEVICE_OP(dev_ops, create_flow_action_esp); | |
2581 | SET_DEVICE_OP(dev_ops, create_qp); | |
2582 | SET_DEVICE_OP(dev_ops, create_rwq_ind_table); | |
2583 | SET_DEVICE_OP(dev_ops, create_srq); | |
2584 | SET_DEVICE_OP(dev_ops, create_wq); | |
2585 | SET_DEVICE_OP(dev_ops, dealloc_dm); | |
d0899892 | 2586 | SET_DEVICE_OP(dev_ops, dealloc_driver); |
3023a1e9 KH |
2587 | SET_DEVICE_OP(dev_ops, dealloc_fmr); |
2588 | SET_DEVICE_OP(dev_ops, dealloc_mw); | |
2589 | SET_DEVICE_OP(dev_ops, dealloc_pd); | |
2590 | SET_DEVICE_OP(dev_ops, dealloc_ucontext); | |
2591 | SET_DEVICE_OP(dev_ops, dealloc_xrcd); | |
2592 | SET_DEVICE_OP(dev_ops, del_gid); | |
2593 | SET_DEVICE_OP(dev_ops, dereg_mr); | |
2594 | SET_DEVICE_OP(dev_ops, destroy_ah); | |
2595 | SET_DEVICE_OP(dev_ops, destroy_counters); | |
2596 | SET_DEVICE_OP(dev_ops, destroy_cq); | |
2597 | SET_DEVICE_OP(dev_ops, destroy_flow); | |
2598 | SET_DEVICE_OP(dev_ops, destroy_flow_action); | |
2599 | SET_DEVICE_OP(dev_ops, destroy_qp); | |
2600 | SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table); | |
2601 | SET_DEVICE_OP(dev_ops, destroy_srq); | |
2602 | SET_DEVICE_OP(dev_ops, destroy_wq); | |
2603 | SET_DEVICE_OP(dev_ops, detach_mcast); | |
2604 | SET_DEVICE_OP(dev_ops, disassociate_ucontext); | |
2605 | SET_DEVICE_OP(dev_ops, drain_rq); | |
2606 | SET_DEVICE_OP(dev_ops, drain_sq); | |
ca22354b | 2607 | SET_DEVICE_OP(dev_ops, enable_driver); |
02da3750 | 2608 | SET_DEVICE_OP(dev_ops, fill_res_entry); |
4061ff7a | 2609 | SET_DEVICE_OP(dev_ops, fill_stat_entry); |
3023a1e9 KH |
2610 | SET_DEVICE_OP(dev_ops, get_dev_fw_str); |
2611 | SET_DEVICE_OP(dev_ops, get_dma_mr); | |
2612 | SET_DEVICE_OP(dev_ops, get_hw_stats); | |
2613 | SET_DEVICE_OP(dev_ops, get_link_layer); | |
2614 | SET_DEVICE_OP(dev_ops, get_netdev); | |
2615 | SET_DEVICE_OP(dev_ops, get_port_immutable); | |
2616 | SET_DEVICE_OP(dev_ops, get_vector_affinity); | |
2617 | SET_DEVICE_OP(dev_ops, get_vf_config); | |
bfcb3c5d | 2618 | SET_DEVICE_OP(dev_ops, get_vf_guid); |
3023a1e9 | 2619 | SET_DEVICE_OP(dev_ops, get_vf_stats); |
ea4baf7f | 2620 | SET_DEVICE_OP(dev_ops, init_port); |
dd05cb82 KH |
2621 | SET_DEVICE_OP(dev_ops, iw_accept); |
2622 | SET_DEVICE_OP(dev_ops, iw_add_ref); | |
2623 | SET_DEVICE_OP(dev_ops, iw_connect); | |
2624 | SET_DEVICE_OP(dev_ops, iw_create_listen); | |
2625 | SET_DEVICE_OP(dev_ops, iw_destroy_listen); | |
2626 | SET_DEVICE_OP(dev_ops, iw_get_qp); | |
2627 | SET_DEVICE_OP(dev_ops, iw_reject); | |
2628 | SET_DEVICE_OP(dev_ops, iw_rem_ref); | |
3023a1e9 | 2629 | SET_DEVICE_OP(dev_ops, map_mr_sg); |
2cdfcdd8 | 2630 | SET_DEVICE_OP(dev_ops, map_mr_sg_pi); |
3023a1e9 KH |
2631 | SET_DEVICE_OP(dev_ops, map_phys_fmr); |
2632 | SET_DEVICE_OP(dev_ops, mmap); | |
3411f9f0 | 2633 | SET_DEVICE_OP(dev_ops, mmap_free); |
3023a1e9 KH |
2634 | SET_DEVICE_OP(dev_ops, modify_ah); |
2635 | SET_DEVICE_OP(dev_ops, modify_cq); | |
2636 | SET_DEVICE_OP(dev_ops, modify_device); | |
2637 | SET_DEVICE_OP(dev_ops, modify_flow_action_esp); | |
2638 | SET_DEVICE_OP(dev_ops, modify_port); | |
2639 | SET_DEVICE_OP(dev_ops, modify_qp); | |
2640 | SET_DEVICE_OP(dev_ops, modify_srq); | |
2641 | SET_DEVICE_OP(dev_ops, modify_wq); | |
2642 | SET_DEVICE_OP(dev_ops, peek_cq); | |
2643 | SET_DEVICE_OP(dev_ops, poll_cq); | |
2644 | SET_DEVICE_OP(dev_ops, post_recv); | |
2645 | SET_DEVICE_OP(dev_ops, post_send); | |
2646 | SET_DEVICE_OP(dev_ops, post_srq_recv); | |
2647 | SET_DEVICE_OP(dev_ops, process_mad); | |
2648 | SET_DEVICE_OP(dev_ops, query_ah); | |
2649 | SET_DEVICE_OP(dev_ops, query_device); | |
2650 | SET_DEVICE_OP(dev_ops, query_gid); | |
2651 | SET_DEVICE_OP(dev_ops, query_pkey); | |
2652 | SET_DEVICE_OP(dev_ops, query_port); | |
2653 | SET_DEVICE_OP(dev_ops, query_qp); | |
2654 | SET_DEVICE_OP(dev_ops, query_srq); | |
2655 | SET_DEVICE_OP(dev_ops, rdma_netdev_get_params); | |
2656 | SET_DEVICE_OP(dev_ops, read_counters); | |
2657 | SET_DEVICE_OP(dev_ops, reg_dm_mr); | |
2658 | SET_DEVICE_OP(dev_ops, reg_user_mr); | |
2659 | SET_DEVICE_OP(dev_ops, req_ncomp_notif); | |
2660 | SET_DEVICE_OP(dev_ops, req_notify_cq); | |
2661 | SET_DEVICE_OP(dev_ops, rereg_user_mr); | |
2662 | SET_DEVICE_OP(dev_ops, resize_cq); | |
2663 | SET_DEVICE_OP(dev_ops, set_vf_guid); | |
2664 | SET_DEVICE_OP(dev_ops, set_vf_link_state); | |
2665 | SET_DEVICE_OP(dev_ops, unmap_fmr); | |
21a428a0 | 2666 | |
d3456914 | 2667 | SET_OBJ_SIZE(dev_ops, ib_ah); |
e39afe3d | 2668 | SET_OBJ_SIZE(dev_ops, ib_cq); |
21a428a0 | 2669 | SET_OBJ_SIZE(dev_ops, ib_pd); |
68e326de | 2670 | SET_OBJ_SIZE(dev_ops, ib_srq); |
a2a074ef | 2671 | SET_OBJ_SIZE(dev_ops, ib_ucontext); |
521ed0d9 KH |
2672 | } |
2673 | EXPORT_SYMBOL(ib_set_device_ops); | |
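/*
 * Hypothetical sketch: a provider fills one static ops table and installs
 * it once; SET_DEVICE_OP() above never overwrites an op that is already
 * set, and INIT_RDMA_OBJ_SIZE() fills the size_* members so the core can
 * allocate driver-embedded objects. All "my_" names are illustrative.
 */
struct my_pd {
	struct ib_pd ibpd;	/* must be the first member */
};

static int my_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey)
{
	*pkey = 0xffff;		/* stub: report only the default P_Key */
	return 0;
}

static const struct ib_device_ops my_dev_ops = {
	.owner		= THIS_MODULE,
	.driver_id	= RDMA_DRIVER_UNKNOWN,	/* a real driver uses its own id */
	.uverbs_abi_ver	= 1,

	.query_pkey	= my_query_pkey,

	INIT_RDMA_OBJ_SIZE(ib_pd, my_pd, ibpd),
};

static void my_init_ops(struct ib_device *ibdev)
{
	ib_set_device_ops(ibdev, &my_dev_ops);
}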
2674 | ||
d0e312fe | 2675 | static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = { |
735c631a | 2676 | [RDMA_NL_LS_OP_RESOLVE] = { |
647c75ac | 2677 | .doit = ib_nl_handle_resolve_resp, |
e3a2b93d LR |
2678 | .flags = RDMA_NL_ADMIN_PERM, |
2679 | }, | |
735c631a | 2680 | [RDMA_NL_LS_OP_SET_TIMEOUT] = { |
647c75ac | 2681 | .doit = ib_nl_handle_set_timeout, |
e3a2b93d LR |
2682 | .flags = RDMA_NL_ADMIN_PERM, |
2683 | }, | |
ae43f828 | 2684 | [RDMA_NL_LS_OP_IP_RESOLVE] = { |
647c75ac | 2685 | .doit = ib_nl_handle_ip_res_resp, |
e3a2b93d LR |
2686 | .flags = RDMA_NL_ADMIN_PERM, |
2687 | }, | |
735c631a MB |
2688 | }; |
2689 | ||
1da177e4 LT |
2690 | static int __init ib_core_init(void) |
2691 | { | |
2692 | int ret; | |
2693 | ||
f0626710 TH |
2694 | ib_wq = alloc_workqueue("infiniband", 0, 0); |
2695 | if (!ib_wq) | |
2696 | return -ENOMEM; | |
2697 | ||
14d3a3b2 | 2698 | ib_comp_wq = alloc_workqueue("ib-comp-wq", |
b7363e67 | 2699 | WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0); |
14d3a3b2 CH |
2700 | if (!ib_comp_wq) { |
2701 | ret = -ENOMEM; | |
2702 | goto err; | |
2703 | } | |
2704 | ||
f794809a JM |
2705 | ib_comp_unbound_wq = |
2706 | alloc_workqueue("ib-comp-unb-wq", | |
2707 | WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM | | |
2708 | WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE); | |
2709 | if (!ib_comp_unbound_wq) { | |
2710 | ret = -ENOMEM; | |
2711 | goto err_comp; | |
2712 | } | |
2713 | ||
55aeed06 | 2714 | ret = class_register(&ib_class); |
fd75c789 | 2715 | if (ret) { |
aba25a3e | 2716 | pr_warn("Couldn't create InfiniBand device class\n"); |
f794809a | 2717 | goto err_comp_unbound; |
fd75c789 | 2718 | } |
1da177e4 | 2719 | |
549af008 PP |
2720 | rdma_nl_init(); |
2721 | ||
e3f20f02 LR |
2722 | ret = addr_init(); |
2723 | if (ret) { | |
2724 | pr_warn("Could't init IB address resolution\n"); | |
2725 | goto err_ibnl; | |
2726 | } | |
2727 | ||
4c2cb422 MB |
2728 | ret = ib_mad_init(); |
2729 | if (ret) { | |
2730 | pr_warn("Couldn't init IB MAD\n"); | |
2731 | goto err_addr; | |
2732 | } | |
2733 | ||
c2e49c92 MB |
2734 | ret = ib_sa_init(); |
2735 | if (ret) { | |
2736 | pr_warn("Couldn't init SA\n"); | |
2737 | goto err_mad; | |
2738 | } | |
2739 | ||
42df744c | 2740 | ret = register_blocking_lsm_notifier(&ibdev_lsm_nb); |
8f408ab6 DJ |
2741 | if (ret) { |
2742 | pr_warn("Couldn't register LSM notifier. ret %d\n", ret); | |
c9901724 | 2743 | goto err_sa; |
8f408ab6 DJ |
2744 | } |
2745 | ||
4e0f7b90 PP |
2746 | ret = register_pernet_device(&rdma_dev_net_ops); |
2747 | if (ret) { | |
2748 | pr_warn("Couldn't init compat dev. ret %d\n", ret); | |
2749 | goto err_compat; | |
2750 | } | |
2751 | ||
6c80b41a | 2752 | nldev_init(); |
c9901724 | 2753 | rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table); |
5ef8c0c1 | 2754 | roce_gid_mgmt_init(); |
1da177e4 | 2755 | |
fd75c789 NM |
2756 | return 0; |
2757 | ||
4e0f7b90 | 2758 | err_compat: |
42df744c | 2759 | unregister_blocking_lsm_notifier(&ibdev_lsm_nb); |
735c631a MB |
2760 | err_sa: |
2761 | ib_sa_cleanup(); | |
c2e49c92 MB |
2762 | err_mad: |
2763 | ib_mad_cleanup(); | |
4c2cb422 MB |
2764 | err_addr: |
2765 | addr_cleanup(); | |
e3f20f02 | 2766 | err_ibnl: |
55aeed06 | 2767 | class_unregister(&ib_class); |
f794809a JM |
2768 | err_comp_unbound: |
2769 | destroy_workqueue(ib_comp_unbound_wq); | |
14d3a3b2 CH |
2770 | err_comp: |
2771 | destroy_workqueue(ib_comp_wq); | |
fd75c789 NM |
2772 | err: |
2773 | destroy_workqueue(ib_wq); | |
1da177e4 LT |
2774 | return ret; |
2775 | } | |
2776 | ||
2777 | static void __exit ib_core_cleanup(void) | |
2778 | { | |
5ef8c0c1 | 2779 | roce_gid_mgmt_cleanup(); |
6c80b41a | 2780 | nldev_exit(); |
c9901724 | 2781 | rdma_nl_unregister(RDMA_NL_LS); |
4e0f7b90 | 2782 | unregister_pernet_device(&rdma_dev_net_ops); |
42df744c | 2783 | unregister_blocking_lsm_notifier(&ibdev_lsm_nb); |
c2e49c92 | 2784 | ib_sa_cleanup(); |
4c2cb422 | 2785 | ib_mad_cleanup(); |
e3f20f02 | 2786 | addr_cleanup(); |
c9901724 | 2787 | rdma_nl_exit(); |
55aeed06 | 2788 | class_unregister(&ib_class); |
f794809a | 2789 | destroy_workqueue(ib_comp_unbound_wq); |
14d3a3b2 | 2790 | destroy_workqueue(ib_comp_wq); |
f7c6a7b5 | 2791 | /* Make sure that any pending umem accounting work is done. */ |
f0626710 | 2792 | destroy_workqueue(ib_wq); |
d0899892 | 2793 | flush_workqueue(system_unbound_wq); |
e59178d8 | 2794 | WARN_ON(!xa_empty(&clients)); |
0df91bb6 | 2795 | WARN_ON(!xa_empty(&devices)); |
1da177e4 LT |
2796 | } |
2797 | ||
e3bf14bd JG |
2798 | MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4); |
2799 | ||
62dfa795 PP |
2800 | /* The ib core relies on the netdev stack to register the | |
2801 | * net_ns_type_operations ns kobject type before ib_core initialization. | |
2802 | */ | |
2803 | fs_initcall(ib_core_init); | |
1da177e4 | 2804 | module_exit(ib_core_cleanup); |