Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. | |
2a1d9b7f | 3 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. |
1da177e4 LT |
4 | * |
5 | * This software is available to you under a choice of one of two | |
6 | * licenses. You may choose to be licensed under the terms of the GNU | |
7 | * General Public License (GPL) Version 2, available from the file | |
8 | * COPYING in the main directory of this source tree, or the | |
9 | * OpenIB.org BSD license below: | |
10 | * | |
11 | * Redistribution and use in source and binary forms, with or | |
12 | * without modification, are permitted provided that the following | |
13 | * conditions are met: | |
14 | * | |
15 | * - Redistributions of source code must retain the above | |
16 | * copyright notice, this list of conditions and the following | |
17 | * disclaimer. | |
18 | * | |
19 | * - Redistributions in binary form must reproduce the above | |
20 | * copyright notice, this list of conditions and the following | |
21 | * disclaimer in the documentation and/or other materials | |
22 | * provided with the distribution. | |
23 | * | |
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
31 | * SOFTWARE. | |
1da177e4 LT |
32 | */ |
33 | ||
34 | #include <linux/module.h> | |
35 | #include <linux/string.h> | |
36 | #include <linux/errno.h> | |
9a6b090c | 37 | #include <linux/kernel.h> |
1da177e4 LT |
38 | #include <linux/slab.h> |
39 | #include <linux/init.h> | |
9268f72d | 40 | #include <linux/netdevice.h> |
4e0f7b90 | 41 | #include <net/net_namespace.h> |
8f408ab6 DJ |
42 | #include <linux/security.h> |
43 | #include <linux/notifier.h> | |
324e227e | 44 | #include <linux/hashtable.h> |
b2cbae2c | 45 | #include <rdma/rdma_netlink.h> |
03db3a2d MB |
46 | #include <rdma/ib_addr.h> |
47 | #include <rdma/ib_cache.h> | |
413d3347 | 48 | #include <rdma/rdma_counter.h> |
1da177e4 LT |
49 | |
50 | #include "core_priv.h" | |
41eda65c | 51 | #include "restrack.h" |
1da177e4 LT |
52 | |
53 | MODULE_AUTHOR("Roland Dreier"); | |
54 | MODULE_DESCRIPTION("core kernel InfiniBand API"); | |
55 | MODULE_LICENSE("Dual BSD/GPL"); | |
56 | ||
14d3a3b2 | 57 | struct workqueue_struct *ib_comp_wq; |
f794809a | 58 | struct workqueue_struct *ib_comp_unbound_wq; |
f0626710 TH |
59 | struct workqueue_struct *ib_wq; |
60 | EXPORT_SYMBOL_GPL(ib_wq); | |
ff815a89 | 61 | static struct workqueue_struct *ib_unreg_wq; |
f0626710 | 62 | |
921eab11 JG |
63 | /* |
64 | * Each of the three rwsem locks (devices, clients, client_data) protects the | |
65 | * xarray of the same name. Specifically it allows the caller to assert that | |
66 | * the MARK will/will not be changing under the lock, and for devices and | |
67 | * clients, that the value in the xarray is still a valid pointer. Change of | |
68 | * the MARK is linked to the object state, so holding the lock and testing the | |
69 | * MARK also asserts that the contained object is in a certain state. | |
70 | * | |
71 | * This is used to build a two stage register/unregister flow where objects | |
72 | * can continue to be in the xarray even though they are still in progress to | |
73 | * register/unregister. | |
74 | * | |
75 | * The xarray itself provides additional locking, and restartable iteration, | |
76 | * which is also relied on. | |
77 | * | |
78 | * Locks should not be nested, with the exception of client_data, which is | |
79 | * allowed to nest under the read side of the other two locks. | |
80 | * | |
81 | * The devices_rwsem also protects the device name list, any change or | |
82 | * assignment of device name must also hold the write side to guarantee unique | |
83 | * names. | |
84 | */ | |
85 | ||
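As a minimal illustration of the read-side pattern this comment describes (the helper name below is made up; the body mirrors what ib_policy_change_task() does further down in this file):

```c
/*
 * Illustrative sketch only: hold devices_rwsem so the DEVICE_REGISTERED
 * mark cannot change while iterating, then walk only the marked entries.
 */
static void for_each_registered_example(void)
{
	struct ib_device *dev;
	unsigned long index;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
		/* dev cannot lose DEVICE_REGISTERED while the lock is held */
	}
	up_read(&devices_rwsem);
}
```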
0df91bb6 JG |
86 | /* |
87 | * devices contains devices that have had their names assigned. The | |
88 | * devices may not be registered. Users that care about the registration | |
89 | * status need to call ib_device_try_get() on the device to ensure it is | |
90 | * registered, and keep it registered, for the required duration. | |
91 | * | |
92 | */ | |
93 | static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC); | |
921eab11 | 94 | static DECLARE_RWSEM(devices_rwsem); |
0df91bb6 JG |
95 | #define DEVICE_REGISTERED XA_MARK_1 |
96 | ||
9cd58817 | 97 | static u32 highest_client_id; |
e59178d8 JG |
98 | #define CLIENT_REGISTERED XA_MARK_1 |
99 | static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC); | |
921eab11 | 100 | static DECLARE_RWSEM(clients_rwsem); |
1da177e4 | 101 | |
621e55ff JG |
102 | static void ib_client_put(struct ib_client *client) |
103 | { | |
104 | if (refcount_dec_and_test(&client->uses)) | |
105 | complete(&client->uses_zero); | |
106 | } | |
107 | ||
1da177e4 | 108 | /* |
0df91bb6 JG |
109 | * If client_data is registered then the corresponding client must also still |
110 | * be registered. | |
111 | */ | |
112 | #define CLIENT_DATA_REGISTERED XA_MARK_1 | |
4e0f7b90 | 113 | |
1d2fedd8 | 114 | unsigned int rdma_dev_net_id; |
4e0f7b90 PP |
115 | |
116 | /* | |
117 | * A list of net namespaces is maintained in an xarray. This is necessary | |
118 | * because we can't get the locking right using the existing net ns list. We | |
119 | * would require an init_net callback after the list is updated. |
120 | */ | |
121 | static DEFINE_XARRAY_FLAGS(rdma_nets, XA_FLAGS_ALLOC); | |
122 | /* | |
123 | * rwsem to protect accessing the rdma_nets xarray entries. | |
124 | */ | |
125 | static DECLARE_RWSEM(rdma_nets_rwsem); | |
126 | ||
cb7e0e13 | 127 | bool ib_devices_shared_netns = true; |
a56bc45b PP |
128 | module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444); |
129 | MODULE_PARM_DESC(netns_mode, | |
130 | "Share device among net namespaces; default=1 (shared)"); | |
41c61401 | 131 | /** |
d6537c1a | 132 | * rdma_dev_access_netns() - Return whether an rdma device can be accessed |
41c61401 | 133 | * from a specified net namespace or not. |
d6537c1a | 134 | * @dev: Pointer to rdma device which needs to be checked |
41c61401 PP |
135 | * @net: Pointer to net namespace for which access is to be checked |
136 | * | |
d6537c1a | 137 | * When the rdma device is in shared mode, it ignores the net namespace. |
138 | * When the rdma device is exclusive to a net namespace, the device's net |
139 | * namespace is checked against the specified one. |
41c61401 PP |
140 | */ |
141 | bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net) | |
142 | { | |
143 | return (ib_devices_shared_netns || | |
144 | net_eq(read_pnet(&dev->coredev.rdma_net), net)); | |
145 | } | |
146 | EXPORT_SYMBOL(rdma_dev_access_netns); | |
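A minimal usage sketch, assuming a hypothetical lookup helper; it filters by namespace the same way ib_device_get_by_index() does later in this file:

```c
/* Illustrative only: reject devices that are not visible in @net. */
static struct ib_device *lookup_in_netns_example(const struct net *net,
						 u32 index)
{
	struct ib_device *device;

	down_read(&devices_rwsem);
	device = xa_load(&devices, index);
	if (device && (!rdma_dev_access_netns(device, net) ||
		       !ib_device_try_get(device)))
		device = NULL;
	up_read(&devices_rwsem);
	return device;
}
```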
147 | ||
0df91bb6 JG |
148 | /* |
149 | * xarray has this behavior where it won't iterate over NULL values stored in | |
150 | * allocated arrays. So we need our own iterator to see all values stored in | |
151 | * the array. This does the same thing as xa_for_each except that it also | |
152 | * returns NULL valued entries if the array is allocating. Simplified to only | |
153 | * work on simple xarrays. | |
154 | */ | |
155 | static void *xan_find_marked(struct xarray *xa, unsigned long *indexp, | |
156 | xa_mark_t filter) | |
157 | { | |
158 | XA_STATE(xas, xa, *indexp); | |
159 | void *entry; | |
160 | ||
161 | rcu_read_lock(); | |
162 | do { | |
163 | entry = xas_find_marked(&xas, ULONG_MAX, filter); | |
164 | if (xa_is_zero(entry)) | |
165 | break; | |
166 | } while (xas_retry(&xas, entry)); | |
167 | rcu_read_unlock(); | |
168 | ||
169 | if (entry) { | |
170 | *indexp = xas.xa_index; | |
171 | if (xa_is_zero(entry)) | |
172 | return NULL; | |
173 | return entry; | |
174 | } | |
175 | return XA_ERROR(-ENOENT); | |
176 | } | |
177 | #define xan_for_each_marked(xa, index, entry, filter) \ | |
178 | for (index = 0, entry = xan_find_marked(xa, &(index), filter); \ | |
179 | !xa_is_err(entry); \ | |
180 | (index)++, entry = xan_find_marked(xa, &(index), filter)) | |
181 | ||
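A short sketch of how the iterator is meant to be used (the helper name is hypothetical; compare ib_device_rename() below, which walks client_data this way):

```c
/*
 * Illustrative only: unlike xa_for_each(), this also visits entries whose
 * stored value is NULL, which client_data relies on.
 */
static void walk_client_data_example(struct ib_device *ibdev)
{
	void *client_data;
	unsigned long index;

	down_read(&ibdev->client_data_rwsem);
	xan_for_each_marked(&ibdev->client_data, index, client_data,
			    CLIENT_DATA_REGISTERED) {
		/* entries stored as NULL are still visited here */
	}
	up_read(&ibdev->client_data_rwsem);
}
```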
324e227e JG |
182 | /* RCU hash table mapping netdevice pointers to struct ib_port_data */ |
183 | static DEFINE_SPINLOCK(ndev_hash_lock); | |
184 | static DECLARE_HASHTABLE(ndev_hash, 5); | |
185 | ||
c2261dd7 | 186 | static void free_netdevs(struct ib_device *ib_dev); |
d0899892 JG |
187 | static void ib_unregister_work(struct work_struct *work); |
188 | static void __ib_unregister_device(struct ib_device *device); | |
8f408ab6 DJ |
189 | static int ib_security_change(struct notifier_block *nb, unsigned long event, |
190 | void *lsm_data); | |
191 | static void ib_policy_change_task(struct work_struct *work); | |
192 | static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task); | |
193 | ||
923abb9d GP |
194 | static void __ibdev_printk(const char *level, const struct ib_device *ibdev, |
195 | struct va_format *vaf) | |
196 | { | |
197 | if (ibdev && ibdev->dev.parent) | |
198 | dev_printk_emit(level[1] - '0', | |
199 | ibdev->dev.parent, | |
200 | "%s %s %s: %pV", | |
201 | dev_driver_string(ibdev->dev.parent), | |
202 | dev_name(ibdev->dev.parent), | |
203 | dev_name(&ibdev->dev), | |
204 | vaf); | |
205 | else if (ibdev) | |
206 | printk("%s%s: %pV", | |
207 | level, dev_name(&ibdev->dev), vaf); | |
208 | else | |
209 | printk("%s(NULL ib_device): %pV", level, vaf); | |
210 | } | |
211 | ||
923abb9d GP |
212 | #define define_ibdev_printk_level(func, level) \ |
213 | void func(const struct ib_device *ibdev, const char *fmt, ...) \ | |
214 | { \ | |
215 | struct va_format vaf; \ | |
216 | va_list args; \ | |
217 | \ | |
218 | va_start(args, fmt); \ | |
219 | \ | |
220 | vaf.fmt = fmt; \ | |
221 | vaf.va = &args; \ | |
222 | \ | |
223 | __ibdev_printk(level, ibdev, &vaf); \ | |
224 | \ | |
225 | va_end(args); \ | |
226 | } \ | |
227 | EXPORT_SYMBOL(func); | |
228 | ||
229 | define_ibdev_printk_level(ibdev_emerg, KERN_EMERG); | |
230 | define_ibdev_printk_level(ibdev_alert, KERN_ALERT); | |
231 | define_ibdev_printk_level(ibdev_crit, KERN_CRIT); | |
232 | define_ibdev_printk_level(ibdev_err, KERN_ERR); | |
233 | define_ibdev_printk_level(ibdev_warn, KERN_WARNING); | |
234 | define_ibdev_printk_level(ibdev_notice, KERN_NOTICE); | |
235 | define_ibdev_printk_level(ibdev_info, KERN_INFO); | |
236 | ||
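A brief usage sketch for the generated helpers, which behave like the dev_*() printk wrappers; the surrounding function is hypothetical:

```c
/* Illustrative only: prefix log lines with the ib_device identity. */
static void log_example(struct ib_device *ibdev, int ret)
{
	if (ret)
		ibdev_warn(ibdev, "operation failed: %d\n", ret);
	else
		ibdev_info(ibdev, "%s is ready\n", dev_name(&ibdev->dev));
}
```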
8f408ab6 DJ |
237 | static struct notifier_block ibdev_lsm_nb = { |
238 | .notifier_call = ib_security_change, | |
239 | }; | |
1da177e4 | 240 | |
decbc7a6 PP |
241 | static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net, |
242 | struct net *net); | |
243 | ||
324e227e JG |
244 | /* Pointer to the RCU head at the start of the ib_port_data array */ |
245 | struct ib_port_data_rcu { | |
246 | struct rcu_head rcu_head; | |
247 | struct ib_port_data pdata[]; | |
248 | }; | |
249 | ||
deee3c7e | 250 | static void ib_device_check_mandatory(struct ib_device *device) |
1da177e4 | 251 | { |
3023a1e9 | 252 | #define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x } |
1da177e4 LT |
253 | static const struct { |
254 | size_t offset; | |
255 | char *name; | |
256 | } mandatory_table[] = { | |
257 | IB_MANDATORY_FUNC(query_device), | |
258 | IB_MANDATORY_FUNC(query_port), | |
1da177e4 LT |
259 | IB_MANDATORY_FUNC(alloc_pd), |
260 | IB_MANDATORY_FUNC(dealloc_pd), | |
1da177e4 LT |
261 | IB_MANDATORY_FUNC(create_qp), |
262 | IB_MANDATORY_FUNC(modify_qp), | |
263 | IB_MANDATORY_FUNC(destroy_qp), | |
264 | IB_MANDATORY_FUNC(post_send), | |
265 | IB_MANDATORY_FUNC(post_recv), | |
266 | IB_MANDATORY_FUNC(create_cq), | |
267 | IB_MANDATORY_FUNC(destroy_cq), | |
268 | IB_MANDATORY_FUNC(poll_cq), | |
269 | IB_MANDATORY_FUNC(req_notify_cq), | |
270 | IB_MANDATORY_FUNC(get_dma_mr), | |
44ce37bc | 271 | IB_MANDATORY_FUNC(reg_user_mr), |
7738613e IW |
272 | IB_MANDATORY_FUNC(dereg_mr), |
273 | IB_MANDATORY_FUNC(get_port_immutable) | |
1da177e4 LT |
274 | }; |
275 | int i; | |
276 | ||
6780c4fa | 277 | device->kverbs_provider = true; |
9a6b090c | 278 | for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) { |
3023a1e9 KH |
279 | if (!*(void **) ((void *) &device->ops + |
280 | mandatory_table[i].offset)) { | |
6780c4fa GP |
281 | device->kverbs_provider = false; |
282 | break; | |
1da177e4 LT |
283 | } |
284 | } | |
1da177e4 LT |
285 | } |
286 | ||
f8978bd9 | 287 | /* |
01b67117 PP |
288 | * Caller must perform ib_device_put() to return the device reference count |
289 | * when ib_device_get_by_index() returns valid device pointer. | |
f8978bd9 | 290 | */ |
37eeab55 | 291 | struct ib_device *ib_device_get_by_index(const struct net *net, u32 index) |
f8978bd9 LR |
292 | { |
293 | struct ib_device *device; | |
294 | ||
921eab11 | 295 | down_read(&devices_rwsem); |
0df91bb6 | 296 | device = xa_load(&devices, index); |
01b67117 | 297 | if (device) { |
37eeab55 PP |
298 | if (!rdma_dev_access_netns(device, net)) { |
299 | device = NULL; | |
300 | goto out; | |
301 | } | |
302 | ||
d79af724 | 303 | if (!ib_device_try_get(device)) |
01b67117 PP |
304 | device = NULL; |
305 | } | |
37eeab55 | 306 | out: |
921eab11 | 307 | up_read(&devices_rwsem); |
f8978bd9 LR |
308 | return device; |
309 | } | |
310 | ||
d79af724 JG |
311 | /** |
312 | * ib_device_put - Release IB device reference | |
313 | * @device: device whose reference to be released | |
314 | * | |
315 | * ib_device_put() releases reference to the IB device to allow it to be | |
316 | * unregistered and eventually freed. |
317 | */ | |
01b67117 PP |
318 | void ib_device_put(struct ib_device *device) |
319 | { | |
320 | if (refcount_dec_and_test(&device->refcount)) | |
321 | complete(&device->unreg_completion); | |
322 | } | |
d79af724 | 323 | EXPORT_SYMBOL(ib_device_put); |
01b67117 | 324 | |
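A minimal sketch of the expected get/put pairing (the helper name is made up):

```c
/* Illustrative only: every successful lookup must be paired with a put. */
static void lookup_and_release_example(const struct net *net, u32 index)
{
	struct ib_device *device;

	device = ib_device_get_by_index(net, index);
	if (!device)
		return;
	/* ... use the device; it cannot complete unregistration here ... */
	ib_device_put(device);
}
```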
1da177e4 LT |
325 | static struct ib_device *__ib_device_get_by_name(const char *name) |
326 | { | |
327 | struct ib_device *device; | |
0df91bb6 | 328 | unsigned long index; |
1da177e4 | 329 | |
0df91bb6 | 330 | xa_for_each (&devices, index, device) |
896de009 | 331 | if (!strcmp(name, dev_name(&device->dev))) |
1da177e4 LT |
332 | return device; |
333 | ||
334 | return NULL; | |
335 | } | |
336 | ||
6cc2c8e5 JG |
337 | /** |
338 | * ib_device_get_by_name - Find an IB device by name | |
339 | * @name: The name to look for | |
340 | * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all) | |
341 | * | |
342 | * Find and hold an ib_device by its name. The caller must call | |
343 | * ib_device_put() on the returned pointer. | |
344 | */ | |
345 | struct ib_device *ib_device_get_by_name(const char *name, | |
346 | enum rdma_driver_id driver_id) | |
347 | { | |
348 | struct ib_device *device; | |
349 | ||
350 | down_read(&devices_rwsem); | |
351 | device = __ib_device_get_by_name(name); | |
352 | if (device && driver_id != RDMA_DRIVER_UNKNOWN && | |
b9560a41 | 353 | device->ops.driver_id != driver_id) |
6cc2c8e5 JG |
354 | device = NULL; |
355 | ||
356 | if (device) { | |
357 | if (!ib_device_try_get(device)) | |
358 | device = NULL; | |
359 | } | |
360 | up_read(&devices_rwsem); | |
361 | return device; | |
362 | } | |
363 | EXPORT_SYMBOL(ib_device_get_by_name); | |
364 | ||
4e0f7b90 PP |
365 | static int rename_compat_devs(struct ib_device *device) |
366 | { | |
367 | struct ib_core_device *cdev; | |
368 | unsigned long index; | |
369 | int ret = 0; | |
370 | ||
371 | mutex_lock(&device->compat_devs_mutex); | |
372 | xa_for_each (&device->compat_devs, index, cdev) { | |
373 | ret = device_rename(&cdev->dev, dev_name(&device->dev)); | |
374 | if (ret) { | |
375 | dev_warn(&cdev->dev, | |
376 | "Fail to rename compatdev to new name %s\n", | |
377 | dev_name(&device->dev)); | |
378 | break; | |
379 | } | |
380 | } | |
381 | mutex_unlock(&device->compat_devs_mutex); | |
382 | return ret; | |
383 | } | |
384 | ||
d21943dd LR |
385 | int ib_device_rename(struct ib_device *ibdev, const char *name) |
386 | { | |
dc1435c0 LR |
387 | unsigned long index; |
388 | void *client_data; | |
e3593b56 | 389 | int ret; |
d21943dd | 390 | |
921eab11 | 391 | down_write(&devices_rwsem); |
e3593b56 | 392 | if (!strcmp(name, dev_name(&ibdev->dev))) { |
dc1435c0 LR |
393 | up_write(&devices_rwsem); |
394 | return 0; | |
e3593b56 JG |
395 | } |
396 | ||
344684e6 | 397 | if (__ib_device_get_by_name(name)) { |
dc1435c0 LR |
398 | up_write(&devices_rwsem); |
399 | return -EEXIST; | |
d21943dd LR |
400 | } |
401 | ||
402 | ret = device_rename(&ibdev->dev, name); | |
dc1435c0 LR |
403 | if (ret) { |
404 | up_write(&devices_rwsem); | |
405 | return ret; | |
406 | } | |
407 | ||
2c34bb6d | 408 | strscpy(ibdev->name, name, IB_DEVICE_NAME_MAX); |
4e0f7b90 | 409 | ret = rename_compat_devs(ibdev); |
dc1435c0 LR |
410 | |
411 | downgrade_write(&devices_rwsem); | |
412 | down_read(&ibdev->client_data_rwsem); | |
413 | xan_for_each_marked(&ibdev->client_data, index, client_data, | |
414 | CLIENT_DATA_REGISTERED) { | |
415 | struct ib_client *client = xa_load(&clients, index); | |
416 | ||
417 | if (!client || !client->rename) | |
418 | continue; | |
419 | ||
420 | client->rename(ibdev, client_data); | |
421 | } | |
422 | up_read(&ibdev->client_data_rwsem); | |
7566752e | 423 | rdma_nl_notify_event(ibdev, 0, RDMA_RENAME_EVENT); |
dc1435c0 LR |
424 | up_read(&devices_rwsem); |
425 | return 0; | |
d21943dd LR |
426 | } |
427 | ||
f8fc8cd9 YF |
428 | int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim) |
429 | { | |
430 | if (use_dim > 1) | |
431 | return -EINVAL; | |
432 | ibdev->use_cq_dim = use_dim; | |
433 | ||
434 | return 0; | |
435 | } | |
436 | ||
e349f858 | 437 | static int alloc_name(struct ib_device *ibdev, const char *name) |
1da177e4 | 438 | { |
1da177e4 | 439 | struct ib_device *device; |
0df91bb6 | 440 | unsigned long index; |
3b88afd3 JG |
441 | struct ida inuse; |
442 | int rc; | |
1da177e4 LT |
443 | int i; |
444 | ||
9ffbe8ac | 445 | lockdep_assert_held_write(&devices_rwsem); |
3b88afd3 | 446 | ida_init(&inuse); |
0df91bb6 | 447 | xa_for_each (&devices, index, device) { |
e349f858 JG |
448 | char buf[IB_DEVICE_NAME_MAX]; |
449 | ||
896de009 | 450 | if (sscanf(dev_name(&device->dev), name, &i) != 1) |
1da177e4 | 451 | continue; |
3b88afd3 | 452 | if (i < 0 || i >= INT_MAX) |
1da177e4 LT |
453 | continue; |
454 | snprintf(buf, sizeof buf, name, i); | |
3b88afd3 JG |
455 | if (strcmp(buf, dev_name(&device->dev)) != 0) |
456 | continue; | |
457 | ||
458 | rc = ida_alloc_range(&inuse, i, i, GFP_KERNEL); | |
459 | if (rc < 0) | |
460 | goto out; | |
1da177e4 LT |
461 | } |
462 | ||
3b88afd3 JG |
463 | rc = ida_alloc(&inuse, GFP_KERNEL); |
464 | if (rc < 0) | |
465 | goto out; | |
1da177e4 | 466 | |
3b88afd3 JG |
467 | rc = dev_set_name(&ibdev->dev, name, rc); |
468 | out: | |
469 | ida_destroy(&inuse); | |
470 | return rc; | |
1da177e4 LT |
471 | } |
472 | ||
55aeed06 JG |
473 | static void ib_device_release(struct device *device) |
474 | { | |
475 | struct ib_device *dev = container_of(device, struct ib_device, dev); | |
476 | ||
c2261dd7 | 477 | free_netdevs(dev); |
652432f3 | 478 | WARN_ON(refcount_read(&dev->refcount)); |
b7066b32 JG |
479 | if (dev->hw_stats_data) |
480 | ib_device_release_hw_stats(dev->hw_stats_data); | |
46bdf370 KH |
481 | if (dev->port_data) { |
482 | ib_cache_release_one(dev); | |
483 | ib_security_release_port_pkey_list(dev); | |
413d3347 | 484 | rdma_counter_release(dev); |
324e227e JG |
485 | kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu, |
486 | pdata[0]), | |
487 | rcu_head); | |
46bdf370 | 488 | } |
413d3347 | 489 | |
bca51197 | 490 | mutex_destroy(&dev->subdev_lock); |
56594ae1 PP |
491 | mutex_destroy(&dev->unregistration_lock); |
492 | mutex_destroy(&dev->compat_devs_mutex); | |
493 | ||
46bdf370 KH |
494 | xa_destroy(&dev->compat_devs); |
495 | xa_destroy(&dev->client_data); | |
324e227e | 496 | kfree_rcu(dev, rcu_head); |
55aeed06 JG |
497 | } |
498 | ||
23680f0b | 499 | static int ib_device_uevent(const struct device *device, |
55aeed06 JG |
500 | struct kobj_uevent_env *env) |
501 | { | |
896de009 | 502 | if (add_uevent_var(env, "NAME=%s", dev_name(device))) |
55aeed06 JG |
503 | return -ENOMEM; |
504 | ||
505 | /* | |
506 | * It would be nice to pass the node GUID with the event... | |
507 | */ | |
508 | ||
509 | return 0; | |
510 | } | |
511 | ||
fa627348 | 512 | static const void *net_namespace(const struct device *d) |
62dfa795 | 513 | { |
fa627348 | 514 | const struct ib_core_device *coredev = |
4e0f7b90 PP |
515 | container_of(d, struct ib_core_device, dev); |
516 | ||
517 | return read_pnet(&coredev->rdma_net); | |
62dfa795 PP |
518 | } |
519 | ||
55aeed06 JG |
520 | static struct class ib_class = { |
521 | .name = "infiniband", | |
522 | .dev_release = ib_device_release, | |
523 | .dev_uevent = ib_device_uevent, | |
62dfa795 PP |
524 | .ns_type = &net_ns_type_operations, |
525 | .namespace = net_namespace, | |
55aeed06 JG |
526 | }; |
527 | ||
cebe556b | 528 | static void rdma_init_coredev(struct ib_core_device *coredev, |
4e0f7b90 | 529 | struct ib_device *dev, struct net *net) |
cebe556b | 530 | { |
a1ecb30f RG |
531 | bool is_full_dev = &dev->coredev == coredev; |
532 | ||
cebe556b PP |
533 | /* This BUILD_BUG_ON is intended to catch layout change |
534 | * of union of ib_core_device and device. | |
535 | * dev must be the first element as ib_core and provider |
536 | * drivers use it. Adding anything in ib_core_device before |
537 | * device will break this assumption. | |
538 | */ | |
539 | BUILD_BUG_ON(offsetof(struct ib_device, coredev.dev) != | |
540 | offsetof(struct ib_device, dev)); | |
541 | ||
542 | coredev->dev.class = &ib_class; | |
543 | coredev->dev.groups = dev->groups; | |
a1ecb30f RG |
544 | |
545 | /* | |
546 | * Don't expose hw counters outside of the init namespace. | |
547 | */ | |
548 | if (!is_full_dev && dev->hw_stats_attr_index) | |
549 | coredev->dev.groups[dev->hw_stats_attr_index] = NULL; | |
550 | ||
cebe556b PP |
551 | device_initialize(&coredev->dev); |
552 | coredev->owner = dev; | |
553 | INIT_LIST_HEAD(&coredev->port_list); | |
4e0f7b90 | 554 | write_pnet(&coredev->rdma_net, net); |
cebe556b PP |
555 | } |
556 | ||
1da177e4 | 557 | /** |
459cc69f | 558 | * _ib_alloc_device - allocate an IB device struct |
1da177e4 LT |
559 | * @size:size of structure to allocate |
560 | * | |
561 | * Low-level drivers should use ib_alloc_device() to allocate &struct | |
562 | * ib_device. @size is the size of the structure to be allocated, | |
563 | * including any private data used by the low-level driver. | |
564 | * ib_dealloc_device() must be used to free structures allocated with | |
565 | * ib_alloc_device(). | |
566 | */ | |
459cc69f | 567 | struct ib_device *_ib_alloc_device(size_t size) |
1da177e4 | 568 | { |
55aeed06 | 569 | struct ib_device *device; |
286e1d3f | 570 | unsigned int i; |
55aeed06 JG |
571 | |
572 | if (WARN_ON(size < sizeof(struct ib_device))) | |
573 | return NULL; | |
574 | ||
575 | device = kzalloc(size, GFP_KERNEL); | |
576 | if (!device) | |
577 | return NULL; | |
578 | ||
41eda65c LR |
579 | if (rdma_restrack_init(device)) { |
580 | kfree(device); | |
581 | return NULL; | |
582 | } | |
02d8883f | 583 | |
4e0f7b90 | 584 | rdma_init_coredev(&device->coredev, device, &init_net); |
55aeed06 | 585 | |
55aeed06 | 586 | INIT_LIST_HEAD(&device->event_handler_list); |
40adf686 | 587 | spin_lock_init(&device->qp_open_list_lock); |
6b57cea9 | 588 | init_rwsem(&device->event_handler_rwsem); |
d0899892 | 589 | mutex_init(&device->unregistration_lock); |
0df91bb6 JG |
590 | /* |
591 | * client_data needs to be allocated because we don't want our mark to be |
592 | * destroyed if the user stores NULL in the client data. | |
593 | */ | |
594 | xa_init_flags(&device->client_data, XA_FLAGS_ALLOC); | |
921eab11 | 595 | init_rwsem(&device->client_data_rwsem); |
4e0f7b90 PP |
596 | xa_init_flags(&device->compat_devs, XA_FLAGS_ALLOC); |
597 | mutex_init(&device->compat_devs_mutex); | |
01b67117 | 598 | init_completion(&device->unreg_completion); |
d0899892 | 599 | INIT_WORK(&device->unregistration_work, ib_unregister_work); |
1da177e4 | 600 | |
286e1d3f JM |
601 | spin_lock_init(&device->cq_pools_lock); |
602 | for (i = 0; i < ARRAY_SIZE(device->cq_pools); i++) | |
603 | INIT_LIST_HEAD(&device->cq_pools[i]); | |
604 | ||
36721a6d AK |
605 | rwlock_init(&device->cache_lock); |
606 | ||
c074bb1e | 607 | device->uverbs_cmd_mask = |
44ce37bc | 608 | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) | |
c074bb1e | 609 | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) | |
44ce37bc JG |
610 | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST) | |
611 | BIT_ULL(IB_USER_VERBS_CMD_CLOSE_XRCD) | | |
676a80ad | 612 | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH) | |
c074bb1e JG |
613 | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | |
614 | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ) | | |
615 | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP) | | |
44ce37bc | 616 | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ) | |
652caba5 | 617 | BIT_ULL(IB_USER_VERBS_CMD_CREATE_XSRQ) | |
44ce37bc | 618 | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_MW) | |
c074bb1e JG |
619 | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD) | |
620 | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR) | | |
676a80ad | 621 | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH) | |
c074bb1e JG |
622 | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ) | |
623 | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP) | | |
44ce37bc JG |
624 | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ) | |
625 | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST) | | |
c074bb1e JG |
626 | BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) | |
627 | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP) | | |
44ce37bc JG |
628 | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ) | |
629 | BIT_ULL(IB_USER_VERBS_CMD_OPEN_QP) | | |
630 | BIT_ULL(IB_USER_VERBS_CMD_OPEN_XRCD) | | |
c074bb1e JG |
631 | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE) | |
632 | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT) | | |
633 | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP) | | |
44ce37bc JG |
634 | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ) | |
635 | BIT_ULL(IB_USER_VERBS_CMD_REG_MR) | | |
636 | BIT_ULL(IB_USER_VERBS_CMD_REREG_MR) | | |
637 | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ); | |
bca51197 MZ |
638 | |
639 | mutex_init(&device->subdev_lock); | |
640 | INIT_LIST_HEAD(&device->subdev_list_head); | |
641 | INIT_LIST_HEAD(&device->subdev_list); | |
642 | ||
55aeed06 | 643 | return device; |
1da177e4 | 644 | } |
459cc69f | 645 | EXPORT_SYMBOL(_ib_alloc_device); |
1da177e4 LT |
646 | |
647 | /** | |
648 | * ib_dealloc_device - free an IB device struct | |
649 | * @device:structure to free | |
650 | * | |
651 | * Free a structure allocated with ib_alloc_device(). | |
652 | */ | |
653 | void ib_dealloc_device(struct ib_device *device) | |
654 | { | |
d0899892 JG |
655 | if (device->ops.dealloc_driver) |
656 | device->ops.dealloc_driver(device); | |
657 | ||
658 | /* | |
659 | * ib_unregister_driver() requires all devices to remain in the xarray | |
660 | * while their ops are callable. The last op we call is dealloc_driver | |
661 | * above. This is needed to create a fence on op callbacks prior to | |
662 | * allowing the driver module to unload. | |
663 | */ | |
664 | down_write(&devices_rwsem); | |
665 | if (xa_load(&devices, device->index) == device) | |
666 | xa_erase(&devices, device->index); | |
667 | up_write(&devices_rwsem); | |
668 | ||
c2261dd7 JG |
669 | /* Expedite releasing netdev references */ |
670 | free_netdevs(device); | |
671 | ||
4e0f7b90 | 672 | WARN_ON(!xa_empty(&device->compat_devs)); |
0df91bb6 | 673 | WARN_ON(!xa_empty(&device->client_data)); |
652432f3 | 674 | WARN_ON(refcount_read(&device->refcount)); |
0ad699c0 | 675 | rdma_restrack_clean(device); |
e155755e | 676 | /* Balances with device_initialize */ |
924b8900 | 677 | put_device(&device->dev); |
1da177e4 LT |
678 | } |
679 | EXPORT_SYMBOL(ib_dealloc_device); | |
680 | ||
921eab11 JG |
681 | /* |
682 | * add_client_context() and remove_client_context() must be safe against | |
683 | * parallel calls on the same device - registration/unregistration of both the | |
684 | * device and client can be occurring in parallel. | |
685 | * | |
686 | * The routines need to be a fence, any caller must not return until the add | |
687 | * or remove is fully completed. | |
688 | */ | |
689 | static int add_client_context(struct ib_device *device, | |
690 | struct ib_client *client) | |
1da177e4 | 691 | { |
921eab11 | 692 | int ret = 0; |
1da177e4 | 693 | |
6780c4fa | 694 | if (!device->kverbs_provider && !client->no_kverbs_req) |
921eab11 JG |
695 | return 0; |
696 | ||
697 | down_write(&device->client_data_rwsem); | |
621e55ff JG |
698 | /* |
699 | * So long as the client is registered hold both the client and device | |
700 | * unregistration locks. | |
701 | */ | |
702 | if (!refcount_inc_not_zero(&client->uses)) | |
703 | goto out_unlock; | |
704 | refcount_inc(&device->refcount); | |
705 | ||
921eab11 JG |
706 | /* |
707 | * Another caller to add_client_context got here first and has already | |
708 | * completely initialized context. | |
709 | */ | |
710 | if (xa_get_mark(&device->client_data, client->client_id, | |
711 | CLIENT_DATA_REGISTERED)) | |
712 | goto out; | |
713 | ||
714 | ret = xa_err(xa_store(&device->client_data, client->client_id, NULL, | |
715 | GFP_KERNEL)); | |
716 | if (ret) | |
717 | goto out; | |
718 | downgrade_write(&device->client_data_rwsem); | |
11a0ae4c JG |
719 | if (client->add) { |
720 | if (client->add(device)) { | |
721 | /* | |
722 | * If a client fails to add then the error code is | |
723 | * ignored, but we won't call any more ops on this | |
724 | * client. | |
725 | */ | |
726 | xa_erase(&device->client_data, client->client_id); | |
727 | up_read(&device->client_data_rwsem); | |
728 | ib_device_put(device); | |
729 | ib_client_put(client); | |
730 | return 0; | |
731 | } | |
732 | } | |
921eab11 JG |
733 | |
734 | /* Readers shall not see a client until add has been completed */ | |
735 | xa_set_mark(&device->client_data, client->client_id, | |
736 | CLIENT_DATA_REGISTERED); | |
737 | up_read(&device->client_data_rwsem); | |
738 | return 0; | |
739 | ||
740 | out: | |
621e55ff JG |
741 | ib_device_put(device); |
742 | ib_client_put(client); | |
743 | out_unlock: | |
921eab11 JG |
744 | up_write(&device->client_data_rwsem); |
745 | return ret; | |
746 | } | |
747 | ||
748 | static void remove_client_context(struct ib_device *device, | |
749 | unsigned int client_id) | |
750 | { | |
751 | struct ib_client *client; | |
752 | void *client_data; | |
6780c4fa | 753 | |
921eab11 JG |
754 | down_write(&device->client_data_rwsem); |
755 | if (!xa_get_mark(&device->client_data, client_id, | |
756 | CLIENT_DATA_REGISTERED)) { | |
757 | up_write(&device->client_data_rwsem); | |
758 | return; | |
759 | } | |
760 | client_data = xa_load(&device->client_data, client_id); | |
761 | xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED); | |
762 | client = xa_load(&clients, client_id); | |
621e55ff | 763 | up_write(&device->client_data_rwsem); |
1da177e4 | 764 | |
921eab11 JG |
765 | /* |
766 | * Notice we cannot be holding any exclusive locks when calling the | |
767 | * remove callback as the remove callback can recurse back into any | |
768 | * public functions in this module and thus try for any locks those | |
769 | * functions take. | |
770 | * | |
771 | * For this reason clients and drivers should not call the | |
772 | * unregistration functions while holding any locks. |
921eab11 JG |
773 | */ |
774 | if (client->remove) | |
775 | client->remove(device, client_data); | |
776 | ||
777 | xa_erase(&device->client_data, client_id); | |
621e55ff JG |
778 | ib_device_put(device); |
779 | ib_client_put(client); | |
1da177e4 LT |
780 | } |
781 | ||
c2261dd7 | 782 | static int alloc_port_data(struct ib_device *device) |
5eb620c8 | 783 | { |
324e227e | 784 | struct ib_port_data_rcu *pdata_rcu; |
1fb7f897 | 785 | u32 port; |
c2261dd7 JG |
786 | |
787 | if (device->port_data) | |
788 | return 0; | |
789 | ||
790 | /* This can only be called once the physical port range is defined */ | |
791 | if (WARN_ON(!device->phys_port_cnt)) | |
792 | return -EINVAL; | |
7738613e | 793 | |
1fb7f897 MB |
794 | /* Reserve U32_MAX so the logic to go over all the ports is sane */ |
795 | if (WARN_ON(device->phys_port_cnt == U32_MAX)) | |
796 | return -EINVAL; | |
797 | ||
8ceb1357 JG |
798 | /* |
799 | * device->port_data is indexed directly by the port number to make | |
7738613e IW |
800 | * access to this data as efficient as possible. |
801 | * | |
8ceb1357 JG |
802 | * Therefore port_data is declared as a 1 based array with potential |
803 | * empty slots at the beginning. | |
7738613e | 804 | */ |
324e227e | 805 | pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata, |
81760bed | 806 | size_add(rdma_end_port(device), 1)), |
324e227e JG |
807 | GFP_KERNEL); |
808 | if (!pdata_rcu) | |
55aeed06 | 809 | return -ENOMEM; |
324e227e JG |
810 | /* |
811 | * The rcu_head is put in front of the port data array and the stored | |
812 | * pointer is adjusted since we never need to see that member until | |
813 | * kfree_rcu. | |
814 | */ | |
815 | device->port_data = pdata_rcu->pdata; | |
5eb620c8 | 816 | |
ea1075ed | 817 | rdma_for_each_port (device, port) { |
8ceb1357 JG |
818 | struct ib_port_data *pdata = &device->port_data[port]; |
819 | ||
324e227e | 820 | pdata->ib_dev = device; |
8ceb1357 JG |
821 | spin_lock_init(&pdata->pkey_list_lock); |
822 | INIT_LIST_HEAD(&pdata->pkey_list); | |
c2261dd7 | 823 | spin_lock_init(&pdata->netdev_lock); |
324e227e | 824 | INIT_HLIST_NODE(&pdata->ndev_hash_link); |
c2261dd7 JG |
825 | } |
826 | return 0; | |
827 | } | |
828 | ||
1fb7f897 | 829 | static int verify_immutable(const struct ib_device *dev, u32 port) |
c2261dd7 JG |
830 | { |
831 | return WARN_ON(!rdma_cap_ib_mad(dev, port) && | |
832 | rdma_max_mad_size(dev, port) != 0); | |
833 | } | |
834 | ||
835 | static int setup_port_data(struct ib_device *device) | |
836 | { | |
1fb7f897 | 837 | u32 port; |
c2261dd7 JG |
838 | int ret; |
839 | ||
840 | ret = alloc_port_data(device); | |
841 | if (ret) | |
842 | return ret; | |
843 | ||
844 | rdma_for_each_port (device, port) { | |
845 | struct ib_port_data *pdata = &device->port_data[port]; | |
8ceb1357 JG |
846 | |
847 | ret = device->ops.get_port_immutable(device, port, | |
848 | &pdata->immutable); | |
5eb620c8 | 849 | if (ret) |
55aeed06 | 850 | return ret; |
337877a4 | 851 | |
55aeed06 JG |
852 | if (verify_immutable(device, port)) |
853 | return -EINVAL; | |
5eb620c8 | 854 | } |
55aeed06 | 855 | return 0; |
5eb620c8 YE |
856 | } |
857 | ||
7416790e PP |
858 | /** |
859 | * ib_port_immutable_read() - Read rdma port's immutable data | |
168e4cd9 LR |
860 | * @dev: IB device |
861 | * @port: port number whose immutable data to read. It starts with index 1 and | |
862 | * is valid up to and including rdma_end_port(). |
7416790e PP |
863 | */ |
864 | const struct ib_port_immutable* | |
865 | ib_port_immutable_read(struct ib_device *dev, unsigned int port) | |
866 | { | |
867 | WARN_ON(!rdma_is_port_valid(dev, port)); | |
868 | return &dev->port_data[port].immutable; | |
869 | } | |
870 | EXPORT_SYMBOL(ib_port_immutable_read); | |
871 | ||
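A small sketch of the intended usage, assuming a hypothetical helper that walks the 1-based port range:

```c
/* Illustrative only: ports are 1-based, so iterate with rdma_for_each_port(). */
static void dump_port_caps_example(struct ib_device *dev)
{
	u32 port;

	rdma_for_each_port (dev, port) {
		const struct ib_port_immutable *immutable =
			ib_port_immutable_read(dev, port);

		ibdev_info(dev, "port %u core_cap_flags 0x%x\n",
			   port, immutable->core_cap_flags);
	}
}
```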
9abb0d1b | 872 | void ib_get_device_fw_str(struct ib_device *dev, char *str) |
5fa76c20 | 873 | { |
3023a1e9 KH |
874 | if (dev->ops.get_dev_fw_str) |
875 | dev->ops.get_dev_fw_str(dev, str); | |
5fa76c20 IW |
876 | else |
877 | str[0] = '\0'; | |
878 | } | |
879 | EXPORT_SYMBOL(ib_get_device_fw_str); | |
880 | ||
8f408ab6 DJ |
881 | static void ib_policy_change_task(struct work_struct *work) |
882 | { | |
883 | struct ib_device *dev; | |
0df91bb6 | 884 | unsigned long index; |
8f408ab6 | 885 | |
921eab11 | 886 | down_read(&devices_rwsem); |
0df91bb6 | 887 | xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { |
ea1075ed | 888 | unsigned int i; |
8f408ab6 | 889 | |
ea1075ed | 890 | rdma_for_each_port (dev, i) { |
8f408ab6 | 891 | u64 sp; |
c5f8f2c5 AK |
892 | ib_get_cached_subnet_prefix(dev, i, &sp); |
893 | ib_security_cache_change(dev, i, sp); | |
8f408ab6 DJ |
894 | } |
895 | } | |
921eab11 | 896 | up_read(&devices_rwsem); |
8f408ab6 DJ |
897 | } |
898 | ||
899 | static int ib_security_change(struct notifier_block *nb, unsigned long event, | |
900 | void *lsm_data) | |
901 | { | |
902 | if (event != LSM_POLICY_CHANGE) | |
903 | return NOTIFY_DONE; | |
904 | ||
905 | schedule_work(&ib_policy_change_work); | |
c66f6741 | 906 | ib_mad_agent_security_change(); |
8f408ab6 DJ |
907 | |
908 | return NOTIFY_OK; | |
909 | } | |
910 | ||
4e0f7b90 PP |
911 | static void compatdev_release(struct device *dev) |
912 | { | |
913 | struct ib_core_device *cdev = | |
914 | container_of(dev, struct ib_core_device, dev); | |
915 | ||
916 | kfree(cdev); | |
917 | } | |
918 | ||
919 | static int add_one_compat_dev(struct ib_device *device, | |
920 | struct rdma_dev_net *rnet) | |
921 | { | |
922 | struct ib_core_device *cdev; | |
923 | int ret; | |
924 | ||
2b34c558 | 925 | lockdep_assert_held(&rdma_nets_rwsem); |
a56bc45b PP |
926 | if (!ib_devices_shared_netns) |
927 | return 0; | |
928 | ||
4e0f7b90 PP |
929 | /* |
930 | * Create and add compat device in all namespaces other than where it | |
931 | * is currently bound to. | |
932 | */ | |
933 | if (net_eq(read_pnet(&rnet->net), | |
934 | read_pnet(&device->coredev.rdma_net))) | |
935 | return 0; | |
936 | ||
937 | /* | |
938 | * The first of init_net() or ib_register_device() to take the | |
939 | * compat_devs_mutex wins and gets to add the device. Others will wait | |
940 | * for completion here. | |
941 | */ | |
942 | mutex_lock(&device->compat_devs_mutex); | |
943 | cdev = xa_load(&device->compat_devs, rnet->id); | |
944 | if (cdev) { | |
945 | ret = 0; | |
946 | goto done; | |
947 | } | |
948 | ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL); | |
949 | if (ret) | |
950 | goto done; | |
951 | ||
952 | cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); | |
953 | if (!cdev) { | |
954 | ret = -ENOMEM; | |
955 | goto cdev_err; | |
956 | } | |
957 | ||
958 | cdev->dev.parent = device->dev.parent; | |
959 | rdma_init_coredev(cdev, device, read_pnet(&rnet->net)); | |
960 | cdev->dev.release = compatdev_release; | |
f2f2b3bb JG |
961 | ret = dev_set_name(&cdev->dev, "%s", dev_name(&device->dev)); |
962 | if (ret) | |
963 | goto add_err; | |
4e0f7b90 PP |
964 | |
965 | ret = device_add(&cdev->dev); | |
966 | if (ret) | |
967 | goto add_err; | |
eb15c78b | 968 | ret = ib_setup_port_attrs(cdev); |
5417783e PP |
969 | if (ret) |
970 | goto port_err; | |
4e0f7b90 PP |
971 | |
972 | ret = xa_err(xa_store(&device->compat_devs, rnet->id, | |
973 | cdev, GFP_KERNEL)); | |
974 | if (ret) | |
975 | goto insert_err; | |
976 | ||
977 | mutex_unlock(&device->compat_devs_mutex); | |
978 | return 0; | |
979 | ||
980 | insert_err: | |
5417783e PP |
981 | ib_free_port_attrs(cdev); |
982 | port_err: | |
4e0f7b90 PP |
983 | device_del(&cdev->dev); |
984 | add_err: | |
985 | put_device(&cdev->dev); | |
986 | cdev_err: | |
987 | xa_release(&device->compat_devs, rnet->id); | |
988 | done: | |
989 | mutex_unlock(&device->compat_devs_mutex); | |
990 | return ret; | |
991 | } | |
992 | ||
993 | static void remove_one_compat_dev(struct ib_device *device, u32 id) | |
994 | { | |
995 | struct ib_core_device *cdev; | |
996 | ||
997 | mutex_lock(&device->compat_devs_mutex); | |
998 | cdev = xa_erase(&device->compat_devs, id); | |
999 | mutex_unlock(&device->compat_devs_mutex); | |
1000 | if (cdev) { | |
5417783e | 1001 | ib_free_port_attrs(cdev); |
4e0f7b90 PP |
1002 | device_del(&cdev->dev); |
1003 | put_device(&cdev->dev); | |
1004 | } | |
1005 | } | |
1006 | ||
1007 | static void remove_compat_devs(struct ib_device *device) | |
1008 | { | |
1009 | struct ib_core_device *cdev; | |
1010 | unsigned long index; | |
1011 | ||
1012 | xa_for_each (&device->compat_devs, index, cdev) | |
1013 | remove_one_compat_dev(device, index); | |
1014 | } | |
1015 | ||
1016 | static int add_compat_devs(struct ib_device *device) | |
1017 | { | |
1018 | struct rdma_dev_net *rnet; | |
1019 | unsigned long index; | |
1020 | int ret = 0; | |
1021 | ||
decbc7a6 PP |
1022 | lockdep_assert_held(&devices_rwsem); |
1023 | ||
4e0f7b90 PP |
1024 | down_read(&rdma_nets_rwsem); |
1025 | xa_for_each (&rdma_nets, index, rnet) { | |
1026 | ret = add_one_compat_dev(device, rnet); | |
1027 | if (ret) | |
1028 | break; | |
1029 | } | |
1030 | up_read(&rdma_nets_rwsem); | |
1031 | return ret; | |
1032 | } | |
1033 | ||
2b34c558 PP |
1034 | static void remove_all_compat_devs(void) |
1035 | { | |
1036 | struct ib_compat_device *cdev; | |
1037 | struct ib_device *dev; | |
1038 | unsigned long index; | |
1039 | ||
1040 | down_read(&devices_rwsem); | |
1041 | xa_for_each (&devices, index, dev) { | |
1042 | unsigned long c_index = 0; | |
1043 | ||
1044 | /* Hold nets_rwsem so that any other thread modifying this | |
1045 | * system param can sync with this thread. | |
1046 | */ | |
1047 | down_read(&rdma_nets_rwsem); | |
1048 | xa_for_each (&dev->compat_devs, c_index, cdev) | |
1049 | remove_one_compat_dev(dev, c_index); | |
1050 | up_read(&rdma_nets_rwsem); | |
1051 | } | |
1052 | up_read(&devices_rwsem); | |
1053 | } | |
1054 | ||
1055 | static int add_all_compat_devs(void) | |
1056 | { | |
1057 | struct rdma_dev_net *rnet; | |
1058 | struct ib_device *dev; | |
1059 | unsigned long index; | |
1060 | int ret = 0; | |
1061 | ||
1062 | down_read(&devices_rwsem); | |
1063 | xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { | |
1064 | unsigned long net_index = 0; | |
1065 | ||
1066 | /* Hold nets_rwsem so that any other thread modifying this | |
1067 | * system param can sync with this thread. | |
1068 | */ | |
1069 | down_read(&rdma_nets_rwsem); | |
1070 | xa_for_each (&rdma_nets, net_index, rnet) { | |
1071 | ret = add_one_compat_dev(dev, rnet); | |
1072 | if (ret) | |
1073 | break; | |
1074 | } | |
1075 | up_read(&rdma_nets_rwsem); | |
1076 | } | |
1077 | up_read(&devices_rwsem); | |
1078 | if (ret) | |
1079 | remove_all_compat_devs(); | |
1080 | return ret; | |
1081 | } | |
1082 | ||
1083 | int rdma_compatdev_set(u8 enable) | |
1084 | { | |
1085 | struct rdma_dev_net *rnet; | |
1086 | unsigned long index; | |
1087 | int ret = 0; | |
1088 | ||
1089 | down_write(&rdma_nets_rwsem); | |
1090 | if (ib_devices_shared_netns == enable) { | |
1091 | up_write(&rdma_nets_rwsem); | |
1092 | return 0; | |
1093 | } | |
1094 | ||
1095 | /* enable/disable of compat devices is not supported | |
1096 | * when more than default init_net exists. | |
1097 | */ | |
1098 | xa_for_each (&rdma_nets, index, rnet) { | |
1099 | ret++; | |
1100 | break; | |
1101 | } | |
1102 | if (!ret) | |
1103 | ib_devices_shared_netns = enable; | |
1104 | up_write(&rdma_nets_rwsem); | |
1105 | if (ret) | |
1106 | return -EBUSY; | |
1107 | ||
1108 | if (enable) | |
1109 | ret = add_all_compat_devs(); | |
1110 | else | |
1111 | remove_all_compat_devs(); | |
1112 | return ret; | |
1113 | } | |
1114 | ||
4e0f7b90 PP |
1115 | static void rdma_dev_exit_net(struct net *net) |
1116 | { | |
1d2fedd8 | 1117 | struct rdma_dev_net *rnet = rdma_net_to_dev_net(net); |
4e0f7b90 PP |
1118 | struct ib_device *dev; |
1119 | unsigned long index; | |
1120 | int ret; | |
1121 | ||
1122 | down_write(&rdma_nets_rwsem); | |
1123 | /* | |
1124 | * Prevent the ID from being re-used and hide the id from xa_for_each. | |
1125 | */ | |
1126 | ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL)); | |
1127 | WARN_ON(ret); | |
1128 | up_write(&rdma_nets_rwsem); | |
1129 | ||
1130 | down_read(&devices_rwsem); | |
1131 | xa_for_each (&devices, index, dev) { | |
1132 | get_device(&dev->dev); | |
1133 | /* | |
1134 | * Release the devices_rwsem so that the potentially blocking |
1135 | * device_del() doesn't hold the devices_rwsem for too long. |
1136 | */ | |
1137 | up_read(&devices_rwsem); | |
1138 | ||
1139 | remove_one_compat_dev(dev, rnet->id); | |
1140 | ||
decbc7a6 PP |
1141 | /* |
1142 | * If the real device is in the NS then move it back to init. | |
1143 | */ | |
1144 | rdma_dev_change_netns(dev, net, &init_net); | |
1145 | ||
4e0f7b90 PP |
1146 | put_device(&dev->dev); |
1147 | down_read(&devices_rwsem); | |
1148 | } | |
1149 | up_read(&devices_rwsem); | |
1150 | ||
1d2fedd8 | 1151 | rdma_nl_net_exit(rnet); |
4e0f7b90 PP |
1152 | xa_erase(&rdma_nets, rnet->id); |
1153 | } | |
1154 | ||
1155 | static __net_init int rdma_dev_init_net(struct net *net) | |
1156 | { | |
1d2fedd8 | 1157 | struct rdma_dev_net *rnet = rdma_net_to_dev_net(net); |
4e0f7b90 PP |
1158 | unsigned long index; |
1159 | struct ib_device *dev; | |
1160 | int ret; | |
1161 | ||
1d2fedd8 PP |
1162 | write_pnet(&rnet->net, net); |
1163 | ||
1164 | ret = rdma_nl_net_init(rnet); | |
1165 | if (ret) | |
1166 | return ret; | |
1167 | ||
4e0f7b90 PP |
1168 | /* No need to create any compat devices in default init_net. */ |
1169 | if (net_eq(net, &init_net)) | |
1170 | return 0; | |
1171 | ||
4e0f7b90 | 1172 | ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL); |
1d2fedd8 PP |
1173 | if (ret) { |
1174 | rdma_nl_net_exit(rnet); | |
4e0f7b90 | 1175 | return ret; |
1d2fedd8 | 1176 | } |
4e0f7b90 PP |
1177 | |
1178 | down_read(&devices_rwsem); | |
1179 | xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { | |
2b34c558 PP |
1180 | /* Hold nets_rwsem so that netlink command cannot change |
1181 | * system configuration for device sharing mode. | |
1182 | */ | |
1183 | down_read(&rdma_nets_rwsem); | |
4e0f7b90 | 1184 | ret = add_one_compat_dev(dev, rnet); |
2b34c558 | 1185 | up_read(&rdma_nets_rwsem); |
4e0f7b90 PP |
1186 | if (ret) |
1187 | break; | |
1188 | } | |
1189 | up_read(&devices_rwsem); | |
1190 | ||
1191 | if (ret) | |
1192 | rdma_dev_exit_net(net); | |
1193 | ||
1194 | return ret; | |
1195 | } | |
1196 | ||
0df91bb6 | 1197 | /* |
d0899892 JG |
1198 | * Assign the unique string device name and the unique device index. This is |
1199 | * undone by ib_dealloc_device. | |
ecc82c53 | 1200 | */ |
0df91bb6 | 1201 | static int assign_name(struct ib_device *device, const char *name) |
ecc82c53 | 1202 | { |
0df91bb6 JG |
1203 | static u32 last_id; |
1204 | int ret; | |
ecc82c53 | 1205 | |
921eab11 | 1206 | down_write(&devices_rwsem); |
0df91bb6 JG |
1207 | /* Assign a unique name to the device */ |
1208 | if (strchr(name, '%')) | |
1209 | ret = alloc_name(device, name); | |
1210 | else | |
1211 | ret = dev_set_name(&device->dev, name); | |
1212 | if (ret) | |
1213 | goto out; | |
1214 | ||
1215 | if (__ib_device_get_by_name(dev_name(&device->dev))) { | |
1216 | ret = -ENFILE; | |
1217 | goto out; | |
1218 | } | |
2c34bb6d | 1219 | strscpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX); |
ecc82c53 | 1220 | |
ea295481 LT |
1221 | ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b, |
1222 | &last_id, GFP_KERNEL); | |
1223 | if (ret > 0) | |
1224 | ret = 0; | |
921eab11 | 1225 | |
0df91bb6 | 1226 | out: |
921eab11 | 1227 | up_write(&devices_rwsem); |
0df91bb6 JG |
1228 | return ret; |
1229 | } | |
1230 | ||
921eab11 JG |
1231 | /* |
1232 | * setup_device() allocates memory and sets up data that requires calling the | |
1233 | * device ops, this is the only reason these actions are not done during | |
1234 | * ib_alloc_device. It is undone by ib_dealloc_device(). | |
1235 | */ | |
548cb4fb PP |
1236 | static int setup_device(struct ib_device *device) |
1237 | { | |
1238 | struct ib_udata uhw = {.outlen = 0, .inlen = 0}; | |
1239 | int ret; | |
1da177e4 | 1240 | |
deee3c7e | 1241 | ib_device_check_mandatory(device); |
1da177e4 | 1242 | |
8ceb1357 | 1243 | ret = setup_port_data(device); |
5eb620c8 | 1244 | if (ret) { |
8ceb1357 | 1245 | dev_warn(&device->dev, "Couldn't create per-port data\n"); |
548cb4fb PP |
1246 | return ret; |
1247 | } | |
1248 | ||
1249 | memset(&device->attrs, 0, sizeof(device->attrs)); | |
3023a1e9 | 1250 | ret = device->ops.query_device(device, &device->attrs, &uhw); |
548cb4fb PP |
1251 | if (ret) { |
1252 | dev_warn(&device->dev, | |
1253 | "Couldn't query the device attributes\n"); | |
d45f89d5 | 1254 | return ret; |
5eb620c8 YE |
1255 | } |
1256 | ||
d45f89d5 | 1257 | return 0; |
548cb4fb PP |
1258 | } |
1259 | ||
921eab11 JG |
1260 | static void disable_device(struct ib_device *device) |
1261 | { | |
9cd58817 | 1262 | u32 cid; |
921eab11 JG |
1263 | |
1264 | WARN_ON(!refcount_read(&device->refcount)); | |
1265 | ||
1266 | down_write(&devices_rwsem); | |
1267 | xa_clear_mark(&devices, device->index, DEVICE_REGISTERED); | |
1268 | up_write(&devices_rwsem); | |
1269 | ||
9cd58817 JG |
1270 | /* |
1271 | * Remove clients in LIFO order, see assign_client_id. This could be | |
1272 | * more efficient if xarray learns to reverse iterate. Since no new | |
1273 | * clients can be added to this ib_device past this point we only need | |
1274 | * the maximum possible client_id value here. | |
1275 | */ | |
921eab11 | 1276 | down_read(&clients_rwsem); |
9cd58817 | 1277 | cid = highest_client_id; |
921eab11 | 1278 | up_read(&clients_rwsem); |
9cd58817 JG |
1279 | while (cid) { |
1280 | cid--; | |
1281 | remove_client_context(device, cid); | |
1282 | } | |
921eab11 | 1283 | |
286e1d3f | 1284 | ib_cq_pool_cleanup(device); |
4aa16152 | 1285 | |
921eab11 JG |
1286 | /* Pairs with refcount_set in enable_device */ |
1287 | ib_device_put(device); | |
1288 | wait_for_completion(&device->unreg_completion); | |
c2261dd7 | 1289 | |
4e0f7b90 PP |
1290 | /* |
1291 | * compat devices must be removed after device refcount drops to zero. | |
1292 | * Otherwise init_net() may add more compatdevs after removing compat | |
1293 | * devices and before device is disabled. | |
1294 | */ | |
1295 | remove_compat_devs(device); | |
921eab11 JG |
1296 | } |
1297 | ||
1298 | /* | |
1299 | * An enabled device is visible to all clients and to all the public facing | |
d0899892 JG |
1300 | * APIs that return a device pointer. This always returns with a new get, even |
1301 | * if it fails. | |
921eab11 | 1302 | */ |
d0899892 | 1303 | static int enable_device_and_get(struct ib_device *device) |
921eab11 JG |
1304 | { |
1305 | struct ib_client *client; | |
1306 | unsigned long index; | |
d0899892 | 1307 | int ret = 0; |
921eab11 | 1308 | |
d0899892 JG |
1309 | /* |
1310 | * One ref belongs to the xa and the other belongs to this | |
1311 | * thread. This is needed to guard against parallel unregistration. | |
1312 | */ | |
1313 | refcount_set(&device->refcount, 2); | |
921eab11 JG |
1314 | down_write(&devices_rwsem); |
1315 | xa_set_mark(&devices, device->index, DEVICE_REGISTERED); | |
d0899892 JG |
1316 | |
1317 | /* | |
1318 | * By using downgrade_write() we ensure that no other thread can clear | |
1319 | * DEVICE_REGISTERED while we are completing the client setup. | |
1320 | */ | |
1321 | downgrade_write(&devices_rwsem); | |
921eab11 | 1322 | |
ca22354b JG |
1323 | if (device->ops.enable_driver) { |
1324 | ret = device->ops.enable_driver(device); | |
1325 | if (ret) | |
1326 | goto out; | |
1327 | } | |
1328 | ||
921eab11 JG |
1329 | down_read(&clients_rwsem); |
1330 | xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) { | |
1331 | ret = add_client_context(device, client); | |
d0899892 JG |
1332 | if (ret) |
1333 | break; | |
921eab11 JG |
1334 | } |
1335 | up_read(&clients_rwsem); | |
4e0f7b90 PP |
1336 | if (!ret) |
1337 | ret = add_compat_devs(device); | |
ca22354b | 1338 | out: |
d0899892 JG |
1339 | up_read(&devices_rwsem); |
1340 | return ret; | |
921eab11 JG |
1341 | } |
1342 | ||
0cb42c02 JG |
1343 | static void prevent_dealloc_device(struct ib_device *ib_dev) |
1344 | { | |
1345 | } | |
1346 | ||
9cbed5aa CM |
1347 | static void ib_device_notify_register(struct ib_device *device) |
1348 | { | |
1349 | struct net_device *netdev; | |
1350 | u32 port; | |
1351 | int ret; | |
1352 | ||
1d6a9e74 WL |
1353 | down_read(&devices_rwsem); |
1354 | ||
d0706bfd ZY |
1355 | /* Mark for userspace that device is ready */ |
1356 | kobject_uevent(&device->dev.kobj, KOBJ_ADD); | |
1357 | ||
9cbed5aa CM |
1358 | ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT); |
1359 | if (ret) | |
1d6a9e74 | 1360 | goto out; |
9cbed5aa CM |
1361 | |
1362 | rdma_for_each_port(device, port) { | |
1363 | netdev = ib_device_get_netdev(device, port); | |
1364 | if (!netdev) | |
1365 | continue; | |
1366 | ||
1367 | ret = rdma_nl_notify_event(device, port, | |
1368 | RDMA_NETDEV_ATTACH_EVENT); | |
1369 | dev_put(netdev); | |
1370 | if (ret) | |
1d6a9e74 | 1371 | goto out; |
9cbed5aa | 1372 | } |
1d6a9e74 WL |
1373 | |
1374 | out: | |
1375 | up_read(&devices_rwsem); | |
9cbed5aa CM |
1376 | } |
1377 | ||
548cb4fb PP |
1378 | /** |
1379 | * ib_register_device - Register an IB device with IB core | |
d6537c1a | 1380 | * @device: Device to register |
1381 | * @name: unique string device name. This may include a '%' which will | |
e0477b34 JG |
1382 | * cause a unique index to be added to the passed device name. |
1383 | * @dma_device: pointer to a DMA-capable device. If %NULL, then the IB | |
1384 | * device will be used. In this case the caller should fully | |
1385 | * setup the ibdev for DMA. This usually means using dma_virt_ops. | |
548cb4fb PP |
1386 | * |
1387 | * Low-level drivers use ib_register_device() to register their | |
1388 | * devices with the IB core. All registered clients will receive a | |
1389 | * callback for each device that is added. @device must be allocated | |
1390 | * with ib_alloc_device(). | |
d0899892 JG |
1391 | * |
1392 | * If the driver uses ops.dealloc_driver and calls any ib_unregister_device() | |
1393 | * asynchronously then the device pointer may become freed as soon as this | |
1394 | * function returns. | |
548cb4fb | 1395 | */ |
e0477b34 JG |
1396 | int ib_register_device(struct ib_device *device, const char *name, |
1397 | struct device *dma_device) | |
548cb4fb PP |
1398 | { |
1399 | int ret; | |
548cb4fb | 1400 | |
0df91bb6 JG |
1401 | ret = assign_name(device, name); |
1402 | if (ret) | |
921eab11 | 1403 | return ret; |
548cb4fb | 1404 | |
5a7a9e03 CH |
1405 | /* |
1406 | * If the caller does not provide a DMA capable device then the IB core | |
1407 | * will set up ib_sge and scatterlist structures that stash the kernel | |
1408 | * virtual address into the address field. | |
1409 | */ | |
1410 | WARN_ON(dma_device && !dma_device->dma_parms); | |
1411 | device->dma_device = dma_device; | |
1412 | ||
548cb4fb PP |
1413 | ret = setup_device(device); |
1414 | if (ret) | |
d0899892 | 1415 | return ret; |
03db3a2d | 1416 | |
d45f89d5 JG |
1417 | ret = ib_cache_setup_one(device); |
1418 | if (ret) { | |
1419 | dev_warn(&device->dev, | |
1420 | "Couldn't set up InfiniBand P_Key/GID cache\n"); | |
d0899892 | 1421 | return ret; |
d45f89d5 JG |
1422 | } |
1423 | ||
915e4af5 JG |
1424 | device->groups[0] = &ib_dev_attr_group; |
1425 | device->groups[1] = device->ops.device_group; | |
b7066b32 JG |
1426 | ret = ib_setup_device_attrs(device); |
1427 | if (ret) | |
1428 | goto cache_cleanup; | |
1429 | ||
7527a7b1 | 1430 | ib_device_register_rdmacg(device); |
3e153a93 | 1431 | |
413d3347 MZ |
1432 | rdma_counter_init(device); |
1433 | ||
e7a5b4aa LR |
1434 | /* |
1435 | * Ensure that ADD uevent is not fired because it | |
1436 | * is too early and the device is not initialized yet. |
1437 | */ | |
1438 | dev_set_uevent_suppress(&device->dev, true); | |
5f8f5499 PP |
1439 | ret = device_add(&device->dev); |
1440 | if (ret) | |
1441 | goto cg_cleanup; | |
1442 | ||
b7066b32 | 1443 | ret = ib_setup_port_attrs(&device->coredev); |
1da177e4 | 1444 | if (ret) { |
43c7c851 JG |
1445 | dev_warn(&device->dev, |
1446 | "Couldn't register device with driver model\n"); | |
5f8f5499 | 1447 | goto dev_cleanup; |
1da177e4 LT |
1448 | } |
1449 | ||
d0899892 JG |
1450 | ret = enable_device_and_get(device); |
1451 | if (ret) { | |
1452 | void (*dealloc_fn)(struct ib_device *); | |
1453 | ||
1454 | /* | |
1455 | * If we hit this error flow then we don't want to | |
1456 | * automatically dealloc the device since the caller is | |
1457 | * expected to call ib_dealloc_device() after | |
1458 | * ib_register_device() fails. This is tricky due to the | |
1459 | * possibility for a parallel unregistration along with this | |
1460 | * error flow. Since we have a refcount here we know any | |
1461 | * parallel flow is stopped in disable_device and will see the | |
0cb42c02 | 1462 | * special dealloc_driver pointer, causing the responsibility to |
d0899892 JG |
1463 | * ib_dealloc_device() to revert back to this thread. |
1464 | */ | |
1465 | dealloc_fn = device->ops.dealloc_driver; | |
0cb42c02 | 1466 | device->ops.dealloc_driver = prevent_dealloc_device; |
d0899892 JG |
1467 | ib_device_put(device); |
1468 | __ib_unregister_device(device); | |
1469 | device->ops.dealloc_driver = dealloc_fn; | |
779e0bf4 | 1470 | dev_set_uevent_suppress(&device->dev, false); |
d0899892 JG |
1471 | return ret; |
1472 | } | |
779e0bf4 | 1473 | dev_set_uevent_suppress(&device->dev, false); |
9cbed5aa CM |
1474 | |
1475 | ib_device_notify_register(device); | |
d0706bfd | 1476 | |
d0899892 | 1477 | ib_device_put(device); |
1da177e4 | 1478 | |
4be3a4fa PP |
1479 | return 0; |
1480 | ||
5f8f5499 PP |
1481 | dev_cleanup: |
1482 | device_del(&device->dev); | |
2fb4f4ea | 1483 | cg_cleanup: |
e7a5b4aa | 1484 | dev_set_uevent_suppress(&device->dev, false); |
2fb4f4ea | 1485 | ib_device_unregister_rdmacg(device); |
b7066b32 | 1486 | cache_cleanup: |
d45f89d5 | 1487 | ib_cache_cleanup_one(device); |
1da177e4 LT |
1488 | return ret; |
1489 | } | |
1490 | EXPORT_SYMBOL(ib_register_device); | |
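As a hedged sketch of the allocate/register flow described in the kernel-doc above: a driver embeds struct ib_device in its own structure, obtains it with ib_alloc_device(), and then registers it. The my_drv structure, the probe function and the "myib%d" name pattern are invented for illustration; on failure the caller still owns the device and must call ib_dealloc_device(), as the error-flow comment explains.

#include <rdma/ib_verbs.h>

/* Hypothetical driver structure; the ib_device member must sit at offset 0
 * because ib_alloc_device() relies on that. */
struct my_drv {
        struct ib_device ibdev;
        /* ... driver private state ... */
};

static int my_drv_probe(struct device *dma_dev)
{
        struct my_drv *drv;
        int ret;

        drv = ib_alloc_device(my_drv, ibdev);
        if (!drv)
                return -ENOMEM;

        /* ops, port immutables, netdev links, etc. would be set up here */

        ret = ib_register_device(&drv->ibdev, "myib%d", dma_dev);
        if (ret)
                /* registration failed: the caller keeps ownership */
                ib_dealloc_device(&drv->ibdev);
        return ret;
}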
1491 | ||
d0899892 JG |
1492 | /* Callers must hold a get on the device. */ |
1493 | static void __ib_unregister_device(struct ib_device *ib_dev) | |
1494 | { | |
bca51197 MZ |
1495 | struct ib_device *sub, *tmp; |
1496 | ||
1497 | mutex_lock(&ib_dev->subdev_lock); | |
1498 | list_for_each_entry_safe_reverse(sub, tmp, | |
1499 | &ib_dev->subdev_list_head, | |
1500 | subdev_list) { | |
1501 | list_del(&sub->subdev_list); | |
1502 | ib_dev->ops.del_sub_dev(sub); | |
1503 | ib_device_put(ib_dev); | |
1504 | } | |
1505 | mutex_unlock(&ib_dev->subdev_lock); | |
1506 | ||
d0899892 JG |
1507 | /* |
1508 | * We have a registration lock so that all the calls to unregister are | |
1509 | * fully fenced, once any unregister returns the device is truly | |
1510 | * unregistered even if multiple callers are unregistering it at the | |
1511 | * same time. This also interacts with the registration flow and | |
1512 | * provides sane semantics if register and unregister are racing. | |
1513 | */ | |
1514 | mutex_lock(&ib_dev->unregistration_lock); | |
1515 | if (!refcount_read(&ib_dev->refcount)) | |
1516 | goto out; | |
1517 | ||
1518 | disable_device(ib_dev); | |
9cbed5aa | 1519 | rdma_nl_notify_event(ib_dev, 0, RDMA_UNREGISTER_EVENT); |
3042492b PP |
1520 | |
1521 | /* Expedite removing unregistered pointers from the hash table */ | |
1522 | free_netdevs(ib_dev); | |
1523 | ||
b7066b32 | 1524 | ib_free_port_attrs(&ib_dev->coredev); |
d0899892 JG |
1525 | device_del(&ib_dev->dev); |
1526 | ib_device_unregister_rdmacg(ib_dev); | |
1527 | ib_cache_cleanup_one(ib_dev); | |
1528 | ||
1529 | /* | |
1530 | * Drivers using the new flow may not call ib_dealloc_device except | |
1531 | * in error unwind prior to registration success. | |
1532 | */ | |
0cb42c02 JG |
1533 | if (ib_dev->ops.dealloc_driver && |
1534 | ib_dev->ops.dealloc_driver != prevent_dealloc_device) { | |
d0899892 JG |
1535 | WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1); |
1536 | ib_dealloc_device(ib_dev); | |
1537 | } | |
1538 | out: | |
1539 | mutex_unlock(&ib_dev->unregistration_lock); | |
1540 | } | |
1541 | ||
1da177e4 LT |
1542 | /** |
1543 | * ib_unregister_device - Unregister an IB device | |
d6537c1a | 1544 | * @ib_dev: The device to unregister |
1da177e4 LT |
1545 | * |
1546 | * Unregister an IB device. All clients will receive a remove callback. | |
d0899892 JG |
1547 | * |
1548 | * Callers should call this routine only once, and protect against races with | |
1549 | * registration. Typically it should only be called as part of a remove | |
1550 | * callback in an implementation of driver core's struct device_driver and | |
1551 | * related. | |
1552 | * | |
1553 | * If ops.dealloc_driver is used then ib_dev will be freed upon return from | |
1554 | * this function. | |
1da177e4 | 1555 | */ |
d0899892 | 1556 | void ib_unregister_device(struct ib_device *ib_dev) |
1da177e4 | 1557 | { |
d0899892 JG |
1558 | get_device(&ib_dev->dev); |
1559 | __ib_unregister_device(ib_dev); | |
1560 | put_device(&ib_dev->dev); | |
1da177e4 LT |
1561 | } |
1562 | EXPORT_SYMBOL(ib_unregister_device); | |
1563 | ||
d0899892 JG |
1564 | /** |
1565 | * ib_unregister_device_and_put - Unregister a device while holding a 'get' | |
d6537c1a | 1566 | * @ib_dev: The device to unregister |
d0899892 JG |
1567 | * |
1568 | * This is the same as ib_unregister_device(), except it includes an internal | |
1569 | * ib_device_put() that should match a 'get' obtained by the caller. | |
1570 | * | |
1571 | * It is safe to call this routine concurrently from multiple threads while | |
1572 | * holding the 'get'. When the function returns the device is fully | |
1573 | * unregistered. | |
1574 | * | |
1575 | * Drivers using this flow MUST use the driver_unregister callback to clean up | |
1576 | * their resources associated with the device and dealloc it. | |
1577 | */ | |
1578 | void ib_unregister_device_and_put(struct ib_device *ib_dev) | |
1579 | { | |
1580 | WARN_ON(!ib_dev->ops.dealloc_driver); | |
1581 | get_device(&ib_dev->dev); | |
1582 | ib_device_put(ib_dev); | |
1583 | __ib_unregister_device(ib_dev); | |
1584 | put_device(&ib_dev->dev); | |
1585 | } | |
1586 | EXPORT_SYMBOL(ib_unregister_device_and_put); | |
1587 | ||
1588 | /** | |
1589 | * ib_unregister_driver - Unregister all IB devices for a driver | |
1590 | * @driver_id: The driver to unregister | |
1591 | * | |
1592 | * This implements a fence for device unregistration. It only returns once all | |
1593 | * devices associated with the driver_id have fully completed their | |
1594 | * unregistration and returned from ib_unregister_device*(). | |
1595 | * | |
1596 | * If devices are not yet unregistered it goes ahead and starts unregistering | |
1597 | * them. | |
1598 | * | |
1599 | * This does not block creation of new devices with the given driver_id, that | |
1600 | * is the responsibility of the caller. | |
1601 | */ | |
1602 | void ib_unregister_driver(enum rdma_driver_id driver_id) | |
1603 | { | |
1604 | struct ib_device *ib_dev; | |
1605 | unsigned long index; | |
1606 | ||
1607 | down_read(&devices_rwsem); | |
1608 | xa_for_each (&devices, index, ib_dev) { | |
b9560a41 | 1609 | if (ib_dev->ops.driver_id != driver_id) |
d0899892 JG |
1610 | continue; |
1611 | ||
1612 | get_device(&ib_dev->dev); | |
1613 | up_read(&devices_rwsem); | |
1614 | ||
1615 | WARN_ON(!ib_dev->ops.dealloc_driver); | |
1616 | __ib_unregister_device(ib_dev); | |
1617 | ||
1618 | put_device(&ib_dev->dev); | |
1619 | down_read(&devices_rwsem); | |
1620 | } | |
1621 | up_read(&devices_rwsem); | |
1622 | } | |
1623 | EXPORT_SYMBOL(ib_unregister_driver); | |
1624 | ||
1625 | static void ib_unregister_work(struct work_struct *work) | |
1626 | { | |
1627 | struct ib_device *ib_dev = | |
1628 | container_of(work, struct ib_device, unregistration_work); | |
1629 | ||
1630 | __ib_unregister_device(ib_dev); | |
1631 | put_device(&ib_dev->dev); | |
1632 | } | |
1633 | ||
1634 | /** | |
1635 | * ib_unregister_device_queued - Unregister a device using a work queue | |
d6537c1a | 1636 | * @ib_dev: The device to unregister |
d0899892 JG |
1637 | * |
1638 | * This schedules an asynchronous unregistration using a WQ for the device. A | |
1639 | * driver should use this to avoid holding locks while doing unregistration, | |
1640 | * such as holding the RTNL lock. | |
1641 | * | |
1642 | * Drivers using this API must use ib_unregister_driver before module unload | |
1643 | * to ensure that all scheduled unregistrations have completed. | |
1644 | */ | |
1645 | void ib_unregister_device_queued(struct ib_device *ib_dev) | |
1646 | { | |
1647 | WARN_ON(!refcount_read(&ib_dev->refcount)); | |
1648 | WARN_ON(!ib_dev->ops.dealloc_driver); | |
1649 | get_device(&ib_dev->dev); | |
ff815a89 | 1650 | if (!queue_work(ib_unreg_wq, &ib_dev->unregistration_work)) |
d0899892 JG |
1651 | put_device(&ib_dev->dev); |
1652 | } | |
1653 | EXPORT_SYMBOL(ib_unregister_device_queued); | |
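A hedged sketch of the queued flow, reusing the invented my_drv structure from the registration example above: a driver that must not block on a full unregister (for example while holding RTNL) queues it, and fences everything at module unload with ib_unregister_driver(). RDMA_DRIVER_UNKNOWN stands in for the driver's real enum rdma_driver_id, and ops.dealloc_driver is assumed to be set, as the WARN_ON above requires.

/* Called from a context that must not block on unregistration,
 * e.g. a netdev notifier run under RTNL (hypothetical). */
static void my_drv_schedule_removal(struct my_drv *drv)
{
        ib_unregister_device_queued(&drv->ibdev);
}

static void __exit my_drv_exit(void)
{
        /*
         * Fence: returns only after every device of this driver has fully
         * completed its (possibly queued) unregistration. A real driver
         * passes its own driver id instead of RDMA_DRIVER_UNKNOWN.
         */
        ib_unregister_driver(RDMA_DRIVER_UNKNOWN);
}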
1654 | ||
decbc7a6 PP |
1655 | /* |
1656 | * The caller must pass in a device that has the kref held and the refcount | |
1657 | * released. If the device is in cur_net and still registered then it is moved | |
1658 | * into net. | |
1659 | */ | |
1660 | static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net, | |
1661 | struct net *net) | |
1662 | { | |
1663 | int ret2 = -EINVAL; | |
1664 | int ret; | |
1665 | ||
1666 | mutex_lock(&device->unregistration_lock); | |
1667 | ||
1668 | /* | |
2e5b8a01 PP |
1669 | * If a device is not under ib_device_get() or if the unregistration_lock | |
1670 | * is not held, the namespace can be changed, or it can be unregistered. | |
1671 | * Check again under the lock. | |
decbc7a6 PP |
1672 | */ |
1673 | if (refcount_read(&device->refcount) == 0 || | |
1674 | !net_eq(cur_net, read_pnet(&device->coredev.rdma_net))) { | |
1675 | ret = -ENODEV; | |
1676 | goto out; | |
1677 | } | |
1678 | ||
1679 | kobject_uevent(&device->dev.kobj, KOBJ_REMOVE); | |
1680 | disable_device(device); | |
1681 | ||
1682 | /* | |
1683 | * At this point no one can be using the device, so it is safe to | |
1684 | * change the namespace. | |
1685 | */ | |
1686 | write_pnet(&device->coredev.rdma_net, net); | |
1687 | ||
2e5b8a01 | 1688 | down_read(&devices_rwsem); |
decbc7a6 PP |
1689 | /* |
1690 | * Currently rdma devices are system wide unique. So the device name | |
1691 | * is guaranteed free in the new namespace. Publish the new namespace | |
1692 | * at the sysfs level. | |
1693 | */ | |
decbc7a6 PP |
1694 | ret = device_rename(&device->dev, dev_name(&device->dev)); |
1695 | up_read(&devices_rwsem); | |
1696 | if (ret) { | |
1697 | dev_warn(&device->dev, | |
1698 | "%s: Couldn't rename device after namespace change\n", | |
1699 | __func__); | |
1700 | /* Try and put things back and re-enable the device */ | |
1701 | write_pnet(&device->coredev.rdma_net, cur_net); | |
1702 | } | |
1703 | ||
1704 | ret2 = enable_device_and_get(device); | |
2e5b8a01 | 1705 | if (ret2) { |
decbc7a6 PP |
1706 | /* |
1707 | * This shouldn't really happen, but if it does, let the user | |
1708 | * retry at later point. So don't disable the device. | |
1709 | */ | |
1710 | dev_warn(&device->dev, | |
1711 | "%s: Couldn't re-enable device after namespace change\n", | |
1712 | __func__); | |
2e5b8a01 | 1713 | } |
decbc7a6 | 1714 | kobject_uevent(&device->dev.kobj, KOBJ_ADD); |
2e5b8a01 | 1715 | |
decbc7a6 PP |
1716 | ib_device_put(device); |
1717 | out: | |
1718 | mutex_unlock(&device->unregistration_lock); | |
1719 | if (ret) | |
1720 | return ret; | |
1721 | return ret2; | |
1722 | } | |
1723 | ||
2e5b8a01 PP |
1724 | int ib_device_set_netns_put(struct sk_buff *skb, |
1725 | struct ib_device *dev, u32 ns_fd) | |
1726 | { | |
1727 | struct net *net; | |
1728 | int ret; | |
1729 | ||
1730 | net = get_net_ns_by_fd(ns_fd); | |
1731 | if (IS_ERR(net)) { | |
1732 | ret = PTR_ERR(net); | |
1733 | goto net_err; | |
1734 | } | |
1735 | ||
1736 | if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) { | |
1737 | ret = -EPERM; | |
1738 | goto ns_err; | |
1739 | } | |
1740 | ||
1741 | /* | |
69d86a66 JG |
1742 | * All the ib_clients, including uverbs, are reset when the namespace is |
1743 | * changed and this cannot be blocked waiting for userspace to do | |
1744 | * something, so disassociation is mandatory. | |
2e5b8a01 | 1745 | */ |
69d86a66 | 1746 | if (!dev->ops.disassociate_ucontext || ib_devices_shared_netns) { |
2e5b8a01 PP |
1747 | ret = -EOPNOTSUPP; |
1748 | goto ns_err; | |
1749 | } | |
1750 | ||
1751 | get_device(&dev->dev); | |
1752 | ib_device_put(dev); | |
1753 | ret = rdma_dev_change_netns(dev, current->nsproxy->net_ns, net); | |
1754 | put_device(&dev->dev); | |
1755 | ||
1756 | put_net(net); | |
1757 | return ret; | |
1758 | ||
1759 | ns_err: | |
1760 | put_net(net); | |
1761 | net_err: | |
1762 | ib_device_put(dev); | |
1763 | return ret; | |
1764 | } | |
1765 | ||
4e0f7b90 PP |
1766 | static struct pernet_operations rdma_dev_net_ops = { |
1767 | .init = rdma_dev_init_net, | |
1768 | .exit = rdma_dev_exit_net, | |
1769 | .id = &rdma_dev_net_id, | |
1770 | .size = sizeof(struct rdma_dev_net), | |
1771 | }; | |
1772 | ||
e59178d8 JG |
1773 | static int assign_client_id(struct ib_client *client) |
1774 | { | |
1775 | int ret; | |
1776 | ||
7a8bccd8 | 1777 | lockdep_assert_held(&clients_rwsem); |
e59178d8 JG |
1778 | /* |
1779 | * The add/remove callbacks must be called in FIFO/LIFO order. To | |
1780 | * achieve this we assign client_ids so they are sorted in | |
9cd58817 | 1781 | * registration order. |
e59178d8 | 1782 | */ |
9cd58817 | 1783 | client->client_id = highest_client_id; |
ea295481 | 1784 | ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL); |
e59178d8 | 1785 | if (ret) |
7a8bccd8 | 1786 | return ret; |
e59178d8 | 1787 | |
9cd58817 | 1788 | highest_client_id++; |
921eab11 | 1789 | xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED); |
7a8bccd8 | 1790 | return 0; |
e59178d8 JG |
1791 | } |
1792 | ||
9cd58817 JG |
1793 | static void remove_client_id(struct ib_client *client) |
1794 | { | |
1795 | down_write(&clients_rwsem); | |
1796 | xa_erase(&clients, client->client_id); | |
1797 | for (; highest_client_id; highest_client_id--) | |
1798 | if (xa_load(&clients, highest_client_id - 1)) | |
1799 | break; | |
1800 | up_write(&clients_rwsem); | |
1801 | } | |
1802 | ||
1da177e4 LT |
1803 | /** |
1804 | * ib_register_client - Register an IB client | |
1805 | * @client:Client to register | |
1806 | * | |
1807 | * Upper level users of the IB drivers can use ib_register_client() to | |
1808 | * register callbacks for IB device addition and removal. When an IB | |
1809 | * device is added, each registered client's add method will be called | |
1810 | * (in the order the clients were registered), and when a device is | |
1811 | * removed, each client's remove method will be called (in the reverse | |
1812 | * order that clients were registered). In addition, when | |
1813 | * ib_register_client() is called, the client will receive an add | |
1814 | * callback for all devices already registered. | |
1815 | */ | |
1816 | int ib_register_client(struct ib_client *client) | |
1817 | { | |
1818 | struct ib_device *device; | |
0df91bb6 | 1819 | unsigned long index; |
7a8bccd8 | 1820 | bool need_unreg = false; |
e59178d8 | 1821 | int ret; |
1da177e4 | 1822 | |
621e55ff JG |
1823 | refcount_set(&client->uses, 1); |
1824 | init_completion(&client->uses_zero); | |
7a8bccd8 SL |
1825 | |
1826 | /* | |
1827 | * The devices_rwsem is held in write mode to ensure that a racing | |
1828 | * ib_register_device() sees a consistent view of clients and devices. | |
1829 | */ | |
1830 | down_write(&devices_rwsem); | |
1831 | down_write(&clients_rwsem); | |
e59178d8 | 1832 | ret = assign_client_id(client); |
921eab11 | 1833 | if (ret) |
7a8bccd8 | 1834 | goto out; |
1da177e4 | 1835 | |
7a8bccd8 | 1836 | need_unreg = true; |
921eab11 JG |
1837 | xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) { |
1838 | ret = add_client_context(device, client); | |
7a8bccd8 SL |
1839 | if (ret) |
1840 | goto out; | |
921eab11 | 1841 | } |
7a8bccd8 SL |
1842 | ret = 0; |
1843 | out: | |
1844 | up_write(&clients_rwsem); | |
1845 | up_write(&devices_rwsem); | |
1846 | if (need_unreg && ret) | |
1847 | ib_unregister_client(client); | |
1848 | return ret; | |
1da177e4 LT |
1849 | } |
1850 | EXPORT_SYMBOL(ib_register_client); | |
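To make the add/remove ordering described above concrete, here is a hedged sketch of a minimal client; every name is invented. In this version of the core the add() callback returns an int (a non-zero return fails the attachment) and remove() receives whatever pointer was stored for the device with ib_set_client_data().

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct my_ctx {
        struct ib_device *device;
};

static int my_client_add(struct ib_device *device);
static void my_client_remove(struct ib_device *device, void *client_data);

static struct ib_client my_client = {
        .name   = "my_client",
        .add    = my_client_add,
        .remove = my_client_remove,
};

static int my_client_add(struct ib_device *device)
{
        struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

        if (!ctx)
                return -ENOMEM;
        ctx->device = device;
        ib_set_client_data(device, &my_client, ctx);
        return 0;
}

static void my_client_remove(struct ib_device *device, void *client_data)
{
        kfree(client_data);
}

static int __init my_client_init(void)
{
        /* also triggers add() for every device already registered */
        return ib_register_client(&my_client);
}

static void __exit my_client_exit(void)
{
        /* full fence: remove() has run for every device on return */
        ib_unregister_client(&my_client);
}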
1851 | ||
1852 | /** | |
1853 | * ib_unregister_client - Unregister an IB client | |
1854 | * @client:Client to unregister | |
1855 | * | |
1856 | * Upper level users use ib_unregister_client() to remove their client | |
1857 | * registration. When ib_unregister_client() is called, the client | |
1858 | * will receive a remove callback for each IB device still registered. | |
921eab11 JG |
1859 | * |
1860 | * This is a full fence, once it returns no client callbacks will be called, | |
1861 | * or be running in another thread. | |
1da177e4 LT |
1862 | */ |
1863 | void ib_unregister_client(struct ib_client *client) | |
1864 | { | |
1da177e4 | 1865 | struct ib_device *device; |
0df91bb6 | 1866 | unsigned long index; |
1da177e4 | 1867 | |
921eab11 | 1868 | down_write(&clients_rwsem); |
621e55ff | 1869 | ib_client_put(client); |
e59178d8 | 1870 | xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED); |
921eab11 | 1871 | up_write(&clients_rwsem); |
621e55ff JG |
1872 | |
1873 | /* We do not want to have locks while calling client->remove() */ | |
1874 | rcu_read_lock(); | |
1875 | xa_for_each (&devices, index, device) { | |
1876 | if (!ib_device_try_get(device)) | |
1877 | continue; | |
1878 | rcu_read_unlock(); | |
1879 | ||
921eab11 | 1880 | remove_client_context(device, client->client_id); |
1da177e4 | 1881 | |
621e55ff JG |
1882 | ib_device_put(device); |
1883 | rcu_read_lock(); | |
1884 | } | |
1885 | rcu_read_unlock(); | |
1886 | ||
921eab11 | 1887 | /* |
621e55ff JG |
1888 | * remove_client_context() is not a fence, it can return even though a |
1889 | * removal is ongoing. Wait until all removals are completed. | |
921eab11 | 1890 | */ |
621e55ff | 1891 | wait_for_completion(&client->uses_zero); |
9cd58817 | 1892 | remove_client_id(client); |
1da177e4 LT |
1893 | } |
1894 | EXPORT_SYMBOL(ib_unregister_client); | |
1895 | ||
0e2d00eb JG |
1896 | static int __ib_get_global_client_nl_info(const char *client_name, |
1897 | struct ib_client_nl_info *res) | |
1898 | { | |
1899 | struct ib_client *client; | |
1900 | unsigned long index; | |
1901 | int ret = -ENOENT; | |
1902 | ||
1903 | down_read(&clients_rwsem); | |
1904 | xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) { | |
1905 | if (strcmp(client->name, client_name) != 0) | |
1906 | continue; | |
1907 | if (!client->get_global_nl_info) { | |
1908 | ret = -EOPNOTSUPP; | |
1909 | break; | |
1910 | } | |
1911 | ret = client->get_global_nl_info(res); | |
1912 | if (WARN_ON(ret == -ENOENT)) | |
1913 | ret = -EINVAL; | |
1914 | if (!ret && res->cdev) | |
1915 | get_device(res->cdev); | |
1916 | break; | |
1917 | } | |
1918 | up_read(&clients_rwsem); | |
1919 | return ret; | |
1920 | } | |
1921 | ||
1922 | static int __ib_get_client_nl_info(struct ib_device *ibdev, | |
1923 | const char *client_name, | |
1924 | struct ib_client_nl_info *res) | |
1925 | { | |
1926 | unsigned long index; | |
1927 | void *client_data; | |
1928 | int ret = -ENOENT; | |
1929 | ||
1930 | down_read(&ibdev->client_data_rwsem); | |
1931 | xan_for_each_marked (&ibdev->client_data, index, client_data, | |
1932 | CLIENT_DATA_REGISTERED) { | |
1933 | struct ib_client *client = xa_load(&clients, index); | |
1934 | ||
1935 | if (!client || strcmp(client->name, client_name) != 0) | |
1936 | continue; | |
1937 | if (!client->get_nl_info) { | |
1938 | ret = -EOPNOTSUPP; | |
1939 | break; | |
1940 | } | |
1941 | ret = client->get_nl_info(ibdev, client_data, res); | |
1942 | if (WARN_ON(ret == -ENOENT)) | |
1943 | ret = -EINVAL; | |
1944 | ||
1945 | /* | |
1946 | * The cdev is guaranteed valid as long as we are inside the | |
1947 | * client_data_rwsem as remove_one can't be called. Keep it | |
1948 | * valid for the caller. | |
1949 | */ | |
1950 | if (!ret && res->cdev) | |
1951 | get_device(res->cdev); | |
1952 | break; | |
1953 | } | |
1954 | up_read(&ibdev->client_data_rwsem); | |
1955 | ||
1956 | return ret; | |
1957 | } | |
1958 | ||
1959 | /** | |
1960 | * ib_get_client_nl_info - Fetch the nl_info from a client | |
4c3b53e1 LJ |
1961 | * @ibdev: IB device |
1962 | * @client_name: Name of the client | |
1963 | * @res: Result of the query | |
0e2d00eb JG |
1964 | */ |
1965 | int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name, | |
1966 | struct ib_client_nl_info *res) | |
1967 | { | |
1968 | int ret; | |
1969 | ||
1970 | if (ibdev) | |
1971 | ret = __ib_get_client_nl_info(ibdev, client_name, res); | |
1972 | else | |
1973 | ret = __ib_get_global_client_nl_info(client_name, res); | |
1974 | #ifdef CONFIG_MODULES | |
1975 | if (ret == -ENOENT) { | |
1976 | request_module("rdma-client-%s", client_name); | |
1977 | if (ibdev) | |
1978 | ret = __ib_get_client_nl_info(ibdev, client_name, res); | |
1979 | else | |
1980 | ret = __ib_get_global_client_nl_info(client_name, res); | |
1981 | } | |
1982 | #endif | |
1983 | if (ret) { | |
1984 | if (ret == -ENOENT) | |
1985 | return -EOPNOTSUPP; | |
1986 | return ret; | |
1987 | } | |
1988 | ||
1989 | if (WARN_ON(!res->cdev)) | |
1990 | return -EINVAL; | |
1991 | return 0; | |
1992 | } | |
1993 | ||
1da177e4 | 1994 | /** |
9cd330d3 | 1995 | * ib_set_client_data - Set IB client context |
1da177e4 LT |
1996 | * @device:Device to set context for |
1997 | * @client:Client to set context for | |
1998 | * @data:Context to set | |
1999 | * | |
0df91bb6 JG |
2000 | * ib_set_client_data() sets client context data that can be retrieved with |
2001 | * ib_get_client_data(). This can only be called while the client is | |
2002 | * registered to the device, once the ib_client remove() callback returns this | |
2003 | * cannot be called. | |
1da177e4 LT |
2004 | */ |
2005 | void ib_set_client_data(struct ib_device *device, struct ib_client *client, | |
2006 | void *data) | |
2007 | { | |
0df91bb6 | 2008 | void *rc; |
1da177e4 | 2009 | |
0df91bb6 JG |
2010 | if (WARN_ON(IS_ERR(data))) |
2011 | data = NULL; | |
1da177e4 | 2012 | |
0df91bb6 JG |
2013 | rc = xa_store(&device->client_data, client->client_id, data, |
2014 | GFP_KERNEL); | |
2015 | WARN_ON(xa_is_err(rc)); | |
1da177e4 LT |
2016 | } |
2017 | EXPORT_SYMBOL(ib_set_client_data); | |
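A short hedged companion to the client sketch above: the pointer stored via ib_set_client_data() in add() can be fetched later, while the client is still attached, with ib_get_client_data(); the wrapper name and types are the invented ones from that sketch.

/* Valid only between the add() and remove() callbacks of my_client. */
static struct my_ctx *my_ctx_from_device(struct ib_device *device)
{
        return ib_get_client_data(device, &my_client);
}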
2018 | ||
2019 | /** | |
2020 | * ib_register_event_handler - Register an IB event handler | |
2021 | * @event_handler:Handler to register | |
2022 | * | |
2023 | * ib_register_event_handler() registers an event handler that will be | |
2024 | * called back when asynchronous IB events occur (as defined in | |
6b57cea9 PP |
2025 | * chapter 11 of the InfiniBand Architecture Specification). This |
2026 | * callback occurs in workqueue context. | |
1da177e4 | 2027 | */ |
dcc9881e | 2028 | void ib_register_event_handler(struct ib_event_handler *event_handler) |
1da177e4 | 2029 | { |
6b57cea9 | 2030 | down_write(&event_handler->device->event_handler_rwsem); |
1da177e4 LT |
2031 | list_add_tail(&event_handler->list, |
2032 | &event_handler->device->event_handler_list); | |
6b57cea9 | 2033 | up_write(&event_handler->device->event_handler_rwsem); |
1da177e4 LT |
2034 | } |
2035 | EXPORT_SYMBOL(ib_register_event_handler); | |
2036 | ||
2037 | /** | |
2038 | * ib_unregister_event_handler - Unregister an event handler | |
2039 | * @event_handler:Handler to unregister | |
2040 | * | |
2041 | * Unregister an event handler registered with | |
2042 | * ib_register_event_handler(). | |
2043 | */ | |
dcc9881e | 2044 | void ib_unregister_event_handler(struct ib_event_handler *event_handler) |
1da177e4 | 2045 | { |
6b57cea9 | 2046 | down_write(&event_handler->device->event_handler_rwsem); |
1da177e4 | 2047 | list_del(&event_handler->list); |
6b57cea9 | 2048 | up_write(&event_handler->device->event_handler_rwsem); |
1da177e4 LT |
2049 | } |
2050 | EXPORT_SYMBOL(ib_unregister_event_handler); | |
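A hedged sketch of the handler API documented above, with invented function names: the handler is initialised with the INIT_IB_EVENT_HANDLER() macro from <rdma/ib_verbs.h>, registered, and then called back in workqueue context for asynchronous events on that device.

static void my_event_handler(struct ib_event_handler *handler,
                             struct ib_event *event)
{
        if (event->event == IB_EVENT_PORT_ACTIVE)
                pr_info("%s: port %u is active\n",
                        dev_name(&event->device->dev),
                        event->element.port_num);
}

/* 'handler' must stay allocated until ib_unregister_event_handler() */
static void my_watch_device(struct ib_device *device,
                            struct ib_event_handler *handler)
{
        INIT_IB_EVENT_HANDLER(handler, device, my_event_handler);
        ib_register_event_handler(handler);
}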
2051 | ||
6b57cea9 | 2052 | void ib_dispatch_event_clients(struct ib_event *event) |
1da177e4 | 2053 | { |
1da177e4 LT |
2054 | struct ib_event_handler *handler; |
2055 | ||
6b57cea9 | 2056 | down_read(&event->device->event_handler_rwsem); |
1da177e4 LT |
2057 | |
2058 | list_for_each_entry(handler, &event->device->event_handler_list, list) | |
2059 | handler->handler(handler, event); | |
2060 | ||
6b57cea9 | 2061 | up_read(&event->device->event_handler_rwsem); |
1da177e4 | 2062 | } |
1da177e4 | 2063 | |
4929116b | 2064 | static int iw_query_port(struct ib_device *device, |
1fb7f897 | 2065 | u32 port_num, |
4929116b | 2066 | struct ib_port_attr *port_attr) |
1da177e4 | 2067 | { |
4929116b KH |
2068 | struct in_device *inetdev; |
2069 | struct net_device *netdev; | |
fad61ad4 | 2070 | |
4929116b KH |
2071 | memset(port_attr, 0, sizeof(*port_attr)); |
2072 | ||
2073 | netdev = ib_device_get_netdev(device, port_num); | |
2074 | if (!netdev) | |
2075 | return -ENODEV; | |
2076 | ||
4929116b KH |
2077 | port_attr->max_mtu = IB_MTU_4096; |
2078 | port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu); | |
2079 | ||
2080 | if (!netif_carrier_ok(netdev)) { | |
2081 | port_attr->state = IB_PORT_DOWN; | |
2082 | port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED; | |
2083 | } else { | |
390d3fdc MK |
2084 | rcu_read_lock(); |
2085 | inetdev = __in_dev_get_rcu(netdev); | |
4929116b KH |
2086 | |
2087 | if (inetdev && inetdev->ifa_list) { | |
2088 | port_attr->state = IB_PORT_ACTIVE; | |
2089 | port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP; | |
4929116b KH |
2090 | } else { |
2091 | port_attr->state = IB_PORT_INIT; | |
2092 | port_attr->phys_state = | |
2093 | IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING; | |
2094 | } | |
390d3fdc MK |
2095 | |
2096 | rcu_read_unlock(); | |
4929116b KH |
2097 | } |
2098 | ||
390d3fdc | 2099 | dev_put(netdev); |
1e123d96 | 2100 | return device->ops.query_port(device, port_num, port_attr); |
4929116b KH |
2101 | } |
2102 | ||
2103 | static int __ib_query_port(struct ib_device *device, | |
1fb7f897 | 2104 | u32 port_num, |
4929116b KH |
2105 | struct ib_port_attr *port_attr) |
2106 | { | |
4929116b | 2107 | int err; |
116c0074 | 2108 | |
fad61ad4 | 2109 | memset(port_attr, 0, sizeof(*port_attr)); |
4929116b | 2110 | |
3023a1e9 | 2111 | err = device->ops.query_port(device, port_num, port_attr); |
fad61ad4 EC |
2112 | if (err || port_attr->subnet_prefix) |
2113 | return err; | |
2114 | ||
4929116b KH |
2115 | if (rdma_port_get_link_layer(device, port_num) != |
2116 | IB_LINK_LAYER_INFINIBAND) | |
d7012467 EC |
2117 | return 0; |
2118 | ||
21bfee9c AK |
2119 | ib_get_cached_subnet_prefix(device, port_num, |
2120 | &port_attr->subnet_prefix); | |
fad61ad4 | 2121 | return 0; |
1da177e4 | 2122 | } |
4929116b KH |
2123 | |
2124 | /** | |
2125 | * ib_query_port - Query IB port attributes | |
2126 | * @device:Device to query | |
2127 | * @port_num:Port number to query | |
2128 | * @port_attr:Port attributes | |
2129 | * | |
2130 | * ib_query_port() returns the attributes of a port through the | |
2131 | * @port_attr pointer. | |
2132 | */ | |
2133 | int ib_query_port(struct ib_device *device, | |
1fb7f897 | 2134 | u32 port_num, |
4929116b KH |
2135 | struct ib_port_attr *port_attr) |
2136 | { | |
2137 | if (!rdma_is_port_valid(device, port_num)) | |
2138 | return -EINVAL; | |
2139 | ||
2140 | if (rdma_protocol_iwarp(device, port_num)) | |
2141 | return iw_query_port(device, port_num, port_attr); | |
2142 | else | |
2143 | return __ib_query_port(device, port_num, port_attr); | |
2144 | } | |
1da177e4 LT |
2145 | EXPORT_SYMBOL(ib_query_port); |
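A hedged sketch of a caller walking the ports of a device with the rdma_for_each_port() helper and ib_query_port(); the function name is invented.

static unsigned int my_count_active_ports(struct ib_device *device)
{
        struct ib_port_attr attr;
        unsigned int active = 0;
        u32 port;

        rdma_for_each_port(device, port) {
                if (ib_query_port(device, port, &attr))
                        continue;       /* skip ports that fail to report */
                if (attr.state == IB_PORT_ACTIVE)
                        active++;
        }
        return active;
}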
2146 | ||
324e227e JG |
2147 | static void add_ndev_hash(struct ib_port_data *pdata) |
2148 | { | |
2149 | unsigned long flags; | |
2150 | ||
2151 | might_sleep(); | |
2152 | ||
2153 | spin_lock_irqsave(&ndev_hash_lock, flags); | |
2154 | if (hash_hashed(&pdata->ndev_hash_link)) { | |
2155 | hash_del_rcu(&pdata->ndev_hash_link); | |
2156 | spin_unlock_irqrestore(&ndev_hash_lock, flags); | |
2157 | /* | |
2158 | * We cannot do hash_add_rcu after a hash_del_rcu until the | |
2159 | * grace period | |
2160 | */ | |
2161 | synchronize_rcu(); | |
2162 | spin_lock_irqsave(&ndev_hash_lock, flags); | |
2163 | } | |
2164 | if (pdata->netdev) | |
2165 | hash_add_rcu(ndev_hash, &pdata->ndev_hash_link, | |
2166 | (uintptr_t)pdata->netdev); | |
2167 | spin_unlock_irqrestore(&ndev_hash_lock, flags); | |
2168 | } | |
2169 | ||
c2261dd7 JG |
2170 | /** |
2171 | * ib_device_set_netdev - Associate the ib_dev with an underlying net_device | |
2172 | * @ib_dev: Device to modify | |
2173 | * @ndev: net_device to affiliate, may be NULL | |
2174 | * @port: IB port the net_device is connected to | |
2175 | * | |
2176 | * Drivers should use this to link the ib_device to a netdev so the netdev | |
2177 | * shows up in interfaces like ib_enum_roce_netdev. Only one netdev may be | |
2178 | * affiliated with any port. | |
2179 | * | |
2180 | * The caller must ensure that the given ndev is not unregistered or | |
2181 | * unregistering, and that either the ib_device is unregistered or | |
2182 | * ib_device_set_netdev() is called with NULL when the ndev sends a | |
2183 | * NETDEV_UNREGISTER event. | |
2184 | */ | |
2185 | int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, | |
1fb7f897 | 2186 | u32 port) |
c2261dd7 | 2187 | { |
9cbed5aa | 2188 | enum rdma_nl_notify_event_type etype; |
c2261dd7 JG |
2189 | struct net_device *old_ndev; |
2190 | struct ib_port_data *pdata; | |
2191 | unsigned long flags; | |
2192 | int ret; | |
2193 | ||
917918f5 LR |
2194 | if (!rdma_is_port_valid(ib_dev, port)) |
2195 | return -EINVAL; | |
2196 | ||
c2261dd7 JG |
2197 | /* |
2198 | * Drivers wish to call this before ib_register_driver, so we have to | |
2199 | * setup the port data early. | |
2200 | */ | |
2201 | ret = alloc_port_data(ib_dev); | |
2202 | if (ret) | |
2203 | return ret; | |
2204 | ||
c2261dd7 JG |
2205 | pdata = &ib_dev->port_data[port]; |
2206 | spin_lock_irqsave(&pdata->netdev_lock, flags); | |
324e227e JG |
2207 | old_ndev = rcu_dereference_protected( |
2208 | pdata->netdev, lockdep_is_held(&pdata->netdev_lock)); | |
2209 | if (old_ndev == ndev) { | |
c2261dd7 JG |
2210 | spin_unlock_irqrestore(&pdata->netdev_lock, flags); |
2211 | return 0; | |
2212 | } | |
c2261dd7 | 2213 | |
324e227e | 2214 | rcu_assign_pointer(pdata->netdev, ndev); |
2043a14f DA |
2215 | netdev_put(old_ndev, &pdata->netdev_tracker); |
2216 | netdev_hold(ndev, &pdata->netdev_tracker, GFP_ATOMIC); | |
c2261dd7 JG |
2217 | spin_unlock_irqrestore(&pdata->netdev_lock, flags); |
2218 | ||
324e227e | 2219 | add_ndev_hash(pdata); |
9cbed5aa CM |
2220 | |
2221 | /* Make sure that the device is registered before we send events */ | |
2222 | if (xa_load(&devices, ib_dev->index) != ib_dev) | |
2223 | return 0; | |
2224 | ||
2225 | etype = ndev ? RDMA_NETDEV_ATTACH_EVENT : RDMA_NETDEV_DETACH_EVENT; | |
2226 | rdma_nl_notify_event(ib_dev, port, etype); | |
2227 | ||
c2261dd7 JG |
2228 | return 0; |
2229 | } | |
2230 | EXPORT_SYMBOL(ib_device_set_netdev); | |
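A hedged sketch of the pairing the kernel-doc above asks for, reusing the invented my_drv structure: the driver attaches its net_device to port 1 at probe time and detaches it again when the netdev is going away, so the core never keeps a stale pointer.

static int my_drv_attach_netdev(struct my_drv *drv, struct net_device *ndev)
{
        return ib_device_set_netdev(&drv->ibdev, ndev, 1);
}

/* Called e.g. from the driver's NETDEV_UNREGISTER handling (hypothetical) */
static void my_drv_detach_netdev(struct my_drv *drv)
{
        ib_device_set_netdev(&drv->ibdev, NULL, 1);
}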
2231 | ||
2232 | static void free_netdevs(struct ib_device *ib_dev) | |
2233 | { | |
2234 | unsigned long flags; | |
1fb7f897 | 2235 | u32 port; |
c2261dd7 | 2236 | |
46bdf370 KH |
2237 | if (!ib_dev->port_data) |
2238 | return; | |
2239 | ||
c2261dd7 JG |
2240 | rdma_for_each_port (ib_dev, port) { |
2241 | struct ib_port_data *pdata = &ib_dev->port_data[port]; | |
324e227e | 2242 | struct net_device *ndev; |
c2261dd7 JG |
2243 | |
2244 | spin_lock_irqsave(&pdata->netdev_lock, flags); | |
324e227e JG |
2245 | ndev = rcu_dereference_protected( |
2246 | pdata->netdev, lockdep_is_held(&pdata->netdev_lock)); | |
2247 | if (ndev) { | |
2248 | spin_lock(&ndev_hash_lock); | |
2249 | hash_del_rcu(&pdata->ndev_hash_link); | |
2250 | spin_unlock(&ndev_hash_lock); | |
2251 | ||
2252 | /* | |
2253 | * If this is the last dev_put there is still a | |
2254 | * synchronize_rcu before the netdev is kfreed, so we | |
2255 | * can continue to rely on unlocked pointer | |
2256 | * comparisons after the put | |
2257 | */ | |
2258 | rcu_assign_pointer(pdata->netdev, NULL); | |
e42f9c2e | 2259 | netdev_put(ndev, &pdata->netdev_tracker); |
c2261dd7 JG |
2260 | } |
2261 | spin_unlock_irqrestore(&pdata->netdev_lock, flags); | |
2262 | } | |
2263 | } | |
2264 | ||
2265 | struct net_device *ib_device_get_netdev(struct ib_device *ib_dev, | |
1fb7f897 | 2266 | u32 port) |
c2261dd7 JG |
2267 | { |
2268 | struct ib_port_data *pdata; | |
2269 | struct net_device *res; | |
2270 | ||
2271 | if (!rdma_is_port_valid(ib_dev, port)) | |
2272 | return NULL; | |
2273 | ||
8d159eb2 CM |
2274 | if (!ib_dev->port_data) |
2275 | return NULL; | |
2276 | ||
c2261dd7 JG |
2277 | pdata = &ib_dev->port_data[port]; |
2278 | ||
2279 | /* | |
2280 | * New drivers should use ib_device_set_netdev() not the legacy | |
2281 | * get_netdev(). | |
2282 | */ | |
2283 | if (ib_dev->ops.get_netdev) | |
2284 | res = ib_dev->ops.get_netdev(ib_dev, port); | |
2285 | else { | |
2286 | spin_lock(&pdata->netdev_lock); | |
324e227e JG |
2287 | res = rcu_dereference_protected( |
2288 | pdata->netdev, lockdep_is_held(&pdata->netdev_lock)); | |
48d80b48 | 2289 | dev_hold(res); |
c2261dd7 JG |
2290 | spin_unlock(&pdata->netdev_lock); |
2291 | } | |
2292 | ||
c2261dd7 JG |
2293 | return res; |
2294 | } | |
8d159eb2 | 2295 | EXPORT_SYMBOL(ib_device_get_netdev); |
c2261dd7 | 2296 | |
0c039a57 YL |
2297 | /** |
2298 | * ib_query_netdev_port - Query the port number of a net_device | |
2299 | * associated with an ibdev | |
2300 | * @ibdev: IB device | |
2301 | * @ndev: Network device | |
2302 | * @port: IB port the net_device is connected to | |
2303 | */ | |
2304 | int ib_query_netdev_port(struct ib_device *ibdev, struct net_device *ndev, | |
2305 | u32 *port) | |
2306 | { | |
2307 | struct net_device *ib_ndev; | |
2308 | u32 port_num; | |
2309 | ||
2310 | rdma_for_each_port(ibdev, port_num) { | |
2311 | ib_ndev = ib_device_get_netdev(ibdev, port_num); | |
2312 | if (ndev == ib_ndev) { | |
2313 | *port = port_num; | |
2314 | dev_put(ib_ndev); | |
2315 | return 0; | |
2316 | } | |
2317 | dev_put(ib_ndev); | |
2318 | } | |
2319 | ||
2320 | return -ENOENT; | |
2321 | } | |
2322 | EXPORT_SYMBOL(ib_query_netdev_port); | |
2323 | ||
324e227e JG |
2324 | /** |
2325 | * ib_device_get_by_netdev - Find an IB device associated with a netdev | |
2326 | * @ndev: netdev to locate | |
2327 | * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all) | |
2328 | * | |
2329 | * Find and hold an ib_device that is associated with a netdev via | |
2330 | * ib_device_set_netdev(). The caller must call ib_device_put() on the | |
2331 | * returned pointer. | |
2332 | */ | |
2333 | struct ib_device *ib_device_get_by_netdev(struct net_device *ndev, | |
2334 | enum rdma_driver_id driver_id) | |
2335 | { | |
2336 | struct ib_device *res = NULL; | |
2337 | struct ib_port_data *cur; | |
2338 | ||
2339 | rcu_read_lock(); | |
2340 | hash_for_each_possible_rcu (ndev_hash, cur, ndev_hash_link, | |
2341 | (uintptr_t)ndev) { | |
2342 | if (rcu_access_pointer(cur->netdev) == ndev && | |
2343 | (driver_id == RDMA_DRIVER_UNKNOWN || | |
b9560a41 | 2344 | cur->ib_dev->ops.driver_id == driver_id) && |
324e227e JG |
2345 | ib_device_try_get(cur->ib_dev)) { |
2346 | res = cur->ib_dev; | |
2347 | break; | |
2348 | } | |
2349 | } | |
2350 | rcu_read_unlock(); | |
2351 | ||
2352 | return res; | |
2353 | } | |
2354 | EXPORT_SYMBOL(ib_device_get_by_netdev); | |
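The reverse lookup can be exercised as in this hedged sketch (invented function name): the returned device carries a reference that the caller must drop with ib_device_put().

static void my_log_rdma_owner(struct net_device *ndev)
{
        struct ib_device *ibdev;

        ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
        if (!ibdev)
                return;
        pr_info("%s is backed by RDMA device %s\n",
                ndev->name, dev_name(&ibdev->dev));
        ib_device_put(ibdev);
}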
2355 | ||
03db3a2d MB |
2356 | /** |
2357 | * ib_enum_roce_netdev - enumerate all RoCE ports | |
2358 | * @ib_dev : IB device we want to query | |
2359 | * @filter: Should we call the callback? | |
2360 | * @filter_cookie: Cookie passed to filter | |
2361 | * @cb: Callback to call for each found RoCE port | |
2362 | * @cookie: Cookie passed back to the callback | |
2363 | * | |
2364 | * Enumerates all of the physical RoCE ports of ib_dev | |
2365 | * which are related to a netdevice and calls callback() on each | |
2366 | * port for which the filter() function returns non-zero. | |
2367 | */ | |
2368 | void ib_enum_roce_netdev(struct ib_device *ib_dev, | |
2369 | roce_netdev_filter filter, | |
2370 | void *filter_cookie, | |
2371 | roce_netdev_callback cb, | |
2372 | void *cookie) | |
2373 | { | |
1fb7f897 | 2374 | u32 port; |
03db3a2d | 2375 | |
ea1075ed | 2376 | rdma_for_each_port (ib_dev, port) |
03db3a2d | 2377 | if (rdma_protocol_roce(ib_dev, port)) { |
c2261dd7 JG |
2378 | struct net_device *idev = |
2379 | ib_device_get_netdev(ib_dev, port); | |
03db3a2d MB |
2380 | |
2381 | if (filter(ib_dev, port, idev, filter_cookie)) | |
2382 | cb(ib_dev, port, idev, cookie); | |
48d80b48 | 2383 | dev_put(idev); |
03db3a2d MB |
2384 | } |
2385 | } | |
2386 | ||
2387 | /** | |
2388 | * ib_enum_all_roce_netdevs - enumerate all RoCE devices | |
2389 | * @filter: Should we call the callback? | |
2390 | * @filter_cookie: Cookie passed to filter | |
2391 | * @cb: Callback to call for each found RoCE port | |
2392 | * @cookie: Cookie passed back to the callback | |
2393 | * | |
2394 | * Enumerates all RoCE devices' physical ports which are related | |
2395 | * to netdevices and calls callback() on each device for which | |
2396 | * the filter() function returns non-zero. | |
2397 | */ | |
2398 | void ib_enum_all_roce_netdevs(roce_netdev_filter filter, | |
2399 | void *filter_cookie, | |
2400 | roce_netdev_callback cb, | |
2401 | void *cookie) | |
2402 | { | |
2403 | struct ib_device *dev; | |
0df91bb6 | 2404 | unsigned long index; |
03db3a2d | 2405 | |
921eab11 | 2406 | down_read(&devices_rwsem); |
0df91bb6 | 2407 | xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) |
03db3a2d | 2408 | ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie); |
921eab11 | 2409 | up_read(&devices_rwsem); |
8030c835 LR |
2410 | } |
2411 | ||
4c3b53e1 | 2412 | /* |
8030c835 LR |
2413 | * ib_enum_all_devs - enumerate all ib_devices |
2414 | * @cb: Callback to call for each found ib_device | |
2415 | * | |
2416 | * Enumerates all ib_devices and calls callback() on each device. | |
2417 | */ | |
2418 | int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb, | |
2419 | struct netlink_callback *cb) | |
2420 | { | |
0df91bb6 | 2421 | unsigned long index; |
8030c835 LR |
2422 | struct ib_device *dev; |
2423 | unsigned int idx = 0; | |
2424 | int ret = 0; | |
2425 | ||
921eab11 | 2426 | down_read(&devices_rwsem); |
0df91bb6 | 2427 | xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { |
37eeab55 PP |
2428 | if (!rdma_dev_access_netns(dev, sock_net(skb->sk))) |
2429 | continue; | |
2430 | ||
8030c835 LR |
2431 | ret = nldev_cb(dev, skb, cb, idx); |
2432 | if (ret) | |
2433 | break; | |
2434 | idx++; | |
2435 | } | |
921eab11 | 2436 | up_read(&devices_rwsem); |
8030c835 | 2437 | return ret; |
03db3a2d MB |
2438 | } |
2439 | ||
1da177e4 LT |
2440 | /** |
2441 | * ib_query_pkey - Get P_Key table entry | |
2442 | * @device:Device to query | |
2443 | * @port_num:Port number to query | |
2444 | * @index:P_Key table index to query | |
2445 | * @pkey:Returned P_Key | |
2446 | * | |
2447 | * ib_query_pkey() fetches the specified P_Key table entry. | |
2448 | */ | |
2449 | int ib_query_pkey(struct ib_device *device, | |
1fb7f897 | 2450 | u32 port_num, u16 index, u16 *pkey) |
1da177e4 | 2451 | { |
9af3f5cf YS |
2452 | if (!rdma_is_port_valid(device, port_num)) |
2453 | return -EINVAL; | |
2454 | ||
ab75a6cb KH |
2455 | if (!device->ops.query_pkey) |
2456 | return -EOPNOTSUPP; | |
2457 | ||
3023a1e9 | 2458 | return device->ops.query_pkey(device, port_num, index, pkey); |
1da177e4 LT |
2459 | } |
2460 | EXPORT_SYMBOL(ib_query_pkey); | |
2461 | ||
2462 | /** | |
2463 | * ib_modify_device - Change IB device attributes | |
2464 | * @device:Device to modify | |
2465 | * @device_modify_mask:Mask of attributes to change | |
2466 | * @device_modify:New attribute values | |
2467 | * | |
2468 | * ib_modify_device() changes a device's attributes as specified by | |
2469 | * the @device_modify_mask and @device_modify structure. | |
2470 | */ | |
2471 | int ib_modify_device(struct ib_device *device, | |
2472 | int device_modify_mask, | |
2473 | struct ib_device_modify *device_modify) | |
2474 | { | |
3023a1e9 | 2475 | if (!device->ops.modify_device) |
d0f3ef36 | 2476 | return -EOPNOTSUPP; |
10e1b54b | 2477 | |
3023a1e9 KH |
2478 | return device->ops.modify_device(device, device_modify_mask, |
2479 | device_modify); | |
1da177e4 LT |
2480 | } |
2481 | EXPORT_SYMBOL(ib_modify_device); | |
2482 | ||
2483 | /** | |
2484 | * ib_modify_port - Modifies the attributes for the specified port. | |
2485 | * @device: The device to modify. | |
2486 | * @port_num: The number of the port to modify. | |
2487 | * @port_modify_mask: Mask used to specify which attributes of the port | |
2488 | * to change. | |
2489 | * @port_modify: New attribute values for the port. | |
2490 | * | |
2491 | * ib_modify_port() changes a port's attributes as specified by the | |
2492 | * @port_modify_mask and @port_modify structure. | |
2493 | */ | |
2494 | int ib_modify_port(struct ib_device *device, | |
1fb7f897 | 2495 | u32 port_num, int port_modify_mask, |
1da177e4 LT |
2496 | struct ib_port_modify *port_modify) |
2497 | { | |
61e0962d | 2498 | int rc; |
10e1b54b | 2499 | |
24dc831b | 2500 | if (!rdma_is_port_valid(device, port_num)) |
116c0074 RD |
2501 | return -EINVAL; |
2502 | ||
3023a1e9 KH |
2503 | if (device->ops.modify_port) |
2504 | rc = device->ops.modify_port(device, port_num, | |
2505 | port_modify_mask, | |
2506 | port_modify); | |
55bfe905 KH |
2507 | else if (rdma_protocol_roce(device, port_num) && |
2508 | ((port_modify->set_port_cap_mask & ~IB_PORT_CM_SUP) == 0 || | |
2509 | (port_modify->clr_port_cap_mask & ~IB_PORT_CM_SUP) == 0)) | |
2510 | rc = 0; | |
61e0962d | 2511 | else |
55bfe905 | 2512 | rc = -EOPNOTSUPP; |
61e0962d | 2513 | return rc; |
1da177e4 LT |
2514 | } |
2515 | EXPORT_SYMBOL(ib_modify_port); | |
2516 | ||
5eb620c8 YE |
2517 | /** |
2518 | * ib_find_gid - Returns the port number and GID table index where | |
dbb12562 | 2519 | * a specified GID value occurs. It searches only the IB link layer. |
5eb620c8 YE |
2520 | * @device: The device to query. |
2521 | * @gid: The GID value to search for. | |
2522 | * @port_num: The port number of the device where the GID value was found. | |
2523 | * @index: The index into the GID table where the GID was found. This | |
2524 | * parameter may be NULL. | |
2525 | */ | |
2526 | int ib_find_gid(struct ib_device *device, union ib_gid *gid, | |
1fb7f897 | 2527 | u32 *port_num, u16 *index) |
5eb620c8 YE |
2528 | { |
2529 | union ib_gid tmp_gid; | |
1fb7f897 | 2530 | u32 port; |
ea1075ed | 2531 | int ret, i; |
5eb620c8 | 2532 | |
ea1075ed | 2533 | rdma_for_each_port (device, port) { |
22d24f75 | 2534 | if (!rdma_protocol_ib(device, port)) |
b39ffa1d MB |
2535 | continue; |
2536 | ||
8ceb1357 JG |
2537 | for (i = 0; i < device->port_data[port].immutable.gid_tbl_len; |
2538 | ++i) { | |
1dfce294 | 2539 | ret = rdma_query_gid(device, port, i, &tmp_gid); |
5eb620c8 | 2540 | if (ret) |
483d8051 AH |
2541 | continue; |
2542 | ||
5eb620c8 YE |
2543 | if (!memcmp(&tmp_gid, gid, sizeof *gid)) { |
2544 | *port_num = port; | |
2545 | if (index) | |
2546 | *index = i; | |
2547 | return 0; | |
2548 | } | |
2549 | } | |
2550 | } | |
2551 | ||
2552 | return -ENOENT; | |
2553 | } | |
2554 | EXPORT_SYMBOL(ib_find_gid); | |
2555 | ||
2556 | /** | |
2557 | * ib_find_pkey - Returns the PKey table index where a specified | |
2558 | * PKey value occurs. | |
2559 | * @device: The device to query. | |
2560 | * @port_num: The port number of the device to search for the PKey. | |
2561 | * @pkey: The PKey value to search for. | |
2562 | * @index: The index into the PKey table where the PKey was found. | |
2563 | */ | |
2564 | int ib_find_pkey(struct ib_device *device, | |
1fb7f897 | 2565 | u32 port_num, u16 pkey, u16 *index) |
5eb620c8 YE |
2566 | { |
2567 | int ret, i; | |
2568 | u16 tmp_pkey; | |
ff7166c4 | 2569 | int partial_ix = -1; |
5eb620c8 | 2570 | |
8ceb1357 JG |
2571 | for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len; |
2572 | ++i) { | |
5eb620c8 YE |
2573 | ret = ib_query_pkey(device, port_num, i, &tmp_pkey); |
2574 | if (ret) | |
2575 | return ret; | |
36026ecc | 2576 | if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) { |
ff7166c4 JM |
2577 | /* if there is a full-member pkey, take it. */ |
2578 | if (tmp_pkey & 0x8000) { | |
2579 | *index = i; | |
2580 | return 0; | |
2581 | } | |
2582 | if (partial_ix < 0) | |
2583 | partial_ix = i; | |
5eb620c8 YE |
2584 | } |
2585 | } | |
2586 | ||
ff7166c4 JM |
2587 | /* no full-member; if a limited member exists, take it */ |
2588 | if (partial_ix >= 0) { | |
2589 | *index = partial_ix; | |
2590 | return 0; | |
2591 | } | |
5eb620c8 YE |
2592 | return -ENOENT; |
2593 | } | |
2594 | EXPORT_SYMBOL(ib_find_pkey); | |
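A hedged one-liner showing the usual call pattern: look up the table index of the default full-membership P_Key (0xffff); the wrapper name is invented.

static int my_default_pkey_index(struct ib_device *device, u32 port_num,
                                 u16 *index)
{
        /* 0xffff is the default full-member P_Key */
        return ib_find_pkey(device, port_num, 0xffff, index);
}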
2595 | ||
9268f72d YK |
2596 | /** |
2597 | * ib_get_net_dev_by_params() - Return the appropriate net_dev | |
2598 | * for a received CM request | |
2599 | * @dev: An RDMA device on which the request has been received. | |
2600 | * @port: Port number on the RDMA device. | |
2601 | * @pkey: The Pkey the request came on. | |
2602 | * @gid: A GID that the net_dev uses to communicate. | |
2603 | * @addr: Contains the IP address that the request specified as its | |
2604 | * destination. | |
921eab11 | 2605 | * |
9268f72d YK |
2606 | */ |
2607 | struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, | |
1fb7f897 | 2608 | u32 port, |
9268f72d YK |
2609 | u16 pkey, |
2610 | const union ib_gid *gid, | |
2611 | const struct sockaddr *addr) | |
2612 | { | |
2613 | struct net_device *net_dev = NULL; | |
0df91bb6 JG |
2614 | unsigned long index; |
2615 | void *client_data; | |
9268f72d YK |
2616 | |
2617 | if (!rdma_protocol_ib(dev, port)) | |
2618 | return NULL; | |
2619 | ||
921eab11 JG |
2620 | /* |
2621 | * Holding the read side guarantees that the client will not become | |
2622 | * unregistered while we are calling get_net_dev_by_params() | |
2623 | */ | |
2624 | down_read(&dev->client_data_rwsem); | |
0df91bb6 JG |
2625 | xan_for_each_marked (&dev->client_data, index, client_data, |
2626 | CLIENT_DATA_REGISTERED) { | |
2627 | struct ib_client *client = xa_load(&clients, index); | |
9268f72d | 2628 | |
0df91bb6 | 2629 | if (!client || !client->get_net_dev_by_params) |
9268f72d YK |
2630 | continue; |
2631 | ||
0df91bb6 JG |
2632 | net_dev = client->get_net_dev_by_params(dev, port, pkey, gid, |
2633 | addr, client_data); | |
2634 | if (net_dev) | |
2635 | break; | |
9268f72d | 2636 | } |
921eab11 | 2637 | up_read(&dev->client_data_rwsem); |
9268f72d YK |
2638 | |
2639 | return net_dev; | |
2640 | } | |
2641 | EXPORT_SYMBOL(ib_get_net_dev_by_params); | |
2642 | ||
521ed0d9 KH |
2643 | void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) |
2644 | { | |
3023a1e9 | 2645 | struct ib_device_ops *dev_ops = &dev->ops; |
521ed0d9 KH |
2646 | #define SET_DEVICE_OP(ptr, name) \ |
2647 | do { \ | |
2648 | if (ops->name) \ | |
2649 | if (!((ptr)->name)) \ | |
2650 | (ptr)->name = ops->name; \ | |
2651 | } while (0) | |
2652 | ||
30471d4b LR |
2653 | #define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name) |
2654 | ||
b9560a41 JG |
2655 | if (ops->driver_id != RDMA_DRIVER_UNKNOWN) { |
2656 | WARN_ON(dev_ops->driver_id != RDMA_DRIVER_UNKNOWN && | |
2657 | dev_ops->driver_id != ops->driver_id); | |
2658 | dev_ops->driver_id = ops->driver_id; | |
2659 | } | |
7a154142 JG |
2660 | if (ops->owner) { |
2661 | WARN_ON(dev_ops->owner && dev_ops->owner != ops->owner); | |
2662 | dev_ops->owner = ops->owner; | |
2663 | } | |
72c6ec18 JG |
2664 | if (ops->uverbs_abi_ver) |
2665 | dev_ops->uverbs_abi_ver = ops->uverbs_abi_ver; | |
b9560a41 | 2666 | |
8f71bb00 JG |
2667 | dev_ops->uverbs_no_driver_id_binding |= |
2668 | ops->uverbs_no_driver_id_binding; | |
2669 | ||
3023a1e9 | 2670 | SET_DEVICE_OP(dev_ops, add_gid); |
bca51197 | 2671 | SET_DEVICE_OP(dev_ops, add_sub_dev); |
2f1927b0 | 2672 | SET_DEVICE_OP(dev_ops, advise_mr); |
3023a1e9 | 2673 | SET_DEVICE_OP(dev_ops, alloc_dm); |
4b5f4d3f JG |
2674 | SET_DEVICE_OP(dev_ops, alloc_hw_device_stats); |
2675 | SET_DEVICE_OP(dev_ops, alloc_hw_port_stats); | |
3023a1e9 | 2676 | SET_DEVICE_OP(dev_ops, alloc_mr); |
26bc7eae | 2677 | SET_DEVICE_OP(dev_ops, alloc_mr_integrity); |
3023a1e9 KH |
2678 | SET_DEVICE_OP(dev_ops, alloc_mw); |
2679 | SET_DEVICE_OP(dev_ops, alloc_pd); | |
2680 | SET_DEVICE_OP(dev_ops, alloc_rdma_netdev); | |
2681 | SET_DEVICE_OP(dev_ops, alloc_ucontext); | |
2682 | SET_DEVICE_OP(dev_ops, alloc_xrcd); | |
2683 | SET_DEVICE_OP(dev_ops, attach_mcast); | |
2684 | SET_DEVICE_OP(dev_ops, check_mr_status); | |
c4ffee7c | 2685 | SET_DEVICE_OP(dev_ops, counter_alloc_stats); |
99fa331d MZ |
2686 | SET_DEVICE_OP(dev_ops, counter_bind_qp); |
2687 | SET_DEVICE_OP(dev_ops, counter_dealloc); | |
7e53b31a | 2688 | SET_DEVICE_OP(dev_ops, counter_init); |
99fa331d | 2689 | SET_DEVICE_OP(dev_ops, counter_unbind_qp); |
c4ffee7c | 2690 | SET_DEVICE_OP(dev_ops, counter_update_stats); |
3023a1e9 KH |
2691 | SET_DEVICE_OP(dev_ops, create_ah); |
2692 | SET_DEVICE_OP(dev_ops, create_counters); | |
2693 | SET_DEVICE_OP(dev_ops, create_cq); | |
2694 | SET_DEVICE_OP(dev_ops, create_flow); | |
3023a1e9 KH |
2695 | SET_DEVICE_OP(dev_ops, create_qp); |
2696 | SET_DEVICE_OP(dev_ops, create_rwq_ind_table); | |
2697 | SET_DEVICE_OP(dev_ops, create_srq); | |
676a80ad | 2698 | SET_DEVICE_OP(dev_ops, create_user_ah); |
3023a1e9 KH |
2699 | SET_DEVICE_OP(dev_ops, create_wq); |
2700 | SET_DEVICE_OP(dev_ops, dealloc_dm); | |
d0899892 | 2701 | SET_DEVICE_OP(dev_ops, dealloc_driver); |
3023a1e9 KH |
2702 | SET_DEVICE_OP(dev_ops, dealloc_mw); |
2703 | SET_DEVICE_OP(dev_ops, dealloc_pd); | |
2704 | SET_DEVICE_OP(dev_ops, dealloc_ucontext); | |
2705 | SET_DEVICE_OP(dev_ops, dealloc_xrcd); | |
2706 | SET_DEVICE_OP(dev_ops, del_gid); | |
bca51197 | 2707 | SET_DEVICE_OP(dev_ops, del_sub_dev); |
3023a1e9 KH |
2708 | SET_DEVICE_OP(dev_ops, dereg_mr); |
2709 | SET_DEVICE_OP(dev_ops, destroy_ah); | |
2710 | SET_DEVICE_OP(dev_ops, destroy_counters); | |
2711 | SET_DEVICE_OP(dev_ops, destroy_cq); | |
2712 | SET_DEVICE_OP(dev_ops, destroy_flow); | |
2713 | SET_DEVICE_OP(dev_ops, destroy_flow_action); | |
2714 | SET_DEVICE_OP(dev_ops, destroy_qp); | |
2715 | SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table); | |
2716 | SET_DEVICE_OP(dev_ops, destroy_srq); | |
2717 | SET_DEVICE_OP(dev_ops, destroy_wq); | |
915e4af5 | 2718 | SET_DEVICE_OP(dev_ops, device_group); |
3023a1e9 KH |
2719 | SET_DEVICE_OP(dev_ops, detach_mcast); |
2720 | SET_DEVICE_OP(dev_ops, disassociate_ucontext); | |
2721 | SET_DEVICE_OP(dev_ops, drain_rq); | |
2722 | SET_DEVICE_OP(dev_ops, drain_sq); | |
ca22354b | 2723 | SET_DEVICE_OP(dev_ops, enable_driver); |
211cd945 | 2724 | SET_DEVICE_OP(dev_ops, fill_res_cm_id_entry); |
9e2a187a | 2725 | SET_DEVICE_OP(dev_ops, fill_res_cq_entry); |
65959522 | 2726 | SET_DEVICE_OP(dev_ops, fill_res_cq_entry_raw); |
f4434529 | 2727 | SET_DEVICE_OP(dev_ops, fill_res_mr_entry); |
65959522 | 2728 | SET_DEVICE_OP(dev_ops, fill_res_mr_entry_raw); |
5cc34116 | 2729 | SET_DEVICE_OP(dev_ops, fill_res_qp_entry); |
65959522 | 2730 | SET_DEVICE_OP(dev_ops, fill_res_qp_entry_raw); |
0e32d7d4 | 2731 | SET_DEVICE_OP(dev_ops, fill_res_srq_entry); |
aebf8145 | 2732 | SET_DEVICE_OP(dev_ops, fill_res_srq_entry_raw); |
f4434529 | 2733 | SET_DEVICE_OP(dev_ops, fill_stat_mr_entry); |
3023a1e9 KH |
2734 | SET_DEVICE_OP(dev_ops, get_dev_fw_str); |
2735 | SET_DEVICE_OP(dev_ops, get_dma_mr); | |
2736 | SET_DEVICE_OP(dev_ops, get_hw_stats); | |
2737 | SET_DEVICE_OP(dev_ops, get_link_layer); | |
2738 | SET_DEVICE_OP(dev_ops, get_netdev); | |
514aee66 | 2739 | SET_DEVICE_OP(dev_ops, get_numa_node); |
3023a1e9 KH |
2740 | SET_DEVICE_OP(dev_ops, get_port_immutable); |
2741 | SET_DEVICE_OP(dev_ops, get_vector_affinity); | |
2742 | SET_DEVICE_OP(dev_ops, get_vf_config); | |
bfcb3c5d | 2743 | SET_DEVICE_OP(dev_ops, get_vf_guid); |
3023a1e9 | 2744 | SET_DEVICE_OP(dev_ops, get_vf_stats); |
dd05cb82 KH |
2745 | SET_DEVICE_OP(dev_ops, iw_accept); |
2746 | SET_DEVICE_OP(dev_ops, iw_add_ref); | |
2747 | SET_DEVICE_OP(dev_ops, iw_connect); | |
2748 | SET_DEVICE_OP(dev_ops, iw_create_listen); | |
2749 | SET_DEVICE_OP(dev_ops, iw_destroy_listen); | |
2750 | SET_DEVICE_OP(dev_ops, iw_get_qp); | |
2751 | SET_DEVICE_OP(dev_ops, iw_reject); | |
2752 | SET_DEVICE_OP(dev_ops, iw_rem_ref); | |
3023a1e9 | 2753 | SET_DEVICE_OP(dev_ops, map_mr_sg); |
2cdfcdd8 | 2754 | SET_DEVICE_OP(dev_ops, map_mr_sg_pi); |
3023a1e9 | 2755 | SET_DEVICE_OP(dev_ops, mmap); |
3411f9f0 | 2756 | SET_DEVICE_OP(dev_ops, mmap_free); |
3023a1e9 KH |
2757 | SET_DEVICE_OP(dev_ops, modify_ah); |
2758 | SET_DEVICE_OP(dev_ops, modify_cq); | |
2759 | SET_DEVICE_OP(dev_ops, modify_device); | |
5e2ddd1e | 2760 | SET_DEVICE_OP(dev_ops, modify_hw_stat); |
3023a1e9 KH |
2761 | SET_DEVICE_OP(dev_ops, modify_port); |
2762 | SET_DEVICE_OP(dev_ops, modify_qp); | |
2763 | SET_DEVICE_OP(dev_ops, modify_srq); | |
2764 | SET_DEVICE_OP(dev_ops, modify_wq); | |
2765 | SET_DEVICE_OP(dev_ops, peek_cq); | |
2766 | SET_DEVICE_OP(dev_ops, poll_cq); | |
d7407d16 | 2767 | SET_DEVICE_OP(dev_ops, port_groups); |
3023a1e9 KH |
2768 | SET_DEVICE_OP(dev_ops, post_recv); |
2769 | SET_DEVICE_OP(dev_ops, post_send); | |
2770 | SET_DEVICE_OP(dev_ops, post_srq_recv); | |
2771 | SET_DEVICE_OP(dev_ops, process_mad); | |
2772 | SET_DEVICE_OP(dev_ops, query_ah); | |
2773 | SET_DEVICE_OP(dev_ops, query_device); | |
2774 | SET_DEVICE_OP(dev_ops, query_gid); | |
2775 | SET_DEVICE_OP(dev_ops, query_pkey); | |
2776 | SET_DEVICE_OP(dev_ops, query_port); | |
2777 | SET_DEVICE_OP(dev_ops, query_qp); | |
2778 | SET_DEVICE_OP(dev_ops, query_srq); | |
1c8fb1ea | 2779 | SET_DEVICE_OP(dev_ops, query_ucontext); |
3023a1e9 KH |
2780 | SET_DEVICE_OP(dev_ops, rdma_netdev_get_params); |
2781 | SET_DEVICE_OP(dev_ops, read_counters); | |
2782 | SET_DEVICE_OP(dev_ops, reg_dm_mr); | |
2783 | SET_DEVICE_OP(dev_ops, reg_user_mr); | |
3bc489e8 | 2784 | SET_DEVICE_OP(dev_ops, reg_user_mr_dmabuf); |
3023a1e9 KH |
2785 | SET_DEVICE_OP(dev_ops, req_notify_cq); |
2786 | SET_DEVICE_OP(dev_ops, rereg_user_mr); | |
2787 | SET_DEVICE_OP(dev_ops, resize_cq); | |
2788 | SET_DEVICE_OP(dev_ops, set_vf_guid); | |
2789 | SET_DEVICE_OP(dev_ops, set_vf_link_state); | |
dc6be441 | 2790 | SET_DEVICE_OP(dev_ops, ufile_hw_cleanup); |
1fb0644c | 2791 | SET_DEVICE_OP(dev_ops, report_port_event); |
21a428a0 | 2792 | |
d3456914 | 2793 | SET_OBJ_SIZE(dev_ops, ib_ah); |
3b023e1b | 2794 | SET_OBJ_SIZE(dev_ops, ib_counters); |
e39afe3d | 2795 | SET_OBJ_SIZE(dev_ops, ib_cq); |
d18bb3e1 | 2796 | SET_OBJ_SIZE(dev_ops, ib_mw); |
21a428a0 | 2797 | SET_OBJ_SIZE(dev_ops, ib_pd); |
514aee66 | 2798 | SET_OBJ_SIZE(dev_ops, ib_qp); |
c0a6b5ec | 2799 | SET_OBJ_SIZE(dev_ops, ib_rwq_ind_table); |
68e326de | 2800 | SET_OBJ_SIZE(dev_ops, ib_srq); |
a2a074ef | 2801 | SET_OBJ_SIZE(dev_ops, ib_ucontext); |
28ad5f65 | 2802 | SET_OBJ_SIZE(dev_ops, ib_xrcd); |
7e53b31a | 2803 | SET_OBJ_SIZE(dev_ops, rdma_counter); |
521ed0d9 KH |
2804 | } |
2805 | EXPORT_SYMBOL(ib_set_device_ops); | |
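A hedged sketch of how a driver typically feeds its ops table into ib_set_device_ops() before registering; only one op and one object size are shown, and every name other than the core types and macros is invented. RDMA_DRIVER_UNKNOWN again stands in for the driver's real id.

struct my_pd {
        struct ib_pd ibpd;      /* core allocates this much per PD */
};

static int my_query_port(struct ib_device *ibdev, u32 port_num,
                         struct ib_port_attr *attr)
{
        memset(attr, 0, sizeof(*attr));
        attr->state = IB_PORT_ACTIVE;   /* placeholder behaviour */
        return 0;
}

static const struct ib_device_ops my_dev_ops = {
        .owner          = THIS_MODULE,
        .driver_id      = RDMA_DRIVER_UNKNOWN, /* a real driver sets its own id */
        .uverbs_abi_ver = 1,

        .query_port     = my_query_port,

        INIT_RDMA_OBJ_SIZE(ib_pd, my_pd, ibpd),
};

static void my_drv_set_ops(struct ib_device *ibdev)
{
        /* only fills in ops that are still unset, as SET_DEVICE_OP() shows */
        ib_set_device_ops(ibdev, &my_dev_ops);
}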
2806 | ||
bca51197 MZ |
2807 | int ib_add_sub_device(struct ib_device *parent, |
2808 | enum rdma_nl_dev_type type, | |
2809 | const char *name) | |
2810 | { | |
2811 | struct ib_device *sub; | |
2812 | int ret = 0; | |
2813 | ||
2814 | if (!parent->ops.add_sub_dev || !parent->ops.del_sub_dev) | |
2815 | return -EOPNOTSUPP; | |
2816 | ||
2817 | if (!ib_device_try_get(parent)) | |
2818 | return -EINVAL; | |
2819 | ||
2820 | sub = parent->ops.add_sub_dev(parent, type, name); | |
2821 | if (IS_ERR(sub)) { | |
2822 | ib_device_put(parent); | |
2823 | return PTR_ERR(sub); | |
2824 | } | |
2825 | ||
2826 | sub->type = type; | |
2827 | sub->parent = parent; | |
2828 | ||
2829 | mutex_lock(&parent->subdev_lock); | |
2830 | list_add_tail(&parent->subdev_list_head, &sub->subdev_list); | |
2831 | mutex_unlock(&parent->subdev_lock); | |
2832 | ||
2833 | return ret; | |
2834 | } | |
2835 | EXPORT_SYMBOL(ib_add_sub_device); | |
2836 | ||
2837 | int ib_del_sub_device_and_put(struct ib_device *sub) | |
2838 | { | |
2839 | struct ib_device *parent = sub->parent; | |
2840 | ||
2841 | if (!parent) | |
2842 | return -EOPNOTSUPP; | |
2843 | ||
2844 | mutex_lock(&parent->subdev_lock); | |
2845 | list_del(&sub->subdev_list); | |
2846 | mutex_unlock(&parent->subdev_lock); | |
2847 | ||
2848 | ib_device_put(sub); | |
2849 | parent->ops.del_sub_dev(sub); | |
2850 | ib_device_put(parent); | |
2851 | ||
2852 | return 0; | |
2853 | } | |
2854 | EXPORT_SYMBOL(ib_del_sub_device_and_put); | |
2855 | ||
5a7a9e03 CH |
2856 | #ifdef CONFIG_INFINIBAND_VIRT_DMA |
2857 | int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents) | |
2858 | { | |
2859 | struct scatterlist *s; | |
2860 | int i; | |
2861 | ||
2862 | for_each_sg(sg, s, nents, i) { | |
2863 | sg_dma_address(s) = (uintptr_t)sg_virt(s); | |
2864 | sg_dma_len(s) = s->length; | |
2865 | } | |
2866 | return nents; | |
2867 | } | |
2868 | EXPORT_SYMBOL(ib_dma_virt_map_sg); | |
2869 | #endif /* CONFIG_INFINIBAND_VIRT_DMA */ | |
2870 | ||
d0e312fe | 2871 | static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = { |
735c631a | 2872 | [RDMA_NL_LS_OP_RESOLVE] = { |
647c75ac | 2873 | .doit = ib_nl_handle_resolve_resp, |
e3a2b93d LR |
2874 | .flags = RDMA_NL_ADMIN_PERM, |
2875 | }, | |
735c631a | 2876 | [RDMA_NL_LS_OP_SET_TIMEOUT] = { |
647c75ac | 2877 | .doit = ib_nl_handle_set_timeout, |
e3a2b93d LR |
2878 | .flags = RDMA_NL_ADMIN_PERM, |
2879 | }, | |
ae43f828 | 2880 | [RDMA_NL_LS_OP_IP_RESOLVE] = { |
647c75ac | 2881 | .doit = ib_nl_handle_ip_res_resp, |
e3a2b93d LR |
2882 | .flags = RDMA_NL_ADMIN_PERM, |
2883 | }, | |
735c631a MB |
2884 | }; |
2885 | ||
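The table above is registered from ib_core_init() below via rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table); RDMA_NL_ADMIN_PERM makes the RDMA netlink dispatcher reject senders without CAP_NET_ADMIN before ->doit runs. A hedged sketch of the same shape for a hypothetical op table:

static int foo_nl_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct netlink_ext_ack *extack)
{
	/* parse attributes from @nlh and reply, or return an errno */
	return 0;
}

static const struct rdma_nl_cbs foo_nl_cb_table[] = {
	[0] = {
		.doit = foo_nl_doit,
		.flags = RDMA_NL_ADMIN_PERM,	/* require CAP_NET_ADMIN */
	},
};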
1fb0644c YL |
2886 | void ib_dispatch_port_state_event(struct ib_device *ibdev, struct net_device *ndev) |
2887 | { | |
2888 | enum ib_port_state curr_state; | |
2889 | struct ib_event ibevent = {}; | |
2890 | u32 port; | |
2891 | ||
2892 | if (ib_query_netdev_port(ibdev, ndev, &port)) | |
2893 | return; | |
2894 | ||
2895 | curr_state = ib_get_curr_port_state(ndev); | |
2896 | ||
2897 | write_lock_irq(&ibdev->cache_lock); | |
2898 | if (ibdev->port_data[port].cache.last_port_state == curr_state) { | |
2899 | write_unlock_irq(&ibdev->cache_lock); | |
2900 | return; | |
2901 | } | |
2902 | ibdev->port_data[port].cache.last_port_state = curr_state; | |
2903 | write_unlock_irq(&ibdev->cache_lock); | |
2904 | ||
2905 | ibevent.event = (curr_state == IB_PORT_DOWN) ? | |
2906 | IB_EVENT_PORT_ERR : IB_EVENT_PORT_ACTIVE; | |
2907 | ibevent.device = ibdev; | |
2908 | ibevent.element.port_num = port; | |
2909 | ib_dispatch_event(&ibevent); | |
2910 | } | |
2911 | EXPORT_SYMBOL(ib_dispatch_port_state_event); | |
2912 | ||
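ib_dispatch_port_state_event() is exported mainly so that drivers which install a report_port_event op (for example to handle bonded/LAG netdevs themselves, see handle_port_event() below) can still reuse the cached-state comparison and event dispatch. A hedged sketch of such a hypothetical handler:

static void foo_report_port_event(struct ib_device *ibdev,
				  struct net_device *ndev, unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_CHANGE:
		/* map the LAG/bond member state to the right netdev here,
		 * then let the core compare against the cache and dispatch */
		ib_dispatch_port_state_event(ibdev, ndev);
		break;
	default:
		break;
	}
}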
2913 | static void handle_port_event(struct net_device *ndev, unsigned long event) | |
2914 | { | |
2915 | struct ib_device *ibdev; | |
2916 | ||
2917 | /* Currently, link events in bonding scenarios are still | |
2918 | * reported by drivers that support bonding. | |
2919 | */ | |
2920 | if (netif_is_lag_master(ndev) || netif_is_lag_port(ndev)) | |
2921 | return; | |
2922 | ||
2923 | ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN); | |
2924 | if (!ibdev) | |
2925 | return; | |
2926 | ||
2927 | if (ibdev->ops.report_port_event) { | |
2928 | ibdev->ops.report_port_event(ibdev, ndev, event); | |
2929 | goto put_ibdev; | |
2930 | } | |
2931 | ||
2932 | ib_dispatch_port_state_event(ibdev, ndev); | |
2933 | ||
2934 | put_ibdev: | |
2935 | ib_device_put(ibdev); | |
2936 | } | |
2937 | ||
7566752e CM |
2938 | static int ib_netdevice_event(struct notifier_block *this, |
2939 | unsigned long event, void *ptr) | |
2940 | { | |
2941 | struct net_device *ndev = netdev_notifier_info_to_dev(ptr); | |
7566752e CM |
2942 | struct ib_device *ibdev; |
2943 | u32 port; | |
2944 | ||
2945 | switch (event) { | |
2946 | case NETDEV_CHANGENAME: | |
2947 | ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN); | |
2948 | if (!ibdev) | |
2949 | return NOTIFY_DONE; | |
2950 | ||
0c039a57 YL |
2951 | if (ib_query_netdev_port(ibdev, ndev, &port)) { |
2952 | ib_device_put(ibdev); | |
2953 | break; | |
7566752e | 2954 | } |
0c039a57 YL |
2955 | |
2956 | rdma_nl_notify_event(ibdev, port, RDMA_NETDEV_RENAME_EVENT); | |
7566752e CM |
2957 | ib_device_put(ibdev); |
2958 | break; | |
1fb0644c YL |
2959 | |
2960 | case NETDEV_UP: | |
2961 | case NETDEV_CHANGE: | |
2962 | case NETDEV_DOWN: | |
2963 | handle_port_event(ndev, event); | |
2964 | break; | |
2965 | ||
7566752e CM |
2966 | default: |
2967 | break; | |
2968 | } | |
2969 | ||
2970 | return NOTIFY_DONE; | |
2971 | } | |
2972 | ||
2973 | static struct notifier_block nb_netdevice = { | |
2974 | .notifier_call = ib_netdevice_event, | |
2975 | }; | |
2976 | ||
1da177e4 LT |
2977 | static int __init ib_core_init(void) |
2978 | { | |
ff815a89 | 2979 | int ret = -ENOMEM; |
1da177e4 | 2980 | |
f0626710 TH |
2981 | ib_wq = alloc_workqueue("infiniband", 0, 0); |
2982 | if (!ib_wq) | |
2983 | return -ENOMEM; | |
2984 | ||
ff815a89 TH |
2985 | ib_unreg_wq = alloc_workqueue("ib-unreg-wq", WQ_UNBOUND, |
2986 | WQ_UNBOUND_MAX_ACTIVE); | |
2987 | if (!ib_unreg_wq) | |
2988 | goto err; | |
2989 | ||
14d3a3b2 | 2990 | ib_comp_wq = alloc_workqueue("ib-comp-wq", |
b7363e67 | 2991 | WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0); |
ff815a89 TH |
2992 | if (!ib_comp_wq) |
2993 | goto err_unbound; | |
14d3a3b2 | 2994 | |
f794809a JM |
2995 | ib_comp_unbound_wq = |
2996 | alloc_workqueue("ib-comp-unb-wq", | |
2997 | WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM | | |
2998 | WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE); | |
ff815a89 | 2999 | if (!ib_comp_unbound_wq) |
f794809a | 3000 | goto err_comp; |
f794809a | 3001 | |
55aeed06 | 3002 | ret = class_register(&ib_class); |
fd75c789 | 3003 | if (ret) { |
aba25a3e | 3004 | pr_warn("Couldn't create InfiniBand device class\n"); |
f794809a | 3005 | goto err_comp_unbound; |
fd75c789 | 3006 | } |
1da177e4 | 3007 | |
549af008 PP |
3008 | rdma_nl_init(); |
3009 | ||
e3f20f02 LR |
3010 | ret = addr_init(); |
3011 | if (ret) { | |
4469add9 | 3012 | pr_warn("Couldn't init IB address resolution\n"); |
e3f20f02 LR |
3013 | goto err_ibnl; |
3014 | } | |
3015 | ||
4c2cb422 MB |
3016 | ret = ib_mad_init(); |
3017 | if (ret) { | |
3018 | pr_warn("Couldn't init IB MAD\n"); | |
3019 | goto err_addr; | |
3020 | } | |
3021 | ||
c2e49c92 MB |
3022 | ret = ib_sa_init(); |
3023 | if (ret) { | |
3024 | pr_warn("Couldn't init SA\n"); | |
3025 | goto err_mad; | |
3026 | } | |
3027 | ||
42df744c | 3028 | ret = register_blocking_lsm_notifier(&ibdev_lsm_nb); |
8f408ab6 DJ |
3029 | if (ret) { |
3030 | pr_warn("Couldn't register LSM notifier. ret %d\n", ret); | |
c9901724 | 3031 | goto err_sa; |
8f408ab6 DJ |
3032 | } |
3033 | ||
4e0f7b90 PP |
3034 | ret = register_pernet_device(&rdma_dev_net_ops); |
3035 | if (ret) { | |
3036 | pr_warn("Couldn't init compat dev. ret %d\n", ret); | |
3037 | goto err_compat; | |
3038 | } | |
3039 | ||
6c80b41a | 3040 | nldev_init(); |
c9901724 | 3041 | rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table); |
07c0d131 CZ |
3042 | ret = roce_gid_mgmt_init(); |
3043 | if (ret) { | |
3044 | pr_warn("Couldn't init RoCE GID management\n"); | |
3045 | goto err_parent; | |
3046 | } | |
1da177e4 | 3047 | |
7566752e CM |
3048 | register_netdevice_notifier(&nb_netdevice); |
3049 | ||
fd75c789 NM |
3050 | return 0; |
3051 | ||
07c0d131 CZ |
3052 | err_parent: |
3053 | rdma_nl_unregister(RDMA_NL_LS); | |
3054 | nldev_exit(); | |
3055 | unregister_pernet_device(&rdma_dev_net_ops); | |
4e0f7b90 | 3056 | err_compat: |
42df744c | 3057 | unregister_blocking_lsm_notifier(&ibdev_lsm_nb); |
735c631a MB |
3058 | err_sa: |
3059 | ib_sa_cleanup(); | |
c2e49c92 MB |
3060 | err_mad: |
3061 | ib_mad_cleanup(); | |
4c2cb422 MB |
3062 | err_addr: |
3063 | addr_cleanup(); | |
e3f20f02 | 3064 | err_ibnl: |
55aeed06 | 3065 | class_unregister(&ib_class); |
f794809a JM |
3066 | err_comp_unbound: |
3067 | destroy_workqueue(ib_comp_unbound_wq); | |
14d3a3b2 CH |
3068 | err_comp: |
3069 | destroy_workqueue(ib_comp_wq); | |
ff815a89 TH |
3070 | err_unbound: |
3071 | destroy_workqueue(ib_unreg_wq); | |
fd75c789 NM |
3072 | err: |
3073 | destroy_workqueue(ib_wq); | |
1da177e4 LT |
3074 | return ret; |
3075 | } | |
3076 | ||
3077 | static void __exit ib_core_cleanup(void) | |
3078 | { | |
7566752e | 3079 | unregister_netdevice_notifier(&nb_netdevice); |
5ef8c0c1 | 3080 | roce_gid_mgmt_cleanup(); |
c9901724 | 3081 | rdma_nl_unregister(RDMA_NL_LS); |
4508d32c | 3082 | nldev_exit(); |
4e0f7b90 | 3083 | unregister_pernet_device(&rdma_dev_net_ops); |
42df744c | 3084 | unregister_blocking_lsm_notifier(&ibdev_lsm_nb); |
c2e49c92 | 3085 | ib_sa_cleanup(); |
4c2cb422 | 3086 | ib_mad_cleanup(); |
e3f20f02 | 3087 | addr_cleanup(); |
c9901724 | 3088 | rdma_nl_exit(); |
55aeed06 | 3089 | class_unregister(&ib_class); |
f794809a | 3090 | destroy_workqueue(ib_comp_unbound_wq); |
14d3a3b2 | 3091 | destroy_workqueue(ib_comp_wq); |
f7c6a7b5 | 3092 | /* Make sure that any pending umem accounting work is done. */ |
f0626710 | 3093 | destroy_workqueue(ib_wq); |
ff815a89 | 3094 | destroy_workqueue(ib_unreg_wq); |
e59178d8 | 3095 | WARN_ON(!xa_empty(&clients)); |
0df91bb6 | 3096 | WARN_ON(!xa_empty(&devices)); |
1da177e4 LT |
3097 | } |
3098 | ||
e3bf14bd JG |
3099 | MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4); |
3100 | ||
62dfa795 PP |
3101 | /* ib core relies on the netdev stack registering the net_ns_type_operations
3102 | * ns kobject type before ib_core initialization runs.
3103 | */ | |
3104 | fs_initcall(ib_core_init); | |
1da177e4 | 3105 | module_exit(ib_core_cleanup); |