/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_client_data {
	struct list_head  list;
	struct ib_client *client;
	void             *data;
	/* The device or client is going down. Do not call client or device
	 * callbacks other than remove(). */
	bool		  going_down;
};

struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_comp_unbound_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list.  device_mutex protects writer access by device and client
 * registration / de-registration.  lists_rwsem protects reader access to
 * these lists.  Iterators of these lists must lock it for read, while updates
 * to the lists must be done with a write lock.  A special case is when the
 * device_mutex is locked.  In this case locking the lists for read access is
 * not necessary as the device_mutex implies it.
 *
 * lists_rwsem also protects access to the client data list.
 */
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);

static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data);
static void ib_policy_change_task(struct work_struct *work);
static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);

static struct notifier_block ibdev_lsm_nb = {
	.notifier_call = ib_security_change,
};

static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x }
	static const struct {
		size_t offset;
		char  *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) &device->ops +
				 mandatory_table[i].offset)) {
			dev_warn(&device->dev,
				 "Device is missing mandatory function %s\n",
				 mandatory_table[i].name);
			return -EINVAL;
		}
	}

	return 0;
}

static struct ib_device *__ib_device_get_by_index(u32 index)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (device->index == index)
			return device;

	return NULL;
}

/*
 * Caller must perform ib_device_put() to return the device reference count
 * when ib_device_get_by_index() returns valid device pointer.
 */
struct ib_device *ib_device_get_by_index(u32 index)
{
	struct ib_device *device;

	down_read(&lists_rwsem);
	device = __ib_device_get_by_index(index);
	if (device) {
		/* Do not return a device if unregistration has started. */
		if (!refcount_inc_not_zero(&device->refcount))
			device = NULL;
	}
	up_read(&lists_rwsem);
	return device;
}

void ib_device_put(struct ib_device *device)
{
	if (refcount_dec_and_test(&device->refcount))
		complete(&device->unreg_completion);
}

static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (!strcmp(name, dev_name(&device->dev)))
			return device;

	return NULL;
}

int ib_device_rename(struct ib_device *ibdev, const char *name)
{
	int ret = 0;

	if (!strcmp(name, dev_name(&ibdev->dev)))
		return ret;

	mutex_lock(&device_mutex);
	if (__ib_device_get_by_name(name)) {
		ret = -EEXIST;
		goto out;
	}

	ret = device_rename(&ibdev->dev, name);
	if (ret)
		goto out;
	strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
out:
	mutex_unlock(&device_mutex);
	return ret;
}

static int alloc_name(struct ib_device *ibdev, const char *name)
{
	unsigned long *inuse;
	struct ib_device *device;
	int i;

	inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	list_for_each_entry(device, &device_list, core_list) {
		char buf[IB_DEVICE_NAME_MAX];

		if (sscanf(dev_name(&device->dev), name, &i) != 1)
			continue;
		if (i < 0 || i >= PAGE_SIZE * 8)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (!strcmp(buf, dev_name(&device->dev)))
			set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
	free_page((unsigned long) inuse);

	return dev_set_name(&ibdev->dev, name, i);
}
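
/*
 * Illustrative note: alloc_name() is what turns a printf-style name
 * template into a unique device name.  With a hypothetical template such
 * as "hypo_%d", an existing "hypo_0" marks bit 0 in the bitmap above, so
 * the next registration gets "hypo_1".  The bitmap is a single page, so at
 * most PAGE_SIZE * 8 instances of one template can be tracked.
 */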

static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	WARN_ON(dev->reg_state == IB_DEV_REGISTERED);
	if (dev->reg_state == IB_DEV_UNREGISTERED) {
		/*
		 * In IB_DEV_UNINITIALIZED state, cache or port table
		 * is not even created. Free cache and port table only when
		 * device reaches UNREGISTERED state.
		 */
		ib_cache_release_one(dev);
		kfree(dev->port_immutable);
	}
	kfree(dev);
}

static int ib_device_uevent(struct device *device,
			    struct kobj_uevent_env *env)
{
	if (add_uevent_var(env, "NAME=%s", dev_name(device)))
		return -ENOMEM;

	/*
	 * It would be nice to pass the node GUID with the event...
	 */

	return 0;
}

static struct class ib_class = {
	.name        = "infiniband",
	.dev_release = ib_device_release,
	.dev_uevent  = ib_device_uevent,
};

/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device. @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
	struct ib_device *device;

	if (WARN_ON(size < sizeof(struct ib_device)))
		return NULL;

	device = kzalloc(size, GFP_KERNEL);
	if (!device)
		return NULL;

	rdma_restrack_init(&device->res);

	device->dev.class = &ib_class;
	device_initialize(&device->dev);

	INIT_LIST_HEAD(&device->event_handler_list);
	spin_lock_init(&device->event_handler_lock);
	rwlock_init(&device->client_data_lock);
	INIT_LIST_HEAD(&device->client_data_list);
	INIT_LIST_HEAD(&device->port_list);
	refcount_set(&device->refcount, 1);
	init_completion(&device->unreg_completion);

	return device;
}
EXPORT_SYMBOL(ib_alloc_device);
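
/*
 * Usage sketch for the allocator above: a low-level driver typically embeds
 * struct ib_device as the first member of its own device structure and
 * allocates both with a single call.  The structure and names below are
 * hypothetical, not taken from any in-tree driver:
 *
 *	struct hypo_dev {
 *		struct ib_device ibdev;
 *		void		*priv_regs;
 *	};
 *
 *	struct hypo_dev *hdev =
 *		(struct hypo_dev *)ib_alloc_device(sizeof(*hdev));
 *	if (!hdev)
 *		return -ENOMEM;
 *
 * The matching ib_dealloc_device(&hdev->ibdev) frees the whole allocation.
 */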

/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	WARN_ON(!list_empty(&device->client_data_list));
	WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
		device->reg_state != IB_DEV_UNINITIALIZED);
	rdma_restrack_clean(&device->res);
	put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);

static int add_client_context(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->client = client;
	context->data   = NULL;
	context->going_down = false;

	down_write(&lists_rwsem);
	write_lock_irq(&device->client_data_lock);
	list_add(&context->list, &device->client_data_list);
	write_unlock_irq(&device->client_data_lock);
	up_write(&lists_rwsem);

	return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
		       rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
	int ret;
	u8 start_port = rdma_start_port(device);
	u8 end_port = rdma_end_port(device);
	u8 port;

	/**
	 * device->port_immutable is indexed directly by the port number to make
	 * access to this data as efficient as possible.
	 *
	 * Therefore port_immutable is declared as a 1 based array with
	 * potential empty slots at the beginning.
	 */
	device->port_immutable = kcalloc(end_port + 1,
					 sizeof(*device->port_immutable),
					 GFP_KERNEL);
	if (!device->port_immutable)
		return -ENOMEM;

	for (port = start_port; port <= end_port; ++port) {
		ret = device->ops.get_port_immutable(
			device, port, &device->port_immutable[port]);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}
	return 0;
}

void ib_get_device_fw_str(struct ib_device *dev, char *str)
{
	if (dev->ops.get_dev_fw_str)
		dev->ops.get_dev_fw_str(dev, str);
	else
		str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);

static int setup_port_pkey_list(struct ib_device *device)
{
	int i;

	/**
	 * device->port_pkey_list is indexed directly by the port number,
	 * Therefore it is declared as a 1 based array with potential empty
	 * slots at the beginning.
	 */
	device->port_pkey_list = kcalloc(rdma_end_port(device) + 1,
					 sizeof(*device->port_pkey_list),
					 GFP_KERNEL);

	if (!device->port_pkey_list)
		return -ENOMEM;

	for (i = 0; i < (rdma_end_port(device) + 1); i++) {
		spin_lock_init(&device->port_pkey_list[i].list_lock);
		INIT_LIST_HEAD(&device->port_pkey_list[i].pkey_list);
	}

	return 0;
}

static void ib_policy_change_task(struct work_struct *work)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list) {
		int i;

		for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++) {
			u64 sp;
			int ret = ib_get_cached_subnet_prefix(dev,
							      i,
							      &sp);

			WARN_ONCE(ret,
				  "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
				  ret);
			if (!ret)
				ib_security_cache_change(dev, i, sp);
		}
	}
	up_read(&lists_rwsem);
}

static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data)
{
	if (event != LSM_POLICY_CHANGE)
		return NOTIFY_DONE;

	schedule_work(&ib_policy_change_work);

	return NOTIFY_OK;
}

/**
 * __dev_new_index - allocate a device index
 *
 * Returns a suitable unique value for a new device interface
 * number.  It assumes that fewer than 2^32-1 IB devices will be
 * present in the system.
 */
static u32 __dev_new_index(void)
{
	/*
	 * The device index to allow stable naming.
	 * Similar to struct net -> ifindex.
	 */
	static u32 index;

	for (;;) {
		if (!(++index))
			index = 1;

		if (!__ib_device_get_by_index(index))
			return index;
	}
}

static void setup_dma_device(struct ib_device *device)
{
	struct device *parent = device->dev.parent;

	WARN_ON_ONCE(device->dma_device);
	if (device->dev.dma_ops) {
		/*
		 * The caller provided custom DMA operations. Copy the
		 * DMA-related fields that are used by e.g. dma_alloc_coherent()
		 * into device->dev.
		 */
		device->dma_device = &device->dev;
		if (!device->dev.dma_mask) {
			if (parent)
				device->dev.dma_mask = parent->dma_mask;
			else
				WARN_ON_ONCE(true);
		}
		if (!device->dev.coherent_dma_mask) {
			if (parent)
				device->dev.coherent_dma_mask =
					parent->coherent_dma_mask;
			else
				WARN_ON_ONCE(true);
		}
	} else {
		/*
		 * The caller did not provide custom DMA operations. Use the
		 * DMA mapping operations of the parent device.
		 */
		WARN_ON_ONCE(!parent);
		device->dma_device = parent;
	}
}

static void cleanup_device(struct ib_device *device)
{
	ib_cache_cleanup_one(device);
	ib_cache_release_one(device);
	kfree(device->port_pkey_list);
	kfree(device->port_immutable);
}

static int setup_device(struct ib_device *device)
{
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
	int ret;

	ret = ib_device_check_mandatory(device);
	if (ret)
		return ret;

	ret = read_port_immutable(device);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't create per port immutable data\n");
		return ret;
	}

	memset(&device->attrs, 0, sizeof(device->attrs));
	ret = device->ops.query_device(device, &device->attrs, &uhw);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't query the device attributes\n");
		goto port_cleanup;
	}

	ret = setup_port_pkey_list(device);
	if (ret) {
		dev_warn(&device->dev, "Couldn't create per port_pkey_list\n");
		goto port_cleanup;
	}

	ret = ib_cache_setup_one(device);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't set up InfiniBand P_Key/GID cache\n");
		goto pkey_cleanup;
	}
	return 0;

pkey_cleanup:
	kfree(device->port_pkey_list);
port_cleanup:
	kfree(device->port_immutable);
	return ret;
}

/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core. All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device, const char *name)
{
	int ret;
	struct ib_client *client;

	setup_dma_device(device);

	mutex_lock(&device_mutex);

	if (strchr(name, '%')) {
		ret = alloc_name(device, name);
		if (ret)
			goto out;
	} else {
		ret = dev_set_name(&device->dev, name);
		if (ret)
			goto out;
	}
	if (__ib_device_get_by_name(dev_name(&device->dev))) {
		ret = -ENFILE;
		goto out;
	}
	strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);

	ret = setup_device(device);
	if (ret)
		goto out;

	device->index = __dev_new_index();

	ib_device_register_rdmacg(device);

	ret = ib_device_register_sysfs(device);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't register device with driver model\n");
		goto cg_cleanup;
	}

	device->reg_state = IB_DEV_REGISTERED;

	list_for_each_entry(client, &client_list, list)
		if (!add_client_context(device, client) && client->add)
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&device->core_list, &device_list);
	up_write(&lists_rwsem);
	mutex_unlock(&device_mutex);
	return 0;

cg_cleanup:
	ib_device_unregister_rdmacg(device);
	cleanup_device(device);
out:
	mutex_unlock(&device_mutex);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);
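
/*
 * Registration sketch (hypothetical driver names): after allocating the
 * device and filling in its ops, a driver would typically do
 *
 *	ib_set_device_ops(&hdev->ibdev, &hypo_dev_ops);
 *	ret = ib_register_device(&hdev->ibdev, "hypo_%d");
 *
 * A name containing '%' is treated as a printf-style template and a free
 * index is picked by alloc_name(); a literal name such as "hypo0" is used
 * as-is, and registration fails with -ENFILE if it is already taken.
 */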

/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
	struct ib_client_data *context, *tmp;
	unsigned long flags;

	/*
	 * Wait for all netlink command callers to finish working on the
	 * device.
	 */
	ib_device_put(device);
	wait_for_completion(&device->unreg_completion);

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&device->core_list);
	write_lock_irq(&device->client_data_lock);
	list_for_each_entry(context, &device->client_data_list, list)
		context->going_down = true;
	write_unlock_irq(&device->client_data_lock);
	downgrade_write(&lists_rwsem);

	list_for_each_entry(context, &device->client_data_list, list) {
		if (context->client->remove)
			context->client->remove(device, context->data);
	}
	up_read(&lists_rwsem);

	ib_device_unregister_sysfs(device);
	ib_device_unregister_rdmacg(device);

	mutex_unlock(&device_mutex);

	ib_cache_cleanup_one(device);

	ib_security_destroy_port_pkey_list(device);
	kfree(device->port_pkey_list);

	down_write(&lists_rwsem);
	write_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list,
				 list) {
		list_del(&context->list);
		kfree(context);
	}
	write_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;

	mutex_lock(&device_mutex);

	list_for_each_entry(device, &device_list, core_list)
		if (!add_client_context(device, client) && client->add)
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&client->list, &client_list);
	up_write(&lists_rwsem);

	mutex_unlock(&device_mutex);

	return 0;
}
EXPORT_SYMBOL(ib_register_client);
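
/*
 * Client sketch (hypothetical names): a ULP that wants a callback per IB
 * device defines an ib_client and registers it once at module init:
 *
 *	static void hypo_add_one(struct ib_device *device);
 *	static void hypo_remove_one(struct ib_device *device, void *client_data);
 *
 *	static struct ib_client hypo_client = {
 *		.name   = "hypo",
 *		.add    = hypo_add_one,
 *		.remove = hypo_remove_one
 *	};
 *
 *	ib_register_client(&hypo_client);
 *
 * hypo_add_one() is called immediately for every device that is already
 * registered, and again for each device registered later.
 */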

/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_client_data *context;
	struct ib_device *device;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&client->list);
	up_write(&lists_rwsem);

	list_for_each_entry(device, &device_list, core_list) {
		struct ib_client_data *found_context = NULL;

		down_write(&lists_rwsem);
		write_lock_irq(&device->client_data_lock);
		list_for_each_entry(context, &device->client_data_list, list)
			if (context->client == client) {
				context->going_down = true;
				found_context = context;
				break;
			}
		write_unlock_irq(&device->client_data_lock);
		up_write(&lists_rwsem);

		if (client->remove)
			client->remove(device, found_context ?
					       found_context->data : NULL);

		if (!found_context) {
			dev_warn(&device->dev,
				 "No client context found for %s\n",
				 client->name);
			continue;
		}

		down_write(&lists_rwsem);
		write_lock_irq(&device->client_data_lock);
		list_del(&found_context->list);
		write_unlock_irq(&device->client_data_lock);
		up_write(&lists_rwsem);
		kfree(found_context);
	}

	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);

/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
 * @client:Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	void *ret = NULL;
	unsigned long flags;

	read_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			ret = context->data;
			break;
		}
	read_unlock_irqrestore(&device->client_data_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_client_data);

/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	struct ib_client_data *context;
	unsigned long flags;

	write_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			context->data = data;
			goto out;
		}

	dev_warn(&device->dev, "No client context found for %s\n",
		 client->name);

out:
	write_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
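
/*
 * Per-device client state sketch (hypothetical names): a client usually
 * allocates its per-device context in its add callback and stashes it with
 * ib_set_client_data(), then looks it up wherever only the device is known:
 *
 *	static void hypo_add_one(struct ib_device *device)
 *	{
 *		struct hypo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *		if (!ctx)
 *			return;
 *		ib_set_client_data(device, &hypo_client, ctx);
 *	}
 *
 *	...and later...
 *	struct hypo_ctx *ctx = ib_get_client_data(device, &hypo_client);
 */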

/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
void ib_register_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_register_event_handler);
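
/*
 * Handler sketch (hypothetical callback name): consumers normally prepare
 * the handler with the INIT_IB_EVENT_HANDLER() macro from <rdma/ib_verbs.h>
 * before registering it, roughly:
 *
 *	static void hypo_event_cb(struct ib_event_handler *handler,
 *				  struct ib_event *event)
 *	{
 *		...runs from the dispatcher, possibly in interrupt context...
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&priv->event_handler, device, hypo_event_cb);
 *	ib_register_event_handler(&priv->event_handler);
 */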

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
void ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
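
/*
 * Dispatch sketch: a low-level driver reporting, say, a port coming up
 * would fill in an ib_event on the stack and dispatch it; the event layout
 * is from <rdma/ib_verbs.h>, the surrounding driver names are hypothetical:
 *
 *	struct ib_event event;
 *
 *	event.device           = &hdev->ibdev;
 *	event.element.port_num = port;
 *	event.event            = IB_EVENT_PORT_ACTIVE;
 *	ib_dispatch_event(&event);
 *
 * Because the handler list is walked under a spinlock with IRQs disabled,
 * handlers must not sleep.
 */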

/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	union ib_gid gid;
	int err;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	memset(port_attr, 0, sizeof(*port_attr));
	err = device->ops.query_port(device, port_num, port_attr);
	if (err || port_attr->subnet_prefix)
		return err;

	if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
		return 0;

	err = device->ops.query_gid(device, port_num, 0, &gid);
	if (err)
		return err;

	port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
	return 0;
}
EXPORT_SYMBOL(ib_query_port);
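
/*
 * Caller sketch: checking the link state before using a port; port 1 here
 * is only an illustrative choice:
 *
 *	struct ib_port_attr attr;
 *	int err = ib_query_port(device, 1, &attr);
 *
 *	if (!err && attr.state == IB_PORT_ACTIVE)
 *		...the port is usable...
 */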

/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev: IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each RoCE port found
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev that are
 * associated with a netdevice and calls cb() on each port for which
 * filter() returns non-zero.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie)
{
	u8 port;

	for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
	     port++)
		if (rdma_protocol_roce(ib_dev, port)) {
			struct net_device *idev = NULL;

			if (ib_dev->ops.get_netdev)
				idev = ib_dev->ops.get_netdev(ib_dev, port);

			if (idev &&
			    idev->reg_state >= NETREG_UNREGISTERED) {
				dev_put(idev);
				idev = NULL;
			}

			if (filter(ib_dev, port, idev, filter_cookie))
				cb(ib_dev, port, idev, cookie);

			if (idev)
				dev_put(idev);
		}
}

/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each RoCE port found
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates the physical ports of all RoCE devices that are associated
 * with netdevices and calls cb() on each port for which filter()
 * returns non-zero.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list)
		ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
	up_read(&lists_rwsem);
}

/**
 * ib_enum_all_devs - enumerate all ib_devices
 * @cb: Callback to call for each found ib_device
 *
 * Enumerates all ib_devices and calls callback() on each device.
 */
int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
		     struct netlink_callback *cb)
{
	struct ib_device *dev;
	unsigned int idx = 0;
	int ret = 0;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list) {
		ret = nldev_cb(dev, skb, cb, idx);
		if (ret)
			break;
		idx++;
	}

	up_read(&lists_rwsem);
	return ret;
}

/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	return device->ops.query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	if (!device->ops.modify_device)
		return -ENOSYS;

	return device->ops.modify_device(device, device_modify_mask,
					 device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	int rc;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	if (device->ops.modify_port)
		rc = device->ops.modify_port(device, port_num,
					     port_modify_mask,
					     port_modify);
	else
		rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
	return rc;
}
EXPORT_SYMBOL(ib_modify_port);
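
/*
 * Typical caller sketch: advertising a port capability, in the style of a
 * consumer setting IB_PORT_CM_SUP; the error handling here is illustrative
 * only:
 *
 *	struct ib_port_modify port_modify = {
 *		.set_port_cap_mask = IB_PORT_CM_SUP
 *	};
 *
 *	if (ib_modify_port(device, port_num, 0, &port_modify))
 *		...the port capability could not be set...
 */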

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs. It searches only the IB link layer.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	int ret, port, i;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
		if (!rdma_protocol_ib(device, port))
			continue;

		for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
			ret = rdma_query_gid(device, port, i, &tmp_gid);
			if (ret)
				return ret;
			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);

/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;
	int partial_ix = -1;

	for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* if there is a full-member pkey, take it */
			if (tmp_pkey & 0x8000) {
				*index = i;
				return 0;
			}
			if (partial_ix < 0)
				partial_ix = i;
		}
	}

	/* no full-member pkey found; if a limited-member one exists, take it */
	if (partial_ix >= 0) {
		*index = partial_ix;
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
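
/*
 * Lookup sketch: the comparison above ignores the top (membership) bit, so
 * searching for the default P_Key works with either the full- or
 * limited-member encoding; the full-member entry wins if both exist:
 *
 *	u16 pkey_index;
 *
 *	if (!ib_find_pkey(device, port_num, 0xffff, &pkey_index))
 *		...pkey_index now refers to the default P_Key...
 */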

/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 * for a received CM request
 * @dev: An RDMA device on which the request has been received.
 * @port: Port number on the RDMA device.
 * @pkey: The Pkey the request came on.
 * @gid: A GID that the net_dev uses to communicate.
 * @addr: Contains the IP address that the request specified as its
 *   destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
					    u8 port,
					    u16 pkey,
					    const union ib_gid *gid,
					    const struct sockaddr *addr)
{
	struct net_device *net_dev = NULL;
	struct ib_client_data *context;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	down_read(&lists_rwsem);

	list_for_each_entry(context, &dev->client_data_list, list) {
		struct ib_client *client = context->client;

		if (context->going_down)
			continue;

		if (client->get_net_dev_by_params) {
			net_dev = client->get_net_dev_by_params(dev, port, pkey,
								gid, addr,
								context->data);
			if (net_dev)
				break;
		}
	}

	up_read(&lists_rwsem);

	return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);

void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
{
	struct ib_device_ops *dev_ops = &dev->ops;
#define SET_DEVICE_OP(ptr, name)                         \
	do {                                             \
		if (ops->name)                           \
			if (!((ptr)->name))              \
				(ptr)->name = ops->name; \
	} while (0)

	SET_DEVICE_OP(dev_ops, add_gid);
	SET_DEVICE_OP(dev_ops, advise_mr);
	SET_DEVICE_OP(dev_ops, alloc_dm);
	SET_DEVICE_OP(dev_ops, alloc_fmr);
	SET_DEVICE_OP(dev_ops, alloc_hw_stats);
	SET_DEVICE_OP(dev_ops, alloc_mr);
	SET_DEVICE_OP(dev_ops, alloc_mw);
	SET_DEVICE_OP(dev_ops, alloc_pd);
	SET_DEVICE_OP(dev_ops, alloc_rdma_netdev);
	SET_DEVICE_OP(dev_ops, alloc_ucontext);
	SET_DEVICE_OP(dev_ops, alloc_xrcd);
	SET_DEVICE_OP(dev_ops, attach_mcast);
	SET_DEVICE_OP(dev_ops, check_mr_status);
	SET_DEVICE_OP(dev_ops, create_ah);
	SET_DEVICE_OP(dev_ops, create_counters);
	SET_DEVICE_OP(dev_ops, create_cq);
	SET_DEVICE_OP(dev_ops, create_flow);
	SET_DEVICE_OP(dev_ops, create_flow_action_esp);
	SET_DEVICE_OP(dev_ops, create_qp);
	SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
	SET_DEVICE_OP(dev_ops, create_srq);
	SET_DEVICE_OP(dev_ops, create_wq);
	SET_DEVICE_OP(dev_ops, dealloc_dm);
	SET_DEVICE_OP(dev_ops, dealloc_fmr);
	SET_DEVICE_OP(dev_ops, dealloc_mw);
	SET_DEVICE_OP(dev_ops, dealloc_pd);
	SET_DEVICE_OP(dev_ops, dealloc_ucontext);
	SET_DEVICE_OP(dev_ops, dealloc_xrcd);
	SET_DEVICE_OP(dev_ops, del_gid);
	SET_DEVICE_OP(dev_ops, dereg_mr);
	SET_DEVICE_OP(dev_ops, destroy_ah);
	SET_DEVICE_OP(dev_ops, destroy_counters);
	SET_DEVICE_OP(dev_ops, destroy_cq);
	SET_DEVICE_OP(dev_ops, destroy_flow);
	SET_DEVICE_OP(dev_ops, destroy_flow_action);
	SET_DEVICE_OP(dev_ops, destroy_qp);
	SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table);
	SET_DEVICE_OP(dev_ops, destroy_srq);
	SET_DEVICE_OP(dev_ops, destroy_wq);
	SET_DEVICE_OP(dev_ops, detach_mcast);
	SET_DEVICE_OP(dev_ops, disassociate_ucontext);
	SET_DEVICE_OP(dev_ops, drain_rq);
	SET_DEVICE_OP(dev_ops, drain_sq);
	SET_DEVICE_OP(dev_ops, get_dev_fw_str);
	SET_DEVICE_OP(dev_ops, get_dma_mr);
	SET_DEVICE_OP(dev_ops, get_hw_stats);
	SET_DEVICE_OP(dev_ops, get_link_layer);
	SET_DEVICE_OP(dev_ops, get_netdev);
	SET_DEVICE_OP(dev_ops, get_port_immutable);
	SET_DEVICE_OP(dev_ops, get_vector_affinity);
	SET_DEVICE_OP(dev_ops, get_vf_config);
	SET_DEVICE_OP(dev_ops, get_vf_stats);
	SET_DEVICE_OP(dev_ops, init_port);
	SET_DEVICE_OP(dev_ops, map_mr_sg);
	SET_DEVICE_OP(dev_ops, map_phys_fmr);
	SET_DEVICE_OP(dev_ops, mmap);
	SET_DEVICE_OP(dev_ops, modify_ah);
	SET_DEVICE_OP(dev_ops, modify_cq);
	SET_DEVICE_OP(dev_ops, modify_device);
	SET_DEVICE_OP(dev_ops, modify_flow_action_esp);
	SET_DEVICE_OP(dev_ops, modify_port);
	SET_DEVICE_OP(dev_ops, modify_qp);
	SET_DEVICE_OP(dev_ops, modify_srq);
	SET_DEVICE_OP(dev_ops, modify_wq);
	SET_DEVICE_OP(dev_ops, peek_cq);
	SET_DEVICE_OP(dev_ops, poll_cq);
	SET_DEVICE_OP(dev_ops, post_recv);
	SET_DEVICE_OP(dev_ops, post_send);
	SET_DEVICE_OP(dev_ops, post_srq_recv);
	SET_DEVICE_OP(dev_ops, process_mad);
	SET_DEVICE_OP(dev_ops, query_ah);
	SET_DEVICE_OP(dev_ops, query_device);
	SET_DEVICE_OP(dev_ops, query_gid);
	SET_DEVICE_OP(dev_ops, query_pkey);
	SET_DEVICE_OP(dev_ops, query_port);
	SET_DEVICE_OP(dev_ops, query_qp);
	SET_DEVICE_OP(dev_ops, query_srq);
	SET_DEVICE_OP(dev_ops, rdma_netdev_get_params);
	SET_DEVICE_OP(dev_ops, read_counters);
	SET_DEVICE_OP(dev_ops, reg_dm_mr);
	SET_DEVICE_OP(dev_ops, reg_user_mr);
	SET_DEVICE_OP(dev_ops, req_ncomp_notif);
	SET_DEVICE_OP(dev_ops, req_notify_cq);
	SET_DEVICE_OP(dev_ops, rereg_user_mr);
	SET_DEVICE_OP(dev_ops, resize_cq);
	SET_DEVICE_OP(dev_ops, set_vf_guid);
	SET_DEVICE_OP(dev_ops, set_vf_link_state);
	SET_DEVICE_OP(dev_ops, unmap_fmr);
}
EXPORT_SYMBOL(ib_set_device_ops);
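
/*
 * Ops wiring sketch (hypothetical driver names): providers fill in a static
 * const ops table and hand it to ib_set_device_ops() before registering the
 * device; only callbacks that are still unset on the device are copied, so
 * a later call cannot silently override an earlier one:
 *
 *	static const struct ib_device_ops hypo_dev_ops = {
 *		.query_device       = hypo_query_device,
 *		.query_port         = hypo_query_port,
 *		.get_port_immutable = hypo_port_immutable,
 *	};
 *
 *	ib_set_device_ops(&hdev->ibdev, &hypo_dev_ops);
 */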

static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.doit = ib_nl_handle_resolve_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.doit = ib_nl_handle_set_timeout,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_IP_RESOLVE] = {
		.doit = ib_nl_handle_ip_res_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
};

static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ib_comp_wq = alloc_workqueue("ib-comp-wq",
			WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!ib_comp_wq) {
		ret = -ENOMEM;
		goto err;
	}

	ib_comp_unbound_wq =
		alloc_workqueue("ib-comp-unb-wq",
				WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
				WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
	if (!ib_comp_unbound_wq) {
		ret = -ENOMEM;
		goto err_comp;
	}

	ret = class_register(&ib_class);
	if (ret) {
		pr_warn("Couldn't create InfiniBand device class\n");
		goto err_comp_unbound;
	}

	ret = rdma_nl_init();
	if (ret) {
		pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
		goto err_sysfs;
	}

	ret = addr_init();
	if (ret) {
		pr_warn("Couldn't init IB address resolution\n");
		goto err_ibnl;
	}

	ret = ib_mad_init();
	if (ret) {
		pr_warn("Couldn't init IB MAD\n");
		goto err_addr;
	}

	ret = ib_sa_init();
	if (ret) {
		pr_warn("Couldn't init SA\n");
		goto err_mad;
	}

	ret = register_lsm_notifier(&ibdev_lsm_nb);
	if (ret) {
		pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
		goto err_sa;
	}

	nldev_init();
	rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
	roce_gid_mgmt_init();

	return 0;

err_sa:
	ib_sa_cleanup();
err_mad:
	ib_mad_cleanup();
err_addr:
	addr_cleanup();
err_ibnl:
	rdma_nl_exit();
err_sysfs:
	class_unregister(&ib_class);
err_comp_unbound:
	destroy_workqueue(ib_comp_unbound_wq);
err_comp:
	destroy_workqueue(ib_comp_wq);
err:
	destroy_workqueue(ib_wq);
	return ret;
}

static void __exit ib_core_cleanup(void)
{
	roce_gid_mgmt_cleanup();
	nldev_exit();
	rdma_nl_unregister(RDMA_NL_LS);
	unregister_lsm_notifier(&ibdev_lsm_nb);
	ib_sa_cleanup();
	ib_mad_cleanup();
	addr_cleanup();
	rdma_nl_exit();
	class_unregister(&ib_class);
	destroy_workqueue(ib_comp_unbound_wq);
	destroy_workqueue(ib_comp_wq);
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);

subsys_initcall(ib_core_init);
module_exit(ib_core_cleanup);