IB/core: Add rwsem to allow reading device list or client list
[linux-2.6-block.git] drivers/infiniband/core/device.c
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_client_data {
        struct list_head  list;
        struct ib_client *client;
        void             *data;
};

struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list.  device_mutex protects writer access by device and client
 * registration / de-registration.  lists_rwsem protects reader access to
 * these lists.  Iterators of these lists must hold lists_rwsem for read,
 * while updates to the lists must be done holding it for write.  One
 * special case: a caller that already holds device_mutex need not take
 * lists_rwsem for read, since device_mutex excludes all writers.
 */
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);

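/*
 * Usage sketch for the locking scheme above (illustrative only; the
 * function name dump_device_names is hypothetical).  A reader that
 * walks device_list without holding device_mutex must take lists_rwsem
 * for read:
 *
 *        static void dump_device_names(void)
 *        {
 *                struct ib_device *device;
 *
 *                down_read(&lists_rwsem);
 *                list_for_each_entry(device, &device_list, core_list)
 *                        pr_info("ib device: %s\n", device->name);
 *                up_read(&lists_rwsem);
 *        }
 */
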
static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
        static const struct {
                size_t offset;
                char  *name;
        } mandatory_table[] = {
                IB_MANDATORY_FUNC(query_device),
                IB_MANDATORY_FUNC(query_port),
                IB_MANDATORY_FUNC(query_pkey),
                IB_MANDATORY_FUNC(query_gid),
                IB_MANDATORY_FUNC(alloc_pd),
                IB_MANDATORY_FUNC(dealloc_pd),
                IB_MANDATORY_FUNC(create_ah),
                IB_MANDATORY_FUNC(destroy_ah),
                IB_MANDATORY_FUNC(create_qp),
                IB_MANDATORY_FUNC(modify_qp),
                IB_MANDATORY_FUNC(destroy_qp),
                IB_MANDATORY_FUNC(post_send),
                IB_MANDATORY_FUNC(post_recv),
                IB_MANDATORY_FUNC(create_cq),
                IB_MANDATORY_FUNC(destroy_cq),
                IB_MANDATORY_FUNC(poll_cq),
                IB_MANDATORY_FUNC(req_notify_cq),
                IB_MANDATORY_FUNC(get_dma_mr),
                IB_MANDATORY_FUNC(dereg_mr),
                IB_MANDATORY_FUNC(get_port_immutable)
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
                if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
                        printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
                               device->name, mandatory_table[i].name);
                        return -EINVAL;
                }
        }

        return 0;
}

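/*
 * Worked example of the offsetof-table check above (illustrative): if a
 * driver forgot to set ->poll_cq, the loop computes
 *
 *        *(void **) ((void *) device + offsetof(struct ib_device, poll_cq))
 *
 * which reads exactly device->poll_cq.  The pointer is found NULL, the
 * warning names "poll_cq" (taken from the stringized table entry), and
 * registration fails with -EINVAL.
 */
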
static struct ib_device *__ib_device_get_by_name(const char *name)
{
        struct ib_device *device;

        list_for_each_entry(device, &device_list, core_list)
                if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
                        return device;

        return NULL;
}

static int alloc_name(char *name)
{
        unsigned long *inuse;
        char buf[IB_DEVICE_NAME_MAX];
        struct ib_device *device;
        int i;

        inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
        if (!inuse)
                return -ENOMEM;

        list_for_each_entry(device, &device_list, core_list) {
                if (!sscanf(device->name, name, &i))
                        continue;
                if (i < 0 || i >= PAGE_SIZE * 8)
                        continue;
                snprintf(buf, sizeof buf, name, i);
                if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
                        set_bit(i, inuse);
        }

        i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
        free_page((unsigned long) inuse);
        snprintf(buf, sizeof buf, name, i);

        if (__ib_device_get_by_name(buf))
                return -ENFILE;

        strlcpy(name, buf, IB_DEVICE_NAME_MAX);
        return 0;
}

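/*
 * Name-template sketch (illustrative): a driver that registers with
 * device->name set to "mlx4_%d" relies on alloc_name() to pick the
 * lowest free index, yielding "mlx4_0", then "mlx4_1", and so on.  The
 * page-sized bitmap above marks indices already claimed by registered
 * devices whose names match the same format string.
 */
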
/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
        BUG_ON(size < sizeof (struct ib_device));

        return kzalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL(ib_alloc_device);

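/*
 * Allocation sketch (illustrative; struct my_hca and its members are
 * hypothetical): a driver places struct ib_device first in its private
 * structure and over-allocates, so one allocation serves both views and
 * the cast below stays valid.
 *
 *        struct my_hca {
 *                struct ib_device ibdev;        // must come first
 *                void __iomem    *regs;         // driver-private state
 *        };
 *
 *        struct my_hca *hca = (struct my_hca *)
 *                ib_alloc_device(sizeof(struct my_hca));
 */
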
/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
        if (device->reg_state == IB_DEV_UNINITIALIZED) {
                kfree(device);
                return;
        }

        BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);

        kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);

static int add_client_context(struct ib_device *device, struct ib_client *client)
{
        struct ib_client_data *context;
        unsigned long flags;

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context) {
                printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n",
                       device->name, client->name);
                return -ENOMEM;
        }

        context->client = client;
        context->data   = NULL;

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_add(&context->list, &device->client_data_list);
        spin_unlock_irqrestore(&device->client_data_lock, flags);

        return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
        return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
                       rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
        int ret = -ENOMEM;
        u8 start_port = rdma_start_port(device);
        u8 end_port = rdma_end_port(device);
        u8 port;

        /*
         * device->port_immutable is indexed directly by the port number to make
         * access to this data as efficient as possible.
         *
         * Therefore port_immutable is declared as a 1-based array with
         * potential empty slots at the beginning.
         */
        device->port_immutable = kzalloc(sizeof(*device->port_immutable)
                                         * (end_port + 1),
                                         GFP_KERNEL);
        if (!device->port_immutable)
                goto err;

        for (port = start_port; port <= end_port; ++port) {
                ret = device->get_port_immutable(device, port,
                                                 &device->port_immutable[port]);
                if (ret)
                        goto err;

                if (verify_immutable(device, port)) {
                        ret = -EINVAL;
                        goto err;
                }
        }

        ret = 0;
        goto out;
err:
        kfree(device->port_immutable);
out:
        return ret;
}

/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core.  All registered clients will receive a
 * callback for each device that is added.  @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
                       int (*port_callback)(struct ib_device *,
                                            u8, struct kobject *))
{
        int ret;

        mutex_lock(&device_mutex);

        if (strchr(device->name, '%')) {
                ret = alloc_name(device->name);
                if (ret)
                        goto out;
        }

        if (ib_device_check_mandatory(device)) {
                ret = -EINVAL;
                goto out;
        }

        INIT_LIST_HEAD(&device->event_handler_list);
        INIT_LIST_HEAD(&device->client_data_list);
        spin_lock_init(&device->event_handler_lock);
        spin_lock_init(&device->client_data_lock);

        ret = read_port_immutable(device);
        if (ret) {
                printk(KERN_WARNING "Couldn't create per port immutable data %s\n",
                       device->name);
                goto out;
        }

        ret = ib_device_register_sysfs(device, port_callback);
        if (ret) {
                printk(KERN_WARNING "Couldn't register device %s with driver model\n",
                       device->name);
                kfree(device->port_immutable);
                goto out;
        }

        device->reg_state = IB_DEV_REGISTERED;

        {
                struct ib_client *client;

                list_for_each_entry(client, &client_list, list)
                        if (client->add && !add_client_context(device, client))
                                client->add(device);
        }

        down_write(&lists_rwsem);
        list_add_tail(&device->core_list, &device_list);
        up_write(&lists_rwsem);
out:
        mutex_unlock(&device_mutex);
        return ret;
}
EXPORT_SYMBOL(ib_register_device);

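/*
 * Registration sketch (illustrative; my_hca and my_query_device are
 * hypothetical).  A name containing '%' such as "my_hca%d" asks the
 * core to pick an index via alloc_name():
 *
 *        strlcpy(hca->ibdev.name, "my_hca%d", IB_DEVICE_NAME_MAX);
 *        hca->ibdev.query_device = my_query_device;
 *        // ... set every other method listed in mandatory_table ...
 *        ret = ib_register_device(&hca->ibdev, NULL);
 */
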
/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
        struct ib_client *client;
        struct ib_client_data *context, *tmp;
        unsigned long flags;

        mutex_lock(&device_mutex);

        down_write(&lists_rwsem);
        list_del(&device->core_list);
        up_write(&lists_rwsem);

        list_for_each_entry_reverse(client, &client_list, list)
                if (client->remove)
                        client->remove(device);

        mutex_unlock(&device_mutex);

        ib_device_unregister_sysfs(device);

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                kfree(context);
        spin_unlock_irqrestore(&device->client_data_lock, flags);

        device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
        struct ib_device *device;

        mutex_lock(&device_mutex);

        list_for_each_entry(device, &device_list, core_list)
                if (client->add && !add_client_context(device, client))
                        client->add(device);

        down_write(&lists_rwsem);
        list_add_tail(&client->list, &client_list);
        up_write(&lists_rwsem);

        mutex_unlock(&device_mutex);

        return 0;
}
EXPORT_SYMBOL(ib_register_client);

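/*
 * Client sketch (illustrative; all my_* names are hypothetical).  An
 * upper-level module declares an ib_client and registers it once at
 * module init; its add() callback then runs for every device already
 * registered and for each device added later.
 *
 *        static void my_add_one(struct ib_device *device);
 *        static void my_remove_one(struct ib_device *device);
 *
 *        static struct ib_client my_client = {
 *                .name   = "my_client",
 *                .add    = my_add_one,
 *                .remove = my_remove_one
 *        };
 *
 *        ret = ib_register_client(&my_client);
 */
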
/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
        struct ib_client_data *context, *tmp;
        struct ib_device *device;
        unsigned long flags;

        mutex_lock(&device_mutex);

        down_write(&lists_rwsem);
        list_del(&client->list);
        up_write(&lists_rwsem);

        list_for_each_entry(device, &device_list, core_list) {
                if (client->remove)
                        client->remove(device);

                spin_lock_irqsave(&device->client_data_lock, flags);
                list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                        if (context->client == client) {
                                list_del(&context->list);
                                kfree(context);
                        }
                spin_unlock_irqrestore(&device->client_data_lock, flags);
        }

        mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);

/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
 * @client:Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
        struct ib_client_data *context;
        void *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry(context, &device->client_data_list, list)
                if (context->client == client) {
                        ret = context->data;
                        break;
                }
        spin_unlock_irqrestore(&device->client_data_lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_client_data);

/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
                        void *data)
{
        struct ib_client_data *context;
        unsigned long flags;

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry(context, &device->client_data_list, list)
                if (context->client == client) {
                        context->data = data;
                        goto out;
                }

        printk(KERN_WARNING "No client context found for %s/%s\n",
               device->name, client->name);

out:
        spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);

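/*
 * Per-device context sketch (illustrative, reusing the hypothetical
 * my_client above): the add() callback allocates per-device state and
 * stores it under the (device, client) pair; remove() frees it.
 *
 *        static void my_add_one(struct ib_device *device)
 *        {
 *                struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *                if (!ctx)
 *                        return;
 *                ib_set_client_data(device, &my_client, ctx);
 *        }
 *
 *        static void my_remove_one(struct ib_device *device)
 *        {
 *                kfree(ib_get_client_data(device, &my_client));
 *        }
 */
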
/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
int ib_register_event_handler(struct ib_event_handler *event_handler)
{
        unsigned long flags;

        spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
        list_add_tail(&event_handler->list,
                      &event_handler->device->event_handler_list);
        spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_register_event_handler);

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
int ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
        unsigned long flags;

        spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
        list_del(&event_handler->list);
        spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_unregister_event_handler);

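/*
 * Event handler sketch (illustrative; my_event_handler is hypothetical,
 * INIT_IB_EVENT_HANDLER is the initializer provided by
 * <rdma/ib_verbs.h>).  The callback may run in interrupt context, so it
 * must not sleep:
 *
 *        static void my_event_handler(struct ib_event_handler *handler,
 *                                     struct ib_event *event)
 *        {
 *                if (event->event == IB_EVENT_PORT_ERR)
 *                        pr_warn("port error on %s\n", event->device->name);
 *        }
 *
 *        struct ib_event_handler eh;
 *
 *        INIT_IB_EVENT_HANDLER(&eh, device, my_event_handler);
 *        ib_register_event_handler(&eh);
 */
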
/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
        unsigned long flags;
        struct ib_event_handler *handler;

        spin_lock_irqsave(&event->device->event_handler_lock, flags);

        list_for_each_entry(handler, &event->device->event_handler_list, list)
                handler->handler(handler, event);

        spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);

/**
 * ib_query_device - Query IB device attributes
 * @device:Device to query
 * @device_attr:Device attributes
 *
 * ib_query_device() returns the attributes of a device through the
 * @device_attr pointer.
 */
int ib_query_device(struct ib_device *device,
                    struct ib_device_attr *device_attr)
{
        struct ib_udata uhw = {.outlen = 0, .inlen = 0};

        memset(device_attr, 0, sizeof(*device_attr));

        return device->query_device(device, device_attr, &uhw);
}
EXPORT_SYMBOL(ib_query_device);

/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
                  u8 port_num,
                  struct ib_port_attr *port_attr)
{
        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        return device->query_port(device, port_num, port_attr);
}
EXPORT_SYMBOL(ib_query_port);

/**
 * ib_query_gid - Get GID table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:GID table index to query
 * @gid:Returned GID
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid)
{
        return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);

/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
                  u8 port_num, u16 index, u16 *pkey)
{
        return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
                     int device_modify_mask,
                     struct ib_device_modify *device_modify)
{
        if (!device->modify_device)
                return -ENOSYS;

        return device->modify_device(device, device_modify_mask,
                                     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
                   u8 port_num, int port_modify_mask,
                   struct ib_port_modify *port_modify)
{
        if (!device->modify_port)
                return -ENOSYS;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        return device->modify_port(device, port_num, port_modify_mask,
                                   port_modify);
}
EXPORT_SYMBOL(ib_modify_port);

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
                u8 *port_num, u16 *index)
{
        union ib_gid tmp_gid;
        int ret, port, i;

        for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
                for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
                        ret = ib_query_gid(device, port, i, &tmp_gid);
                        if (ret)
                                return ret;
                        if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
                                *port_num = port;
                                if (index)
                                        *index = i;
                                return 0;
                        }
                }
        }

        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);

/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
                 u8 port_num, u16 pkey, u16 *index)
{
        int ret, i;
        u16 tmp_pkey;
        int partial_ix = -1;

        for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
                ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
                if (ret)
                        return ret;
                if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
                        /* if there is a full-member pkey, take it */
                        if (tmp_pkey & 0x8000) {
                                *index = i;
                                return 0;
                        }
                        if (partial_ix < 0)
                                partial_ix = i;
                }
        }

        /* no full-member pkey; if a limited-member one exists, take it */
        if (partial_ix >= 0) {
                *index = partial_ix;
                return 0;
        }
        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);

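/*
 * Membership-bit worked example for the search above: bit 15 of a P_Key
 * marks full membership and the low 15 bits identify the partition, so
 * 0x8001 (full) and 0x0001 (limited) name the same partition.  Searching
 * for either value matches both table entries; a full-member entry is
 * returned immediately, and a limited-member entry is used only when no
 * full-member entry exists.
 */
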
static int __init ib_core_init(void)
{
        int ret;

        ib_wq = alloc_workqueue("infiniband", 0, 0);
        if (!ib_wq)
                return -ENOMEM;

        ret = ib_sysfs_setup();
        if (ret) {
                printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
                goto err;
        }

        ret = ibnl_init();
        if (ret) {
                printk(KERN_WARNING "Couldn't init IB netlink interface\n");
                goto err_sysfs;
        }

        ret = ib_cache_setup();
        if (ret) {
                printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
                goto err_nl;
        }

        return 0;

err_nl:
        ibnl_cleanup();

err_sysfs:
        ib_sysfs_cleanup();

err:
        destroy_workqueue(ib_wq);
        return ret;
}

static void __exit ib_core_cleanup(void)
{
        ib_cache_cleanup();
        ibnl_cleanup();
        ib_sysfs_cleanup();
        /* Make sure that any pending umem accounting work is done. */
        destroy_workqueue(ib_wq);
}

module_init(ib_core_init);
module_exit(ib_core_cleanup);