/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_comp_unbound_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);
/*
 * Each of the three rwsem locks (devices, clients, client_data) protects the
 * xarray of the same name. Specifically it allows the caller to assert that
 * the MARK will/will not be changing under the lock, and for devices and
 * clients, that the value in the xarray is still a valid pointer. Change of
 * the MARK is linked to the object state, so holding the lock and testing the
 * MARK also asserts that the contained object is in a certain state.
 *
 * This is used to build a two stage register/unregister flow where objects
 * can continue to be in the xarray even though they are still in progress to
 * register/unregister.
 *
 * The xarray itself provides additional locking, and restartable iteration,
 * which is also relied on.
 *
 * Locks should not be nested, with the exception of client_data, which is
 * allowed to nest under the read side of the other two locks.
 *
 * The devices_rwsem also protects the device name list, any change or
 * assignment of device name must also hold the write side to guarantee unique
 * names.
 */

/*
 * devices contains devices that have had their names assigned. The
 * devices may not be registered. Users that care about the registration
 * status need to call ib_device_try_get() on the device to ensure it is
 * registered, and keep it registered, for the required duration.
 */
static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(devices_rwsem);
#define DEVICE_REGISTERED XA_MARK_1

static LIST_HEAD(client_list);
#define CLIENT_REGISTERED XA_MARK_1
static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(clients_rwsem);

/*
 * If client_data is registered then the corresponding client must also still
 * be registered.
 */
#define CLIENT_DATA_REGISTERED XA_MARK_1
/*
 * xarray has this behavior where it won't iterate over NULL values stored in
 * allocated arrays. So we need our own iterator to see all values stored in
 * the array. This does the same thing as xa_for_each except that it also
 * returns NULL valued entries if the array is allocating. Simplified to only
 * work on simple xarrays.
 */
static void *xan_find_marked(struct xarray *xa, unsigned long *indexp,
			     xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_find_marked(&xas, ULONG_MAX, filter);
		if (xa_is_zero(entry))
			break;
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	if (entry) {
		*indexp = xas.xa_index;
		if (xa_is_zero(entry))
			return NULL;
		return entry;
	}
	return XA_ERROR(-ENOENT);
}

#define xan_for_each_marked(xa, index, entry, filter)                      \
	for (index = 0, entry = xan_find_marked(xa, &(index), filter);     \
	     !xa_is_err(entry);                                            \
	     (index)++, entry = xan_find_marked(xa, &(index), filter))
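/*
 * A usage sketch (illustrative only; do_something() is a hypothetical
 * helper): walk every slot a registered client may own on a device,
 * including slots holding NULL client data:
 *
 *	unsigned long index;
 *	void *entry;
 *
 *	xan_for_each_marked (&device->client_data, index, entry,
 *			     CLIENT_DATA_REGISTERED)
 *		do_something(index, entry);
 *
 * Unlike xa_for_each(), NULL values stored in allocated slots are visited
 * rather than skipped; iteration ends when xan_find_marked() returns an
 * xa_err() entry.
 */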
static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data);
static void ib_policy_change_task(struct work_struct *work);
static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);

static struct notifier_block ibdev_lsm_nb = {
	.notifier_call = ib_security_change,
};
static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x }
	static const struct {
		size_t offset;
		char  *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	unsigned int i;

	device->kverbs_provider = true;
	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) &device->ops +
				 mandatory_table[i].offset)) {
			device->kverbs_provider = false;
			break;
		}
	}

	return 0;
}
/*
 * Caller must perform ib_device_put() to return the device reference count
 * when ib_device_get_by_index() returns valid device pointer.
 */
struct ib_device *ib_device_get_by_index(u32 index)
{
	struct ib_device *device;

	down_read(&devices_rwsem);
	device = xa_load(&devices, index);
	if (device) {
		if (!ib_device_try_get(device))
			device = NULL;
	}
	up_read(&devices_rwsem);
	return device;
}
/**
 * ib_device_put - Release IB device reference
 * @device: device whose reference to be released
 *
 * ib_device_put() releases reference to the IB device to allow it to be
 * unregistered and eventually freed.
 */
void ib_device_put(struct ib_device *device)
{
	if (refcount_dec_and_test(&device->refcount))
		complete(&device->unreg_completion);
}
EXPORT_SYMBOL(ib_device_put);
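/*
 * The get/put pair is typically used like this (a hedged sketch, with error
 * handling elided; use_device() is a hypothetical helper):
 *
 *	struct ib_device *dev = ib_device_get_by_index(index);
 *
 *	if (dev) {
 *		use_device(dev);
 *		ib_device_put(dev);	// allow unregistration to proceed
 *	}
 */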
static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;
	unsigned long index;

	xa_for_each (&devices, index, device)
		if (!strcmp(name, dev_name(&device->dev)))
			return device;

	return NULL;
}

int ib_device_rename(struct ib_device *ibdev, const char *name)
{
	int ret;

	down_write(&devices_rwsem);
	if (!strcmp(name, dev_name(&ibdev->dev))) {
		ret = 0;
		goto out;
	}

	if (__ib_device_get_by_name(name)) {
		ret = -EEXIST;
		goto out;
	}

	ret = device_rename(&ibdev->dev, name);
	if (ret)
		goto out;
	strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
out:
	up_write(&devices_rwsem);
	return ret;
}
static int alloc_name(struct ib_device *ibdev, const char *name)
{
	struct ib_device *device;
	unsigned long index;
	struct ida inuse;
	int rc;
	int i;

	lockdep_assert_held_exclusive(&devices_rwsem);
	ida_init(&inuse);
	xa_for_each (&devices, index, device) {
		char buf[IB_DEVICE_NAME_MAX];

		if (sscanf(dev_name(&device->dev), name, &i) != 1)
			continue;
		if (i < 0 || i >= INT_MAX)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (strcmp(buf, dev_name(&device->dev)) != 0)
			continue;

		rc = ida_alloc_range(&inuse, i, i, GFP_KERNEL);
		if (rc < 0)
			goto out;
	}

	rc = ida_alloc(&inuse, GFP_KERNEL);
	if (rc < 0)
		goto out;

	rc = dev_set_name(&ibdev->dev, name, rc);
out:
	ida_destroy(&inuse);
	return rc;
}
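/*
 * For example (illustrative): registering with the pattern "mydrv%d" scans
 * devices already named mydrv0, mydrv1, ... into the ida and then picks the
 * lowest unused suffix, so with those two present the name "mydrv2" would be
 * assigned.
 */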
static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	WARN_ON(refcount_read(&dev->refcount));
	ib_cache_release_one(dev);
	ib_security_release_port_pkey_list(dev);
	kfree(dev->port_pkey_list);
	kfree(dev->port_immutable);
	xa_destroy(&dev->client_data);
	kfree(dev);
}

static int ib_device_uevent(struct device *device,
			    struct kobj_uevent_env *env)
{
	if (add_uevent_var(env, "NAME=%s", dev_name(device)))
		return -ENOMEM;

	/*
	 * It would be nice to pass the node GUID with the event...
	 */

	return 0;
}

static struct class ib_class = {
	.name    = "infiniband",
	.dev_release = ib_device_release,
	.dev_uevent = ib_device_uevent,
};
/**
 * _ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device. @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *_ib_alloc_device(size_t size)
{
	struct ib_device *device;

	if (WARN_ON(size < sizeof(struct ib_device)))
		return NULL;

	device = kzalloc(size, GFP_KERNEL);
	if (!device)
		return NULL;

	if (rdma_restrack_init(device)) {
		kfree(device);
		return NULL;
	}

	device->dev.class = &ib_class;
	device->groups[0] = &ib_dev_attr_group;
	device->dev.groups = device->groups;
	device_initialize(&device->dev);

	INIT_LIST_HEAD(&device->event_handler_list);
	spin_lock_init(&device->event_handler_lock);
	/*
	 * client_data needs to be alloc because we don't want our mark to be
	 * destroyed if the user stores NULL in the client data.
	 */
	xa_init_flags(&device->client_data, XA_FLAGS_ALLOC);
	init_rwsem(&device->client_data_rwsem);
	INIT_LIST_HEAD(&device->port_list);
	init_completion(&device->unreg_completion);

	return device;
}
EXPORT_SYMBOL(_ib_alloc_device);
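/*
 * Driver-side sketch (hedged; "mydrv_dev" and its "ibdev" member are
 * hypothetical). Drivers embed struct ib_device in their private structure
 * and allocate both through the type-safe ib_alloc_device() wrapper macro,
 * which calls _ib_alloc_device() with the full structure size:
 *
 *	struct mydrv_dev {
 *		struct ib_device ibdev;	// must be the first member
 *		...
 *	};
 *
 *	struct mydrv_dev *dev = ib_alloc_device(mydrv_dev, ibdev);
 */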
/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	WARN_ON(!xa_empty(&device->client_data));
	WARN_ON(refcount_read(&device->refcount));
	rdma_restrack_clean(device);
	/* Balances with device_initialize */
	put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);
/*
 * add_client_context() and remove_client_context() must be safe against
 * parallel calls on the same device - registration/unregistration of both the
 * device and client can be occurring in parallel.
 *
 * The routines need to be a fence, any caller must not return until the add
 * or remove is fully completed.
 */
static int add_client_context(struct ib_device *device,
			      struct ib_client *client)
{
	int ret = 0;

	if (!device->kverbs_provider && !client->no_kverbs_req)
		return 0;

	down_write(&device->client_data_rwsem);
	/*
	 * Another caller to add_client_context got here first and has already
	 * completely initialized context.
	 */
	if (xa_get_mark(&device->client_data, client->client_id,
			CLIENT_DATA_REGISTERED))
		goto out;

	ret = xa_err(xa_store(&device->client_data, client->client_id, NULL,
			      GFP_KERNEL));
	if (ret)
		goto out;
	downgrade_write(&device->client_data_rwsem);
	if (client->add)
		client->add(device);

	/* Readers shall not see a client until add has been completed */
	xa_set_mark(&device->client_data, client->client_id,
		    CLIENT_DATA_REGISTERED);
	up_read(&device->client_data_rwsem);
	return 0;

out:
	up_write(&device->client_data_rwsem);
	return ret;
}
static void remove_client_context(struct ib_device *device,
				  unsigned int client_id)
{
	struct ib_client *client;
	void *client_data;

	down_write(&device->client_data_rwsem);
	if (!xa_get_mark(&device->client_data, client_id,
			 CLIENT_DATA_REGISTERED)) {
		up_write(&device->client_data_rwsem);
		return;
	}
	client_data = xa_load(&device->client_data, client_id);
	xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
	client = xa_load(&clients, client_id);
	downgrade_write(&device->client_data_rwsem);

	/*
	 * Notice we cannot be holding any exclusive locks when calling the
	 * remove callback as the remove callback can recurse back into any
	 * public functions in this module and thus try for any locks those
	 * functions take.
	 *
	 * For this reason clients and drivers should not call the
	 * unregistration functions while holding any locks.
	 *
	 * It is tempting to drop the client_data_rwsem too, but this is
	 * required to ensure that unregister_client does not return until all
	 * clients are completely unregistered, which is required to avoid
	 * module unloading races.
	 */
	if (client->remove)
		client->remove(device, client_data);

	xa_erase(&device->client_data, client_id);
	up_read(&device->client_data_rwsem);
}
static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
		       rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
	unsigned int port;
	int ret;

	/*
	 * device->port_immutable is indexed directly by the port number to
	 * make access to this data as efficient as possible.
	 *
	 * Therefore port_immutable is declared as a 1 based array with
	 * potential empty slots at the beginning.
	 */
	device->port_immutable =
		kcalloc(rdma_end_port(device) + 1,
			sizeof(*device->port_immutable), GFP_KERNEL);
	if (!device->port_immutable)
		return -ENOMEM;

	rdma_for_each_port (device, port) {
		ret = device->ops.get_port_immutable(
			device, port, &device->port_immutable[port]);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}
	return 0;
}
void ib_get_device_fw_str(struct ib_device *dev, char *str)
{
	if (dev->ops.get_dev_fw_str)
		dev->ops.get_dev_fw_str(dev, str);
	else
		str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);

static int setup_port_pkey_list(struct ib_device *device)
{
	int i;

	/*
	 * device->port_pkey_list is indexed directly by the port number,
	 * Therefore it is declared as a 1 based array with potential empty
	 * slots at the beginning.
	 */
	device->port_pkey_list = kcalloc(rdma_end_port(device) + 1,
					 sizeof(*device->port_pkey_list),
					 GFP_KERNEL);

	if (!device->port_pkey_list)
		return -ENOMEM;

	for (i = 0; i < (rdma_end_port(device) + 1); i++) {
		spin_lock_init(&device->port_pkey_list[i].list_lock);
		INIT_LIST_HEAD(&device->port_pkey_list[i].pkey_list);
	}

	return 0;
}
static void ib_policy_change_task(struct work_struct *work)
{
	struct ib_device *dev;
	unsigned long index;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
		unsigned int i;

		rdma_for_each_port (dev, i) {
			u64 sp;
			int ret = ib_get_cached_subnet_prefix(dev,
							      i,
							      &sp);

			WARN_ONCE(ret,
				  "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
				  ret);
			if (!ret)
				ib_security_cache_change(dev, i, sp);
		}
	}
	up_read(&devices_rwsem);
}

static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data)
{
	if (event != LSM_POLICY_CHANGE)
		return NOTIFY_DONE;

	schedule_work(&ib_policy_change_work);
	ib_mad_agent_security_change();

	return NOTIFY_DONE;
}
/*
 * Assign the unique string device name and the unique device index.
 */
static int assign_name(struct ib_device *device, const char *name)
{
	static u32 last_id;
	int ret;

	down_write(&devices_rwsem);
	/* Assign a unique name to the device */
	if (strchr(name, '%'))
		ret = alloc_name(device, name);
	else
		ret = dev_set_name(&device->dev, name);
	if (ret)
		goto out;

	if (__ib_device_get_by_name(dev_name(&device->dev))) {
		ret = -ENFILE;
		goto out;
	}
	strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);

	/* Cyclically allocate a user visible ID for the device */
	device->index = last_id;
	ret = xa_alloc(&devices, &device->index, INT_MAX, device, GFP_KERNEL);
	if (ret == -ENOSPC) {
		device->index = 0;
		ret = xa_alloc(&devices, &device->index, INT_MAX, device,
			       GFP_KERNEL);
	}
	if (ret)
		goto out;
	last_id = device->index + 1;

	ret = 0;

out:
	up_write(&devices_rwsem);
	return ret;
}

static void release_name(struct ib_device *device)
{
	down_write(&devices_rwsem);
	xa_erase(&devices, device->index);
	up_write(&devices_rwsem);
}
static void setup_dma_device(struct ib_device *device)
{
	struct device *parent = device->dev.parent;

	WARN_ON_ONCE(device->dma_device);
	if (device->dev.dma_ops) {
		/*
		 * The caller provided custom DMA operations. Copy the
		 * DMA-related fields that are used by e.g. dma_alloc_coherent()
		 * into device->dev.
		 */
		device->dma_device = &device->dev;
		if (!device->dev.dma_mask) {
			if (parent)
				device->dev.dma_mask = parent->dma_mask;
			else
				WARN_ON_ONCE(true);
		}
		if (!device->dev.coherent_dma_mask) {
			if (parent)
				device->dev.coherent_dma_mask =
					parent->coherent_dma_mask;
			else
				WARN_ON_ONCE(true);
		}
	} else {
		/*
		 * The caller did not provide custom DMA operations. Use the
		 * DMA mapping operations of the parent device.
		 */
		WARN_ON_ONCE(!parent);
		device->dma_device = parent;
	}
}

/*
 * setup_device() allocates memory and sets up data that requires calling the
 * device ops, this is the only reason these actions are not done during
 * ib_alloc_device. It is undone by ib_dealloc_device().
 */
static int setup_device(struct ib_device *device)
{
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
	int ret;

	setup_dma_device(device);

	ret = ib_device_check_mandatory(device);
	if (ret)
		return ret;

	ret = read_port_immutable(device);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't create per port immutable data\n");
		return ret;
	}

	memset(&device->attrs, 0, sizeof(device->attrs));
	ret = device->ops.query_device(device, &device->attrs, &uhw);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't query the device attributes\n");
		return ret;
	}

	ret = setup_port_pkey_list(device);
	if (ret) {
		dev_warn(&device->dev, "Couldn't create per port_pkey_list\n");
		return ret;
	}

	return 0;
}
static void disable_device(struct ib_device *device)
{
	struct ib_client *client;

	WARN_ON(!refcount_read(&device->refcount));

	down_write(&devices_rwsem);
	xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
	up_write(&devices_rwsem);

	down_read(&clients_rwsem);
	list_for_each_entry_reverse(client, &client_list, list)
		remove_client_context(device, client->client_id);
	up_read(&clients_rwsem);

	/* Pairs with refcount_set in enable_device */
	ib_device_put(device);
	wait_for_completion(&device->unreg_completion);
}

/*
 * An enabled device is visible to all clients and to all the public facing
 * APIs that return a device pointer.
 */
static int enable_device(struct ib_device *device)
{
	struct ib_client *client;
	unsigned long index;
	int ret = 0;

	refcount_set(&device->refcount, 1);
	down_write(&devices_rwsem);
	xa_set_mark(&devices, device->index, DEVICE_REGISTERED);
	up_write(&devices_rwsem);

	down_read(&clients_rwsem);
	xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
		ret = add_client_context(device, client);
		if (ret) {
			up_read(&clients_rwsem);
			disable_device(device);
			return ret;
		}
	}
	up_read(&clients_rwsem);
	return 0;
}
/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 * @name: unique string device name, which may contain a '%' to request a
 *   unique index (see alloc_name())
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core. All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device, const char *name)
{
	int ret;

	ret = assign_name(device, name);
	if (ret)
		return ret;

	ret = setup_device(device);
	if (ret)
		goto out;

	ret = ib_cache_setup_one(device);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't set up InfiniBand P_Key/GID cache\n");
		goto out;
	}

	ib_device_register_rdmacg(device);

	ret = device_add(&device->dev);
	if (ret)
		goto cg_cleanup;

	ret = ib_device_register_sysfs(device);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't register device with driver model\n");
		goto dev_cleanup;
	}

	ret = enable_device(device);
	if (ret)
		goto sysfs_cleanup;

	return 0;

sysfs_cleanup:
	ib_device_unregister_sysfs(device);
dev_cleanup:
	device_del(&device->dev);
cg_cleanup:
	ib_device_unregister_rdmacg(device);
	ib_cache_cleanup_one(device);
out:
	release_name(device);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);
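/*
 * Typical driver flow (a hedged sketch, continuing the hypothetical "mydrv"
 * names from the allocation example above):
 *
 *	dev = ib_alloc_device(mydrv_dev, ibdev);
 *	ib_set_device_ops(&dev->ibdev, &mydrv_dev_ops);
 *	ret = ib_register_device(&dev->ibdev, "mydrv%d");
 *	if (ret)
 *		ib_dealloc_device(&dev->ibdev);
 */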
/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device. All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
	disable_device(device);
	ib_device_unregister_sysfs(device);
	device_del(&device->dev);
	ib_device_unregister_rdmacg(device);
	ib_cache_cleanup_one(device);
	release_name(device);
}
EXPORT_SYMBOL(ib_unregister_device);

static int assign_client_id(struct ib_client *client)
{
	int ret;

	down_write(&clients_rwsem);
	/*
	 * The add/remove callbacks must be called in FIFO/LIFO order. To
	 * achieve this we assign client_ids so they are sorted in
	 * registration order, and retain a linked list we can reverse iterate
	 * to get the LIFO order. The extra linked list can go away if xarray
	 * learns to reverse iterate.
	 */
	if (list_empty(&client_list))
		client->client_id = 0;
	else
		client->client_id =
			list_last_entry(&client_list, struct ib_client, list)
				->client_id;
	ret = xa_alloc(&clients, &client->client_id, INT_MAX, client,
		       GFP_KERNEL);
	if (ret)
		goto out;

	xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
	list_add_tail(&client->list, &client_list);

out:
	up_write(&clients_rwsem);
	return ret;
}
/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal. When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered). In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;
	unsigned long index;
	int ret;

	ret = assign_client_id(client);
	if (ret)
		return ret;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
		ret = add_client_context(device, client);
		if (ret) {
			up_read(&devices_rwsem);
			ib_unregister_client(client);
			return ret;
		}
	}
	up_read(&devices_rwsem);
	return 0;
}
EXPORT_SYMBOL(ib_register_client);
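/*
 * A minimal client sketch (illustrative only; "myclient" and its callbacks
 * are hypothetical names):
 *
 *	static void myclient_add(struct ib_device *device)
 *	{
 *		struct myclient_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *		ib_set_client_data(device, &myclient, ctx);
 *	}
 *
 *	static void myclient_remove(struct ib_device *device, void *data)
 *	{
 *		kfree(data);
 *	}
 *
 *	static struct ib_client myclient = {
 *		.name   = "myclient",
 *		.add    = myclient_add,
 *		.remove = myclient_remove,
 *	};
 *
 *	ret = ib_register_client(&myclient);
 */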
/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration. When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 *
 * This is a full fence, once it returns no client callbacks will be called,
 * or are running in another thread.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_device *device;
	unsigned long index;

	down_write(&clients_rwsem);
	xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
	up_write(&clients_rwsem);
	/*
	 * Every device still known must be serialized to make sure we are
	 * done with the client callbacks before we return.
	 */
	down_read(&devices_rwsem);
	xa_for_each (&devices, index, device)
		remove_client_context(device, client->client_id);
	up_read(&devices_rwsem);

	down_write(&clients_rwsem);
	list_del(&client->list);
	xa_erase(&clients, client->client_id);
	up_write(&clients_rwsem);
}
EXPORT_SYMBOL(ib_unregister_client);
/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context data that can be retrieved with
 * ib_get_client_data(). This can only be called while the client is
 * registered to the device, once the ib_client remove() callback returns this
 * cannot be called anymore.
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	void *rc;

	if (WARN_ON(IS_ERR(data)))
		data = NULL;

	rc = xa_store(&device->client_data, client->client_id, data,
		      GFP_KERNEL);
	WARN_ON(xa_is_err(rc));
}
EXPORT_SYMBOL(ib_set_client_data);
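/*
 * A hedged usage sketch: clients typically call this from their add()
 * callback and read the pointer back later with ib_get_client_data()
 * ("myclient" and "ctx" are the hypothetical names from the client sketch
 * above):
 *
 *	ib_set_client_data(device, &myclient, ctx);
 *	...
 *	ctx = ib_get_client_data(device, &myclient);
 */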
/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification). This
 * callback may occur in interrupt context.
 */
void ib_register_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_register_event_handler);

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
void ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
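/*
 * Driver-side sketch (hedged): a low-level driver reporting a port state
 * change might do:
 *
 *	struct ib_event event;
 *
 *	event.device = ibdev;
 *	event.element.port_num = port;
 *	event.event = IB_EVENT_PORT_ACTIVE;
 *	ib_dispatch_event(&event);
 *
 * Since handlers run under the event_handler_lock spinlock, possibly in
 * interrupt context, they must not sleep.
 */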
/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	union ib_gid gid;
	int err;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	memset(port_attr, 0, sizeof(*port_attr));
	err = device->ops.query_port(device, port_num, port_attr);
	if (err || port_attr->subnet_prefix)
		return err;

	if (rdma_port_get_link_layer(device, port_num) !=
	    IB_LINK_LAYER_INFINIBAND)
		return 0;

	err = device->ops.query_gid(device, port_num, 0, &gid);
	if (err)
		return err;

	port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
	return 0;
}
EXPORT_SYMBOL(ib_query_port);

/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev: IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev
 * which are related to netdevice and calls callback() on each
 * device for which filter() function returns non zero.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie)
{
	unsigned int port;

	rdma_for_each_port (ib_dev, port)
		if (rdma_protocol_roce(ib_dev, port)) {
			struct net_device *idev = NULL;

			if (ib_dev->ops.get_netdev)
				idev = ib_dev->ops.get_netdev(ib_dev, port);

			if (idev &&
			    idev->reg_state >= NETREG_UNREGISTERED) {
				dev_put(idev);
				idev = NULL;
			}

			if (filter(ib_dev, port, idev, filter_cookie))
				cb(ib_dev, port, idev, cookie);

			if (idev)
				dev_put(idev);
		}
}

/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all RoCE devices' physical ports which are related
 * to netdevices and calls callback() on each device for which
 * filter() function returns non zero.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie)
{
	struct ib_device *dev;
	unsigned long index;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED)
		ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
	up_read(&devices_rwsem);
}

/**
 * ib_enum_all_devs - enumerate all ib_devices
 * @cb: Callback to call for each found ib_device
 *
 * Enumerates all ib_devices and calls callback() on each device.
 */
int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
		     struct netlink_callback *cb)
{
	unsigned long index;
	struct ib_device *dev;
	unsigned int idx = 0;
	int ret = 0;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
		ret = nldev_cb(dev, skb, cb, idx);
		if (ret)
			break;
		idx++;
	}
	up_read(&devices_rwsem);
	return ret;
}
/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	return device->ops.query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	if (!device->ops.modify_device)
		return -ENOSYS;

	return device->ops.modify_device(device, device_modify_mask,
					 device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	int rc;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	if (device->ops.modify_port)
		rc = device->ops.modify_port(device, port_num,
					     port_modify_mask,
					     port_modify);
	else
		rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;

	return rc;
}
EXPORT_SYMBOL(ib_modify_port);

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs. It searches only the IB link layer.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found. This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	unsigned int port;
	int ret, i;

	rdma_for_each_port (device, port) {
		if (!rdma_protocol_ib(device, port))
			continue;

		for (i = 0; i < device->port_immutable[port].gid_tbl_len;
		     ++i) {
			ret = rdma_query_gid(device, port, i, &tmp_gid);
			if (ret)
				return ret;
			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);
/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;
	int partial_ix = -1;

	for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len;
	     ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* if there is full-member pkey take it. */
			if (tmp_pkey & 0x8000) {
				*index = i;
				return 0;
			}
			if (partial_ix < 0)
				partial_ix = i;
		}
	}

	/* no full-member, if exists take the limited */
	if (partial_ix >= 0) {
		*index = partial_ix;
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
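/*
 * Illustration: P_Key 0x8001 (full member, bit 15 set) and 0x0001 (limited
 * member) share the same low 15 bits. A search for either value prefers the
 * index holding the full-member 0x8001 and only falls back to the limited
 * entry when no full-member match exists.
 */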
/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 * for a received CM request
 * @dev: An RDMA device on which the request has been received.
 * @port: Port number on the RDMA device.
 * @pkey: The Pkey the request came on.
 * @gid: A GID that the net_dev uses to communicate.
 * @addr: Contains the IP address that the request specified as its
 *   destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
					    u8 port,
					    u16 pkey,
					    const union ib_gid *gid,
					    const struct sockaddr *addr)
{
	struct net_device *net_dev = NULL;
	unsigned long index;
	void *client_data;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	/*
	 * Holding the read side guarantees that the client will not become
	 * unregistered while we are calling get_net_dev_by_params()
	 */
	down_read(&dev->client_data_rwsem);
	xan_for_each_marked (&dev->client_data, index, client_data,
			     CLIENT_DATA_REGISTERED) {
		struct ib_client *client = xa_load(&clients, index);

		if (!client || !client->get_net_dev_by_params)
			continue;

		net_dev = client->get_net_dev_by_params(dev, port, pkey, gid,
							addr, client_data);
		if (net_dev)
			break;
	}
	up_read(&dev->client_data_rwsem);

	return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);

void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
{
	struct ib_device_ops *dev_ops = &dev->ops;

#define SET_DEVICE_OP(ptr, name)                                               \
	do {                                                                   \
		if (ops->name)                                                 \
			if (!((ptr)->name))                                    \
				(ptr)->name = ops->name;                       \
	} while (0)

#define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name)
	SET_DEVICE_OP(dev_ops, add_gid);
	SET_DEVICE_OP(dev_ops, advise_mr);
	SET_DEVICE_OP(dev_ops, alloc_dm);
	SET_DEVICE_OP(dev_ops, alloc_fmr);
	SET_DEVICE_OP(dev_ops, alloc_hw_stats);
	SET_DEVICE_OP(dev_ops, alloc_mr);
	SET_DEVICE_OP(dev_ops, alloc_mw);
	SET_DEVICE_OP(dev_ops, alloc_pd);
	SET_DEVICE_OP(dev_ops, alloc_rdma_netdev);
	SET_DEVICE_OP(dev_ops, alloc_ucontext);
	SET_DEVICE_OP(dev_ops, alloc_xrcd);
	SET_DEVICE_OP(dev_ops, attach_mcast);
	SET_DEVICE_OP(dev_ops, check_mr_status);
	SET_DEVICE_OP(dev_ops, create_ah);
	SET_DEVICE_OP(dev_ops, create_counters);
	SET_DEVICE_OP(dev_ops, create_cq);
	SET_DEVICE_OP(dev_ops, create_flow);
	SET_DEVICE_OP(dev_ops, create_flow_action_esp);
	SET_DEVICE_OP(dev_ops, create_qp);
	SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
	SET_DEVICE_OP(dev_ops, create_srq);
	SET_DEVICE_OP(dev_ops, create_wq);
	SET_DEVICE_OP(dev_ops, dealloc_dm);
	SET_DEVICE_OP(dev_ops, dealloc_fmr);
	SET_DEVICE_OP(dev_ops, dealloc_mw);
	SET_DEVICE_OP(dev_ops, dealloc_pd);
	SET_DEVICE_OP(dev_ops, dealloc_ucontext);
	SET_DEVICE_OP(dev_ops, dealloc_xrcd);
	SET_DEVICE_OP(dev_ops, del_gid);
	SET_DEVICE_OP(dev_ops, dereg_mr);
	SET_DEVICE_OP(dev_ops, destroy_ah);
	SET_DEVICE_OP(dev_ops, destroy_counters);
	SET_DEVICE_OP(dev_ops, destroy_cq);
	SET_DEVICE_OP(dev_ops, destroy_flow);
	SET_DEVICE_OP(dev_ops, destroy_flow_action);
	SET_DEVICE_OP(dev_ops, destroy_qp);
	SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table);
	SET_DEVICE_OP(dev_ops, destroy_srq);
	SET_DEVICE_OP(dev_ops, destroy_wq);
	SET_DEVICE_OP(dev_ops, detach_mcast);
	SET_DEVICE_OP(dev_ops, disassociate_ucontext);
	SET_DEVICE_OP(dev_ops, drain_rq);
	SET_DEVICE_OP(dev_ops, drain_sq);
	SET_DEVICE_OP(dev_ops, fill_res_entry);
	SET_DEVICE_OP(dev_ops, get_dev_fw_str);
	SET_DEVICE_OP(dev_ops, get_dma_mr);
	SET_DEVICE_OP(dev_ops, get_hw_stats);
	SET_DEVICE_OP(dev_ops, get_link_layer);
	SET_DEVICE_OP(dev_ops, get_netdev);
	SET_DEVICE_OP(dev_ops, get_port_immutable);
	SET_DEVICE_OP(dev_ops, get_vector_affinity);
	SET_DEVICE_OP(dev_ops, get_vf_config);
	SET_DEVICE_OP(dev_ops, get_vf_stats);
	SET_DEVICE_OP(dev_ops, init_port);
	SET_DEVICE_OP(dev_ops, map_mr_sg);
	SET_DEVICE_OP(dev_ops, map_phys_fmr);
	SET_DEVICE_OP(dev_ops, mmap);
	SET_DEVICE_OP(dev_ops, modify_ah);
	SET_DEVICE_OP(dev_ops, modify_cq);
	SET_DEVICE_OP(dev_ops, modify_device);
	SET_DEVICE_OP(dev_ops, modify_flow_action_esp);
	SET_DEVICE_OP(dev_ops, modify_port);
	SET_DEVICE_OP(dev_ops, modify_qp);
	SET_DEVICE_OP(dev_ops, modify_srq);
	SET_DEVICE_OP(dev_ops, modify_wq);
	SET_DEVICE_OP(dev_ops, peek_cq);
	SET_DEVICE_OP(dev_ops, poll_cq);
	SET_DEVICE_OP(dev_ops, post_recv);
	SET_DEVICE_OP(dev_ops, post_send);
	SET_DEVICE_OP(dev_ops, post_srq_recv);
	SET_DEVICE_OP(dev_ops, process_mad);
	SET_DEVICE_OP(dev_ops, query_ah);
	SET_DEVICE_OP(dev_ops, query_device);
	SET_DEVICE_OP(dev_ops, query_gid);
	SET_DEVICE_OP(dev_ops, query_pkey);
	SET_DEVICE_OP(dev_ops, query_port);
	SET_DEVICE_OP(dev_ops, query_qp);
	SET_DEVICE_OP(dev_ops, query_srq);
	SET_DEVICE_OP(dev_ops, rdma_netdev_get_params);
	SET_DEVICE_OP(dev_ops, read_counters);
	SET_DEVICE_OP(dev_ops, reg_dm_mr);
	SET_DEVICE_OP(dev_ops, reg_user_mr);
	SET_DEVICE_OP(dev_ops, req_ncomp_notif);
	SET_DEVICE_OP(dev_ops, req_notify_cq);
	SET_DEVICE_OP(dev_ops, rereg_user_mr);
	SET_DEVICE_OP(dev_ops, resize_cq);
	SET_DEVICE_OP(dev_ops, set_vf_guid);
	SET_DEVICE_OP(dev_ops, set_vf_link_state);
	SET_DEVICE_OP(dev_ops, unmap_fmr);

	SET_OBJ_SIZE(dev_ops, ib_pd);
}
EXPORT_SYMBOL(ib_set_device_ops);
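/*
 * Typical driver-side usage (a hedged sketch; "mydrv_dev_ops" and its
 * handlers are hypothetical):
 *
 *	static const struct ib_device_ops mydrv_dev_ops = {
 *		.query_device = mydrv_query_device,
 *		.query_port   = mydrv_query_port,
 *	};
 *
 *	ib_set_device_ops(ibdev, &mydrv_dev_ops);
 *
 * Because SET_DEVICE_OP() never overwrites an op that is already set, a
 * driver can layer several ops structures, e.g. HW-generation specific ops
 * first and then generic fallbacks.
 */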
static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.doit = ib_nl_handle_resolve_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.doit = ib_nl_handle_set_timeout,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_IP_RESOLVE] = {
		.doit = ib_nl_handle_ip_res_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
};
static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ib_comp_wq = alloc_workqueue("ib-comp-wq",
			WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!ib_comp_wq) {
		ret = -ENOMEM;
		goto err;
	}

	ib_comp_unbound_wq =
		alloc_workqueue("ib-comp-unb-wq",
				WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
				WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
	if (!ib_comp_unbound_wq) {
		ret = -ENOMEM;
		goto err_comp;
	}

	ret = class_register(&ib_class);
	if (ret) {
		pr_warn("Couldn't create InfiniBand device class\n");
		goto err_comp_unbound;
	}

	ret = rdma_nl_init();
	if (ret) {
		pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
		goto err_sysfs;
	}

	ret = addr_init();
	if (ret) {
		pr_warn("Couldn't init IB address resolution\n");
		goto err_ibnl;
	}

	ret = ib_mad_init();
	if (ret) {
		pr_warn("Couldn't init IB MAD\n");
		goto err_addr;
	}

	ret = ib_sa_init();
	if (ret) {
		pr_warn("Couldn't init SA\n");
		goto err_mad;
	}

	ret = register_lsm_notifier(&ibdev_lsm_nb);
	if (ret) {
		pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
		goto err_sa;
	}

	nldev_init();
	rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
	roce_gid_mgmt_init();

	return 0;

err_sa:
	ib_sa_cleanup();
err_mad:
	ib_mad_cleanup();
err_addr:
	addr_cleanup();
err_ibnl:
	rdma_nl_exit();
err_sysfs:
	class_unregister(&ib_class);
err_comp_unbound:
	destroy_workqueue(ib_comp_unbound_wq);
err_comp:
	destroy_workqueue(ib_comp_wq);
err:
	destroy_workqueue(ib_wq);
	return ret;
}
static void __exit ib_core_cleanup(void)
{
	roce_gid_mgmt_cleanup();
	nldev_exit();
	rdma_nl_unregister(RDMA_NL_LS);
	unregister_lsm_notifier(&ibdev_lsm_nb);
	ib_sa_cleanup();
	ib_mad_cleanup();
	addr_cleanup();
	rdma_nl_exit();
	class_unregister(&ib_class);
	destroy_workqueue(ib_comp_unbound_wq);
	destroy_workqueue(ib_comp_wq);
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
	WARN_ON(!xa_empty(&clients));
	WARN_ON(!xa_empty(&devices));
}
MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);

subsys_initcall(ib_core_init);
module_exit(ib_core_cleanup);