/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/mlx5/driver.h>
34 #include "mlx5_core.h"
/* All registered mlx5 protocol interfaces (see mlx5_register_interface()). */
static LIST_HEAD(intf_list);
/* All registered core devices, linked via mlx5_priv::dev_list. */
static LIST_HEAD(mlx5_dev_list);
/* intf dev list mutex */
static DEFINE_MUTEX(mlx5_intf_mutex);
41 struct mlx5_device_context {
42 struct list_head list;
43 struct mlx5_interface *intf;
50 MLX5_INTERFACE_ATTACHED,
53 void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
55 struct mlx5_device_context *dev_ctx;
56 struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
58 if (!mlx5_lag_intf_add(intf, priv))
61 dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
66 dev_ctx->context = intf->add(dev);
67 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
69 set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
71 if (dev_ctx->context) {
72 spin_lock_irq(&priv->ctx_lock);
73 list_add_tail(&dev_ctx->list, &priv->ctx_list);
74 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
75 if (dev_ctx->intf->pfault) {
77 mlx5_core_err(dev, "multiple page fault handlers not supported");
79 priv->pfault_ctx = dev_ctx->context;
80 priv->pfault = dev_ctx->intf->pfault;
84 spin_unlock_irq(&priv->ctx_lock);
90 static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
91 struct mlx5_priv *priv)
93 struct mlx5_device_context *dev_ctx;
95 list_for_each_entry(dev_ctx, &priv->ctx_list, list)
96 if (dev_ctx->intf == intf)
101 void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
103 struct mlx5_device_context *dev_ctx;
104 struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
106 dev_ctx = mlx5_get_device(intf, priv);
110 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
111 spin_lock_irq(&priv->ctx_lock);
112 if (priv->pfault == dev_ctx->intf->pfault)
114 spin_unlock_irq(&priv->ctx_lock);
116 synchronize_srcu(&priv->pfault_srcu);
119 spin_lock_irq(&priv->ctx_lock);
120 list_del(&dev_ctx->list);
121 spin_unlock_irq(&priv->ctx_lock);
123 if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
124 intf->remove(dev, dev_ctx->context);
129 static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
131 struct mlx5_device_context *dev_ctx;
132 struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
134 dev_ctx = mlx5_get_device(intf, priv);
139 if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
141 intf->attach(dev, dev_ctx->context);
142 set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
144 if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
146 dev_ctx->context = intf->add(dev);
147 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
151 void mlx5_attach_device(struct mlx5_core_dev *dev)
153 struct mlx5_priv *priv = &dev->priv;
154 struct mlx5_interface *intf;
156 mutex_lock(&mlx5_intf_mutex);
157 list_for_each_entry(intf, &intf_list, list)
158 mlx5_attach_interface(intf, priv);
159 mutex_unlock(&mlx5_intf_mutex);
162 static void mlx5_detach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
164 struct mlx5_device_context *dev_ctx;
165 struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
167 dev_ctx = mlx5_get_device(intf, priv);
172 if (!test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
174 intf->detach(dev, dev_ctx->context);
175 clear_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
177 if (!test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
179 intf->remove(dev, dev_ctx->context);
180 clear_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
184 void mlx5_detach_device(struct mlx5_core_dev *dev)
186 struct mlx5_priv *priv = &dev->priv;
187 struct mlx5_interface *intf;
189 mutex_lock(&mlx5_intf_mutex);
190 list_for_each_entry(intf, &intf_list, list)
191 mlx5_detach_interface(intf, priv);
192 mutex_unlock(&mlx5_intf_mutex);
195 bool mlx5_device_registered(struct mlx5_core_dev *dev)
197 struct mlx5_priv *priv;
200 mutex_lock(&mlx5_intf_mutex);
201 list_for_each_entry(priv, &mlx5_dev_list, dev_list)
202 if (priv == &dev->priv)
204 mutex_unlock(&mlx5_intf_mutex);
209 int mlx5_register_device(struct mlx5_core_dev *dev)
211 struct mlx5_priv *priv = &dev->priv;
212 struct mlx5_interface *intf;
214 mutex_lock(&mlx5_intf_mutex);
215 list_add_tail(&priv->dev_list, &mlx5_dev_list);
216 list_for_each_entry(intf, &intf_list, list)
217 mlx5_add_device(intf, priv);
218 mutex_unlock(&mlx5_intf_mutex);
223 void mlx5_unregister_device(struct mlx5_core_dev *dev)
225 struct mlx5_priv *priv = &dev->priv;
226 struct mlx5_interface *intf;
228 mutex_lock(&mlx5_intf_mutex);
229 list_for_each_entry(intf, &intf_list, list)
230 mlx5_remove_device(intf, priv);
231 list_del(&priv->dev_list);
232 mutex_unlock(&mlx5_intf_mutex);
235 int mlx5_register_interface(struct mlx5_interface *intf)
237 struct mlx5_priv *priv;
239 if (!intf->add || !intf->remove)
242 mutex_lock(&mlx5_intf_mutex);
243 list_add_tail(&intf->list, &intf_list);
244 list_for_each_entry(priv, &mlx5_dev_list, dev_list)
245 mlx5_add_device(intf, priv);
246 mutex_unlock(&mlx5_intf_mutex);
250 EXPORT_SYMBOL(mlx5_register_interface);
252 void mlx5_unregister_interface(struct mlx5_interface *intf)
254 struct mlx5_priv *priv;
256 mutex_lock(&mlx5_intf_mutex);
257 list_for_each_entry(priv, &mlx5_dev_list, dev_list)
258 mlx5_remove_device(intf, priv);
259 list_del(&intf->list);
260 mutex_unlock(&mlx5_intf_mutex);
262 EXPORT_SYMBOL(mlx5_unregister_interface);
264 void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
266 struct mlx5_priv *priv = &mdev->priv;
267 struct mlx5_device_context *dev_ctx;
271 spin_lock_irqsave(&priv->ctx_lock, flags);
273 list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
274 if ((dev_ctx->intf->protocol == protocol) &&
275 dev_ctx->intf->get_dev) {
276 result = dev_ctx->intf->get_dev(dev_ctx->context);
280 spin_unlock_irqrestore(&priv->ctx_lock, flags);
284 EXPORT_SYMBOL(mlx5_get_protocol_dev);
286 /* Must be called with intf_mutex held */
287 void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
289 struct mlx5_interface *intf;
291 list_for_each_entry(intf, &intf_list, list)
292 if (intf->protocol == protocol) {
293 mlx5_add_device(intf, &dev->priv);
298 /* Must be called with intf_mutex held */
299 void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
301 struct mlx5_interface *intf;
303 list_for_each_entry(intf, &intf_list, list)
304 if (intf->protocol == protocol) {
305 mlx5_remove_device(intf, &dev->priv);
310 static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
312 return (u16)((dev->pdev->bus->number << 8) |
313 PCI_SLOT(dev->pdev->devfn));
316 /* Must be called with intf_mutex held */
317 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
319 u16 pci_id = mlx5_gen_pci_id(dev);
320 struct mlx5_core_dev *res = NULL;
321 struct mlx5_core_dev *tmp_dev;
322 struct mlx5_priv *priv;
324 list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
325 tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
326 if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
335 void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
338 struct mlx5_priv *priv = &dev->priv;
339 struct mlx5_device_context *dev_ctx;
342 spin_lock_irqsave(&priv->ctx_lock, flags);
344 list_for_each_entry(dev_ctx, &priv->ctx_list, list)
345 if (dev_ctx->intf->event)
346 dev_ctx->intf->event(dev, dev_ctx->context, event, param);
348 spin_unlock_irqrestore(&priv->ctx_lock, flags);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/*
 * Forward an ODP page fault to the registered handler, if any.  SRCU keeps
 * the handler/context pair alive against a concurrent mlx5_remove_device()
 * (which does synchronize_srcu() before freeing).
 */
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
			  struct mlx5_pagefault *pfault)
{
	struct mlx5_priv *priv = &dev->priv;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&priv->pfault_srcu);
	if (priv->pfault)
		priv->pfault(dev, priv->pfault_ctx, pfault);
	srcu_read_unlock(&priv->pfault_srcu, srcu_idx);
}
#endif
365 void mlx5_dev_list_lock(void)
367 mutex_lock(&mlx5_intf_mutex);
370 void mlx5_dev_list_unlock(void)
372 mutex_unlock(&mlx5_intf_mutex);
375 int mlx5_dev_list_trylock(void)
377 return mutex_trylock(&mlx5_intf_mutex);