/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include "mlx5_core.h"

static LIST_HEAD(intf_list);
static LIST_HEAD(mlx5_dev_list);
/* Protects both intf_list and mlx5_dev_list */
static DEFINE_MUTEX(mlx5_intf_mutex);

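/*
 * One instance per (core device, registered interface) pair; @context
 * holds whatever the interface's ->add() callback returned.
 */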
struct mlx5_device_context {
	struct list_head	list;
	struct mlx5_interface  *intf;
	void		       *context;
	unsigned long		state;
};

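/*
 * dev_ctx->state bits: ADDED means ->add() has produced a live context,
 * ATTACHED tracks the lighter attach/detach cycle that suspends an
 * interface while the core device itself stays registered.
 */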
enum {
	MLX5_INTERFACE_ADDED,
	MLX5_INTERFACE_ATTACHED,
};

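/*
 * Instantiate @intf on the device owning @priv: call ->add(), keep the
 * returned context and, if it is non-NULL, publish the pairing on the
 * device's ctx_list. Must be called with mlx5_intf_mutex held.
 */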
void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	if (!mlx5_lag_intf_add(intf, priv))
		return;

	dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
	if (!dev_ctx)
		return;

	dev_ctx->intf = intf;
	dev_ctx->context = intf->add(dev);
	set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
	if (intf->attach)
		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
		if (dev_ctx->intf->pfault) {
			if (priv->pfault) {
				mlx5_core_err(dev, "multiple page fault handlers not supported");
			} else {
				priv->pfault_ctx = dev_ctx->context;
				priv->pfault = dev_ctx->intf->pfault;
			}
		}
#endif
		spin_unlock_irq(&priv->ctx_lock);
	} else {
		kfree(dev_ctx);
	}
}

static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
						   struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf)
			return dev_ctx;
	return NULL;
}

void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = mlx5_get_device(intf, priv);
	if (!dev_ctx)
		return;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	spin_lock_irq(&priv->ctx_lock);
	if (priv->pfault == dev_ctx->intf->pfault)
		priv->pfault = NULL;
	spin_unlock_irq(&priv->ctx_lock);

	synchronize_srcu(&priv->pfault_srcu);
#endif

	spin_lock_irq(&priv->ctx_lock);
	list_del(&dev_ctx->list);
	spin_unlock_irq(&priv->ctx_lock);

	if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
		intf->remove(dev, dev_ctx->context);

	kfree(dev_ctx);
}

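/*
 * Bring @intf back up on a device that is being (re)attached.
 * Interfaces implementing ->attach() get the lightweight path; the
 * rest are rebuilt from scratch via ->add().
 */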
static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = mlx5_get_device(intf, priv);
	if (!dev_ctx)
		return;

	if (intf->attach) {
		if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
			return;
		intf->attach(dev, dev_ctx->context);
		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
	} else {
		if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
			return;
		dev_ctx->context = intf->add(dev);
		set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
	}
}

void mlx5_attach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_attach_interface(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);
}

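/*
 * Counterpart of mlx5_attach_interface(): quiesce @intf via ->detach()
 * when available, otherwise tear it down completely with ->remove().
 */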
static void mlx5_detach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = mlx5_get_device(intf, priv);
	if (!dev_ctx)
		return;

	if (intf->detach) {
		if (!test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
			return;
		intf->detach(dev, dev_ctx->context);
		clear_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
	} else {
		if (!test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
			return;
		intf->remove(dev, dev_ctx->context);
		clear_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
	}
}

void mlx5_detach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_detach_interface(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);
}

bool mlx5_device_registered(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv;
	bool found = false;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
		if (priv == &dev->priv)
			found = true;
	mutex_unlock(&mlx5_intf_mutex);

	return found;
}

int mlx5_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_add_tail(&priv->dev_list, &mlx5_dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);

	return 0;
}

void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_remove_device(intf, priv);
	list_del(&priv->dev_list);
	mutex_unlock(&mlx5_intf_mutex);
}

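/*
 * Called by upper-layer drivers (e.g. Ethernet, IB) to hook into the
 * core. ->add() and ->remove() are mandatory; the new interface is
 * instantiated immediately on every already-registered core device.
 */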
int mlx5_register_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&mlx5_intf_mutex);
	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);

	return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

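/*
 * Usage sketch (hypothetical consumer, not part of this file). The
 * callback signatures follow struct mlx5_interface in
 * <linux/mlx5/driver.h>; the "foo" names are illustrative only, and a
 * NULL return from ->add() makes mlx5_add_device() drop the context:
 *
 *	static void *foo_add(struct mlx5_core_dev *dev)
 *	{
 *		return kzalloc(sizeof(struct foo_priv), GFP_KERNEL);
 *	}
 *
 *	static void foo_remove(struct mlx5_core_dev *dev, void *context)
 *	{
 *		kfree(context);
 *	}
 *
 *	static struct mlx5_interface foo_interface = {
 *		.add      = foo_add,
 *		.remove   = foo_remove,
 *		.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
 *	};
 *
 *	err = mlx5_register_interface(&foo_interface);
 */
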
void mlx5_unregister_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
		mlx5_remove_device(intf, priv);
	list_del(&intf->list);
	mutex_unlock(&mlx5_intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);

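/*
 * Look up the protocol-level device object for @protocol, e.g. the IB
 * driver asks for MLX5_INTERFACE_PROTOCOL_ETH to reach the paired
 * struct net_device. Only works for interfaces providing ->get_dev().
 */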
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
	struct mlx5_priv *priv = &mdev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if ((dev_ctx->intf->protocol == protocol) &&
		    dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev_ctx->context);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);

/* Must be called with intf_mutex held */
void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
{
	struct mlx5_interface *intf;

	list_for_each_entry(intf, &intf_list, list)
		if (intf->protocol == protocol) {
			mlx5_add_device(intf, &dev->priv);
			break;
		}
}

/* Must be called with intf_mutex held */
void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
{
	struct mlx5_interface *intf;

	list_for_each_entry(intf, &intf_list, list)
		if (intf->protocol == protocol) {
			mlx5_remove_device(intf, &dev->priv);
			break;
		}
}

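/*
 * Collapse a device's PCI address into (bus << 8) | slot, so that all
 * functions in one slot share an id: e.g. 0000:07:00.0 and 0000:07:00.1
 * (the two ports of a dual-port NIC) both yield 0x0700.
 */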
static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
{
	return (u16)((dev->pdev->bus->number << 8) |
		     PCI_SLOT(dev->pdev->devfn));
}

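/*
 * Find another registered core device with the same generated PCI id,
 * i.e. the other function of the same physical adapter.
 */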
/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
	u16 pci_id = mlx5_gen_pci_id(dev);
	struct mlx5_core_dev *res = NULL;
	struct mlx5_core_dev *tmp_dev;
	struct mlx5_priv *priv;

	list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
		tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
		if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
			res = tmp_dev;
			break;
		}
	}

	return res;
}

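/*
 * Broadcast a device-level event (port state change, errors, ...) to
 * every interface instance that registered an ->event() callback.
 */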
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
		     unsigned long param)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, event, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

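/*
 * ODP page-fault dispatch: at most one interface may register a
 * ->pfault() handler (see mlx5_add_device()). It runs under SRCU so
 * that mlx5_remove_device() can synchronize against in-flight faults
 * before the handler goes away.
 */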
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
			  struct mlx5_pagefault *pfault)
{
	struct mlx5_priv *priv = &dev->priv;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&priv->pfault_srcu);
	if (priv->pfault)
		priv->pfault(dev, priv->pfault_ctx, pfault);
	srcu_read_unlock(&priv->pfault_srcu, srcu_idx);
}
#endif

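/* Expose mlx5_intf_mutex to the rest of the core as an opaque dev-list lock */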
void mlx5_dev_list_lock(void)
{
	mutex_lock(&mlx5_intf_mutex);
}

void mlx5_dev_list_unlock(void)
{
	mutex_unlock(&mlx5_intf_mutex);
}

int mlx5_dev_list_trylock(void)
{
	return mutex_trylock(&mlx5_intf_mutex);
}