net/mlx5: Delay events till ib registration ends
[linux-2.6-block.git] drivers/net/ethernet/mellanox/mlx5/core/dev.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include "mlx5_core.h"

static LIST_HEAD(intf_list);
static LIST_HEAD(mlx5_dev_list);
/* Protects both intf_list and mlx5_dev_list. */
static DEFINE_MUTEX(mlx5_intf_mutex);

struct mlx5_device_context {
	struct list_head list;
	struct mlx5_interface *intf;
	void *context;
	unsigned long state;
};

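/*
 * An event that arrives while an interface is still being added is queued
 * here (on priv->waiting_events_list) and replayed to that interface once
 * it has been linked into the device context list.
 */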
struct mlx5_delayed_event {
	struct list_head list;
	struct mlx5_core_dev *dev;
	enum mlx5_dev_event event;
	unsigned long param;
};

enum {
	MLX5_INTERFACE_ADDED,
	MLX5_INTERFACE_ATTACHED,
};

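/* Queue an event for later delivery; called with priv->ctx_lock held. */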
static void add_delayed_event(struct mlx5_priv *priv,
			      struct mlx5_core_dev *dev,
			      enum mlx5_dev_event event,
			      unsigned long param)
{
	struct mlx5_delayed_event *delayed_event;

	delayed_event = kzalloc(sizeof(*delayed_event), GFP_ATOMIC);
	if (!delayed_event) {
		mlx5_core_err(dev, "event %d is missed\n", event);
		return;
	}

	mlx5_core_dbg(dev, "Accumulating event %d\n", event);
	delayed_event->dev = dev;
	delayed_event->event = event;
	delayed_event->param = param;
	list_add_tail(&delayed_event->list, &priv->waiting_events_list);
}

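/*
 * Deliver all accumulated events to the interface that has just been added
 * and stop accumulating new ones.  Must be called with priv->ctx_lock held.
 */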
static void fire_delayed_event_locked(struct mlx5_device_context *dev_ctx,
				      struct mlx5_core_dev *dev,
				      struct mlx5_priv *priv)
{
	struct mlx5_delayed_event *de;
	struct mlx5_delayed_event *n;

	/* stop delaying events */
	priv->is_accum_events = false;

	/* fire all accumulated events before a new event arrives */
	list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
		dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
		list_del(&de->list);
		kfree(de);
	}
}

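/* Drop all queued events without delivering them (used when add() fails). */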
static void cleanup_delayed_events(struct mlx5_priv *priv)
{
	struct mlx5_delayed_event *de;
	struct mlx5_delayed_event *n;

	spin_lock_irq(&priv->ctx_lock);
	priv->is_accum_events = false;
	list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
		list_del(&de->list);
		kfree(de);
	}
	spin_unlock_irq(&priv->ctx_lock);
}

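/*
 * Instantiate @intf on the device that owns @priv: call the interface's
 * add() callback and, on success, link the new context into the device's
 * context list.  Events raised while add() is running (e.g. while mlx5_ib
 * is inside ib_register_device()) are accumulated and replayed once the
 * context is on the list, so the new interface does not miss them.
 */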
void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	if (!mlx5_lag_intf_add(intf, priv))
		return;

	dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
	if (!dev_ctx)
		return;

	dev_ctx->intf = intf;
	/* Accumulate events that may fire after mlx5_ib calls
	 * ib_register_device(), until this interface is added to the
	 * device context list and can receive them directly.
	 */
	priv->is_accum_events = true;

	dev_ctx->context = intf->add(dev);
	set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
	if (intf->attach)
		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);

		fire_delayed_event_locked(dev_ctx, dev, priv);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
		if (dev_ctx->intf->pfault) {
			if (priv->pfault) {
				mlx5_core_err(dev, "multiple page fault handlers not supported");
			} else {
				priv->pfault_ctx = dev_ctx->context;
				priv->pfault = dev_ctx->intf->pfault;
			}
		}
#endif
		spin_unlock_irq(&priv->ctx_lock);
	} else {
		kfree(dev_ctx);
		/* delete all accumulated events */
		cleanup_delayed_events(priv);
	}
}

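/* Find the context of @intf on @priv, or NULL if that interface was not added. */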
static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
						   struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf)
			return dev_ctx;
	return NULL;
}

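/*
 * Tear down @intf on the device that owns @priv: retire the page-fault
 * handler if this interface provided it, unlink the context and call the
 * interface's remove() callback.
 */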
void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = mlx5_get_device(intf, priv);
	if (!dev_ctx)
		return;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	spin_lock_irq(&priv->ctx_lock);
	if (priv->pfault == dev_ctx->intf->pfault)
		priv->pfault = NULL;
	spin_unlock_irq(&priv->ctx_lock);

	synchronize_srcu(&priv->pfault_srcu);
#endif

	spin_lock_irq(&priv->ctx_lock);
	list_del(&dev_ctx->list);
	spin_unlock_irq(&priv->ctx_lock);

	if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
		intf->remove(dev, dev_ctx->context);

	kfree(dev_ctx);
}

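/*
 * Re-activate @intf on a device that is coming back up: call attach() if
 * the interface implements it (and it is not already attached), otherwise
 * fall back to a full add().
 */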
static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = mlx5_get_device(intf, priv);
	if (!dev_ctx)
		return;

	if (intf->attach) {
		if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
			return;
		intf->attach(dev, dev_ctx->context);
		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
	} else {
		if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
			return;
		dev_ctx->context = intf->add(dev);
		set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
	}
}

void mlx5_attach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_attach_interface(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);
}

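/*
 * Counterpart of mlx5_attach_interface(): call detach() if the interface
 * implements it, otherwise fully remove() it.
 */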
static void mlx5_detach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = mlx5_get_device(intf, priv);
	if (!dev_ctx)
		return;

	if (intf->detach) {
		if (!test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
			return;
		intf->detach(dev, dev_ctx->context);
		clear_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
	} else {
		if (!test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
			return;
		intf->remove(dev, dev_ctx->context);
		clear_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
	}
}

void mlx5_detach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_detach_interface(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);
}

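/* Return true if @dev is currently on the global mlx5_dev_list. */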
bool mlx5_device_registered(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv;
	bool found = false;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
		if (priv == &dev->priv)
			found = true;
	mutex_unlock(&mlx5_intf_mutex);

	return found;
}

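/*
 * Add @dev to the global device list and instantiate every currently
 * registered interface on it.
 */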
int mlx5_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_add_tail(&priv->dev_list, &mlx5_dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);

	return 0;
}

void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_remove_device(intf, priv);
	list_del(&priv->dev_list);
	mutex_unlock(&mlx5_intf_mutex);
}

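/*
 * Register a new interface (e.g. mlx5_ib or the mlx5e Ethernet driver) and
 * instantiate it on every mlx5 device that already exists.
 */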
int mlx5_register_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&mlx5_intf_mutex);
	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);

	return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
		mlx5_remove_device(intf, priv);
	list_del(&intf->list);
	mutex_unlock(&mlx5_intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);

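/*
 * Return the protocol-specific device (e.g. the netdev for the Ethernet
 * interface) exposed by the interface that handles @protocol on @mdev, or
 * NULL if no such interface is bound.
 */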
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
	struct mlx5_priv *priv = &mdev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
		if ((dev_ctx->intf->protocol == protocol) &&
		    dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev_ctx->context);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);

/* Must be called with intf_mutex held */
void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
{
	struct mlx5_interface *intf;

	list_for_each_entry(intf, &intf_list, list)
		if (intf->protocol == protocol) {
			mlx5_add_device(intf, &dev->priv);
			break;
		}
}

/* Must be called with intf_mutex held */
void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
{
	struct mlx5_interface *intf;

	list_for_each_entry(intf, &intf_list, list)
		if (intf->protocol == protocol) {
			mlx5_remove_device(intf, &dev->priv);
			break;
		}
}

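/*
 * Build an identifier from the PCI bus and slot numbers (the function
 * number is deliberately excluded) so that functions of the same physical
 * card compare equal.
 */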
static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
{
	return (u16)((dev->pdev->bus->number << 8) |
		     PCI_SLOT(dev->pdev->devfn));
}

/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
	u16 pci_id = mlx5_gen_pci_id(dev);
	struct mlx5_core_dev *res = NULL;
	struct mlx5_core_dev *tmp_dev;
	struct mlx5_priv *priv;

	list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
		tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
		if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
			res = tmp_dev;
			break;
		}
	}

	return res;
}

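/*
 * Dispatch a core event to every interface attached to this device.  If an
 * interface is in the middle of being added, the event is also queued so
 * that it can be replayed to that interface later.
 */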
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
		     unsigned long param)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	if (priv->is_accum_events)
		add_delayed_event(priv, dev, event, param);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, event, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

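/*
 * Forward an ODP page fault to the registered handler (mlx5_ib).  SRCU
 * protects against the handler being unregistered while it runs.
 */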
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
			  struct mlx5_pagefault *pfault)
{
	struct mlx5_priv *priv = &dev->priv;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&priv->pfault_srcu);
	if (priv->pfault)
		priv->pfault(dev, priv->pfault_ctx, pfault);
	srcu_read_unlock(&priv->pfault_srcu, srcu_idx);
}
#endif

void mlx5_dev_list_lock(void)
{
	mutex_lock(&mlx5_intf_mutex);
}

void mlx5_dev_list_unlock(void)
{
	mutex_unlock(&mlx5_intf_mutex);
}

int mlx5_dev_list_trylock(void)
{
	return mutex_trylock(&mlx5_intf_mutex);
}