net/mlx5: Introducing E-Switch and l2 table
[linux-2.6-block.git] drivers/net/ethernet/mellanox/mlx5/core/main.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
#ifdef CONFIG_MLX5_CORE_EN
#include "eswitch.h"
#endif

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

#define MLX5_DEFAULT_PROF       2
static int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");

static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);

struct mlx5_device_context {
        struct list_head        list;
        struct mlx5_interface  *intf;
        void                   *context;
};

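/*
 * Driver profiles.  Profile 0 leaves the firmware defaults untouched,
 * profile 1 only caps the number of QPs, and profile 2 (the default)
 * additionally pre-sizes the MR cache: each mr_cache[i] entry gives the
 * pool size and, as the field names suggest, a lower "limit" for one
 * cache bucket.
 */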
static struct mlx5_profile profile[] = {
        [0] = {
                .mask           = 0,
        },
        [1] = {
                .mask           = MLX5_PROF_MASK_QP_SIZE,
                .log_max_qp     = 12,
        },
        [2] = {
                .mask           = MLX5_PROF_MASK_QP_SIZE |
                                  MLX5_PROF_MASK_MR_CACHE,
                .log_max_qp     = 17,
                .mr_cache[0]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[1]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[2]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[3]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[4]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[5]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[6]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[7]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[8]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[9]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[10]   = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[11]   = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[12]   = {
                        .size   = 64,
                        .limit  = 32
                },
                .mr_cache[13]   = {
                        .size   = 32,
                        .limit  = 16
                },
                .mr_cache[14]   = {
                        .size   = 16,
                        .limit  = 8
                },
                .mr_cache[15]   = {
                        .size   = 8,
                        .limit  = 4
                },
        },
};

#define FW_INIT_TIMEOUT_MILLI   2000
#define FW_INIT_WAIT_MS         2

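/*
 * Poll the firmware "initializing" flag exposed through the initialization
 * segment, sleeping FW_INIT_WAIT_MS between reads.  Returns 0 once firmware
 * reports ready, or -EBUSY if max_wait_milli milliseconds elapse first.
 */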
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_milli)
{
        unsigned long end = jiffies + msecs_to_jiffies(max_wait_milli);
        int err = 0;

        while (fw_initializing(dev)) {
                if (time_after(jiffies, end)) {
                        err = -EBUSY;
                        break;
                }
                msleep(FW_INIT_WAIT_MS);
        }

        return err;
}

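/*
 * The 64-bit -> 32-bit fallback ladder below is the idiom of this kernel
 * generation.  On later kernels the streaming and coherent masks would
 * typically be set in one shot (sketch only, not used here):
 *
 *      err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 *      if (err)
 *              err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 */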
static int set_dma_caps(struct pci_dev *pdev)
{
        int err;

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
                        return err;
                }
        }

        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev,
                         "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev,
                                "Can't set consistent PCI DMA mask, aborting\n");
                        return err;
                }
        }

        dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
        return err;
}

static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
        struct pci_dev *pdev = dev->pdev;
        int err = 0;

        mutex_lock(&dev->pci_status_mutex);
        if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
                err = pci_enable_device(pdev);
                if (!err)
                        dev->pci_status = MLX5_PCI_STATUS_ENABLED;
        }
        mutex_unlock(&dev->pci_status_mutex);

        return err;
}

static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
        struct pci_dev *pdev = dev->pdev;

        mutex_lock(&dev->pci_status_mutex);
        if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
                pci_disable_device(pdev);
                dev->pci_status = MLX5_PCI_STATUS_DISABLED;
        }
        mutex_unlock(&dev->pci_status_mutex);
}

static int request_bar(struct pci_dev *pdev)
{
        int err = 0;

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
                return -ENODEV;
        }

        err = pci_request_regions(pdev, DRIVER_NAME);
        if (err)
                dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

        return err;
}

static void release_bar(struct pci_dev *pdev)
{
        pci_release_regions(pdev);
}

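/*
 * MSI-X budget: mlx5_enable_msix() below requests one completion vector per
 * online CPU per port plus MLX5_EQ_VEC_COMP_BASE control vectors (those
 * reserved for the page-request, command and async-event EQs), capped by
 * the firmware's log_max_eq.  For example, one port on an 8-CPU host asks
 * for 8 + MLX5_EQ_VEC_COMP_BASE vectors; getting no more than the base
 * count alone is treated as failure.
 */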
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_eq_table *table = &priv->eq_table;
        int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
        int nvec;
        int i;

        nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
               MLX5_EQ_VEC_COMP_BASE;
        nvec = min_t(int, nvec, num_eqs);
        if (nvec <= MLX5_EQ_VEC_COMP_BASE)
                return -ENOMEM;

        priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL);

        priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
        if (!priv->msix_arr || !priv->irq_info)
                goto err_free_msix;

        for (i = 0; i < nvec; i++)
                priv->msix_arr[i].entry = i;

        nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
                                     MLX5_EQ_VEC_COMP_BASE + 1, nvec);
        if (nvec < 0) {
                kfree(priv->irq_info);
                kfree(priv->msix_arr);
                return nvec;
        }

        table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

        return 0;

err_free_msix:
        kfree(priv->irq_info);
        kfree(priv->msix_arr);
        return -ENOMEM;
}

static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;

        pci_disable_msix(dev->pdev);
        kfree(priv->irq_info);
        kfree(priv->msix_arr);
}

struct mlx5_reg_host_endianness {
        u8      he;
        u8      rsvd[15];
};

#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
        MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
                                MLX5_DEV_CAP_FLAG_DCT,
};

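/*
 * Firmware encodes the pkey table size as log2(size / 128), i.e.
 * 128 -> 0, 256 -> 1, ..., 4096 -> 5.  to_fw_pkey_sz() below spells the
 * mapping out and falls back to 0 (128 entries) on unexpected input.
 */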
static u16 to_fw_pkey_sz(u32 size)
{
        switch (size) {
        case 128:
                return 0;
        case 256:
                return 1;
        case 512:
                return 2;
        case 1024:
                return 3;
        case 2048:
                return 4;
        case 4096:
                return 5;
        default:
                pr_warn("invalid pkey table size %d\n", size);
                return 0;
        }
}

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
                       enum mlx5_cap_mode cap_mode)
{
        u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
        void *out, *hca_caps;
        u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
        int err;

        memset(in, 0, sizeof(in));
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
        MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
        if (err)
                goto query_ex;

        err = mlx5_cmd_status_to_err_v2(out);
        if (err) {
                mlx5_core_warn(dev,
                               "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
                               cap_type, cap_mode, err);
                goto query_ex;
        }

        hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

        switch (cap_mode) {
        case HCA_CAP_OPMOD_GET_MAX:
                memcpy(dev->hca_caps_max[cap_type], hca_caps,
                       MLX5_UN_SZ_BYTES(hca_cap_union));
                break;
        case HCA_CAP_OPMOD_GET_CUR:
                memcpy(dev->hca_caps_cur[cap_type], hca_caps,
                       MLX5_UN_SZ_BYTES(hca_cap_union));
                break;
        default:
                mlx5_core_warn(dev,
                               "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
                               cap_type, cap_mode);
                err = -EINVAL;
                break;
        }
query_ex:
        kfree(out);
        return err;
}

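/*
 * Usage sketch: once mlx5_core_get_caps() above has cached both the maximum
 * and current capabilities, callers read individual fields through the
 * accessor macros instead of re-querying firmware, e.g.:
 *
 *      if (MLX5_CAP_GEN(dev, num_ports) == 1)
 *              ...
 */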
static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
{
        u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
        int err;

        memset(out, 0, sizeof(out));

        MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
        err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
        if (err)
                return err;

        err = mlx5_cmd_status_to_err_v2(out);

        return err;
}

static int handle_hca_cap(struct mlx5_core_dev *dev)
{
        void *set_ctx = NULL;
        struct mlx5_profile *prof = dev->profile;
        int err = -ENOMEM;
        int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
        void *set_hca_cap;

        set_ctx = kzalloc(set_sz, GFP_KERNEL);
        if (!set_ctx)
                goto query_ex;

        err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
        if (err)
                goto query_ex;

        err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
        if (err)
                goto query_ex;

        set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
                                   capability);
        memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
               MLX5_ST_SZ_BYTES(cmd_hca_cap));

        mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
                      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
                      128);
        /* we limit the size of the pkey table to 128 entries for now */
        MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
                 to_fw_pkey_sz(128));

        if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
                MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
                         prof->log_max_qp);

        /* disable cmdif checksum */
        MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

        MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

        err = set_caps(dev, set_ctx, set_sz);

query_ex:
        kfree(set_ctx);
        return err;
}

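/*
 * set_hca_ctrl() below reports the host's byte order to the device through
 * the HOST_ENDIANNESS access register.  Only the PF does this; virtual
 * functions return early via the mlx5_core_is_pf() check.
 */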
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
        struct mlx5_reg_host_endianness he_in;
        struct mlx5_reg_host_endianness he_out;
        int err;

        if (!mlx5_core_is_pf(dev))
                return 0;

        memset(&he_in, 0, sizeof(he_in));
        he_in.he = MLX5_SET_HOST_ENDIANNESS;
        err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
                                   &he_out, sizeof(he_out),
                                   MLX5_REG_HOST_ENDIANNESS, 0, 1);
        return err;
}

int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
        u32 out[MLX5_ST_SZ_DW(enable_hca_out)];
        u32 in[MLX5_ST_SZ_DW(enable_hca_in)];
        int err;

        memset(in, 0, sizeof(in));
        MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
        MLX5_SET(enable_hca_in, in, function_id, func_id);
        memset(out, 0, sizeof(out));

        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        if (err)
                return err;

        return mlx5_cmd_status_to_err_v2(out);
}

int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
        u32 out[MLX5_ST_SZ_DW(disable_hca_out)];
        u32 in[MLX5_ST_SZ_DW(disable_hca_in)];
        int err;

        memset(in, 0, sizeof(in));
        MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
        MLX5_SET(disable_hca_in, in, function_id, func_id);
        memset(out, 0, sizeof(out));
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        if (err)
                return err;

        return mlx5_cmd_status_to_err_v2(out);
}

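/*
 * ENABLE_HCA/DISABLE_HCA above take a function_id: this file always passes
 * 0, which addresses the function issuing the command; in the SR-IOV path
 * the PF is expected to pass the id of the VF being enabled or disabled.
 */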
static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
        struct mlx5_priv *priv = &mdev->priv;
        struct msix_entry *msix = priv->msix_arr;
        int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
        int numa_node = priv->numa_node;
        int err;

        if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
                mlx5_core_warn(mdev, "zalloc_cpumask_var failed\n");
                return -ENOMEM;
        }

        cpumask_set_cpu(cpumask_local_spread(i, numa_node),
                        priv->irq_info[i].mask);

        err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
        if (err) {
                mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x\n",
                               irq);
                goto err_clear_mask;
        }

        return 0;

err_clear_mask:
        free_cpumask_var(priv->irq_info[i].mask);
        return err;
}

static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
        struct mlx5_priv *priv = &mdev->priv;
        struct msix_entry *msix = priv->msix_arr;
        int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;

        irq_set_affinity_hint(irq, NULL);
        free_cpumask_var(priv->irq_info[i].mask);
}

static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
{
        int err;
        int i;

        for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
                err = mlx5_irq_set_affinity_hint(mdev, i);
                if (err)
                        goto err_out;
        }

        return 0;

err_out:
        for (i--; i >= 0; i--)
                mlx5_irq_clear_affinity_hint(mdev, i);

        return err;
}

static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
{
        int i;

        for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
                mlx5_irq_clear_affinity_hint(mdev, i);
}

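/*
 * Usage sketch (hypothetical caller): an upper layer resolves a completion
 * vector to its EQ before creating a CQ bound to it, e.g.:
 *
 *      int eqn, irqn;
 *      int err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
 *
 *      if (!err)
 *              ... create the CQ on eqn ...
 */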
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        struct mlx5_eq *eq, *n;
        int err = -ENOENT;

        spin_lock(&table->lock);
        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                if (eq->index == vector) {
                        *eqn = eq->eqn;
                        *irqn = eq->irqn;
                        err = 0;
                        break;
                }
        }
        spin_unlock(&table->lock);

        return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);

static void free_comp_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        struct mlx5_eq *eq, *n;

        spin_lock(&table->lock);
        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                list_del(&eq->list);
                spin_unlock(&table->lock);
                if (mlx5_destroy_unmap_eq(dev, eq))
                        mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
                                       eq->eqn);
                kfree(eq);
                spin_lock(&table->lock);
        }
        spin_unlock(&table->lock);
}

static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        char name[MLX5_MAX_IRQ_NAME];
        struct mlx5_eq *eq;
        int ncomp_vec;
        int nent;
        int err;
        int i;

        INIT_LIST_HEAD(&table->comp_eqs_list);
        ncomp_vec = table->num_comp_vectors;
        nent = MLX5_COMP_EQ_SIZE;
        for (i = 0; i < ncomp_vec; i++) {
                eq = kzalloc(sizeof(*eq), GFP_KERNEL);
                if (!eq) {
                        err = -ENOMEM;
                        goto clean;
                }

                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
                err = mlx5_create_map_eq(dev, eq,
                                         i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
                                         name, &dev->priv.uuari.uars[0]);
                if (err) {
                        kfree(eq);
                        goto clean;
                }
                mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
                eq->index = i;
                spin_lock(&table->lock);
                list_add_tail(&eq->list, &table->comp_eqs_list);
                spin_unlock(&table->lock);
        }

        return 0;

clean:
        free_comp_eqs(dev);
        return err;
}

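/*
 * ISSI (Interface Step Sequence ID) versions the command-interface layout
 * between driver and firmware.  mlx5_core_set_issi() below queries the
 * supported-ISSI mask and, when the device supports ISSI 1, switches to it;
 * devices that only speak ISSI 0 (or predate the query) are left as-is.
 */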
#ifdef CONFIG_MLX5_CORE_EN
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
        u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
        u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
        u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
        u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
        int err;
        u32 sup_issi;

        memset(query_in, 0, sizeof(query_in));
        memset(query_out, 0, sizeof(query_out));

        MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);

        err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
                                         query_out, sizeof(query_out));
        if (err) {
                if (((struct mlx5_outbox_hdr *)query_out)->status ==
                    MLX5_CMD_STAT_BAD_OP_ERR) {
                        pr_debug("Only ISSI 0 is supported\n");
                        return 0;
                }

                pr_err("failed to query ISSI\n");
                return err;
        }

        sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

        if (sup_issi & (1 << 1)) {
                memset(set_in, 0, sizeof(set_in));
                memset(set_out, 0, sizeof(set_out));

                MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
                MLX5_SET(set_issi_in, set_in, current_issi, 1);

                err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
                                                 set_out, sizeof(set_out));
                if (err) {
                        pr_err("failed to set ISSI=1\n");
                        return err;
                }

                dev->issi = 1;

                return 0;
        } else if (sup_issi & (1 << 0) || !sup_issi) {
                return 0;
        }

        return -ENOTSUPP;
}
#endif

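/*
 * BAR 0 also carries the doorbell/BlueFlame registers; map_bf_area() below
 * maps it write-combining so that BlueFlame doorbell writes can be merged
 * into efficient bursts by the CPU.
 */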
static int map_bf_area(struct mlx5_core_dev *dev)
{
        resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
        resource_size_t bf_len = pci_resource_len(dev->pdev, 0);

        dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);

        return dev->priv.bf_mapping ? 0 : -ENOMEM;
}

static void unmap_bf_area(struct mlx5_core_dev *dev)
{
        if (dev->priv.bf_mapping)
                io_mapping_free(dev->priv.bf_mapping);
}

static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
        struct mlx5_device_context *dev_ctx;
        struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

        dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
        if (!dev_ctx)
                return;

        dev_ctx->intf = intf;
        dev_ctx->context = intf->add(dev);

        if (dev_ctx->context) {
                spin_lock_irq(&priv->ctx_lock);
                list_add_tail(&dev_ctx->list, &priv->ctx_list);
                spin_unlock_irq(&priv->ctx_lock);
        } else {
                kfree(dev_ctx);
        }
}

static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
        struct mlx5_device_context *dev_ctx;
        struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

        list_for_each_entry(dev_ctx, &priv->ctx_list, list)
                if (dev_ctx->intf == intf) {
                        spin_lock_irq(&priv->ctx_lock);
                        list_del(&dev_ctx->list);
                        spin_unlock_irq(&priv->ctx_lock);

                        intf->remove(dev, dev_ctx->context);
                        kfree(dev_ctx);
                        return;
                }
}

static int mlx5_register_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_interface *intf;

        mutex_lock(&intf_mutex);
        list_add_tail(&priv->dev_list, &dev_list);
        list_for_each_entry(intf, &intf_list, list)
                mlx5_add_device(intf, priv);
        mutex_unlock(&intf_mutex);

        return 0;
}

static void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_interface *intf;

        mutex_lock(&intf_mutex);
        list_for_each_entry(intf, &intf_list, list)
                mlx5_remove_device(intf, priv);
        list_del(&priv->dev_list);
        mutex_unlock(&intf_mutex);
}

int mlx5_register_interface(struct mlx5_interface *intf)
{
        struct mlx5_priv *priv;

        if (!intf->add || !intf->remove)
                return -EINVAL;

        mutex_lock(&intf_mutex);
        list_add_tail(&intf->list, &intf_list);
        list_for_each_entry(priv, &dev_list, dev_list)
                mlx5_add_device(intf, priv);
        mutex_unlock(&intf_mutex);

        return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
        struct mlx5_priv *priv;

        mutex_lock(&intf_mutex);
        list_for_each_entry(priv, &dev_list, dev_list)
                mlx5_remove_device(intf, priv);
        list_del(&intf->list);
        mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);

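/*
 * Usage sketch for the interface API above (hypothetical consumer; the
 * callback names are illustrative, not part of this file):
 *
 *      static void *mlx5_foo_add(struct mlx5_core_dev *dev)
 *      {
 *              ... allocate and return per-device state ...
 *      }
 *
 *      static void mlx5_foo_remove(struct mlx5_core_dev *dev, void *context)
 *      {
 *              ... tear it down ...
 *      }
 *
 *      static struct mlx5_interface mlx5_foo_interface = {
 *              .add    = mlx5_foo_add,
 *              .remove = mlx5_foo_remove,
 *      };
 *
 *      err = mlx5_register_interface(&mlx5_foo_interface);
 */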
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
        struct mlx5_priv *priv = &mdev->priv;
        struct mlx5_device_context *dev_ctx;
        unsigned long flags;
        void *result = NULL;

        spin_lock_irqsave(&priv->ctx_lock, flags);

        list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
                if ((dev_ctx->intf->protocol == protocol) &&
                    dev_ctx->intf->get_dev) {
                        result = dev_ctx->intf->get_dev(dev_ctx->context);
                        break;
                }

        spin_unlock_irqrestore(&priv->ctx_lock, flags);

        return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);

static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
        struct pci_dev *pdev = dev->pdev;
        int err = 0;

        pci_set_drvdata(dev->pdev, dev);
        strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
        priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

        mutex_init(&priv->pgdir_mutex);
        INIT_LIST_HEAD(&priv->pgdir_list);
        spin_lock_init(&priv->mkey_lock);

        mutex_init(&priv->alloc_mutex);

        priv->numa_node = dev_to_node(&dev->pdev->dev);

        priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
        if (!priv->dbg_root)
                return -ENOMEM;

        err = mlx5_pci_enable_device(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
                goto err_dbg;
        }

        err = request_bar(pdev);
        if (err) {
                dev_err(&pdev->dev, "error requesting BARs, aborting\n");
                goto err_disable;
        }

        pci_set_master(pdev);

        err = set_dma_caps(pdev);
        if (err) {
                dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
                goto err_clr_master;
        }

        dev->iseg_base = pci_resource_start(dev->pdev, 0);
        dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
        if (!dev->iseg) {
                err = -ENOMEM;
                dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
                goto err_clr_master;
        }

        return 0;

err_clr_master:
        pci_clear_master(dev->pdev);
        release_bar(dev->pdev);
err_disable:
        mlx5_pci_disable_device(dev);

err_dbg:
        debugfs_remove(priv->dbg_root);
        return err;
}

static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
        iounmap(dev->iseg);
        pci_clear_master(dev->pdev);
        release_bar(dev->pdev);
        mlx5_pci_disable_device(dev);
        debugfs_remove(priv->dbg_root);
}

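/*
 * mlx5_load_one() brings the device up in the order firmware requires:
 * command interface, firmware-init wait, ENABLE_HCA, ISSI negotiation,
 * boot/init pages, HCA control and capabilities, INIT_HCA, health polling,
 * MSI-X and EQs, UARs, resource tables, then the optional E-Switch, SR-IOV
 * and finally interface registration.  The error unwind at the bottom
 * mirrors this order in reverse.
 */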
#define MLX5_IB_MOD "mlx5_ib"
static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
        struct pci_dev *pdev = dev->pdev;
        int err;

        mutex_lock(&dev->intf_state_mutex);
        if (dev->interface_state == MLX5_INTERFACE_STATE_UP) {
                dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
                         __func__);
                goto out;
        }

        dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
                 fw_rev_min(dev), fw_rev_sub(dev));

        /* on load, remove any previous indication of internal error;
         * device is up
         */
        dev->state = MLX5_DEVICE_STATE_UP;

        err = mlx5_cmd_init(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
                goto out_err;
        }

        err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILLI);
        if (err) {
                dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
                        FW_INIT_TIMEOUT_MILLI);
                goto out_err;
        }

        mlx5_pagealloc_init(dev);

        err = mlx5_core_enable_hca(dev, 0);
        if (err) {
                dev_err(&pdev->dev, "enable hca failed\n");
                goto err_pagealloc_cleanup;
        }

#ifdef CONFIG_MLX5_CORE_EN
        err = mlx5_core_set_issi(dev);
        if (err) {
                dev_err(&pdev->dev, "failed to set issi\n");
                goto err_disable_hca;
        }
#endif

        err = mlx5_satisfy_startup_pages(dev, 1);
        if (err) {
                dev_err(&pdev->dev, "failed to allocate boot pages\n");
                goto err_disable_hca;
        }

        err = set_hca_ctrl(dev);
        if (err) {
                dev_err(&pdev->dev, "set_hca_ctrl failed\n");
                goto reclaim_boot_pages;
        }

        err = handle_hca_cap(dev);
        if (err) {
                dev_err(&pdev->dev, "handle_hca_cap failed\n");
                goto reclaim_boot_pages;
        }

        err = mlx5_satisfy_startup_pages(dev, 0);
        if (err) {
                dev_err(&pdev->dev, "failed to allocate init pages\n");
                goto reclaim_boot_pages;
        }

        err = mlx5_pagealloc_start(dev);
        if (err) {
                dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
                goto reclaim_boot_pages;
        }

        err = mlx5_cmd_init_hca(dev);
        if (err) {
                dev_err(&pdev->dev, "init hca failed\n");
                goto err_pagealloc_stop;
        }

        mlx5_start_health_poll(dev);

        err = mlx5_query_hca_caps(dev);
        if (err) {
                dev_err(&pdev->dev, "query hca failed\n");
                goto err_stop_poll;
        }

        err = mlx5_query_board_id(dev);
        if (err) {
                dev_err(&pdev->dev, "query board id failed\n");
                goto err_stop_poll;
        }

        err = mlx5_enable_msix(dev);
        if (err) {
                dev_err(&pdev->dev, "enable msix failed\n");
                goto err_stop_poll;
        }

        err = mlx5_eq_init(dev);
        if (err) {
                dev_err(&pdev->dev, "failed to initialize eq\n");
                goto disable_msix;
        }

        err = mlx5_alloc_uuars(dev, &priv->uuari);
        if (err) {
                dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
                goto err_eq_cleanup;
        }

        err = mlx5_start_eqs(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
                goto err_free_uar;
        }

        err = alloc_comp_eqs(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
                goto err_stop_eqs;
        }

        if (map_bf_area(dev))
                dev_err(&pdev->dev, "Failed to map blue flame area\n");

        err = mlx5_irq_set_affinity_hints(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
                goto err_unmap_bf_area;
        }

        MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

        mlx5_init_cq_table(dev);
        mlx5_init_qp_table(dev);
        mlx5_init_srq_table(dev);
        mlx5_init_mr_table(dev);

#ifdef CONFIG_MLX5_CORE_EN
        err = mlx5_eswitch_init(dev);
        if (err) {
                dev_err(&pdev->dev, "eswitch init failed %d\n", err);
                goto err_reg_dev;
        }
#endif

        err = mlx5_sriov_init(dev);
        if (err) {
                dev_err(&pdev->dev, "sriov init failed %d\n", err);
                goto err_sriov;
        }

        err = mlx5_register_device(dev);
        if (err) {
                dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
                goto err_reg_dev;
        }

        err = request_module_nowait(MLX5_IB_MOD);
        if (err)
                pr_info("failed request module on %s\n", MLX5_IB_MOD);

        dev->interface_state = MLX5_INTERFACE_STATE_UP;
out:
        mutex_unlock(&dev->intf_state_mutex);

        return 0;

err_sriov:
        if (mlx5_sriov_cleanup(dev))
                dev_err(&dev->pdev->dev, "sriov cleanup failed\n");

#ifdef CONFIG_MLX5_CORE_EN
        mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif
err_reg_dev:
        mlx5_cleanup_mr_table(dev);
        mlx5_cleanup_srq_table(dev);
        mlx5_cleanup_qp_table(dev);
        mlx5_cleanup_cq_table(dev);
        mlx5_irq_clear_affinity_hints(dev);

err_unmap_bf_area:
        unmap_bf_area(dev);

        free_comp_eqs(dev);

err_stop_eqs:
        mlx5_stop_eqs(dev);

err_free_uar:
        mlx5_free_uuars(dev, &priv->uuari);

err_eq_cleanup:
        mlx5_eq_cleanup(dev);

disable_msix:
        mlx5_disable_msix(dev);

err_stop_poll:
        mlx5_stop_health_poll(dev);
        if (mlx5_cmd_teardown_hca(dev)) {
                dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
                goto out_err;
        }

err_pagealloc_stop:
        mlx5_pagealloc_stop(dev);

reclaim_boot_pages:
        mlx5_reclaim_startup_pages(dev);

err_disable_hca:
        mlx5_core_disable_hca(dev, 0);

err_pagealloc_cleanup:
        mlx5_pagealloc_cleanup(dev);
        mlx5_cmd_cleanup(dev);

out_err:
        dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
        mutex_unlock(&dev->intf_state_mutex);

        return err;
}

static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
        int err = 0;

        err = mlx5_sriov_cleanup(dev);
        if (err) {
                dev_warn(&dev->pdev->dev, "%s: sriov cleanup failed - abort\n",
                         __func__);
                return err;
        }

        mutex_lock(&dev->intf_state_mutex);
        if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) {
                dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
                         __func__);
                goto out;
        }
        mlx5_unregister_device(dev);
#ifdef CONFIG_MLX5_CORE_EN
        mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif

        mlx5_cleanup_mr_table(dev);
        mlx5_cleanup_srq_table(dev);
        mlx5_cleanup_qp_table(dev);
        mlx5_cleanup_cq_table(dev);
        mlx5_irq_clear_affinity_hints(dev);
        unmap_bf_area(dev);
        free_comp_eqs(dev);
        mlx5_stop_eqs(dev);
        mlx5_free_uuars(dev, &priv->uuari);
        mlx5_eq_cleanup(dev);
        mlx5_disable_msix(dev);
        mlx5_stop_health_poll(dev);
        err = mlx5_cmd_teardown_hca(dev);
        if (err) {
                dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
                goto out;
        }
        mlx5_pagealloc_stop(dev);
        mlx5_reclaim_startup_pages(dev);
        mlx5_core_disable_hca(dev, 0);
        mlx5_pagealloc_cleanup(dev);
        mlx5_cmd_cleanup(dev);

out:
        dev->interface_state = MLX5_INTERFACE_STATE_DOWN;
        mutex_unlock(&dev->intf_state_mutex);
        return err;
}

void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
                     unsigned long param)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_device_context *dev_ctx;
        unsigned long flags;

        spin_lock_irqsave(&priv->ctx_lock, flags);

        list_for_each_entry(dev_ctx, &priv->ctx_list, list)
                if (dev_ctx->intf->event)
                        dev_ctx->intf->event(dev, dev_ctx->context, event, param);

        spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

struct mlx5_core_event_handler {
        void (*event)(struct mlx5_core_dev *dev,
                      enum mlx5_dev_event event,
                      void *data);
};

static int init_one(struct pci_dev *pdev,
                    const struct pci_device_id *id)
{
        struct mlx5_core_dev *dev;
        struct mlx5_priv *priv;
        int err;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev) {
                dev_err(&pdev->dev, "kzalloc failed\n");
                return -ENOMEM;
        }
        priv = &dev->priv;
        priv->pci_dev_data = id->driver_data;

        pci_set_drvdata(pdev, dev);

        if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
                pr_warn("selected profile out of range, selecting default (%d)\n",
                        MLX5_DEFAULT_PROF);
                prof_sel = MLX5_DEFAULT_PROF;
        }
        dev->profile = &profile[prof_sel];
        dev->pdev = pdev;
        dev->event = mlx5_core_event;

        INIT_LIST_HEAD(&priv->ctx_list);
        spin_lock_init(&priv->ctx_lock);
        mutex_init(&dev->pci_status_mutex);
        mutex_init(&dev->intf_state_mutex);
        err = mlx5_pci_init(dev, priv);
        if (err) {
                dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
                goto clean_dev;
        }

        err = mlx5_health_init(dev);
        if (err) {
                dev_err(&pdev->dev, "mlx5_health_init failed with error code %d\n", err);
                goto close_pci;
        }

        err = mlx5_load_one(dev, priv);
        if (err) {
                dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
                goto clean_health;
        }

        return 0;

clean_health:
        mlx5_health_cleanup(dev);
close_pci:
        mlx5_pci_close(dev, priv);
clean_dev:
        pci_set_drvdata(pdev, NULL);
        kfree(dev);

        return err;
}

static void remove_one(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        struct mlx5_priv *priv = &dev->priv;

        if (mlx5_unload_one(dev, priv)) {
                dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
                mlx5_health_cleanup(dev);
                return;
        }
        mlx5_health_cleanup(dev);
        mlx5_pci_close(dev, priv);
        pci_set_drvdata(pdev, NULL);
        kfree(dev);
}

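/*
 * PCI error handling (AER) flow: the PCI core first calls
 * mlx5_pci_err_detected(), which tears the device down and reports whether
 * a link reset may help; mlx5_pci_slot_reset() then re-enables the device
 * after the reset, and mlx5_pci_resume() waits for vital signs before
 * reloading the driver state.
 */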
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        struct mlx5_priv *priv = &dev->priv;

        dev_info(&pdev->dev, "%s was called\n", __func__);
        mlx5_enter_error_state(dev);
        mlx5_unload_one(dev, priv);
        mlx5_pci_disable_device(dev);
        return state == pci_channel_io_perm_failure ?
                PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        int err = 0;

        dev_info(&pdev->dev, "%s was called\n", __func__);

        err = mlx5_pci_enable_device(dev);
        if (err) {
                dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
                        __func__, err);
                return PCI_ERS_RESULT_DISCONNECT;
        }
        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

void mlx5_disable_device(struct mlx5_core_dev *dev)
{
        mlx5_pci_err_detected(dev->pdev, 0);
}

/* wait for the device to show vital signs. For now we check
 * that we can read the device ID and that the health buffer
 * shows a non-zero value different from 0xffffffff
 */
static void wait_vital(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        struct mlx5_core_health *health = &dev->priv.health;
        const int niter = 100;
        u32 count;
        u16 did;
        int i;

        /* Wait for firmware to be ready after reset */
        msleep(1000);
        for (i = 0; i < niter; i++) {
                if (pci_read_config_word(pdev, 2, &did)) {
                        dev_warn(&pdev->dev, "failed reading config word\n");
                        break;
                }
                if (did == pdev->device) {
                        dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
                        break;
                }
                msleep(50);
        }
        if (i == niter)
                dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);

        for (i = 0; i < niter; i++) {
                count = ioread32be(health->health_counter);
                if (count && count != 0xffffffff) {
                        dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
                        break;
                }
                msleep(50);
        }

        if (i == niter)
                dev_warn(&pdev->dev, "%s-%d: could not read device health counter\n", __func__, __LINE__);
}

static void mlx5_pci_resume(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        struct mlx5_priv *priv = &dev->priv;
        int err;

        dev_info(&pdev->dev, "%s was called\n", __func__);

        pci_save_state(pdev);
        wait_vital(pdev);

        err = mlx5_load_one(dev, priv);
        if (err)
                dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n",
                        __func__, err);
        else
                dev_info(&pdev->dev, "%s: device recovered\n", __func__);
}

static const struct pci_error_handlers mlx5_err_handler = {
        .error_detected = mlx5_pci_err_detected,
        .slot_reset     = mlx5_pci_slot_reset,
        .resume         = mlx5_pci_resume
};

static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, 0x1011) },                      /* Connect-IB */
        { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF },  /* Connect-IB VF */
        { PCI_VDEVICE(MELLANOX, 0x1013) },                      /* ConnectX-4 */
        { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF },  /* ConnectX-4 VF */
        { PCI_VDEVICE(MELLANOX, 0x1015) },                      /* ConnectX-4LX */
        { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF },  /* ConnectX-4LX VF */
        { 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);

static struct pci_driver mlx5_core_driver = {
        .name            = DRIVER_NAME,
        .id_table        = mlx5_core_pci_table,
        .probe           = init_one,
        .remove          = remove_one,
        .err_handler     = &mlx5_err_handler,
        .sriov_configure = mlx5_core_sriov_configure,
};

static int __init init(void)
{
        int err;

        mlx5_register_debugfs();

        err = pci_register_driver(&mlx5_core_driver);
        if (err)
                goto err_debug;

#ifdef CONFIG_MLX5_CORE_EN
        mlx5e_init();
#endif

        return 0;

err_debug:
        mlx5_unregister_debugfs();
        return err;
}

static void __exit cleanup(void)
{
#ifdef CONFIG_MLX5_CORE_EN
        mlx5e_cleanup();
#endif
        pci_unregister_driver(&mlx5_core_driver);
        mlx5_unregister_debugfs();
}

module_init(init);
module_exit(cleanup);