/*
 * mlx4: generalization of multicast steering.
 * linux-2.6-block.git: drivers/net/mlx4/main.c
 */
/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static char mlx4_version[] __devinitdata =
        DRV_NAME ": Mellanox ConnectX core driver v"
        DRV_VERSION " (" DRV_RELDATE ")\n";

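/*
 * Default resource profile: each field is a power-of-two count of HCA
 * context objects (e.g. .num_qp = 1 << 17 gives 128K QPs, .num_mtt =
 * 1 << 20 gives 1M MTT entries).  mlx4_make_profile() uses these
 * counts to size the ICM layout passed to INIT_HCA.
 */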
static struct mlx4_profile default_profile = {
        .num_qp         = 1 << 17,
        .num_srq        = 1 << 16,
        .rdmarc_per_qp  = 1 << 4,
        .num_cq         = 1 << 16,
        .num_mcg        = 1 << 13,
        .num_mpt        = 1 << 17,
        .num_mtt        = 1 << 20,
};

static int log_num_mac = 2;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");

static int use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
                 "(0/1, default 0)");

static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

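/*
 * MTTs are handed out in segments of 2^log_mtts_per_seg entries.  As a
 * worked example (assuming the usual MLX4_MTT_ENTRY_PER_SEG of 8, so the
 * default here is ilog2(8) = 3): one segment then covers 8 MTT entries,
 * i.e. 8 pages = 32KB of registered memory with 4KB pages.  Bigger
 * segments mean fewer allocator objects but coarser MTT granularity.
 */
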
int mlx4_check_port_params(struct mlx4_dev *dev,
                           enum mlx4_port_type *port_type)
{
        int i;

        for (i = 0; i < dev->caps.num_ports - 1; i++) {
                if (port_type[i] != port_type[i + 1]) {
                        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
                                mlx4_err(dev, "Only same port types supported "
                                         "on this HCA, aborting.\n");
                                return -EINVAL;
                        }
                        if (port_type[i] == MLX4_PORT_TYPE_ETH &&
                            port_type[i + 1] == MLX4_PORT_TYPE_IB)
                                return -EINVAL;
                }
        }

        for (i = 0; i < dev->caps.num_ports; i++) {
                if (!(port_type[i] & dev->caps.supported_type[i+1])) {
                        mlx4_err(dev, "Requested port type for port %d is not "
                                 "supported on this HCA\n", i + 1);
                        return -EINVAL;
                }
        }
        return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
        int i;

        dev->caps.port_mask = 0;
        for (i = 1; i <= dev->caps.num_ports; ++i)
                if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
                        dev->caps.port_mask |= 1 << (i - 1);
}
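
/*
 * port_mask semantics: bit (i - 1) is set iff port i is configured as
 * InfiniBand, so e.g. a two-port HCA with port 1 IB and port 2 Ethernet
 * ends up with port_mask == 0x1.
 */
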
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
        int err;
        int i;

        err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
        if (err) {
                mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
                return err;
        }

        if (dev_cap->min_page_sz > PAGE_SIZE) {
                mlx4_err(dev, "HCA minimum page size of %d bigger than "
                         "kernel PAGE_SIZE of %ld, aborting.\n",
                         dev_cap->min_page_sz, PAGE_SIZE);
                return -ENODEV;
        }
        if (dev_cap->num_ports > MLX4_MAX_PORTS) {
                mlx4_err(dev, "HCA has %d ports, but we only support %d, "
                         "aborting.\n",
                         dev_cap->num_ports, MLX4_MAX_PORTS);
                return -ENODEV;
        }

        if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
                mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
                         "PCI resource 2 size of 0x%llx, aborting.\n",
                         dev_cap->uar_size,
                         (unsigned long long) pci_resource_len(dev->pdev, 2));
                return -ENODEV;
        }

        dev->caps.num_ports = dev_cap->num_ports;
        for (i = 1; i <= dev->caps.num_ports; ++i) {
                dev->caps.vl_cap[i] = dev_cap->max_vl[i];
                dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
                dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
                dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
                dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
                dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
                dev->caps.def_mac[i] = dev_cap->def_mac[i];
                dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
                dev->caps.trans_type[i] = dev_cap->trans_type[i];
                dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
                dev->caps.wavelength[i] = dev_cap->wavelength[i];
                dev->caps.trans_code[i] = dev_cap->trans_code[i];
        }

        dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
        dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
        dev->caps.bf_reg_size = dev_cap->bf_reg_size;
        dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
        dev->caps.max_sq_sg = dev_cap->max_sq_sg;
        dev->caps.max_rq_sg = dev_cap->max_rq_sg;
        dev->caps.max_wqes = dev_cap->max_qp_sz;
        dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
        dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
        dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
        dev->caps.reserved_srqs = dev_cap->reserved_srqs;
        dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
        dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
        dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM;
        /*
         * Subtract 1 from the limit because we need to allocate a
         * spare CQE so the HCA HW can tell the difference between an
         * empty CQ and a full CQ.
         */
        dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
        dev->caps.reserved_cqs = dev_cap->reserved_cqs;
        dev->caps.reserved_eqs = dev_cap->reserved_eqs;
        dev->caps.mtts_per_seg = 1 << log_mtts_per_seg;
        dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
                                               dev->caps.mtts_per_seg);
        dev->caps.reserved_mrws = dev_cap->reserved_mrws;
        dev->caps.reserved_uars = dev_cap->reserved_uars;
        dev->caps.reserved_pds = dev_cap->reserved_pds;
        dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
        dev->caps.max_msg_sz = dev_cap->max_msg_sz;
        dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
        dev->caps.flags = dev_cap->flags;
        dev->caps.bmme_flags = dev_cap->bmme_flags;
        dev->caps.reserved_lkey = dev_cap->reserved_lkey;
        dev->caps.stat_rate_support = dev_cap->stat_rate_support;
        dev->caps.udp_rss = dev_cap->udp_rss;
        dev->caps.loopback_support = dev_cap->loopback_support;
        dev->caps.vep_uc_steering = dev_cap->vep_uc_steering;
        dev->caps.vep_mc_steering = dev_cap->vep_mc_steering;
        dev->caps.wol = dev_cap->wol;
        dev->caps.max_gso_sz = dev_cap->max_gso_sz;

        dev->caps.log_num_macs = log_num_mac;
        dev->caps.log_num_vlans = log_num_vlan;
        dev->caps.log_num_prios = use_prio ? 3 : 0;

        for (i = 1; i <= dev->caps.num_ports; ++i) {
                if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
                        dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
                else
                        dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
                dev->caps.possible_type[i] = dev->caps.port_type[i];
                mlx4_priv(dev)->sense.sense_allowed[i] =
                        dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;

                if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
                        dev->caps.log_num_macs = dev_cap->log_max_macs[i];
                        mlx4_warn(dev, "Requested number of MACs is too high "
                                  "for port %d, reducing to %d.\n",
                                  i, 1 << dev->caps.log_num_macs);
                }
                if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
                        dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
                        mlx4_warn(dev, "Requested number of VLANs is too high "
                                  "for port %d, reducing to %d.\n",
                                  i, 1 << dev->caps.log_num_vlans);
                }
        }

        mlx4_set_port_mask(dev);

        dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
        dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
                (1 << dev->caps.log_num_macs) *
                (1 << dev->caps.log_num_vlans) *
                (1 << dev->caps.log_num_prios) *
                dev->caps.num_ports;
        dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

        dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

        return 0;
}
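
/*
 * Worked example of the reserved-QP accounting above, using the module
 * defaults (log_num_mac = 2, log_num_vlan = 0, use_prio = 0) on a
 * two-port HCA: the ETH_ADDR and FC_ADDR regions each reserve
 * (1 << 2) * (1 << 0) * (1 << 0) * 2 = 8 QPs, on top of the firmware
 * reservation and MLX4_NUM_FEXCH for the FC_EXCH region.
 */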

/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
                           enum mlx4_port_type *port_types)
{
        int err = 0;
        int change = 0;
        int port;

        for (port = 0; port < dev->caps.num_ports; port++) {
                /* Change the port type only if the new type is different
                 * from the current, and not set to Auto */
                if (port_types[port] != dev->caps.port_type[port + 1]) {
                        change = 1;
                        dev->caps.port_type[port + 1] = port_types[port];
                }
        }
        if (change) {
                mlx4_unregister_device(dev);
                for (port = 1; port <= dev->caps.num_ports; port++) {
                        mlx4_CLOSE_PORT(dev, port);
                        err = mlx4_SET_PORT(dev, port);
                        if (err) {
                                mlx4_err(dev, "Failed to set port %d, "
                                         "aborting\n", port);
                                goto out;
                        }
                }
                mlx4_set_port_mask(dev);
                err = mlx4_register_device(dev);
        }

out:
        return err;
}

static ssize_t show_port_type(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
{
        struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
                                                   port_attr);
        struct mlx4_dev *mdev = info->dev;
        char type[8];

        sprintf(type, "%s",
                (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
                "ib" : "eth");
        if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
                sprintf(buf, "auto (%s)\n", type);
        else
                sprintf(buf, "%s\n", type);

        return strlen(buf);
}

static ssize_t set_port_type(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
{
        struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
                                                   port_attr);
        struct mlx4_dev *mdev = info->dev;
        struct mlx4_priv *priv = mlx4_priv(mdev);
        enum mlx4_port_type types[MLX4_MAX_PORTS];
        enum mlx4_port_type new_types[MLX4_MAX_PORTS];
        int i;
        int err = 0;

        if (!strcmp(buf, "ib\n"))
                info->tmp_type = MLX4_PORT_TYPE_IB;
        else if (!strcmp(buf, "eth\n"))
                info->tmp_type = MLX4_PORT_TYPE_ETH;
        else if (!strcmp(buf, "auto\n"))
                info->tmp_type = MLX4_PORT_TYPE_AUTO;
        else {
                mlx4_err(mdev, "%s is not a supported port type\n", buf);
                return -EINVAL;
        }

        mlx4_stop_sense(mdev);
        mutex_lock(&priv->port_mutex);
        /* Possible type is always the one that was delivered */
        mdev->caps.possible_type[info->port] = info->tmp_type;

        for (i = 0; i < mdev->caps.num_ports; i++) {
                types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
                                        mdev->caps.possible_type[i+1];
                if (types[i] == MLX4_PORT_TYPE_AUTO)
                        types[i] = mdev->caps.port_type[i+1];
        }

        if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
                for (i = 1; i <= mdev->caps.num_ports; i++) {
                        if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
                                mdev->caps.possible_type[i] = mdev->caps.port_type[i];
                                err = -EINVAL;
                        }
                }
        }
        if (err) {
                mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
                               "Set only 'eth' or 'ib' for both ports "
                               "(should be the same)\n");
                goto out;
        }

        mlx4_do_sense_ports(mdev, new_types, types);

        err = mlx4_check_port_params(mdev, new_types);
        if (err)
                goto out;

        /* We are about to apply the changes after the configuration
         * was verified, no need to remember the temporary types
         * any more */
        for (i = 0; i < mdev->caps.num_ports; i++)
                priv->port[i + 1].tmp_type = 0;

        err = mlx4_change_port_types(mdev, new_types);

out:
        mlx4_start_sense(mdev);
        mutex_unlock(&priv->port_mutex);
        return err ? err : count;
}
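
/*
 * The show/store handlers above back the per-port "mlx4_port%d" sysfs
 * attribute created in mlx4_init_port_info() below.  A typical use,
 * with a hypothetical PCI address, would be:
 *
 *   cat /sys/bus/pci/devices/0000:07:00.0/mlx4_port1
 *   echo eth > /sys/bus/pci/devices/0000:07:00.0/mlx4_port1
 *
 * Only "ib", "eth" and "auto" are accepted; anything else fails with
 * -EINVAL.
 */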

static int mlx4_load_fw(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;

        priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
                                         GFP_HIGHUSER | __GFP_NOWARN, 0);
        if (!priv->fw.fw_icm) {
                mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
                return -ENOMEM;
        }

        err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
        if (err) {
                mlx4_err(dev, "MAP_FA command failed, aborting.\n");
                goto err_free;
        }

        err = mlx4_RUN_FW(dev);
        if (err) {
                mlx4_err(dev, "RUN_FW command failed, aborting.\n");
                goto err_unmap_fa;
        }

        return 0;

err_unmap_fa:
        mlx4_UNMAP_FA(dev);

err_free:
        mlx4_free_icm(dev, priv->fw.fw_icm, 0);
        return err;
}

static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
                                int cmpt_entry_sz)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;

        err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
                                  cmpt_base +
                                  ((u64) (MLX4_CMPT_TYPE_QP *
                                          cmpt_entry_sz) << MLX4_CMPT_SHIFT),
                                  cmpt_entry_sz, dev->caps.num_qps,
                                  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
                                  0, 0);
        if (err)
                goto err;

        err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
                                  cmpt_base +
                                  ((u64) (MLX4_CMPT_TYPE_SRQ *
                                          cmpt_entry_sz) << MLX4_CMPT_SHIFT),
                                  cmpt_entry_sz, dev->caps.num_srqs,
                                  dev->caps.reserved_srqs, 0, 0);
        if (err)
                goto err_qp;

        err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
                                  cmpt_base +
                                  ((u64) (MLX4_CMPT_TYPE_CQ *
                                          cmpt_entry_sz) << MLX4_CMPT_SHIFT),
                                  cmpt_entry_sz, dev->caps.num_cqs,
                                  dev->caps.reserved_cqs, 0, 0);
        if (err)
                goto err_srq;

        err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
                                  cmpt_base +
                                  ((u64) (MLX4_CMPT_TYPE_EQ *
                                          cmpt_entry_sz) << MLX4_CMPT_SHIFT),
                                  cmpt_entry_sz,
                                  dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
        if (err)
                goto err_cq;

        return 0;

err_cq:
        mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
        mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
        mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
        return err;
}
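
/*
 * Sketch of the cMPT layout set up above: the region holds four equal
 * sub-regions, one per object type, in QP, SRQ, CQ, EQ order, with type
 * t starting at cmpt_base + ((u64) (t * cmpt_entry_sz) << MLX4_CMPT_SHIFT)
 * (MLX4_CMPT_SHIFT comes from mlx4.h).
 */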

static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                         struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        u64 aux_pages;
        int err;

        err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
        if (err) {
                mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
                return err;
        }

        mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
                 (unsigned long long) icm_size >> 10,
                 (unsigned long long) aux_pages << 2);

        priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
                                          GFP_HIGHUSER | __GFP_NOWARN, 0);
        if (!priv->fw.aux_icm) {
                mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
                return -ENOMEM;
        }

        err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
        if (err) {
                mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
                goto err_free_aux;
        }

        err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
        if (err) {
                mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
                goto err_unmap_aux;
        }

        err = mlx4_init_icm_table(dev, &priv->eq_table.table,
                                  init_hca->eqc_base, dev_cap->eqc_entry_sz,
                                  dev->caps.num_eqs, dev->caps.num_eqs,
                                  0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
                goto err_unmap_cmpt;
        }

        /*
         * Reserved MTT entries must be aligned up to a cacheline
         * boundary, since the FW will write to them, while the driver
         * writes to all other MTT entries. (The variable
         * dev->caps.mtt_entry_sz below is really the MTT segment
         * size, not the raw entry size)
         */
        dev->caps.reserved_mtts =
                ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
                      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

        err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
                                  init_hca->mtt_base,
                                  dev->caps.mtt_entry_sz,
                                  dev->caps.num_mtt_segs,
                                  dev->caps.reserved_mtts, 1, 0);
        if (err) {
                mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
                goto err_unmap_eq;
        }

        err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
                                  init_hca->dmpt_base,
                                  dev_cap->dmpt_entry_sz,
                                  dev->caps.num_mpts,
                                  dev->caps.reserved_mrws, 1, 1);
        if (err) {
                mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
                goto err_unmap_mtt;
        }

        err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
                                  init_hca->qpc_base,
                                  dev_cap->qpc_entry_sz,
                                  dev->caps.num_qps,
                                  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
                                  0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
                goto err_unmap_dmpt;
        }

        err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
                                  init_hca->auxc_base,
                                  dev_cap->aux_entry_sz,
                                  dev->caps.num_qps,
                                  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
                                  0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
                goto err_unmap_qp;
        }

        err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
                                  init_hca->altc_base,
                                  dev_cap->altc_entry_sz,
                                  dev->caps.num_qps,
                                  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
                                  0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
                goto err_unmap_auxc;
        }

        err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
                                  init_hca->rdmarc_base,
                                  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
                                  dev->caps.num_qps,
                                  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
                                  0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
                goto err_unmap_altc;
        }

        err = mlx4_init_icm_table(dev, &priv->cq_table.table,
                                  init_hca->cqc_base,
                                  dev_cap->cqc_entry_sz,
                                  dev->caps.num_cqs,
                                  dev->caps.reserved_cqs, 0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
                goto err_unmap_rdmarc;
        }

        err = mlx4_init_icm_table(dev, &priv->srq_table.table,
                                  init_hca->srqc_base,
                                  dev_cap->srq_entry_sz,
                                  dev->caps.num_srqs,
                                  dev->caps.reserved_srqs, 0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
                goto err_unmap_cq;
        }

        /*
         * It's not strictly required, but for simplicity just map the
         * whole multicast group table now. The table isn't very big
         * and it's a lot easier than trying to track ref counts.
         */
        err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
                                  init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
                                  dev->caps.num_mgms + dev->caps.num_amgms,
                                  dev->caps.num_mgms + dev->caps.num_amgms,
                                  0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
                goto err_unmap_srq;
        }

        return 0;

err_unmap_srq:
        mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
        mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
        mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
        mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
        mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
        mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
        mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
        mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
        mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
        mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
        mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
        mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
        mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
        mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
        mlx4_free_icm(dev, priv->fw.aux_icm, 0);

        return err;
}

static void mlx4_free_icms(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
        mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
        mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
        mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
        mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
        mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
        mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
        mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
        mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
        mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
        mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
        mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
        mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
        mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

        mlx4_UNMAP_ICM_AUX(dev);
        mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}
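
/*
 * Teardown here mirrors mlx4_init_icm() exactly: tables are cleaned up
 * in reverse order of creation, with the ICM aux area unmapped and
 * freed last.
 */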

static void mlx4_close_hca(struct mlx4_dev *dev)
{
        mlx4_CLOSE_HCA(dev, 0);
        mlx4_free_icms(dev);
        mlx4_UNMAP_FA(dev);
        mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
}

static int mlx4_init_hca(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_adapter adapter;
        struct mlx4_dev_cap dev_cap;
        struct mlx4_mod_stat_cfg mlx4_cfg;
        struct mlx4_profile profile;
        struct mlx4_init_hca_param init_hca;
        u64 icm_size;
        int err;

        err = mlx4_QUERY_FW(dev);
        if (err) {
                if (err == -EACCES)
                        mlx4_info(dev, "non-primary physical function, skipping.\n");
                else
                        mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
                return err;
        }

        err = mlx4_load_fw(dev);
        if (err) {
                mlx4_err(dev, "Failed to start FW, aborting.\n");
                return err;
        }

        mlx4_cfg.log_pg_sz_m = 1;
        mlx4_cfg.log_pg_sz = 0;
        err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
        if (err)
                mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");

        err = mlx4_dev_cap(dev, &dev_cap);
        if (err) {
                mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
                goto err_stop_fw;
        }

        profile = default_profile;

        icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
        if ((long long) icm_size < 0) {
                err = icm_size;
                goto err_stop_fw;
        }

        init_hca.log_uar_sz = ilog2(dev->caps.num_uars);

        err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
        if (err)
                goto err_stop_fw;

        err = mlx4_INIT_HCA(dev, &init_hca);
        if (err) {
                mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
                goto err_free_icm;
        }

        err = mlx4_QUERY_ADAPTER(dev, &adapter);
        if (err) {
                mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
                goto err_close;
        }

        priv->eq_table.inta_pin = adapter.inta_pin;
        memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

        return 0;

err_close:
        mlx4_CLOSE_HCA(dev, 0);

err_free_icm:
        mlx4_free_icms(dev);

err_stop_fw:
        mlx4_UNMAP_FA(dev);
        mlx4_free_icm(dev, priv->fw.fw_icm, 0);

        return err;
}

static int mlx4_setup_hca(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;
        int port;
        __be32 ib_port_default_caps;

        err = mlx4_init_uar_table(dev);
        if (err) {
                mlx4_err(dev, "Failed to initialize "
                         "user access region table, aborting.\n");
                return err;
        }

        err = mlx4_uar_alloc(dev, &priv->driver_uar);
        if (err) {
                mlx4_err(dev, "Failed to allocate driver access region, "
                         "aborting.\n");
                goto err_uar_table_free;
        }

        priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
        if (!priv->kar) {
                mlx4_err(dev, "Couldn't map kernel access region, "
                         "aborting.\n");
                err = -ENOMEM;
                goto err_uar_free;
        }

        err = mlx4_init_pd_table(dev);
        if (err) {
                mlx4_err(dev, "Failed to initialize "
                         "protection domain table, aborting.\n");
                goto err_kar_unmap;
        }

        err = mlx4_init_mr_table(dev);
        if (err) {
                mlx4_err(dev, "Failed to initialize "
                         "memory region table, aborting.\n");
                goto err_pd_table_free;
        }

        err = mlx4_init_eq_table(dev);
        if (err) {
                mlx4_err(dev, "Failed to initialize "
                         "event queue table, aborting.\n");
                goto err_mr_table_free;
        }

        err = mlx4_cmd_use_events(dev);
        if (err) {
                mlx4_err(dev, "Failed to switch to event-driven "
                         "firmware commands, aborting.\n");
                goto err_eq_table_free;
        }

        err = mlx4_NOP(dev);
        if (err) {
                if (dev->flags & MLX4_FLAG_MSI_X) {
                        mlx4_warn(dev, "NOP command failed to generate MSI-X "
                                  "interrupt (IRQ %d).\n",
                                  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
                        mlx4_warn(dev, "Trying again without MSI-X.\n");
                } else {
                        mlx4_err(dev, "NOP command failed to generate interrupt "
                                 "(IRQ %d), aborting.\n",
                                 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
                        mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
                }

                goto err_cmd_poll;
        }

        mlx4_dbg(dev, "NOP command IRQ test passed\n");

        err = mlx4_init_cq_table(dev);
        if (err) {
                mlx4_err(dev, "Failed to initialize "
                         "completion queue table, aborting.\n");
                goto err_cmd_poll;
        }

        err = mlx4_init_srq_table(dev);
        if (err) {
                mlx4_err(dev, "Failed to initialize "
                         "shared receive queue table, aborting.\n");
                goto err_cq_table_free;
        }

        err = mlx4_init_qp_table(dev);
        if (err) {
                mlx4_err(dev, "Failed to initialize "
                         "queue pair table, aborting.\n");
                goto err_srq_table_free;
        }

        err = mlx4_init_mcg_table(dev);
        if (err) {
                mlx4_err(dev, "Failed to initialize "
                         "multicast group table, aborting.\n");
                goto err_qp_table_free;
        }

        for (port = 1; port <= dev->caps.num_ports; port++) {
                ib_port_default_caps = 0;
                err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
                if (err)
                        mlx4_warn(dev, "failed to get port %d default "
                                  "ib capabilities (%d). Continuing with "
                                  "caps = 0\n", port, err);
                dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
                err = mlx4_SET_PORT(dev, port);
                if (err) {
                        mlx4_err(dev, "Failed to set port %d, aborting\n",
                                 port);
                        goto err_mcg_table_free;
                }
        }

        return 0;

err_mcg_table_free:
        mlx4_cleanup_mcg_table(dev);

err_qp_table_free:
        mlx4_cleanup_qp_table(dev);

err_srq_table_free:
        mlx4_cleanup_srq_table(dev);

err_cq_table_free:
        mlx4_cleanup_cq_table(dev);

err_cmd_poll:
        mlx4_cmd_use_polling(dev);

err_eq_table_free:
        mlx4_cleanup_eq_table(dev);

err_mr_table_free:
        mlx4_cleanup_mr_table(dev);

err_pd_table_free:
        mlx4_cleanup_pd_table(dev);

err_kar_unmap:
        iounmap(priv->kar);

err_uar_free:
        mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
        mlx4_cleanup_uar_table(dev);
        return err;
}

static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct msix_entry *entries;
        int nreq = min_t(int, dev->caps.num_ports *
                         min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
                         + MSIX_LEGACY_SZ, MAX_MSIX);
        int err;
        int i;

        if (msi_x) {
                nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
                             nreq);
                entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
                if (!entries)
                        goto no_msi;

                for (i = 0; i < nreq; ++i)
                        entries[i].entry = i;

        retry:
                err = pci_enable_msix(dev->pdev, entries, nreq);
                if (err) {
                        /* Try again if at least 2 vectors are available */
                        if (err > 1) {
                                mlx4_info(dev, "Requested %d vectors, "
                                          "but only %d MSI-X vectors available, "
                                          "trying again\n", nreq, err);
                                nreq = err;
                                goto retry;
                        }
                        kfree(entries);
                        goto no_msi;
                }

                if (nreq <
                    MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
                        /* Working in legacy mode, all EQs shared */
                        dev->caps.comp_pool = 0;
                        dev->caps.num_comp_vectors = nreq - 1;
                } else {
                        dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
                        dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
                }
                for (i = 0; i < nreq; ++i)
                        priv->eq_table.eq[i].irq = entries[i].vector;

                dev->flags |= MLX4_FLAG_MSI_X;

                kfree(entries);
                return;
        }

no_msi:
        dev->caps.num_comp_vectors = 1;
        dev->caps.comp_pool = 0;

        for (i = 0; i < 2; ++i)
                priv->eq_table.eq[i].irq = dev->pdev->irq;
}
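
/*
 * Vector budgeting sketch for mlx4_enable_msi_x(), assuming the
 * constants from mlx4.h are MSIX_LEGACY_SZ = 4, MIN_MSIX_P_PORT = 3 and
 * MAX_MSIX_P_PORT = 8: a two-port HCA on an 8-CPU machine asks for
 * nreq = 2 * min(8 + 1, 8) + 4 = 20 vectors, and falls back to legacy
 * mode (comp_pool = 0, all EQs shared) if fewer than
 * MSIX_LEGACY_SZ + 2 * MIN_MSIX_P_PORT vectors are granted.
 */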

static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
        struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
        int err = 0;

        info->dev = dev;
        info->port = port;
        mlx4_init_mac_table(dev, &info->mac_table);
        mlx4_init_vlan_table(dev, &info->vlan_table);

        sprintf(info->dev_name, "mlx4_port%d", port);
        info->port_attr.attr.name = info->dev_name;
        info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
        info->port_attr.show = show_port_type;
        info->port_attr.store = set_port_type;
        sysfs_attr_init(&info->port_attr.attr);

        err = device_create_file(&dev->pdev->dev, &info->port_attr);
        if (err) {
                mlx4_err(dev, "Failed to create file for port %d\n", port);
                info->port = -1;
        }

        return err;
}

static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
        if (info->port < 0)
                return;

        device_remove_file(&info->dev->pdev->dev, &info->port_attr);
}

static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct mlx4_priv *priv;
        struct mlx4_dev *dev;
        int err;
        int port;

        pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, "
                        "aborting.\n");
                return err;
        }

        /*
         * Check for BARs. We expect 0: 1MB
         */
        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
            pci_resource_len(pdev, 0) != 1 << 20) {
                dev_err(&pdev->dev, "Missing DCS, aborting.\n");
                err = -ENODEV;
                goto err_disable_pdev;
        }
        if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev, "Missing UAR, aborting.\n");
                err = -ENODEV;
                goto err_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
                goto err_disable_pdev;
        }

        pci_set_master(pdev);

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
                        goto err_release_regions;
                }
        }
        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
                         "consistent PCI DMA mask.\n");
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
                                "aborting.\n");
                        goto err_release_regions;
                }
        }

        priv = kzalloc(sizeof *priv, GFP_KERNEL);
        if (!priv) {
                dev_err(&pdev->dev, "Device struct alloc failed, "
                        "aborting.\n");
                err = -ENOMEM;
                goto err_release_regions;
        }

        dev = &priv->dev;
        dev->pdev = pdev;
        INIT_LIST_HEAD(&priv->ctx_list);
        spin_lock_init(&priv->ctx_lock);

        mutex_init(&priv->port_mutex);

        INIT_LIST_HEAD(&priv->pgdir_list);
        mutex_init(&priv->pgdir_mutex);

        pci_read_config_byte(pdev, PCI_REVISION_ID, &dev->rev_id);

        /*
         * Now reset the HCA before we touch the PCI capabilities or
         * attempt a firmware command, since a boot ROM may have left
         * the HCA in an undefined state.
         */
        err = mlx4_reset(dev);
        if (err) {
                mlx4_err(dev, "Failed to reset HCA, aborting.\n");
                goto err_free_dev;
        }

        if (mlx4_cmd_init(dev)) {
                mlx4_err(dev, "Failed to init command interface, aborting.\n");
                goto err_free_dev;
        }

        err = mlx4_init_hca(dev);
        if (err)
                goto err_cmd;

        err = mlx4_alloc_eq_table(dev);
        if (err)
                goto err_close;

        priv->msix_ctl.pool_bm = 0;
        spin_lock_init(&priv->msix_ctl.pool_lock);

        mlx4_enable_msi_x(dev);

        err = mlx4_setup_hca(dev);
        if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
                dev->flags &= ~MLX4_FLAG_MSI_X;
                pci_disable_msix(pdev);
                err = mlx4_setup_hca(dev);
        }

        if (err)
                goto err_free_eq;

        for (port = 1; port <= dev->caps.num_ports; port++) {
                err = mlx4_init_port_info(dev, port);
                if (err)
                        goto err_port;
        }

        err = mlx4_register_device(dev);
        if (err)
                goto err_port;

        mlx4_sense_init(dev);
        mlx4_start_sense(dev);

        pci_set_drvdata(pdev, dev);

        return 0;

err_port:
        for (--port; port >= 1; --port)
                mlx4_cleanup_port_info(&priv->port[port]);

        mlx4_cleanup_mcg_table(dev);
        mlx4_cleanup_qp_table(dev);
        mlx4_cleanup_srq_table(dev);
        mlx4_cleanup_cq_table(dev);
        mlx4_cmd_use_polling(dev);
        mlx4_cleanup_eq_table(dev);
        mlx4_cleanup_mr_table(dev);
        mlx4_cleanup_pd_table(dev);
        mlx4_cleanup_uar_table(dev);

err_free_eq:
        mlx4_free_eq_table(dev);

err_close:
        if (dev->flags & MLX4_FLAG_MSI_X)
                pci_disable_msix(pdev);

        mlx4_close_hca(dev);

err_cmd:
        mlx4_cmd_cleanup(dev);

err_free_dev:
        kfree(priv);

err_release_regions:
        pci_release_regions(pdev);

err_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

static int __devinit mlx4_init_one(struct pci_dev *pdev,
                                   const struct pci_device_id *id)
{
        printk_once(KERN_INFO "%s", mlx4_version);

        return __mlx4_init_one(pdev, id);
}

static void mlx4_remove_one(struct pci_dev *pdev)
{
        struct mlx4_dev *dev = pci_get_drvdata(pdev);
        struct mlx4_priv *priv = mlx4_priv(dev);
        int p;

        if (dev) {
                mlx4_stop_sense(dev);
                mlx4_unregister_device(dev);

                for (p = 1; p <= dev->caps.num_ports; p++) {
                        mlx4_cleanup_port_info(&priv->port[p]);
                        mlx4_CLOSE_PORT(dev, p);
                }

                mlx4_cleanup_mcg_table(dev);
                mlx4_cleanup_qp_table(dev);
                mlx4_cleanup_srq_table(dev);
                mlx4_cleanup_cq_table(dev);
                mlx4_cmd_use_polling(dev);
                mlx4_cleanup_eq_table(dev);
                mlx4_cleanup_mr_table(dev);
                mlx4_cleanup_pd_table(dev);

                iounmap(priv->kar);
                mlx4_uar_free(dev, &priv->driver_uar);
                mlx4_cleanup_uar_table(dev);
                mlx4_free_eq_table(dev);
                mlx4_close_hca(dev);
                mlx4_cmd_cleanup(dev);

                if (dev->flags & MLX4_FLAG_MSI_X)
                        pci_disable_msix(pdev);

                kfree(priv);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
}

int mlx4_restart_one(struct pci_dev *pdev)
{
        mlx4_remove_one(pdev);
        return __mlx4_init_one(pdev, NULL);
}

static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
        { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
        { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
        { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
        { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
        { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
        { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
        { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
        { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
        { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
        { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2 */
        { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
        { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
        { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
        { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
        { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
        { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
        { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
        { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
        { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
        { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
        { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
        { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
        { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
        { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
        { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
        { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
        { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
        { 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);

static struct pci_driver mlx4_driver = {
        .name           = DRV_NAME,
        .id_table       = mlx4_pci_table,
        .probe          = mlx4_init_one,
        .remove         = __devexit_p(mlx4_remove_one),
};

static int __init mlx4_verify_params(void)
{
        if ((log_num_mac < 0) || (log_num_mac > 7)) {
                pr_warning("mlx4_core: bad log_num_mac: %d\n", log_num_mac);
                return -1;
        }

        if ((log_num_vlan < 0) || (log_num_vlan > 7)) {
                pr_warning("mlx4_core: bad log_num_vlan: %d\n", log_num_vlan);
                return -1;
        }

        if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
                pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
                return -1;
        }

        return 0;
}

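/*
 * Parameter validation happens once, at module load: a hypothetical
 * "modprobe mlx4_core log_num_mac=9" makes mlx4_verify_params() fail,
 * so mlx4_init() returns -EINVAL before the PCI driver is registered.
 */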
static int __init mlx4_init(void)
{
        int ret;

        if (mlx4_verify_params())
                return -EINVAL;

        mlx4_catas_init();

        mlx4_wq = create_singlethread_workqueue("mlx4");
        if (!mlx4_wq)
                return -ENOMEM;

        ret = pci_register_driver(&mlx4_driver);
        return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
        pci_unregister_driver(&mlx4_driver);
        destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);