net/mlx4_en: Datapath resources allocated dynamically
[linux-2.6-block.git] / drivers/net/ethernet/mellanox/mlx4/main.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/kmod.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static int num_vfs;
module_param(num_vfs, int, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");

static int probe_vf;
module_param(probe_vf, int, 0644);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");

int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
		   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248. Range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");

static bool enable_64b_cqe_eqe;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this");

#define HCA_GLOBAL_CAP_MASK		0

#define PF_CONTEXT_BEHAVIOUR_MASK	MLX4_FUNC_CAP_64B_EQE_CQE

static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
			   "(0/1, default 0)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
				  "1 for IB, 2 for Ethernet");

struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported "
					 "on this HCA, aborting.\n");
				return -EINVAL;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not "
				 "supported on this HCA\n", i + 1);
			return -EINVAL;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

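/*
 * Query device capabilities from FW (QUERY_DEV_CAP) and derive the
 * driver's operating limits and per-port defaults in dev->caps.
 */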
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports	     = dev_cap->num_ports;
	dev->phys_caps.num_phys_eqs  = MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i]	    = dev_cap->max_vl[i];
		dev->caps.ib_mtu_cap[i]	    = dev_cap->ib_mtu[i];
		dev->phys_caps.gid_phys_table_len[i]  = dev_cap->max_gids[i];
		dev->phys_caps.pkey_phys_table_len[i] = dev_cap->max_pkeys[i];
		/* set gid and pkey table operating lengths by default
		 * to non-sriov values */
		dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
		dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
		dev->caps.def_mac[i]        = dev_cap->def_mac[i];
		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
		dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
		dev->caps.default_sense[i]  = dev_cap->default_sense[i];
		dev->caps.trans_type[i]	    = dev_cap->trans_type[i];
		dev->caps.vendor_oui[i]     = dev_cap->vendor_oui[i];
		dev->caps.wavelength[i]     = dev_cap->wavelength[i];
		dev->caps.trans_code[i]     = dev_cap->trans_code[i];
	}

	dev->caps.uar_page_size	     = PAGE_SIZE;
	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg	     = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts      = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;

	/* The first 128 UARs are used for EQ doorbells */
	dev->caps.reserved_uars	     = max_t(int, 128, dev_cap->reserved_uars);
	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds          = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz       = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags		     = dev_cap->flags;
	dev->caps.flags2	     = dev_cap->flags2;
	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;

	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
	/* Don't do sense port on multifunction devices (for now at least) */
	if (mlx4_is_mfunc(dev))
		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	dev->caps.log_num_macs  = log_num_mac;
	dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	dev->caps.log_num_prios = use_prio ? 3 : 0;

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported, assign IB */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			else {
				/* if IB and ETH are supported, we set the port
				 * type according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i - 1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port
		 * 3. FW declared that it supports link sensing
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform sense_port FW command to try and set the correct
		 * port type from beginning
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
			mlx4_warn(dev, "Requested number of MACs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
			mlx4_warn(dev, "Requested number of VLANs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		(1 << dev->caps.log_num_prios) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;

	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
		if (dev_cap->flags &
		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
		}
	}

	if ((dev->caps.flags &
	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
	    mlx4_is_master(dev))
		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;

	return 0;
}
/* The function checks whether there are live VFs and returns their number */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}

int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)
		return -EINVAL;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		/* tunnel qp */
		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);

void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);

void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);

__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return 0;

	return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);

int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);

static void slave_adjust_steering_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap,
				       struct mlx4_init_hca_param *hca_param)
{
	dev->caps.steering_mode = hca_param->steering_mode;
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);

	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));
}

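/*
 * Slave (VF) capability discovery: instead of reading device limits
 * directly, query the HCA parameters and the function capabilities
 * granted by the PF, then set up the proxy/tunnel QP arrays per port.
 */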
static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_func_cap func_cap;
	struct mlx4_init_hca_param hca_param;
	int i;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
		return err;
	}

	/* fail if the hca has an unknown capability */
	if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
	    HCA_GLOBAL_CAP_MASK) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	dev->caps.hca_core_clock = hca_param.hca_core_clock;

	memset(&dev_cap, 0, sizeof(dev_cap));
	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 page_size, PAGE_SIZE);
		return -ENODEV;
	}

	/* slave gets uar page size from QUERY_HCA fw command */
	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);

	/* TODO: relax this assumption */
	if (dev->caps.uar_page_size != PAGE_SIZE) {
		mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
			 dev->caps.uar_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n",
			 err);
		return err;
	}

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour\n");
		return -ENOSYS;
	}

	dev->caps.num_ports		= func_cap.num_ports;
	dev->quotas.qp			= func_cap.qp_quota;
	dev->quotas.srq			= func_cap.srq_quota;
	dev->quotas.cq			= func_cap.cq_quota;
	dev->quotas.mpt			= func_cap.mpt_quota;
	dev->quotas.mtt			= func_cap.mtt_quota;
	dev->caps.num_qps		= 1 << hca_param.log_num_qps;
	dev->caps.num_srqs		= 1 << hca_param.log_num_srqs;
	dev->caps.num_cqs		= 1 << hca_param.log_num_cqs;
	dev->caps.num_mpts		= 1 << hca_param.log_mpt_sz;
	dev->caps.num_eqs		= func_cap.max_eq;
	dev->caps.reserved_eqs		= func_cap.reserved_eq;
	dev->caps.num_pds		= MLX4_NUM_PDS;
	dev->caps.num_mgms		= 0;
	dev->caps.num_amgms		= 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);

	if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
	    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
		err = -ENOMEM;
		goto err_mem;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for"
				 " port %d, aborting (%d).\n", i, err);
			goto err_mem;
		}
		dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
		dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
		dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
		dev->caps.port_mask[i] = dev->caps.port_type[i];
		if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						    &dev->caps.gid_table_len[i],
						    &dev->caps.pkey_table_len[i]))
			goto err_mem;
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
	    pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		goto err_mem;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
		dev->caps.eqe_size   = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size   = 32;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
		dev->caps.cqe_size   = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
	} else {
		dev->caps.cqe_size   = 32;
	}

	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
	mlx4_warn(dev, "Timestamping is not supported in slave mode.\n");

	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);

	return 0;

err_mem:
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
		dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;

	return err;
}

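/*
 * Asynchronously load the protocol drivers (mlx4_ib/mlx4_en) that
 * match the configured port types.
 */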
static void mlx4_request_modules(struct mlx4_dev *dev)
{
	int port;
	int has_ib_port = false;
	int has_eth_port = false;
#define EN_DRV_NAME	"mlx4_en"
#define IB_DRV_NAME	"mlx4_ib"

	for (port = 1; port <= dev->caps.num_ports; port++) {
		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
			has_ib_port = true;
		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
			has_eth_port = true;
	}

	if (has_ib_port)
		request_module_nowait(IB_DRV_NAME);
	if (has_eth_port)
		request_module_nowait(EN_DRV_NAME);
}

/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port, -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, "
					 "aborting\n", port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
		if (err) {
			mlx4_err(dev, "Failed to register device\n");
			goto out;
		}
		mlx4_request_modules(dev);
	}

out:
	return err;
}

static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}

static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
		return -EINVAL;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
			       "Set only 'eth' or 'ib' for both ports "
			       "(should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
	return err ? err : count;
}

enum ibta_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}

static ssize_t show_port_ib_mtu(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	sprintf(buf, "%d\n",
		ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}

static ssize_t set_port_ib_mtu(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = kstrtoint(buf, 0, &mtu);
	if (!err)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is an invalid IBTA mtu\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port, -1);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, "
				 "aborting\n", port);
			goto err_set_port;
		}
	}
	err = mlx4_register_device(mdev);
err_set_port:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}

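/*
 * Allocate the firmware area in ICM, map it to the device (MAP_FA) and
 * start the firmware (RUN_FW).
 */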
static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting.\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting.\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}

static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
		  dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}

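/*
 * Size the device context memory (ICM) from the chosen profile and map
 * each context table (cMPT, EQ, MTT, dMPT, QP, AUXC, ALTC, RDMARC, CQ,
 * SRQ, MCG) into it, unwinding in reverse order on failure.
 */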
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
		goto err_unmap_aux;
	}

	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
		  dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
		goto err_unmap_cq;
	}

	/*
	 * For flow steering device managed mode it is required to use
	 * mlx4_init_icm_table. For B0 steering mode it's not strictly
	 * required, but for simplicity just map the whole multicast
	 * group table now. The table isn't very big and it's a lot
	 * easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}

static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}

static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function.\n");
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
}

static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;
	int err = 0;

	if (!dev->caps.bf_reg_size)
		return -ENXIO;

	bf_start = pci_resource_start(dev->pdev, 2) +
			(dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->pdev, 2) -
			(dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}

static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}

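/*
 * Lock-free read of the 64-bit free-running HW clock: read the high
 * word, the low word, then the high word again, and retry if the high
 * word changed in between (up to 10 attempts).
 */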
cycle_t mlx4_read_clock(struct mlx4_dev *dev)
{
	u32 clockhi, clocklo, clockhi1;
	cycle_t cycles;
	int i;
	struct mlx4_priv *priv = mlx4_priv(dev);

	for (i = 0; i < 10; i++) {
		clockhi = swab32(readl(priv->clock_mapping));
		clocklo = swab32(readl(priv->clock_mapping + 4));
		clockhi1 = swab32(readl(priv->clock_mapping));
		if (clockhi == clockhi1)
			break;
	}

	cycles = (u64) clockhi << 32 | (u64) clocklo;

	return cycles;
}
EXPORT_SYMBOL_GPL(mlx4_read_clock);

static int map_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clock_mapping =
		ioremap(pci_resource_start(dev->pdev, priv->fw.clock_bar) +
			priv->fw.clock_offset, MLX4_CLOCK_SIZE);

	if (!priv->clock_mapping)
		return -ENOMEM;

	return 0;
}

static void unmap_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->clock_mapping)
		iounmap(priv->clock_mapping);
}

static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_internal_clock(dev);
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}

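/*
 * Slave start-up handshake with the master over the comm channel:
 * reset, driver interface-version check, then posting of the VHCR DMA
 * address 16 bits at a time.
 */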
static int mlx4_init_slave(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int ret_from_reset = 0;
	u32 slave_read;
	u32 cmd_channel_ver;

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	priv->cmd.max_cmds = 1;
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_TIME);
	/* if we are in the middle of flr the slave will try
	 * NUM_OF_RESET_RETRIES times before leaving. */
	if (ret_from_reset) {
		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			mlx4_warn(dev, "slave is currently in the "
				  "middle of FLR. Deferring probe.\n");
			mutex_unlock(&priv->cmd.slave_cmd_mutex);
			return -EPROBE_DEFER;
		} else
			goto err;
	}

	/* check the driver version - the slave I/F revision
	 * must match the master's */
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
		MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported"
			 " by the master\n");
		goto err;
	}

	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
						     MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
						     MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
						     MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
		goto err;

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return 0;

err:
	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return -EIO;
}

static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		dev->caps.gid_table_len[i] = 1;
		dev->caps.pkey_table_len[i] =
			dev->phys_caps.pkey_phys_table_len[i] - 1;
	}
}

static int choose_log_fs_mgm_entry_size(int qp_per_entry)
{
	int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;

	for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
	      i++) {
		if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
			break;
	}

	return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
}

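/*
 * Pick device-managed flow steering when the FW supports it and the
 * log_num_mgm_entry_size module parameter allows it (-1); otherwise
 * fall back to B0 steering, or to A0 if the UC/MC steering flags are
 * not both set.
 */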
static void choose_steering_mode(struct mlx4_dev *dev,
				 struct mlx4_dev_cap *dev_cap)
{
	if (mlx4_log_num_mgm_entry_size == -1 &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
	    (!mlx4_is_mfunc(dev) ||
	     (dev_cap->fs_max_num_qp_per_entry >= (num_vfs + 1))) &&
	    choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
		MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
		dev->oper_log_mgm_entry_size =
			choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
		dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else {
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
			dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
		else {
			dev->caps.steering_mode = MLX4_STEERING_MODE_A0;

			if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
			    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
				mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags "
					  "set to use B0 steering. Falling back to A0 steering mode.\n");
		}
		dev->oper_log_mgm_entry_size =
			mlx4_log_num_mgm_entry_size > 0 ?
			mlx4_log_num_mgm_entry_size :
			MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
		dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
	}
	mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, "
		 "modparam log_num_mgm_entry_size = %d\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode),
		 dev->oper_log_mgm_entry_size,
		 mlx4_log_num_mgm_entry_size);
}

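/*
 * Bring the HCA up: on the PF this loads FW, sizes and maps ICM and
 * runs INIT_HCA (plus optional timestamping setup); on a VF it runs
 * the slave handshake and queries the caps granted by the PF.
 */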
static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_adapter adapter;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_mod_stat_cfg mlx4_cfg;
	struct mlx4_profile profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_QUERY_FW(dev);
		if (err) {
			if (err == -EACCES)
				mlx4_info(dev, "non-primary physical function, skipping.\n");
			else
				mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
			return err;
		}

		err = mlx4_load_fw(dev);
		if (err) {
			mlx4_err(dev, "Failed to start FW, aborting.\n");
			return err;
		}

		mlx4_cfg.log_pg_sz_m = 1;
		mlx4_cfg.log_pg_sz = 0;
		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
		if (err)
			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");

		err = mlx4_dev_cap(dev, &dev_cap);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
			goto err_stop_fw;
		}

		choose_steering_mode(dev, &dev_cap);

		if (mlx4_is_master(dev))
			mlx4_parav_master_pf_caps(dev);

		profile = default_profile;
		if (dev->caps.steering_mode ==
		    MLX4_STEERING_MODE_DEVICE_MANAGED)
			profile.num_mcg = MLX4_FS_NUM_MCG;

		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
					     &init_hca);
		if ((long long) icm_size < 0) {
			err = icm_size;
			goto err_stop_fw;
		}

		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;

		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
		init_hca.uar_page_sz = PAGE_SHIFT - 12;
		init_hca.mw_enabled = 0;
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
		    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
			init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;

		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
		if (err)
			goto err_stop_fw;

		err = mlx4_INIT_HCA(dev, &init_hca);
		if (err) {
			mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
			goto err_free_icm;
		}
		/*
		 * If TS is supported by FW
		 * read HCA frequency by QUERY_HCA command
		 */
		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
			memset(&init_hca, 0, sizeof(init_hca));
			err = mlx4_QUERY_HCA(dev, &init_hca);
			if (err) {
				mlx4_err(dev, "QUERY_HCA command failed, disable timestamp.\n");
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
			} else {
				dev->caps.hca_core_clock =
					init_hca.hca_core_clock;
			}

			/* In case we got HCA frequency 0 - disable timestamping
			 * to avoid dividing by zero
			 */
			if (!dev->caps.hca_core_clock) {
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev,
					 "HCA frequency is 0. Timestamping is not supported.");
			} else if (map_internal_clock(dev)) {
				/*
				 * Map internal clock,
				 * in case of failure disable timestamping
				 */
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported.\n");
			}
		}
	} else {
		err = mlx4_init_slave(dev);
		if (err) {
			if (err != -EPROBE_DEFER)
				mlx4_err(dev, "Failed to initialize slave\n");
			return err;
		}

		err = mlx4_slave_cap(dev);
		if (err) {
			mlx4_err(dev, "Failed to obtain slave caps\n");
			goto err_close;
		}
	}

	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	/* Only the master sets the ports; all the rest get them from it. */
	if (!mlx4_is_slave(dev))
		mlx4_set_port_mask(dev);

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
		goto unmap_bf;
	}

	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

unmap_bf:
	unmap_internal_clock(dev);
	unmap_bf_area(dev);

err_close:
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else
		mlx4_CLOSE_HCA(dev, 0);

err_free_icm:
	if (!mlx4_is_slave(dev))
		mlx4_free_icms(dev);

err_stop_fw:
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	}
	return err;
}

static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nent;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	nent = dev->caps.max_counters;
	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
}

static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}

int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
	if (*idx == -1)
		return -ENOMEM;

	return 0;
}

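/*
 * In multi-function mode the allocation is forwarded to the PF as a
 * wrapped ALLOC_RES command; otherwise the counter index is taken
 * directly from the local bitmap. Freeing mirrors this split.
 */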
1637int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
1638{
1639 u64 out_param;
1640 int err;
1641
1642 if (mlx4_is_mfunc(dev)) {
1643 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
1644 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
1645 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
1646 if (!err)
1647 *idx = get_param_l(&out_param);
1648
1649 return err;
1650 }
1651 return __mlx4_counter_alloc(dev, idx);
1652}
f2a3f6a3
OG
1653EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
1654
ba062d52 1655void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
f2a3f6a3
OG
1656{
1657 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
1658 return;
1659}
ba062d52
JM
1660
1661void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
1662{
e7dbeba8 1663 u64 in_param = 0;
ba062d52
JM
1664
1665 if (mlx4_is_mfunc(dev)) {
1666 set_param_l(&in_param, idx);
1667 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
1668 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
1669 MLX4_CMD_WRAPPED);
1670 return;
1671 }
1672 __mlx4_counter_free(dev, idx);
1673 }
1674EXPORT_SYMBOL_GPL(mlx4_counter_free);
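/*
 * Illustrative usage sketch, not part of this file: a consumer of
 * mlx4_core (e.g. an Ethernet or IB driver instance) would typically
 * reserve a counter index at setup time with mlx4_counter_alloc() and
 * release it on teardown with mlx4_counter_free(). The helper below is
 * hypothetical.
 */
#if 0	/* example only */
static int example_counter_setup(struct mlx4_dev *dev, u32 *idx)
{
	int err = mlx4_counter_alloc(dev, idx);

	if (err == -ENOENT)	/* device has no counter support; not fatal */
		return 0;
	return err;		/* 0 on success, -ENOMEM if the pool is empty */
}
#endif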
1675
1676 static int mlx4_setup_hca(struct mlx4_dev *dev)
1677 {
1678 struct mlx4_priv *priv = mlx4_priv(dev);
1679 int err;
1680 	int port;
1681 	__be32 ib_port_default_caps;
1682 
1683 err = mlx4_init_uar_table(dev);
1684 if (err) {
1685 mlx4_err(dev, "Failed to initialize "
1686 "user access region table, aborting.\n");
1687 return err;
1688 }
1689
1690 err = mlx4_uar_alloc(dev, &priv->driver_uar);
1691 if (err) {
1692 mlx4_err(dev, "Failed to allocate driver access region, "
1693 "aborting.\n");
1694 goto err_uar_table_free;
1695 }
1696
1697 	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
1698 if (!priv->kar) {
1699 mlx4_err(dev, "Couldn't map kernel access region, "
1700 "aborting.\n");
1701 err = -ENOMEM;
1702 goto err_uar_free;
1703 }
1704
1705 err = mlx4_init_pd_table(dev);
1706 if (err) {
1707 mlx4_err(dev, "Failed to initialize "
1708 "protection domain table, aborting.\n");
1709 goto err_kar_unmap;
1710 }
1711
1712 err = mlx4_init_xrcd_table(dev);
1713 if (err) {
1714 mlx4_err(dev, "Failed to initialize "
1715 "reliable connection domain table, aborting.\n");
1716 goto err_pd_table_free;
1717 }
1718
1719 err = mlx4_init_mr_table(dev);
1720 if (err) {
1721 mlx4_err(dev, "Failed to initialize "
1722 "memory region table, aborting.\n");
1723 		goto err_xrcd_table_free;
1724 }
1725
1726 if (!mlx4_is_slave(dev)) {
1727 err = mlx4_init_mcg_table(dev);
1728 if (err) {
1729 mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n");
1730 goto err_mr_table_free;
1731 }
1732 }
1733
1734 err = mlx4_init_eq_table(dev);
1735 if (err) {
1736 mlx4_err(dev, "Failed to initialize "
1737 "event queue table, aborting.\n");
1738 		goto err_mcg_table_free;
1739 }
1740
1741 err = mlx4_cmd_use_events(dev);
1742 if (err) {
1743 mlx4_err(dev, "Failed to switch to event-driven "
1744 "firmware commands, aborting.\n");
1745 goto err_eq_table_free;
1746 }
1747
1748 err = mlx4_NOP(dev);
1749 if (err) {
1750 		if (dev->flags & MLX4_FLAG_MSI_X) {
1751 			mlx4_warn(dev, "NOP command failed to generate MSI-X "
1752 				  "interrupt (IRQ %d).\n",
1753 				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
1754 mlx4_warn(dev, "Trying again without MSI-X.\n");
1755 } else {
1756 mlx4_err(dev, "NOP command failed to generate interrupt "
1757 "(IRQ %d), aborting.\n",
1758 				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
1759 			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
1760 		}
1761
1762 goto err_cmd_poll;
1763 }
1764
1765 mlx4_dbg(dev, "NOP command IRQ test passed\n");
1766
1767 err = mlx4_init_cq_table(dev);
1768 if (err) {
1769 mlx4_err(dev, "Failed to initialize "
1770 "completion queue table, aborting.\n");
1771 goto err_cmd_poll;
1772 }
1773
1774 err = mlx4_init_srq_table(dev);
1775 if (err) {
1776 mlx4_err(dev, "Failed to initialize "
1777 "shared receive queue table, aborting.\n");
1778 goto err_cq_table_free;
1779 }
1780
1781 err = mlx4_init_qp_table(dev);
1782 if (err) {
1783 mlx4_err(dev, "Failed to initialize "
1784 "queue pair table, aborting.\n");
1785 goto err_srq_table_free;
1786 }
1787
1788 err = mlx4_init_counters_table(dev);
1789 if (err && err != -ENOENT) {
1790 mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
1791 		goto err_qp_table_free;
1792 }
1793
1794 if (!mlx4_is_slave(dev)) {
1795 for (port = 1; port <= dev->caps.num_ports; port++) {
1796 ib_port_default_caps = 0;
1797 err = mlx4_get_port_ib_caps(dev, port,
1798 &ib_port_default_caps);
1799 if (err)
1800 mlx4_warn(dev, "failed to get port %d default "
1801 "ib capabilities (%d). Continuing "
1802 "with caps = 0\n", port, err);
1803 dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
1804
1805 /* initialize per-slave default ib port capabilities */
1806 if (mlx4_is_master(dev)) {
1807 int i;
1808 for (i = 0; i < dev->num_slaves; i++) {
1809 if (i == mlx4_master_func_num(dev))
1810 continue;
1811 priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
1812 ib_port_default_caps;
1813 }
1814 }
1815
1816 if (mlx4_is_mfunc(dev))
1817 dev->caps.port_ib_mtu[port] = IB_MTU_2048;
1818 else
1819 dev->caps.port_ib_mtu[port] = IB_MTU_4096;
1820 
1821 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
1822 dev->caps.pkey_table_len[port] : -1);
1823 if (err) {
1824 mlx4_err(dev, "Failed to set port %d, aborting\n",
1825 port);
1826 goto err_counters_table_free;
1827 }
1828 }
1829 }
1830
1831 return 0;
1832
1833 err_counters_table_free:
1834 	mlx4_cleanup_counters_table(dev);
1835 
1836 err_qp_table_free:
1837 mlx4_cleanup_qp_table(dev);
1838
1839 err_srq_table_free:
1840 mlx4_cleanup_srq_table(dev);
1841
1842 err_cq_table_free:
1843 mlx4_cleanup_cq_table(dev);
1844
1845 err_cmd_poll:
1846 mlx4_cmd_use_polling(dev);
1847
1848 err_eq_table_free:
1849 mlx4_cleanup_eq_table(dev);
1850
1851 err_mcg_table_free:
1852 if (!mlx4_is_slave(dev))
1853 mlx4_cleanup_mcg_table(dev);
1854
1855 err_mr_table_free:
1856 mlx4_cleanup_mr_table(dev);
1857
1858 err_xrcd_table_free:
1859 mlx4_cleanup_xrcd_table(dev);
1860
1861 err_pd_table_free:
1862 mlx4_cleanup_pd_table(dev);
1863
1864 err_kar_unmap:
1865 iounmap(priv->kar);
1866
1867 err_uar_free:
1868 mlx4_uar_free(dev, &priv->driver_uar);
1869
1870 err_uar_table_free:
1871 mlx4_cleanup_uar_table(dev);
1872 return err;
1873 }
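/*
 * mlx4_setup_hca() above uses the kernel's standard "goto unwind" error
 * handling: each init step gets a label that undoes every step completed
 * before it, in reverse order, so a failure anywhere leaves nothing
 * allocated. A minimal sketch of the pattern (all names hypothetical):
 */
#if 0	/* example only */
static int example_setup(void)
{
	int err;

	err = init_a();
	if (err)
		return err;

	err = init_b();
	if (err)
		goto err_a;

	err = init_c();
	if (err)
		goto err_b;

	return 0;

err_b:
	cleanup_b();	/* undo init_b() */
err_a:
	cleanup_a();	/* undo init_a() */
	return err;
}
#endif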
1874
1875 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
1876 {
1877 struct mlx4_priv *priv = mlx4_priv(dev);
1878 	struct msix_entry *entries;
1879 	int nreq = min_t(int, dev->caps.num_ports *
1880 			 min_t(int, netif_get_num_default_rss_queues() + 1,
1881 			       MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
1882 int err;
1883 int i;
1884
1885 if (msi_x) {
1886 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
1887 nreq);
1888 
1889 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
1890 if (!entries)
1891 goto no_msi;
1892
1893 for (i = 0; i < nreq; ++i)
1894 entries[i].entry = i;
1895
1896 retry:
1897 err = pci_enable_msix(dev->pdev, entries, nreq);
1898 		if (err) {
1899 /* Try again if at least 2 vectors are available */
1900 if (err > 1) {
1901 mlx4_info(dev, "Requested %d vectors, "
1902 "but only %d MSI-X vectors available, "
1903 "trying again\n", nreq, err);
1904 nreq = err;
1905 goto retry;
1906 }
1907 			kfree(entries);
1908 goto no_msi;
1909 }
1910
1911 		if (nreq <
1912 		    MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
1913 			/* Working in legacy mode, all EQs shared */
1914 dev->caps.comp_pool = 0;
1915 dev->caps.num_comp_vectors = nreq - 1;
1916 } else {
1917 dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
1918 dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
1919 }
1920 		for (i = 0; i < nreq; ++i)
1921 priv->eq_table.eq[i].irq = entries[i].vector;
1922
1923 dev->flags |= MLX4_FLAG_MSI_X;
1924
1925 kfree(entries);
1926 return;
1927 }
1928
1929 no_msi:
1930 	dev->caps.num_comp_vectors = 1;
1931 	dev->caps.comp_pool	   = 0;
1932
1933 for (i = 0; i < 2; ++i)
1934 priv->eq_table.eq[i].irq = dev->pdev->irq;
1935 }
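/*
 * Note on the retry loop above: in this kernel, pci_enable_msix()
 * returns 0 on success, a negative errno on hard failure, or a positive
 * count of vectors actually available, in which case nreq is narrowed to
 * that count and the request retried. The split that follows keeps
 * MSIX_LEGACY_SZ vectors for the shared/async EQs and hands the rest to
 * the per-port completion-vector pool.
 */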
1936
1937 static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
1938 {
1939 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
1940 	int err = 0;
1941
1942 info->dev = dev;
1943 info->port = port;
1944 	if (!mlx4_is_slave(dev)) {
1945 mlx4_init_mac_table(dev, &info->mac_table);
1946 mlx4_init_vlan_table(dev, &info->vlan_table);
1947 		info->base_qpn = mlx4_get_base_qpn(dev, port);
1948 	}
1949
1950 sprintf(info->dev_name, "mlx4_port%d", port);
1951 info->port_attr.attr.name = info->dev_name;
1952 if (mlx4_is_mfunc(dev))
1953 info->port_attr.attr.mode = S_IRUGO;
1954 else {
1955 info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
1956 info->port_attr.store = set_port_type;
1957 }
1958 	info->port_attr.show = show_port_type;
1959 	sysfs_attr_init(&info->port_attr.attr);
1960
1961 err = device_create_file(&dev->pdev->dev, &info->port_attr);
1962 if (err) {
1963 mlx4_err(dev, "Failed to create file for port %d\n", port);
1964 info->port = -1;
1965 }
1966
1967 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
1968 info->port_mtu_attr.attr.name = info->dev_mtu_name;
1969 if (mlx4_is_mfunc(dev))
1970 info->port_mtu_attr.attr.mode = S_IRUGO;
1971 else {
1972 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
1973 info->port_mtu_attr.store = set_port_ib_mtu;
1974 }
1975 info->port_mtu_attr.show = show_port_ib_mtu;
1976 sysfs_attr_init(&info->port_mtu_attr.attr);
1977
1978 err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr);
1979 if (err) {
1980 mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
1981 device_remove_file(&info->dev->pdev->dev, &info->port_attr);
1982 info->port = -1;
1983 }
1984
1985 	return err;
1986 }
1987
1988static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
1989 {
1990 if (info->port < 0)
1991 return;
1992
1993 device_remove_file(&info->dev->pdev->dev, &info->port_attr);
1994 	device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr);
1995 }
1996
1997 static int mlx4_init_steering(struct mlx4_dev *dev)
1998 {
1999 struct mlx4_priv *priv = mlx4_priv(dev);
2000 int num_entries = dev->caps.num_ports;
2001 int i, j;
2002
2003 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
2004 if (!priv->steer)
2005 return -ENOMEM;
2006
2007 	for (i = 0; i < num_entries; i++)
2008 for (j = 0; j < MLX4_NUM_STEERS; j++) {
2009 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
2010 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
2011 }
2012 return 0;
2013 }
2014
2015static void mlx4_clear_steering(struct mlx4_dev *dev)
2016 {
2017 struct mlx4_priv *priv = mlx4_priv(dev);
2018 struct mlx4_steer_index *entry, *tmp_entry;
2019 struct mlx4_promisc_qp *pqp, *tmp_pqp;
2020 int num_entries = dev->caps.num_ports;
2021 int i, j;
2022
2023 for (i = 0; i < num_entries; i++) {
2024 for (j = 0; j < MLX4_NUM_STEERS; j++) {
2025 list_for_each_entry_safe(pqp, tmp_pqp,
2026 &priv->steer[i].promisc_qps[j],
2027 list) {
2028 list_del(&pqp->list);
2029 kfree(pqp);
2030 }
2031 list_for_each_entry_safe(entry, tmp_entry,
2032 &priv->steer[i].steer_entries[j],
2033 list) {
2034 list_del(&entry->list);
2035 list_for_each_entry_safe(pqp, tmp_pqp,
2036 &entry->duplicates,
2037 list) {
2038 list_del(&pqp->list);
2039 kfree(pqp);
2040 }
2041 kfree(entry);
2042 }
2043 }
2044 }
2045 kfree(priv->steer);
2046 }
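/*
 * The teardown above must use list_for_each_entry_safe(), because nodes
 * are unlinked and freed while the list is walked. Minimal sketch of the
 * idiom (struct and function hypothetical):
 */
#if 0	/* example only */
struct example_node {
	struct list_head list;
};

static void example_drain(struct list_head *head)
{
	struct example_node *n, *tmp;

	list_for_each_entry_safe(n, tmp, head, list) {
		list_del(&n->list);
		kfree(n);	/* safe: tmp already holds the next node */
	}
}
#endif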
2047
2048 static int extended_func_num(struct pci_dev *pdev)
2049 {
2050 	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
2051 }
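/*
 * PCI devfn packs the device (slot) number in bits 7:3 and the function
 * number in bits 2:0, so the expression above linearizes the pair:
 * e.g. devfn 0x0a (slot 1, function 2) yields 1 * 8 + 2 = 10.
 */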
2052
2053#define MLX4_OWNER_BASE 0x8069c
2054#define MLX4_OWNER_SIZE 4
2055
2056static int mlx4_get_ownership(struct mlx4_dev *dev)
2057 {
2058 void __iomem *owner;
2059 u32 ret;
2060
2061 if (pci_channel_offline(dev->pdev))
2062 return -EIO;
2063
2064 owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
2065 MLX4_OWNER_SIZE);
2066 if (!owner) {
2067 mlx4_err(dev, "Failed to obtain ownership bit\n");
2068 return -ENOMEM;
2069 }
2070
2071 ret = readl(owner);
2072 iounmap(owner);
2073 return (int) !!ret;
2074 }
2075
2076static void mlx4_free_ownership(struct mlx4_dev *dev)
2077 {
2078 void __iomem *owner;
2079
2080 if (pci_channel_offline(dev->pdev))
2081 return;
2082
2083 owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
2084 MLX4_OWNER_SIZE);
2085 if (!owner) {
2086 mlx4_err(dev, "Failed to obtain ownership bit\n");
2087 return;
2088 }
2089 writel(0, owner);
2090 msleep(1000);
2091 iounmap(owner);
2092 }
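/*
 * The two helpers above treat the dword at MLX4_OWNER_BASE in BAR0 as a
 * crude ownership semaphore: a nonzero read means another function
 * already owns the device, and writing 0 releases it; the msleep(1000)
 * leaves time for the release to take effect before the mapping is torn
 * down.
 */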
2093
2094 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2095 {
2096 struct mlx4_priv *priv;
2097 struct mlx4_dev *dev;
2098 int err;
2099 	int port;
2100 
2101 	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
2102
2103 err = pci_enable_device(pdev);
2104 if (err) {
2105 dev_err(&pdev->dev, "Cannot enable PCI device, "
2106 "aborting.\n");
2107 return err;
2108 }
2109
2110 	/* Due to the requirement that all VFs and the PF are *guaranteed* 2 MACs
2111 	 * per port, we must limit the number of VFs to 63 (since there are
2112 	 * 128 MACs)
2113 	 */
2114 if (num_vfs >= MLX4_MAX_NUM_VF) {
2115 dev_err(&pdev->dev,
2116 "Requested more VF's (%d) than allowed (%d)\n",
2117 num_vfs, MLX4_MAX_NUM_VF - 1);
2118 return -EINVAL;
2119 }
2120
2121 if (num_vfs < 0) {
2122 pr_err("num_vfs module parameter cannot be negative\n");
2123 return -EINVAL;
2124 }
2125 	/*
2126 	 * Check for BARs.
2127 	 */
2128 	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
2129 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2130 		dev_err(&pdev->dev, "Missing DCS, aborting "
2131 			"(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
2132 			pci_dev_data, pci_resource_flags(pdev, 0));
2133 err = -ENODEV;
2134 goto err_disable_pdev;
2135 }
2136 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
2137 dev_err(&pdev->dev, "Missing UAR, aborting.\n");
2138 err = -ENODEV;
2139 goto err_disable_pdev;
2140 }
2141
2142 	err = pci_request_regions(pdev, DRV_NAME);
2143 	if (err) {
2144 		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
2145 goto err_disable_pdev;
2146 }
2147
2148 pci_set_master(pdev);
2149
2150 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2151 if (err) {
2152 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
2153 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2154 if (err) {
2155 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
2156 			goto err_release_regions;
2157 }
2158 }
2159 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2160 if (err) {
2161 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
2162 "consistent PCI DMA mask.\n");
2163 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2164 if (err) {
2165 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
2166 "aborting.\n");
2167 			goto err_release_regions;
2168 }
2169 }
2170
2171 /* Allow large DMA segments, up to the firmware limit of 1 GB */
2172 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
2173
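/*
 * Standard DMA-mask negotiation: try a 64-bit mask first and fall back
 * to 32-bit only if the platform rejects it, separately for streaming
 * and coherent mappings; only a double failure aborts the probe. The
 * 1 GB max segment size matches the firmware limit noted above.
 */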
2174 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
2175 	if (!priv) {
2176 		err = -ENOMEM;
2177 		goto err_release_regions;
2178 }
2179
2180 dev = &priv->dev;
2181 dev->pdev = pdev;
2182 INIT_LIST_HEAD(&priv->ctx_list);
2183 spin_lock_init(&priv->ctx_lock);
2184 
2185 mutex_init(&priv->port_mutex);
2186
2187 INIT_LIST_HEAD(&priv->pgdir_list);
2188 mutex_init(&priv->pgdir_mutex);
2189
2190 INIT_LIST_HEAD(&priv->bf_list);
2191 mutex_init(&priv->bf_mutex);
2192
2193 	dev->rev_id = pdev->revision;
2194 	/* Detect if this device is a virtual function */
2195 	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
2196 		/* When acting as PF, we normally skip VFs unless explicitly
2197 		 * requested to probe them. */
2198 if (num_vfs && extended_func_num(pdev) > probe_vf) {
2199 mlx4_warn(dev, "Skipping virtual function:%d\n",
2200 extended_func_num(pdev));
2201 err = -ENODEV;
2202 goto err_free_dev;
2203 }
2204 mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
2205 dev->flags |= MLX4_FLAG_SLAVE;
2206 } else {
2207 /* We reset the device and enable SRIOV only for physical
2208 * devices. Try to claim ownership on the device;
2209 * if already taken, skip -- do not allow multiple PFs */
2210 err = mlx4_get_ownership(dev);
2211 if (err) {
2212 if (err < 0)
2213 goto err_free_dev;
2214 else {
2215 mlx4_warn(dev, "Multiple PFs not yet supported."
2216 " Skipping PF.\n");
2217 err = -EINVAL;
2218 goto err_free_dev;
2219 }
2220 }
2221 
2222 		if (num_vfs) {
2223 			mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs);
2224 err = pci_enable_sriov(pdev, num_vfs);
2225 if (err) {
2226 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
2227 err);
2228 err = 0;
2229 } else {
2230 mlx4_warn(dev, "Running in master mode\n");
2231 dev->flags |= MLX4_FLAG_SRIOV |
2232 MLX4_FLAG_MASTER;
2233 dev->num_vfs = num_vfs;
2234 }
2235 }
2236
2237 atomic_set(&priv->opreq_count, 0);
2238 INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
2239
2240 /*
2241 * Now reset the HCA before we touch the PCI capabilities or
2242 * attempt a firmware command, since a boot ROM may have left
2243 * the HCA in an undefined state.
2244 */
2245 err = mlx4_reset(dev);
2246 if (err) {
2247 mlx4_err(dev, "Failed to reset HCA, aborting.\n");
2248 goto err_rel_own;
2249 }
2250 }
2251
2252 slave_start:
2253 err = mlx4_cmd_init(dev);
2254 if (err) {
2255 		mlx4_err(dev, "Failed to init command interface, aborting.\n");
2256 goto err_sriov;
2257 }
2258
2259 /* In slave functions, the communication channel must be initialized
2260 * before posting commands. Also, init num_slaves before calling
2261 * mlx4_init_hca */
2262 if (mlx4_is_mfunc(dev)) {
2263 if (mlx4_is_master(dev))
2264 dev->num_slaves = MLX4_MAX_NUM_SLAVES;
2265 else {
2266 dev->num_slaves = 0;
2267 err = mlx4_multi_func_init(dev);
2268 if (err) {
2269 mlx4_err(dev, "Failed to init slave mfunc"
2270 " interface, aborting.\n");
2271 goto err_cmd;
2272 }
2273 }
2274 }
2275
2276 err = mlx4_init_hca(dev);
2277 if (err) {
2278 if (err == -EACCES) {
2279 /* Not primary Physical function
2280 * Running in slave mode */
2281 mlx4_cmd_cleanup(dev);
2282 dev->flags |= MLX4_FLAG_SLAVE;
2283 dev->flags &= ~MLX4_FLAG_MASTER;
2284 goto slave_start;
2285 } else
2286 goto err_mfunc;
2287 }
2288
2289 /* In master functions, the communication channel must be initialized
2290 * after obtaining its address from fw */
2291 if (mlx4_is_master(dev)) {
2292 err = mlx4_multi_func_init(dev);
2293 if (err) {
2294 			mlx4_err(dev, "Failed to init master mfunc"
2295 				 " interface, aborting.\n");
2296 goto err_close;
2297 }
2298 }
2299 
2300 err = mlx4_alloc_eq_table(dev);
2301 if (err)
2302 		goto err_master_mfunc;
2303 
2304 	priv->msix_ctl.pool_bm = 0;
2305 	mutex_init(&priv->msix_ctl.pool_lock);
2306 
2307 	mlx4_enable_msi_x(dev);
2308 if ((mlx4_is_mfunc(dev)) &&
2309 !(dev->flags & MLX4_FLAG_MSI_X)) {
2310 		err = -ENOSYS;
2311 		mlx4_err(dev, "INTx is not supported in multi-function mode, "
2312 			 "aborting.\n");
2313 		goto err_free_eq;
2314 }
2315
2316 if (!mlx4_is_slave(dev)) {
2317 err = mlx4_init_steering(dev);
2318 if (err)
2319 goto err_free_eq;
2320 }
2321 
2322 	err = mlx4_setup_hca(dev);
2323 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
2324 !mlx4_is_mfunc(dev)) {
2325 		dev->flags &= ~MLX4_FLAG_MSI_X;
2326 dev->caps.num_comp_vectors = 1;
2327 dev->caps.comp_pool = 0;
2328 pci_disable_msix(pdev);
2329 err = mlx4_setup_hca(dev);
2330 }
2331
2332 	if (err)
2333 		goto err_steer;
2334 
2335 mlx4_init_quotas(dev);
2336
2337 for (port = 1; port <= dev->caps.num_ports; port++) {
2338 err = mlx4_init_port_info(dev, port);
2339 if (err)
2340 goto err_port;
2341 }
2342 
2343 err = mlx4_register_device(dev);
2344 if (err)
2345 		goto err_port;
2346 
2347 mlx4_request_modules(dev);
2348
2349 mlx4_sense_init(dev);
2350 mlx4_start_sense(dev);
2351
2352 	priv->pci_dev_data = pci_dev_data;
2353 pci_set_drvdata(pdev, dev);
2354
2355 return 0;
2356
2357 err_port:
2358 	for (--port; port >= 1; --port)
2359 mlx4_cleanup_port_info(&priv->port[port]);
2360
2361 	mlx4_cleanup_counters_table(dev);
2362 mlx4_cleanup_qp_table(dev);
2363 mlx4_cleanup_srq_table(dev);
2364 mlx4_cleanup_cq_table(dev);
2365 mlx4_cmd_use_polling(dev);
2366 mlx4_cleanup_eq_table(dev);
2367 	mlx4_cleanup_mcg_table(dev);
2368 	mlx4_cleanup_mr_table(dev);
2369 	mlx4_cleanup_xrcd_table(dev);
2370 mlx4_cleanup_pd_table(dev);
2371 mlx4_cleanup_uar_table(dev);
2372
2373 err_steer:
2374 if (!mlx4_is_slave(dev))
2375 mlx4_clear_steering(dev);
2376 
2377 err_free_eq:
2378 mlx4_free_eq_table(dev);
2379
2380 err_master_mfunc:
2381 if (mlx4_is_master(dev))
2382 mlx4_multi_func_cleanup(dev);
2383
2384 err_close:
2385 if (dev->flags & MLX4_FLAG_MSI_X)
2386 pci_disable_msix(pdev);
2387
2388 mlx4_close_hca(dev);
2389
2390 err_mfunc:
2391 if (mlx4_is_slave(dev))
2392 mlx4_multi_func_cleanup(dev);
2393
2394 err_cmd:
2395 mlx4_cmd_cleanup(dev);
2396
2397 err_sriov:
2398 	if (dev->flags & MLX4_FLAG_SRIOV)
2399 pci_disable_sriov(pdev);
2400
2401 err_rel_own:
2402 if (!mlx4_is_slave(dev))
2403 mlx4_free_ownership(dev);
2404
2405 err_free_dev:
2406 kfree(priv);
2407
2408 err_release_regions:
2409 pci_release_regions(pdev);
2410 
2411 err_disable_pdev:
2412 pci_disable_device(pdev);
2413 pci_set_drvdata(pdev, NULL);
2414 return err;
2415 }
2416
2417 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
2418 {
2419 	printk_once(KERN_INFO "%s", mlx4_version);
2420 
2421 	return __mlx4_init_one(pdev, id->driver_data);
2422 }
2423
2424static void mlx4_remove_one(struct pci_dev *pdev)
2425 {
2426 struct mlx4_dev *dev = pci_get_drvdata(pdev);
2427 struct mlx4_priv *priv = mlx4_priv(dev);
2428 int p;
2429
2430 if (dev) {
2431 /* in SRIOV it is not allowed to unload the pf's
2432 * driver while there are alive vf's */
2433 if (mlx4_is_master(dev)) {
2434 if (mlx4_how_many_lives_vf(dev))
2435 				printk(KERN_ERR "Removing PF when there are assigned VFs!\n");
2436 }
2437 		mlx4_stop_sense(dev);
2438 mlx4_unregister_device(dev);
2439
2440 for (p = 1; p <= dev->caps.num_ports; p++) {
2441 mlx4_cleanup_port_info(&priv->port[p]);
2442 			mlx4_CLOSE_PORT(dev, p);
2443 		}
2444 
2445 if (mlx4_is_master(dev))
2446 mlx4_free_resource_tracker(dev,
2447 RES_TR_FREE_SLAVES_ONLY);
2448
2449 		mlx4_cleanup_counters_table(dev);
2450 mlx4_cleanup_qp_table(dev);
2451 mlx4_cleanup_srq_table(dev);
2452 mlx4_cleanup_cq_table(dev);
2453 mlx4_cmd_use_polling(dev);
2454 mlx4_cleanup_eq_table(dev);
2455 		mlx4_cleanup_mcg_table(dev);
2456 		mlx4_cleanup_mr_table(dev);
2457 		mlx4_cleanup_xrcd_table(dev);
2458 mlx4_cleanup_pd_table(dev);
2459
2460 		if (mlx4_is_master(dev))
2461 mlx4_free_resource_tracker(dev,
2462 RES_TR_FREE_STRUCTS_ONLY);
2463 
2464 iounmap(priv->kar);
2465 mlx4_uar_free(dev, &priv->driver_uar);
2466 mlx4_cleanup_uar_table(dev);
2467 if (!mlx4_is_slave(dev))
2468 mlx4_clear_steering(dev);
2469 		mlx4_free_eq_table(dev);
2470 if (mlx4_is_master(dev))
2471 mlx4_multi_func_cleanup(dev);
2472 		mlx4_close_hca(dev);
2473 if (mlx4_is_slave(dev))
2474 mlx4_multi_func_cleanup(dev);
2475 mlx4_cmd_cleanup(dev);
2476
2477 if (dev->flags & MLX4_FLAG_MSI_X)
2478 pci_disable_msix(pdev);
2479 		if (dev->flags & MLX4_FLAG_SRIOV) {
2480 			mlx4_warn(dev, "Disabling SR-IOV\n");
2481 pci_disable_sriov(pdev);
2482 }
2483 
2484 if (!mlx4_is_slave(dev))
2485 mlx4_free_ownership(dev);
2486
2487 kfree(dev->caps.qp0_tunnel);
2488 kfree(dev->caps.qp0_proxy);
2489 kfree(dev->caps.qp1_tunnel);
2490 kfree(dev->caps.qp1_proxy);
2491
2492 		kfree(priv);
2493 		pci_release_regions(pdev);
2494 pci_disable_device(pdev);
2495 pci_set_drvdata(pdev, NULL);
2496 }
2497 }
2498
2499 int mlx4_restart_one(struct pci_dev *pdev)
2500 {
839f1243
RD
2501 struct mlx4_dev *dev = pci_get_drvdata(pdev);
2502 struct mlx4_priv *priv = mlx4_priv(dev);
2503 int pci_dev_data;
2504
2505 pci_dev_data = priv->pci_dev_data;
2506 	mlx4_remove_one(pdev);
2507 	return __mlx4_init_one(pdev, pci_dev_data);
2508 }
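/*
 * mlx4_restart_one() is an in-place rebind used e.g. by the
 * catastrophic-error recovery path: it saves the PCI driver_data flags,
 * runs the full remove path, and re-probes with the saved flags, so the
 * reset looks like an ordinary unplug/replug to the rest of the stack.
 */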
2509
2510 static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
2511 	/* MT25408 "Hermon" SDR */
2512 	{ PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2513 	/* MT25408 "Hermon" DDR */
2514 	{ PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2515 	/* MT25408 "Hermon" QDR */
2516 	{ PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2517 	/* MT25408 "Hermon" DDR PCIe gen2 */
2518 	{ PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2519 	/* MT25408 "Hermon" QDR PCIe gen2 */
2520 	{ PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2521 	/* MT25408 "Hermon" EN 10GigE */
2522 	{ PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2523 	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
2524 	{ PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2525 	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
2526 	{ PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2527 	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
2528 	{ PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2529 	/* MT26468 ConnectX EN 10GigE PCIe gen2 */
2530 	{ PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2531 	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
2532 	{ PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2533 	/* MT26478 ConnectX2 40GigE PCIe gen2 */
2534 	{ PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2535 	/* MT25400 Family [ConnectX-2 Virtual Function] */
2536 	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
2537 	/* MT27500 Family [ConnectX-3] */
2538 	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
2539 	/* MT27500 Family [ConnectX-3 Virtual Function] */
2540 	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
2541 	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
2542 	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
2543 	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
2544 	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
2545 	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
2546 	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
2547 	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
2548 	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
2549 	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
2550 	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
2551 	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
2552 	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
2553 	{ 0, }
2554 };
2555
2556MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
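/*
 * The second field of each entry above is delivered in id->driver_data
 * and forwarded to __mlx4_init_one() as pci_dev_data; this is how VF
 * devices (MLX4_PCI_DEV_IS_VF) are recognized before any firmware
 * command runs. MODULE_DEVICE_TABLE() also exports the table so
 * userspace can autoload the module from a device's modalias.
 */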
2557
2558static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
2559 pci_channel_state_t state)
2560 {
2561 mlx4_remove_one(pdev);
2562
2563 return state == pci_channel_io_perm_failure ?
2564 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
2565 }
2566
2567static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
2568 {
2569 	int ret = __mlx4_init_one(pdev, 0);
2570
2571 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
2572 }
2573
2574 static const struct pci_error_handlers mlx4_err_handler = {
2575 .error_detected = mlx4_pci_err_detected,
2576 .slot_reset = mlx4_pci_slot_reset,
2577 };
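/*
 * AER recovery flow: on a PCI channel error the core calls
 * .error_detected, which tears the device down completely; unless the
 * failure is permanent, .slot_reset then re-probes from scratch, and
 * recovery succeeds only if that re-init returns 0.
 */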
2578
2579static struct pci_driver mlx4_driver = {
2580 .name = DRV_NAME,
2581 .id_table = mlx4_pci_table,
2582 .probe = mlx4_init_one,
2583 	.remove		= mlx4_remove_one,
2584 	.err_handler	= &mlx4_err_handler,
2585 };
2586
2587 static int __init mlx4_verify_params(void)
2588 {
2589 if ((log_num_mac < 0) || (log_num_mac > 7)) {
2590 		pr_warning("mlx4_core: bad log_num_mac: %d\n", log_num_mac);
2591 return -1;
2592 }
2593
2594 if (log_num_vlan != 0)
2595 pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
2596 MLX4_LOG_NUM_VLANS);
2597 
2598 	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
2599 		pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
2600 return -1;
2601 }
2602
2603 /* Check if module param for ports type has legal combination */
2604 if (port_type_array[0] == false && port_type_array[1] == true) {
2605 printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
2606 port_type_array[0] = true;
2607 }
2608
2609 if (mlx4_log_num_mgm_entry_size != -1 &&
2610 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
2611 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
2612 pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not "
2613 "in legal range (-1 or %d..%d)\n",
2614 mlx4_log_num_mgm_entry_size,
2615 MLX4_MIN_MGM_LOG_ENTRY_SIZE,
2616 MLX4_MAX_MGM_LOG_ENTRY_SIZE);
2617 return -1;
2618 }
2619
2620 return 0;
2621 }
2622
2623static int __init mlx4_init(void)
2624 {
2625 int ret;
2626
2627 if (mlx4_verify_params())
2628 return -EINVAL;
2629
2630 mlx4_catas_init();
2631
2632 mlx4_wq = create_singlethread_workqueue("mlx4");
2633 if (!mlx4_wq)
2634 return -ENOMEM;
2635 
2636 ret = pci_register_driver(&mlx4_driver);
2637 return ret < 0 ? ret : 0;
2638 }
2639
2640static void __exit mlx4_cleanup(void)
2641 {
2642 pci_unregister_driver(&mlx4_driver);
2643 	destroy_workqueue(mlx4_wq);
2644 }
2645
2646module_init(mlx4_init);
2647module_exit(mlx4_cleanup);