net/mlx4: Adapt code for N-Port VF
drivers/net/ethernet/mellanox/mlx4/main.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/kmod.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static int num_vfs;
module_param(num_vfs, int, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");

static int probe_vf;
module_param(probe_vf, int, 0644);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");

int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
			mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, which defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248. Range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");

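/*
 * A worked example of the relation quoted above, taken from the formula used
 * in slave_adjust_steering_mode() later in this file: an MGM entry of
 * 2^log_num_mgm_entry_size bytes holds 4 * (2^log / 16 - 2) QPs, so a value
 * of 10 gives 4 * (1024 / 16 - 2) = 248 QPs per multicast group.
 */
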
static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");

#define HCA_GLOBAL_CAP_MASK            0

#define PF_CONTEXT_BEHAVIOUR_MASK       MLX4_FUNC_CAP_64B_EQE_CQE

static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
		  "(0/1, default 0)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
				"1 for IB, 2 for Ethernet");

struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

static atomic_t pf_loading = ATOMIC_INIT(0);

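/*
 * mlx4_check_port_params() below enforces two firmware-reported constraints:
 * all ports must carry the same type unless the HCA advertises DPDP (dual
 * port, different protocols), and each requested type must be in the port's
 * supported_type mask. A sketch of the failing case, assuming a two-port
 * device without DPDP:
 *
 *	enum mlx4_port_type types[2] = { MLX4_PORT_TYPE_IB,
 *					 MLX4_PORT_TYPE_ETH };
 *	mlx4_check_port_params(dev, types) == -EINVAL
 */
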
int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported "
					 "on this HCA, aborting.\n");
				return -EINVAL;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not "
				      "supported on this HCA\n", i + 1);
			return -EINVAL;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports	     = dev_cap->num_ports;
	dev->phys_caps.num_phys_eqs  = MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i]	    = dev_cap->max_vl[i];
		dev->caps.ib_mtu_cap[i]	    = dev_cap->ib_mtu[i];
		dev->phys_caps.gid_phys_table_len[i]  = dev_cap->max_gids[i];
		dev->phys_caps.pkey_phys_table_len[i] = dev_cap->max_pkeys[i];
		/* set gid and pkey table operating lengths by default
		 * to non-sriov values */
		dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
		dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
		dev->caps.def_mac[i]	    = dev_cap->def_mac[i];
		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
		dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
		dev->caps.default_sense[i] = dev_cap->default_sense[i];
		dev->caps.trans_type[i]	    = dev_cap->trans_type[i];
		dev->caps.vendor_oui[i]	    = dev_cap->vendor_oui[i];
		dev->caps.wavelength[i]	    = dev_cap->wavelength[i];
		dev->caps.trans_code[i]	    = dev_cap->trans_code[i];
	}

	dev->caps.uar_page_size	     = PAGE_SIZE;
	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg	     = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts	     = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;

	/* The first 128 UARs are used for EQ doorbells */
	dev->caps.reserved_uars	     = max_t(int, 128, dev_cap->reserved_uars);
	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds	     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz	     = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz	     = dev_cap->max_msg_sz;
	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags		     = dev_cap->flags;
	dev->caps.flags2	     = dev_cap->flags2;
	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;

	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
	/* Don't do sense port on multifunction devices (for now at least) */
	if (mlx4_is_mfunc(dev))
		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	dev->caps.log_num_macs	= log_num_mac;
	dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	dev->caps.log_num_prios = use_prio ? 3 : 0;

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported, assign IB */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			else {
				/* if IB and ETH are supported, we set the port
				 * type according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i - 1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port.
		 * 3. FW declared that it supports link sensing.
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform sense_port FW command to try and set the correct
		 * port type from beginning
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
			mlx4_warn(dev, "Requested number of MACs is too high "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
			mlx4_warn(dev, "Requested number of VLANs is too high "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

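	/*
	 * A worked sizing example under the module defaults above (not a
	 * claim about every configuration): with log_num_mac = 7,
	 * MLX4_LOG_NUM_VLANS = 7 and use_prio off (log_num_prios = 0), the
	 * ETH_ADDR and FC_ADDR regions below each reserve
	 * 128 * 128 * 1 * num_ports QPs, i.e. 32768 QPs on a two-port HCA.
	 */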
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		(1 << dev->caps.log_num_prios) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;

	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
		if (dev_cap->flags &
		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
		}
	}

	if ((dev->caps.flags &
	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
	    mlx4_is_master(dev))
		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;

	return 0;
}

static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
				       enum pci_bus_speed *speed,
				       enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define  PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1);
	err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}
	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
		return err1 ? err1 :
			err2 ? err2 : -EINVAL;
	}
	return 0;
}

static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	int err;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
	if (err) {
		mlx4_warn(dev,
			  "Unable to determine PCIe device BW capabilities\n");
		return;
	}

	err = pcie_get_minimum_link(dev->pdev, &speed, &width);
	if (err || speed == PCI_SPEED_UNKNOWN ||
	    width == PCIE_LNK_WIDTH_UNKNOWN) {
		mlx4_warn(dev,
			  "Unable to determine PCI device chain minimum BW\n");
		return;
	}

	if (width != width_cap || speed != speed_cap)
		mlx4_warn(dev,
			  "PCIe BW is different than device's capability\n");

	mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
		  PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
		  width, width_cap);
	return;
}
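
/*
 * Illustrative output only (the values are hypothetical, the format strings
 * are the ones above): a device capable of 8.0GT/s that trains at Gen2
 * behind an older switch would hit the mismatch warning and then log:
 *
 *	PCIe link speed is 5.0GT/s, device supports 8.0GT/s
 *	PCIe link width is x8, device supports x8
 */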

/* Check if there are live VFs and return how many there are. */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1 /* the PPF is slave 0 */; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}

int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)
		return -EINVAL;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		/* tunnel qp */
		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);
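
/*
 * Usage sketch for the helper above (the QPN is hypothetical): qkeys for
 * paravirtualized special QPs sit at a fixed offset from
 * MLX4_RESERVED_QKEY_BASE, so a tunnel QP that is 3 QPs past
 * base_tunnel_sqpn maps as
 *
 *	u32 qkey;
 *	int err = mlx4_get_parav_qkey(dev,
 *				      dev->phys_caps.base_tunnel_sqpn + 3,
 *				      &qkey);
 *
 * and on success qkey == MLX4_RESERVED_QKEY_BASE + 3.
 */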

void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);

void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);

__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return 0;

	return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);

int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);

static void slave_adjust_steering_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap,
				       struct mlx4_init_hca_param *hca_param)
{
	dev->caps.steering_mode = hca_param->steering_mode;
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);

	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));
}

static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int			   err;
	u32			   page_size;
	struct mlx4_dev_cap	   dev_cap;
	struct mlx4_func_cap	   func_cap;
	struct mlx4_init_hca_param hca_param;
	int			   i;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
		return err;
	}

	/* fail if the HCA has an unknown capability */
	if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
	    HCA_GLOBAL_CAP_MASK) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	dev->caps.hca_core_clock = hca_param.hca_core_clock;

	memset(&dev_cap, 0, sizeof(dev_cap));
	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size: %d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 page_size, PAGE_SIZE);
		return -ENODEV;
	}

	/* slave gets uar page size from QUERY_HCA fw command */
	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);

	/* TODO: relax this assumption */
	if (dev->caps.uar_page_size != PAGE_SIZE) {
		mlx4_err(dev, "UAR size: %d != kernel PAGE_SIZE of %ld\n",
			 dev->caps.uar_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n",
			 err);
		return err;
	}

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour\n");
		return -ENOSYS;
	}

	dev->caps.num_ports		= func_cap.num_ports;
	dev->quotas.qp			= func_cap.qp_quota;
	dev->quotas.srq			= func_cap.srq_quota;
	dev->quotas.cq			= func_cap.cq_quota;
	dev->quotas.mpt			= func_cap.mpt_quota;
	dev->quotas.mtt			= func_cap.mtt_quota;
	dev->caps.num_qps		= 1 << hca_param.log_num_qps;
	dev->caps.num_srqs		= 1 << hca_param.log_num_srqs;
	dev->caps.num_cqs		= 1 << hca_param.log_num_cqs;
	dev->caps.num_mpts		= 1 << hca_param.log_mpt_sz;
	dev->caps.num_eqs		= func_cap.max_eq;
	dev->caps.reserved_eqs		= func_cap.reserved_eq;
	dev->caps.num_pds		= MLX4_NUM_PDS;
	dev->caps.num_mgms		= 0;
	dev->caps.num_amgms		= 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);

	if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
	    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
		err = -ENOMEM;
		goto err_mem;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for"
				 " port %d, aborting (%d).\n", i, err);
			goto err_mem;
		}
		dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
		dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
		dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
		dev->caps.port_mask[i] = dev->caps.port_type[i];
		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
		if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						    &dev->caps.gid_table_len[i],
						    &dev->caps.pkey_table_len[i]))
			goto err_mem;
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
				       pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		goto err_mem;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
		dev->caps.eqe_size   = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size   = 32;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
		dev->caps.cqe_size   = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
	} else {
		dev->caps.cqe_size   = 32;
	}

	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
	mlx4_warn(dev, "Timestamping is not supported in slave mode.\n");

	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);

	return 0;

err_mem:
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
		dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;

	return err;
}

static void mlx4_request_modules(struct mlx4_dev *dev)
{
	int port;
	int has_ib_port = false;
	int has_eth_port = false;
#define EN_DRV_NAME	"mlx4_en"
#define IB_DRV_NAME	"mlx4_ib"

	for (port = 1; port <= dev->caps.num_ports; port++) {
		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
			has_ib_port = true;
		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
			has_eth_port = true;
	}

	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
		request_module_nowait(IB_DRV_NAME);
	if (has_eth_port)
		request_module_nowait(EN_DRV_NAME);
}

/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port, -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, "
					      "aborting\n", port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
		if (err) {
			mlx4_err(dev, "Failed to register device\n");
			goto out;
		}
		mlx4_request_modules(dev);
	}

out:
	return err;
}

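/*
 * The two sysfs handlers below read and switch a port's personality at
 * runtime. A usage sketch, assuming the attribute (created elsewhere in
 * this driver) is exposed as "mlx4_port1" under the PCI device directory:
 *
 *	# cat /sys/bus/pci/devices/<bdf>/mlx4_port1
 *	auto (eth)
 *	# echo ib > /sys/bus/pci/devices/<bdf>/mlx4_port1
 */
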
static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}

static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
		return -EINVAL;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
			       "Set only 'eth' or 'ib' for both ports "
			       "(should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
	return err ? err : count;
}

enum ibta_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}
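
/*
 * Round-trip example for the two helpers above: IBTA encodes the MTU as a
 * small enum rather than a byte count, so
 *
 *	int_to_ibta_mtu(2048) == IB_MTU_2048 == 4
 *	ibta_mtu_to_int(IB_MTU_2048) == 2048
 *	int_to_ibta_mtu(1500) == -1	(not a valid IBTA MTU)
 */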

static ssize_t show_port_ib_mtu(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	sprintf(buf, "%d\n",
			ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}

static ssize_t set_port_ib_mtu(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = kstrtoint(buf, 0, &mtu);
	if (!err)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is an invalid IBTA mtu\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port, -1);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, "
				      "aborting\n", port);
			goto err_set_port;
		}
	}
	err = mlx4_register_device(mdev);
err_set_port:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}

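/*
 * mlx4_load_fw() performs the firmware bring-up sequence visible in its
 * body: allocate ICM pages for the firmware area, hand them to the device
 * with MAP_FA, then start the firmware with RUN_FW; each failure unwinds
 * the steps before it.
 */
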
static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting.\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting.\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}

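/*
 * The cMPT ("control MPT") area is one ICM region that
 * mlx4_init_cmpt_table() below slices into four fixed windows, one per
 * resource type; each window starts at
 *
 *	cmpt_base + ((u64) (MLX4_CMPT_TYPE_<X> * cmpt_entry_sz)
 *		     << MLX4_CMPT_SHIFT)
 *
 * for X in QP, SRQ, CQ, EQ, and the error path tears the tables down in
 * reverse order of creation.
 */
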
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
		  dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}

static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
		goto err_unmap_aux;
	}


	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
		   dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
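	/*
	 * A worked example with hypothetical values: 9 reserved segments of
	 * 64 bytes each under a 128-byte DMA cacheline give
	 * ALIGN(9 * 64, 128) / 64 = 640 / 64 = 10 reserved segments.
	 */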
1138
1139         err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
1140                                   init_hca->mtt_base,
1141                                   dev->caps.mtt_entry_sz,
1142                                   dev->caps.num_mtts,
1143                                   dev->caps.reserved_mtts, 1, 0);
1144         if (err) {
1145                 mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
1146                 goto err_unmap_eq;
1147         }
1148
1149         err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
1150                                   init_hca->dmpt_base,
1151                                   dev_cap->dmpt_entry_sz,
1152                                   dev->caps.num_mpts,
1153                                   dev->caps.reserved_mrws, 1, 1);
1154         if (err) {
1155                 mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
1156                 goto err_unmap_mtt;
1157         }
1158
1159         err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
1160                                   init_hca->qpc_base,
1161                                   dev_cap->qpc_entry_sz,
1162                                   dev->caps.num_qps,
1163                                   dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1164                                   0, 0);
1165         if (err) {
1166                 mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
1167                 goto err_unmap_dmpt;
1168         }
1169
1170         err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
1171                                   init_hca->auxc_base,
1172                                   dev_cap->aux_entry_sz,
1173                                   dev->caps.num_qps,
1174                                   dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1175                                   0, 0);
1176         if (err) {
1177                 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
1178                 goto err_unmap_qp;
1179         }
1180
1181         err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
1182                                   init_hca->altc_base,
1183                                   dev_cap->altc_entry_sz,
1184                                   dev->caps.num_qps,
1185                                   dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1186                                   0, 0);
1187         if (err) {
1188                 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
1189                 goto err_unmap_auxc;
1190         }
1191
1192         err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
1193                                   init_hca->rdmarc_base,
1194                                   dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
1195                                   dev->caps.num_qps,
1196                                   dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1197                                   0, 0);
1198         if (err) {
1199                 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
1200                 goto err_unmap_altc;
1201         }
1202
1203         err = mlx4_init_icm_table(dev, &priv->cq_table.table,
1204                                   init_hca->cqc_base,
1205                                   dev_cap->cqc_entry_sz,
1206                                   dev->caps.num_cqs,
1207                                   dev->caps.reserved_cqs, 0, 0);
1208         if (err) {
1209                 mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
1210                 goto err_unmap_rdmarc;
1211         }
1212
1213         err = mlx4_init_icm_table(dev, &priv->srq_table.table,
1214                                   init_hca->srqc_base,
1215                                   dev_cap->srq_entry_sz,
1216                                   dev->caps.num_srqs,
1217                                   dev->caps.reserved_srqs, 0, 0);
1218         if (err) {
1219                 mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
1220                 goto err_unmap_cq;
1221         }
1222
1223         /*
1224          * For flow steering device managed mode it is required to use
1225          * mlx4_init_icm_table. For B0 steering mode it's not strictly
1226          * required, but for simplicity just map the whole multicast
1227          * group table now.  The table isn't very big and it's a lot
1228          * easier than trying to track ref counts.
1229          */
1230         err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
1231                                   init_hca->mc_base,
1232                                   mlx4_get_mgm_entry_size(dev),
1233                                   dev->caps.num_mgms + dev->caps.num_amgms,
1234                                   dev->caps.num_mgms + dev->caps.num_amgms,
1235                                   0, 0);
1236         if (err) {
1237                 mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
1238                 goto err_unmap_srq;
1239         }
1240
1241         return 0;
1242
1243 err_unmap_srq:
1244         mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1245
1246 err_unmap_cq:
1247         mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1248
1249 err_unmap_rdmarc:
1250         mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1251
1252 err_unmap_altc:
1253         mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1254
1255 err_unmap_auxc:
1256         mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1257
1258 err_unmap_qp:
1259         mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1260
1261 err_unmap_dmpt:
1262         mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1263
1264 err_unmap_mtt:
1265         mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1266
1267 err_unmap_eq:
1268         mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1269
1270 err_unmap_cmpt:
1271         mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1272         mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1273         mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1274         mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1275
1276 err_unmap_aux:
1277         mlx4_UNMAP_ICM_AUX(dev);
1278
1279 err_free_aux:
1280         mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1281
1282         return err;
1283 }
1284
1285 static void mlx4_free_icms(struct mlx4_dev *dev)
1286 {
1287         struct mlx4_priv *priv = mlx4_priv(dev);
1288
1289         mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
1290         mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1291         mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1292         mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1293         mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1294         mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1295         mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1296         mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1297         mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1298         mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1299         mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1300         mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1301         mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1302         mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1303
1304         mlx4_UNMAP_ICM_AUX(dev);
1305         mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1306 }
1307
1308 static void mlx4_slave_exit(struct mlx4_dev *dev)
1309 {
1310         struct mlx4_priv *priv = mlx4_priv(dev);
1311
1312         mutex_lock(&priv->cmd.slave_cmd_mutex);
1313         if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
1314                 mlx4_warn(dev, "Failed to close slave function.\n");
1315         mutex_unlock(&priv->cmd.slave_cmd_mutex);
1316 }
1317
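/*
 * BAR 2 holds the UAR pages first (num_uars pages), followed by the
 * BlueFlame registers; only the BlueFlame tail is mapped here, as a
 * write-combining region.
 */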
1318 static int map_bf_area(struct mlx4_dev *dev)
1319 {
1320         struct mlx4_priv *priv = mlx4_priv(dev);
1321         resource_size_t bf_start;
1322         resource_size_t bf_len;
1323         int err = 0;
1324
1325         if (!dev->caps.bf_reg_size)
1326                 return -ENXIO;
1327
1328         bf_start = pci_resource_start(dev->pdev, 2) +
1329                         (dev->caps.num_uars << PAGE_SHIFT);
1330         bf_len = pci_resource_len(dev->pdev, 2) -
1331                         (dev->caps.num_uars << PAGE_SHIFT);
1332         priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
1333         if (!priv->bf_mapping)
1334                 err = -ENOMEM;
1335
1336         return err;
1337 }
1338
1339 static void unmap_bf_area(struct mlx4_dev *dev)
1340 {
1341         if (mlx4_priv(dev)->bf_mapping)
1342                 io_mapping_free(mlx4_priv(dev)->bf_mapping);
1343 }
1344
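/*
 * Read the free-running 64-bit HCA clock from two 32-bit big-endian MMIO
 * words.  The high word is sampled before and after the low word; if the
 * two samples differ, the low word wrapped around in between and the read
 * is retried (up to 10 times).
 */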
1345 cycle_t mlx4_read_clock(struct mlx4_dev *dev)
1346 {
1347         u32 clockhi, clocklo, clockhi1;
1348         cycle_t cycles;
1349         int i;
1350         struct mlx4_priv *priv = mlx4_priv(dev);
1351
1352         for (i = 0; i < 10; i++) {
1353                 clockhi = swab32(readl(priv->clock_mapping));
1354                 clocklo = swab32(readl(priv->clock_mapping + 4));
1355                 clockhi1 = swab32(readl(priv->clock_mapping));
1356                 if (clockhi == clockhi1)
1357                         break;
1358         }
1359
1360         cycles = (u64) clockhi << 32 | (u64) clocklo;
1361
1362         return cycles;
1363 }
1364 EXPORT_SYMBOL_GPL(mlx4_read_clock);
1365
1366
1367 static int map_internal_clock(struct mlx4_dev *dev)
1368 {
1369         struct mlx4_priv *priv = mlx4_priv(dev);
1370
1371         priv->clock_mapping =
1372                 ioremap(pci_resource_start(dev->pdev, priv->fw.clock_bar) +
1373                         priv->fw.clock_offset, MLX4_CLOCK_SIZE);
1374
1375         if (!priv->clock_mapping)
1376                 return -ENOMEM;
1377
1378         return 0;
1379 }
1380
1381 static void unmap_internal_clock(struct mlx4_dev *dev)
1382 {
1383         struct mlx4_priv *priv = mlx4_priv(dev);
1384
1385         if (priv->clock_mapping)
1386                 iounmap(priv->clock_mapping);
1387 }
1388
1389 static void mlx4_close_hca(struct mlx4_dev *dev)
1390 {
1391         unmap_internal_clock(dev);
1392         unmap_bf_area(dev);
1393         if (mlx4_is_slave(dev))
1394                 mlx4_slave_exit(dev);
1395         else {
1396                 mlx4_CLOSE_HCA(dev, 0);
1397                 mlx4_free_icms(dev);
1398                 mlx4_UNMAP_FA(dev);
1399                 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
1400         }
1401 }
1402
1403 static int mlx4_init_slave(struct mlx4_dev *dev)
1404 {
1405         struct mlx4_priv *priv = mlx4_priv(dev);
1406         u64 dma = (u64) priv->mfunc.vhcr_dma;
1407         int ret_from_reset = 0;
1408         u32 slave_read;
1409         u32 cmd_channel_ver;
1410
1411         if (atomic_read(&pf_loading)) {
1412                 mlx4_warn(dev, "PF is not ready. Deferring probe\n");
1413                 return -EPROBE_DEFER;
1414         }
1415
1416         mutex_lock(&priv->cmd.slave_cmd_mutex);
1417         priv->cmd.max_cmds = 1;
1418         mlx4_warn(dev, "Sending reset\n");
1419         ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
1420                                        MLX4_COMM_TIME);
1421         /* If we are in the middle of FLR, the slave will retry
1422          * NUM_OF_RESET_RETRIES times before giving up. */
1423         if (ret_from_reset) {
1424                 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
1425                         mlx4_warn(dev, "slave is currently in the "
1426                                   "middle of FLR. Deferring probe.\n");
1427                         mutex_unlock(&priv->cmd.slave_cmd_mutex);
1428                         return -EPROBE_DEFER;
1429                 } else
1430                         goto err;
1431         }
1432
1433         /* Check the driver version: the slave I/F revision
1434          * must match the master's. */
1435         slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
1436         cmd_channel_ver = mlx4_comm_get_version();
1437
1438         if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
1439                 MLX4_COMM_GET_IF_REV(slave_read)) {
1440                 mlx4_err(dev, "slave driver version is not supported"
1441                          " by the master\n");
1442                 goto err;
1443         }
1444
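        /*
         * Hand the 64-bit VHCR DMA address to the master 16 bits at a
         * time: each comm-channel command carries one 16-bit chunk of the
         * address, from the most significant (VHCR0, bits 63:48) down to
         * VHCR_EN, which carries bits 15:0 and enables the VHCR.
         */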
1445         mlx4_warn(dev, "Sending vhcr0\n");
1446         if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
1447                                                     MLX4_COMM_TIME))
1448                 goto err;
1449         if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
1450                                                     MLX4_COMM_TIME))
1451                 goto err;
1452         if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
1453                                                     MLX4_COMM_TIME))
1454                 goto err;
1455         if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
1456                 goto err;
1457
1458         mutex_unlock(&priv->cmd.slave_cmd_mutex);
1459         return 0;
1460
1461 err:
1462         mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
1463         mutex_unlock(&priv->cmd.slave_cmd_mutex);
1464         return -EIO;
1465 }
1466
1467 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
1468 {
1469         int i;
1470
1471         for (i = 1; i <= dev->caps.num_ports; i++) {
1472                 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
1473                         dev->caps.gid_table_len[i] =
1474                                 mlx4_get_slave_num_gids(dev, 0, i);
1475                 else
1476                         dev->caps.gid_table_len[i] = 1;
1477                 dev->caps.pkey_table_len[i] =
1478                         dev->phys_caps.pkey_phys_table_len[i] - 1;
1479         }
1480 }
1481
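/*
 * Pick the smallest log2 MGM entry size that can hold qp_per_entry QPs.
 * An entry of 2^i bytes consists of (2^i / 16) 16-byte lines; two lines
 * are taken by the entry header and GID, and each remaining line holds
 * four QPNs, hence the 4 * ((1 << i) / 16 - 2) capacity check.  Worked
 * example: i = 9 (512-byte entries) gives room for 4 * (32 - 2) = 120 QPs.
 */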
1482 static int choose_log_fs_mgm_entry_size(int qp_per_entry)
1483 {
1484         int i;
1485
1486         for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
1487               i++) {
1488                 if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
1489                         break;
1490         }
1491
1492         return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
1493 }
1494
1495 static void choose_steering_mode(struct mlx4_dev *dev,
1496                                  struct mlx4_dev_cap *dev_cap)
1497 {
1498         if (mlx4_log_num_mgm_entry_size == -1 &&
1499             dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
1500             (!mlx4_is_mfunc(dev) ||
1501              (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) &&
1502             choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
1503                 MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
1504                 dev->oper_log_mgm_entry_size =
1505                         choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
1506                 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
1507                 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
1508                 dev->caps.fs_log_max_ucast_qp_range_size =
1509                         dev_cap->fs_log_max_ucast_qp_range_size;
1510         } else {
1511                 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
1512                     dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
1513                         dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
1514                 else {
1515                         dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
1516
1517                         if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
1518                             dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
1519                                 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags "
1520                                           "set to use B0 steering. Falling back to A0 steering mode.\n");
1521                 }
1522                 dev->oper_log_mgm_entry_size =
1523                         mlx4_log_num_mgm_entry_size > 0 ?
1524                         mlx4_log_num_mgm_entry_size :
1525                         MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
1526                 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
1527         }
1528         mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, "
1529                  "modparam log_num_mgm_entry_size = %d\n",
1530                  mlx4_steering_mode_str(dev->caps.steering_mode),
1531                  dev->oper_log_mgm_entry_size,
1532                  mlx4_log_num_mgm_entry_size);
1533 }
1534
1535 static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
1536                                        struct mlx4_dev_cap *dev_cap)
1537 {
1538         if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
1539             dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
1540                 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
1541         else
1542                 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
1543
1544         mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode
1545                  == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
1546 }
1547
1548 static int mlx4_init_hca(struct mlx4_dev *dev)
1549 {
1550         struct mlx4_priv          *priv = mlx4_priv(dev);
1551         struct mlx4_adapter        adapter;
1552         struct mlx4_dev_cap        dev_cap;
1553         struct mlx4_mod_stat_cfg   mlx4_cfg;
1554         struct mlx4_profile        profile;
1555         struct mlx4_init_hca_param init_hca;
1556         u64 icm_size;
1557         int err;
1558
1559         if (!mlx4_is_slave(dev)) {
1560                 err = mlx4_QUERY_FW(dev);
1561                 if (err) {
1562                         if (err == -EACCES)
1563                                 mlx4_info(dev, "non-primary physical function, skipping.\n");
1564                         else
1565                                 mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
1566                         return err;
1567                 }
1568
1569                 err = mlx4_load_fw(dev);
1570                 if (err) {
1571                         mlx4_err(dev, "Failed to start FW, aborting.\n");
1572                         return err;
1573                 }
1574
1575                 mlx4_cfg.log_pg_sz_m = 1;
1576                 mlx4_cfg.log_pg_sz = 0;
1577                 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
1578                 if (err)
1579                         mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
1580
1581                 err = mlx4_dev_cap(dev, &dev_cap);
1582                 if (err) {
1583                         mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
1584                         goto err_stop_fw;
1585                 }
1586
1587                 choose_steering_mode(dev, &dev_cap);
1588                 choose_tunnel_offload_mode(dev, &dev_cap);
1589
1590                 err = mlx4_get_phys_port_id(dev);
1591                 if (err)
1592                         mlx4_err(dev, "Failed to get physical port id\n");
1593
1594                 if (mlx4_is_master(dev))
1595                         mlx4_parav_master_pf_caps(dev);
1596
1597                 profile = default_profile;
1598                 if (dev->caps.steering_mode ==
1599                     MLX4_STEERING_MODE_DEVICE_MANAGED)
1600                         profile.num_mcg = MLX4_FS_NUM_MCG;
1601
1602                 icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
1603                                              &init_hca);
1604                 if ((long long) icm_size < 0) {
1605                         err = icm_size;
1606                         goto err_stop_fw;
1607                 }
1608
1609                 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
1610
1611                 init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
1612                 init_hca.uar_page_sz = PAGE_SHIFT - 12;
1613                 init_hca.mw_enabled = 0;
1614                 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
1615                     dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
1616                         init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;
1617
1618                 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
1619                 if (err)
1620                         goto err_stop_fw;
1621
1622                 err = mlx4_INIT_HCA(dev, &init_hca);
1623                 if (err) {
1624                         mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
1625                         goto err_free_icm;
1626                 }
1627                 /*
1628                  * If TS is supported by FW
1629                  * read HCA frequency by QUERY_HCA command
1630                  */
1631                 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
1632                         memset(&init_hca, 0, sizeof(init_hca));
1633                         err = mlx4_QUERY_HCA(dev, &init_hca);
1634                         if (err) {
1635                                 mlx4_err(dev, "QUERY_HCA command failed, disabling timestamping.\n");
1636                                 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
1637                         } else {
1638                                 dev->caps.hca_core_clock =
1639                                         init_hca.hca_core_clock;
1640                         }
1641
1642                         /* If the reported HCA frequency is 0, disable
1643                          * timestamping to avoid dividing by zero.
1644                          */
1645                         if (!dev->caps.hca_core_clock) {
1646                                 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
1647                                 mlx4_err(dev,
1648                                          "HCA frequency is 0. Timestamping is not supported.\n");
1649                         } else if (map_internal_clock(dev)) {
1650                                 /*
1651                                  * Mapping the internal clock failed;
1652                                  * disable timestamping.
1653                                  */
1654                                 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
1655                                 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported.\n");
1656                         }
1657                 }
1658         } else {
1659                 err = mlx4_init_slave(dev);
1660                 if (err) {
1661                         if (err != -EPROBE_DEFER)
1662                                 mlx4_err(dev, "Failed to initialize slave\n");
1663                         return err;
1664                 }
1665
1666                 err = mlx4_slave_cap(dev);
1667                 if (err) {
1668                         mlx4_err(dev, "Failed to obtain slave caps\n");
1669                         goto err_close;
1670                 }
1671         }
1672
1673         if (map_bf_area(dev))
1674                 mlx4_dbg(dev, "Failed to map blue flame area\n");
1675
1676         /* Only the master sets the ports; all the rest get them from it. */
1677         if (!mlx4_is_slave(dev))
1678                 mlx4_set_port_mask(dev);
1679
1680         err = mlx4_QUERY_ADAPTER(dev, &adapter);
1681         if (err) {
1682                 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
1683                 goto unmap_bf;
1684         }
1685
1686         priv->eq_table.inta_pin = adapter.inta_pin;
1687         memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
1688
1689         return 0;
1690
1691 unmap_bf:
1692         unmap_internal_clock(dev);
1693         unmap_bf_area(dev);
1694
1695 err_close:
1696         if (mlx4_is_slave(dev))
1697                 mlx4_slave_exit(dev);
1698         else
1699                 mlx4_CLOSE_HCA(dev, 0);
1700
1701 err_free_icm:
1702         if (!mlx4_is_slave(dev))
1703                 mlx4_free_icms(dev);
1704
1705 err_stop_fw:
1706         if (!mlx4_is_slave(dev)) {
1707                 mlx4_UNMAP_FA(dev);
1708                 mlx4_free_icm(dev, priv->fw.fw_icm, 0);
1709         }
1710         return err;
1711 }
1712
1713 static int mlx4_init_counters_table(struct mlx4_dev *dev)
1714 {
1715         struct mlx4_priv *priv = mlx4_priv(dev);
1716         int nent;
1717
1718         if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
1719                 return -ENOENT;
1720
1721         nent = dev->caps.max_counters;
1722         return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
1723 }
1724
1725 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
1726 {
1727         mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
1728 }
1729
1730 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
1731 {
1732         struct mlx4_priv *priv = mlx4_priv(dev);
1733
1734         if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
1735                 return -ENOENT;
1736
1737         *idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
1738         if (*idx == -1)
1739                 return -ENOMEM;
1740
1741         return 0;
1742 }
1743
1744 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
1745 {
1746         u64 out_param;
1747         int err;
1748
1749         if (mlx4_is_mfunc(dev)) {
1750                 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
1751                                    RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
1752                                    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
1753                 if (!err)
1754                         *idx = get_param_l(&out_param);
1755
1756                 return err;
1757         }
1758         return __mlx4_counter_alloc(dev, idx);
1759 }
1760 EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
1761
1762 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
1763 {
1764         mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
1765         return;
1766 }
1767
1768 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
1769 {
1770         u64 in_param = 0;
1771
1772         if (mlx4_is_mfunc(dev)) {
1773                 set_param_l(&in_param, idx);
1774                 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
1775                          MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
1776                          MLX4_CMD_WRAPPED);
1777                 return;
1778         }
1779         __mlx4_counter_free(dev, idx);
1780 }
1781 EXPORT_SYMBOL_GPL(mlx4_counter_free);
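
/*
 * Illustrative sketch (not part of the driver): how a consumer of the
 * counter API exported above might allocate and release a counter.  The
 * function name and the QP-attach step are hypothetical placeholders.
 */
static int __maybe_unused example_counter_usage(struct mlx4_dev *dev)
{
        u32 counter_idx;
        int err;

        err = mlx4_counter_alloc(dev, &counter_idx);
        if (err)        /* -ENOENT if unsupported, -ENOMEM if exhausted */
                return err;

        /* ... program counter_idx into a QP context and read stats ... */

        mlx4_counter_free(dev, counter_idx);
        return 0;
}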
1782
1783 static int mlx4_setup_hca(struct mlx4_dev *dev)
1784 {
1785         struct mlx4_priv *priv = mlx4_priv(dev);
1786         int err;
1787         int port;
1788         __be32 ib_port_default_caps;
1789
1790         err = mlx4_init_uar_table(dev);
1791         if (err) {
1792                 mlx4_err(dev, "Failed to initialize "
1793                          "user access region table, aborting.\n");
1794                 return err;
1795         }
1796
1797         err = mlx4_uar_alloc(dev, &priv->driver_uar);
1798         if (err) {
1799                 mlx4_err(dev, "Failed to allocate driver access region, "
1800                          "aborting.\n");
1801                 goto err_uar_table_free;
1802         }
1803
1804         priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
1805         if (!priv->kar) {
1806                 mlx4_err(dev, "Couldn't map kernel access region, "
1807                          "aborting.\n");
1808                 err = -ENOMEM;
1809                 goto err_uar_free;
1810         }
1811
1812         err = mlx4_init_pd_table(dev);
1813         if (err) {
1814                 mlx4_err(dev, "Failed to initialize "
1815                          "protection domain table, aborting.\n");
1816                 goto err_kar_unmap;
1817         }
1818
1819         err = mlx4_init_xrcd_table(dev);
1820         if (err) {
1821                 mlx4_err(dev, "Failed to initialize "
1822                          "reliable connection domain table, aborting.\n");
1823                 goto err_pd_table_free;
1824         }
1825
1826         err = mlx4_init_mr_table(dev);
1827         if (err) {
1828                 mlx4_err(dev, "Failed to initialize "
1829                          "memory region table, aborting.\n");
1830                 goto err_xrcd_table_free;
1831         }
1832
1833         if (!mlx4_is_slave(dev)) {
1834                 err = mlx4_init_mcg_table(dev);
1835                 if (err) {
1836                         mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n");
1837                         goto err_mr_table_free;
1838                 }
1839         }
1840
1841         err = mlx4_init_eq_table(dev);
1842         if (err) {
1843                 mlx4_err(dev, "Failed to initialize "
1844                          "event queue table, aborting.\n");
1845                 goto err_mcg_table_free;
1846         }
1847
1848         err = mlx4_cmd_use_events(dev);
1849         if (err) {
1850                 mlx4_err(dev, "Failed to switch to event-driven "
1851                          "firmware commands, aborting.\n");
1852                 goto err_eq_table_free;
1853         }
1854
1855         err = mlx4_NOP(dev);
1856         if (err) {
1857                 if (dev->flags & MLX4_FLAG_MSI_X) {
1858                         mlx4_warn(dev, "NOP command failed to generate MSI-X "
1859                                   "interrupt (IRQ %d).\n",
1860                                   priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
1861                         mlx4_warn(dev, "Trying again without MSI-X.\n");
1862                 } else {
1863                         mlx4_err(dev, "NOP command failed to generate interrupt "
1864                                  "(IRQ %d), aborting.\n",
1865                                  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
1866                         mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
1867                 }
1868
1869                 goto err_cmd_poll;
1870         }
1871
1872         mlx4_dbg(dev, "NOP command IRQ test passed\n");
1873
1874         err = mlx4_init_cq_table(dev);
1875         if (err) {
1876                 mlx4_err(dev, "Failed to initialize "
1877                          "completion queue table, aborting.\n");
1878                 goto err_cmd_poll;
1879         }
1880
1881         err = mlx4_init_srq_table(dev);
1882         if (err) {
1883                 mlx4_err(dev, "Failed to initialize "
1884                          "shared receive queue table, aborting.\n");
1885                 goto err_cq_table_free;
1886         }
1887
1888         err = mlx4_init_qp_table(dev);
1889         if (err) {
1890                 mlx4_err(dev, "Failed to initialize "
1891                          "queue pair table, aborting.\n");
1892                 goto err_srq_table_free;
1893         }
1894
1895         err = mlx4_init_counters_table(dev);
1896         if (err && err != -ENOENT) {
1897                 mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
1898                 goto err_qp_table_free;
1899         }
1900
1901         if (!mlx4_is_slave(dev)) {
1902                 for (port = 1; port <= dev->caps.num_ports; port++) {
1903                         ib_port_default_caps = 0;
1904                         err = mlx4_get_port_ib_caps(dev, port,
1905                                                     &ib_port_default_caps);
1906                         if (err)
1907                                 mlx4_warn(dev, "failed to get port %d default "
1908                                           "ib capabilities (%d). Continuing "
1909                                           "with caps = 0\n", port, err);
1910                         dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
1911
1912                         /* initialize per-slave default ib port capabilities */
1913                         if (mlx4_is_master(dev)) {
1914                                 int i;
1915                                 for (i = 0; i < dev->num_slaves; i++) {
1916                                         if (i == mlx4_master_func_num(dev))
1917                                                 continue;
1918                                         priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
1919                                                         ib_port_default_caps;
1920                                 }
1921                         }
1922
1923                         if (mlx4_is_mfunc(dev))
1924                                 dev->caps.port_ib_mtu[port] = IB_MTU_2048;
1925                         else
1926                                 dev->caps.port_ib_mtu[port] = IB_MTU_4096;
1927
1928                         err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
1929                                             dev->caps.pkey_table_len[port] : -1);
1930                         if (err) {
1931                                 mlx4_err(dev, "Failed to set port %d, aborting\n",
1932                                         port);
1933                                 goto err_counters_table_free;
1934                         }
1935                 }
1936         }
1937
1938         return 0;
1939
1940 err_counters_table_free:
1941         mlx4_cleanup_counters_table(dev);
1942
1943 err_qp_table_free:
1944         mlx4_cleanup_qp_table(dev);
1945
1946 err_srq_table_free:
1947         mlx4_cleanup_srq_table(dev);
1948
1949 err_cq_table_free:
1950         mlx4_cleanup_cq_table(dev);
1951
1952 err_cmd_poll:
1953         mlx4_cmd_use_polling(dev);
1954
1955 err_eq_table_free:
1956         mlx4_cleanup_eq_table(dev);
1957
1958 err_mcg_table_free:
1959         if (!mlx4_is_slave(dev))
1960                 mlx4_cleanup_mcg_table(dev);
1961
1962 err_mr_table_free:
1963         mlx4_cleanup_mr_table(dev);
1964
1965 err_xrcd_table_free:
1966         mlx4_cleanup_xrcd_table(dev);
1967
1968 err_pd_table_free:
1969         mlx4_cleanup_pd_table(dev);
1970
1971 err_kar_unmap:
1972         iounmap(priv->kar);
1973
1974 err_uar_free:
1975         mlx4_uar_free(dev, &priv->driver_uar);
1976
1977 err_uar_table_free:
1978         mlx4_cleanup_uar_table(dev);
1979         return err;
1980 }
1981
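/*
 * MSI-X vector budget: request num_online_cpus() + 1 completion vectors
 * per port (capped at MAX_MSIX_P_PORT per port) plus MSIX_LEGACY_SZ
 * vectors for the legacy pool, capped at MAX_MSIX overall and at the
 * number of EQs the device has left after the reserved ones.
 */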
1982 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
1983 {
1984         struct mlx4_priv *priv = mlx4_priv(dev);
1985         struct msix_entry *entries;
1986         int nreq = min_t(int, dev->caps.num_ports *
1987                          min_t(int, num_online_cpus() + 1,
1988                                MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
1989         int i;
1990
1991         if (msi_x) {
1992                 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
1993                              nreq);
1994
1995                 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
1996                 if (!entries)
1997                         goto no_msi;
1998
1999                 for (i = 0; i < nreq; ++i)
2000                         entries[i].entry = i;
2001
2002                 nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq);
2003
2004                 if (nreq < 0) {
2005                         kfree(entries);
2006                         goto no_msi;
2007                 } else if (nreq < MSIX_LEGACY_SZ +
2008                                   dev->caps.num_ports * MIN_MSIX_P_PORT) {
2009                         /* Working in legacy mode, all EQs shared */
2010                         dev->caps.comp_pool           = 0;
2011                         dev->caps.num_comp_vectors = nreq - 1;
2012                 } else {
2013                         dev->caps.comp_pool           = nreq - MSIX_LEGACY_SZ;
2014                         dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
2015                 }
2016                 for (i = 0; i < nreq; ++i)
2017                         priv->eq_table.eq[i].irq = entries[i].vector;
2018
2019                 dev->flags |= MLX4_FLAG_MSI_X;
2020
2021                 kfree(entries);
2022                 return;
2023         }
2024
2025 no_msi:
2026         dev->caps.num_comp_vectors = 1;
2027         dev->caps.comp_pool        = 0;
2028
2029         for (i = 0; i < 2; ++i)
2030                 priv->eq_table.eq[i].irq = dev->pdev->irq;
2031 }
2032
2033 static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
2034 {
2035         struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
2036         int err = 0;
2037
2038         info->dev = dev;
2039         info->port = port;
2040         if (!mlx4_is_slave(dev)) {
2041                 mlx4_init_mac_table(dev, &info->mac_table);
2042                 mlx4_init_vlan_table(dev, &info->vlan_table);
2043                 info->base_qpn = mlx4_get_base_qpn(dev, port);
2044         }
2045
2046         sprintf(info->dev_name, "mlx4_port%d", port);
2047         info->port_attr.attr.name = info->dev_name;
2048         if (mlx4_is_mfunc(dev))
2049                 info->port_attr.attr.mode = S_IRUGO;
2050         else {
2051                 info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
2052                 info->port_attr.store     = set_port_type;
2053         }
2054         info->port_attr.show      = show_port_type;
2055         sysfs_attr_init(&info->port_attr.attr);
2056
2057         err = device_create_file(&dev->pdev->dev, &info->port_attr);
2058         if (err) {
2059                 mlx4_err(dev, "Failed to create file for port %d\n", port);
2060                 info->port = -1;
2061         }
2062
2063         sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
2064         info->port_mtu_attr.attr.name = info->dev_mtu_name;
2065         if (mlx4_is_mfunc(dev))
2066                 info->port_mtu_attr.attr.mode = S_IRUGO;
2067         else {
2068                 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
2069                 info->port_mtu_attr.store     = set_port_ib_mtu;
2070         }
2071         info->port_mtu_attr.show      = show_port_ib_mtu;
2072         sysfs_attr_init(&info->port_mtu_attr.attr);
2073
2074         err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr);
2075         if (err) {
2076                 mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
2077                 device_remove_file(&info->dev->pdev->dev, &info->port_attr);
2078                 info->port = -1;
2079         }
2080
2081         return err;
2082 }
2083
2084 static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
2085 {
2086         if (info->port < 0)
2087                 return;
2088
2089         device_remove_file(&info->dev->pdev->dev, &info->port_attr);
2090         device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr);
2091 }
2092
2093 static int mlx4_init_steering(struct mlx4_dev *dev)
2094 {
2095         struct mlx4_priv *priv = mlx4_priv(dev);
2096         int num_entries = dev->caps.num_ports;
2097         int i, j;
2098
2099         priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
2100         if (!priv->steer)
2101                 return -ENOMEM;
2102
2103         for (i = 0; i < num_entries; i++)
2104                 for (j = 0; j < MLX4_NUM_STEERS; j++) {
2105                         INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
2106                         INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
2107                 }
2108         return 0;
2109 }
2110
2111 static void mlx4_clear_steering(struct mlx4_dev *dev)
2112 {
2113         struct mlx4_priv *priv = mlx4_priv(dev);
2114         struct mlx4_steer_index *entry, *tmp_entry;
2115         struct mlx4_promisc_qp *pqp, *tmp_pqp;
2116         int num_entries = dev->caps.num_ports;
2117         int i, j;
2118
2119         for (i = 0; i < num_entries; i++) {
2120                 for (j = 0; j < MLX4_NUM_STEERS; j++) {
2121                         list_for_each_entry_safe(pqp, tmp_pqp,
2122                                                  &priv->steer[i].promisc_qps[j],
2123                                                  list) {
2124                                 list_del(&pqp->list);
2125                                 kfree(pqp);
2126                         }
2127                         list_for_each_entry_safe(entry, tmp_entry,
2128                                                  &priv->steer[i].steer_entries[j],
2129                                                  list) {
2130                                 list_del(&entry->list);
2131                                 list_for_each_entry_safe(pqp, tmp_pqp,
2132                                                          &entry->duplicates,
2133                                                          list) {
2134                                         list_del(&pqp->list);
2135                                         kfree(pqp);
2136                                 }
2137                                 kfree(entry);
2138                         }
2139                 }
2140         }
2141         kfree(priv->steer);
2142 }
2143
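/*
 * Flatten the PCI slot/function pair into a single function index
 * (devfn is slot << 3 | func, so this is equivalent to pdev->devfn);
 * the VF probe path compares this index against the per-port VF counts
 * to decide which virtual functions to skip.
 */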
2144 static int extended_func_num(struct pci_dev *pdev)
2145 {
2146         return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
2147 }
2148
2149 #define MLX4_OWNER_BASE 0x8069c
2150 #define MLX4_OWNER_SIZE 4
2151
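/*
 * Hardware ownership semaphore at MLX4_OWNER_BASE: reading it returns 0
 * if ownership was free (the read claims it for this function); nonzero
 * means another function already owns the device.  Writing 0 releases
 * the semaphore.
 */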
2152 static int mlx4_get_ownership(struct mlx4_dev *dev)
2153 {
2154         void __iomem *owner;
2155         u32 ret;
2156
2157         if (pci_channel_offline(dev->pdev))
2158                 return -EIO;
2159
2160         owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
2161                         MLX4_OWNER_SIZE);
2162         if (!owner) {
2163                 mlx4_err(dev, "Failed to obtain ownership bit\n");
2164                 return -ENOMEM;
2165         }
2166
2167         ret = readl(owner);
2168         iounmap(owner);
2169         return (int) !!ret;
2170 }
2171
2172 static void mlx4_free_ownership(struct mlx4_dev *dev)
2173 {
2174         void __iomem *owner;
2175
2176         if (pci_channel_offline(dev->pdev))
2177                 return;
2178
2179         owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
2180                         MLX4_OWNER_SIZE);
2181         if (!owner) {
2182                 mlx4_err(dev, "Failed to obtain ownership bit\n");
2183                 return;
2184         }
2185         writel(0, owner);
2186         msleep(1000);
2187         iounmap(owner);
2188 }
2189
2190 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2191 {
2192         struct mlx4_priv *priv;
2193         struct mlx4_dev *dev;
2194         int err;
2195         int port;
2196         int nvfs[MLX4_MAX_PORTS + 1], prb_vf[MLX4_MAX_PORTS + 1];
2197         unsigned total_vfs = 0;
2198         int sriov_initialized = 0;
2199         unsigned int i;
2200
2201         pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
2202
2203         err = pci_enable_device(pdev);
2204         if (err) {
2205                 dev_err(&pdev->dev, "Cannot enable PCI device, "
2206                         "aborting.\n");
2207                 return err;
2208         }
2209
2210         /* Due to the requirement that all VFs and the PF are *guaranteed* 2 MACs
2211          * per port, we must limit the number of VFs to 63 (since there are
2212          * 128 MACs).
2213          */
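        /*
         * nvfs[0..1] count single-port VFs bound to ports 1 and 2;
         * nvfs[MLX4_MAX_PORTS] counts dual-port VFs.  The scalar num_vfs
         * module parameter currently maps onto the dual-port slot only,
         * leaving the per-port slots at zero.
         */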
2214         for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]);
2215              total_vfs += nvfs[i], i++) {
2216                 nvfs[i] = i == MLX4_MAX_PORTS ? num_vfs : 0;
2217                 if (nvfs[i] < 0) {
2218                         dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
2219                         return -EINVAL;
2220                 }
2221         }
2222         for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]); i++) {
2223                 prb_vf[i] = i == MLX4_MAX_PORTS ? probe_vf : 0;
2224                 if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
2225                         dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
2226                         return -EINVAL;
2227                 }
2228         }
2229         if (total_vfs >= MLX4_MAX_NUM_VF) {
2230                 dev_err(&pdev->dev,
2231                         "Requested more VFs (%d) than allowed (%d)\n",
2232                         total_vfs, MLX4_MAX_NUM_VF - 1);
2233                 return -EINVAL;
2234         }
2235
2236         for (i = 0; i < MLX4_MAX_PORTS; i++) {
2237                 if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
2238                         dev_err(&pdev->dev,
2239                                 "Requested more VFs (%d) for port (%d) than allowed (%d)\n",
2240                                 nvfs[i] + nvfs[2], i + 1,
2241                                 MLX4_MAX_NUM_VF_P_PORT - 1);
2242                         return -EINVAL;
2243                 }
2244         }
2245
2246
2247         /*
2248          * Check for BARs.
2249          */
2250         if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
2251             !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2252                 dev_err(&pdev->dev, "Missing DCS, aborting "
2253                         "(driver_data: 0x%x, pci_resource_flags(pdev, 0): 0x%lx)\n",
2254                         pci_dev_data, pci_resource_flags(pdev, 0));
2255                 err = -ENODEV;
2256                 goto err_disable_pdev;
2257         }
2258         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
2259                 dev_err(&pdev->dev, "Missing UAR, aborting.\n");
2260                 err = -ENODEV;
2261                 goto err_disable_pdev;
2262         }
2263
2264         err = pci_request_regions(pdev, DRV_NAME);
2265         if (err) {
2266                 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
2267                 goto err_disable_pdev;
2268         }
2269
2270         pci_set_master(pdev);
2271
2272         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2273         if (err) {
2274                 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
2275                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2276                 if (err) {
2277                         dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
2278                         goto err_release_regions;
2279                 }
2280         }
2281         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2282         if (err) {
2283                 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
2284                          "consistent PCI DMA mask.\n");
2285                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2286                 if (err) {
2287                         dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
2288                                 "aborting.\n");
2289                         goto err_release_regions;
2290                 }
2291         }
2292
2293         /* Allow large DMA segments, up to the firmware limit of 1 GB */
2294         dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
2295
2296         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
2297         if (!priv) {
2298                 err = -ENOMEM;
2299                 goto err_release_regions;
2300         }
2301
2302         dev       = &priv->dev;
2303         dev->pdev = pdev;
2304         INIT_LIST_HEAD(&priv->ctx_list);
2305         spin_lock_init(&priv->ctx_lock);
2306
2307         mutex_init(&priv->port_mutex);
2308
2309         INIT_LIST_HEAD(&priv->pgdir_list);
2310         mutex_init(&priv->pgdir_mutex);
2311
2312         INIT_LIST_HEAD(&priv->bf_list);
2313         mutex_init(&priv->bf_mutex);
2314
2315         dev->rev_id = pdev->revision;
2316         dev->numa_node = dev_to_node(&pdev->dev);
2317         /* Detect if this device is a virtual function */
2318         if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
2319                 /* When acting as PF, we normally skip VFs unless explicitly
2320                  * requested to probe them. */
2321                 if (total_vfs) {
2322                         unsigned vfs_offset = 0;
2323                         for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
2324                              vfs_offset + nvfs[i] < extended_func_num(pdev);
2325                              vfs_offset += nvfs[i], i++)
2326                                 ;
2327                         if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
2328                                 err = -ENODEV;
2329                                 goto err_free_dev;
2330                         }
2331                         if ((extended_func_num(pdev) - vfs_offset)
2332                             > prb_vf[i]) {
2333                                 mlx4_warn(dev, "Skipping virtual function:%d\n",
2334                                           extended_func_num(pdev));
2335                                 err = -ENODEV;
2336                                 goto err_free_dev;
2337                         }
2338                 }
2339                 mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
2340                 dev->flags |= MLX4_FLAG_SLAVE;
2341         } else {
2342                 /* We reset the device and enable SRIOV only for physical
2343                  * devices.  Try to claim ownership on the device;
2344                  * if already taken, skip -- do not allow multiple PFs */
2345                 err = mlx4_get_ownership(dev);
2346                 if (err) {
2347                         if (err < 0)
2348                                 goto err_free_dev;
2349                         else {
2350                                 mlx4_warn(dev, "Multiple PFs not yet supported."
2351                                           " Skipping PF.\n");
2352                                 err = -EINVAL;
2353                                 goto err_free_dev;
2354                         }
2355                 }
2356
2357                 if (total_vfs) {
2358                         mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
2359                                   total_vfs);
2360                         dev->dev_vfs = kzalloc(
2361                                         total_vfs * sizeof(*dev->dev_vfs),
2362                                         GFP_KERNEL);
2363                         if (NULL == dev->dev_vfs) {
2364                                 mlx4_err(dev, "Failed to allocate memory for VFs\n");
2365                                 err = 0;
2366                         } else {
2367                                 atomic_inc(&pf_loading);
2368                                 err = pci_enable_sriov(pdev, total_vfs);
2369                                 atomic_dec(&pf_loading);
2370                                 if (err) {
2371                                         mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
2372                                                  err);
2373                                         err = 0;
2374                                 } else {
2375                                         mlx4_warn(dev, "Running in master mode\n");
2376                                         dev->flags |= MLX4_FLAG_SRIOV |
2377                                                       MLX4_FLAG_MASTER;
2378                                         dev->num_vfs = total_vfs;
2379                                         sriov_initialized = 1;
2380                                 }
2381                         }
2382                 }
2383
2384                 atomic_set(&priv->opreq_count, 0);
2385                 INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
2386
2387                 /*
2388                  * Now reset the HCA before we touch the PCI capabilities or
2389                  * attempt a firmware command, since a boot ROM may have left
2390                  * the HCA in an undefined state.
2391                  */
2392                 err = mlx4_reset(dev);
2393                 if (err) {
2394                         mlx4_err(dev, "Failed to reset HCA, aborting.\n");
2395                         goto err_rel_own;
2396                 }
2397         }
2398
2399 slave_start:
2400         err = mlx4_cmd_init(dev);
2401         if (err) {
2402                 mlx4_err(dev, "Failed to init command interface, aborting.\n");
2403                 goto err_sriov;
2404         }
2405
2406         /* In slave functions, the communication channel must be initialized
2407          * before posting commands. Also, init num_slaves before calling
2408          * mlx4_init_hca(). */
2409         if (mlx4_is_mfunc(dev)) {
2410                 if (mlx4_is_master(dev))
2411                         dev->num_slaves = MLX4_MAX_NUM_SLAVES;
2412                 else {
2413                         dev->num_slaves = 0;
2414                         err = mlx4_multi_func_init(dev);
2415                         if (err) {
2416                                 mlx4_err(dev, "Failed to init slave mfunc"
2417                                          " interface, aborting.\n");
2418                                 goto err_cmd;
2419                         }
2420                 }
2421         }
2422
2423         err = mlx4_init_hca(dev);
2424         if (err) {
2425                 if (err == -EACCES) {
2426                         /* Not primary Physical function
2427                          * Running in slave mode */
2428                         mlx4_cmd_cleanup(dev);
2429                         dev->flags |= MLX4_FLAG_SLAVE;
2430                         dev->flags &= ~MLX4_FLAG_MASTER;
2431                         goto slave_start;
2432                 } else
2433                         goto err_mfunc;
2434         }
2435
2436         /* Check whether the device is functioning at its maximum possible
2437          * speed.  No return code for this call; just warn the user if the
2438          * PCI Express device capabilities are under-satisfied by the bus.
2439          */
2440         mlx4_check_pcie_caps(dev);
2441
2442         /* In master functions, the communication channel must be initialized
2443          * after obtaining its address from fw */
2444         if (mlx4_is_master(dev)) {
2445                 unsigned sum = 0;
2446                 err = mlx4_multi_func_init(dev);
2447                 if (err) {
2448                         mlx4_err(dev, "Failed to init master mfunc "
2449                                  "interface, aborting.\n");
2450                         goto err_close;
2451                 }
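
                /*
                 * Assign port ranges to the VFs: slot i < 2 holds VFs
                 * bound to the single port i + 1, while the last slot
                 * holds N-port VFs that span every port (min_port = 1,
                 * n_ports = num_ports).
                 */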
2452                 if (sriov_initialized) {
2453                         for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
2454                                 unsigned j;
2455                                 for (j = 0; j < nvfs[i]; ++sum, ++j) {
2456                                         dev->dev_vfs[sum].min_port =
2457                                                 i < 2 ? i + 1 : 1;
2458                                         dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
2459                                                 dev->caps.num_ports;
2460                                 }
2461                         }
2462                 }
2463         }
2464
2465         err = mlx4_alloc_eq_table(dev);
2466         if (err)
2467                 goto err_master_mfunc;
2468
2469         priv->msix_ctl.pool_bm = 0;
2470         mutex_init(&priv->msix_ctl.pool_lock);
2471
2472         mlx4_enable_msi_x(dev);
2473         if ((mlx4_is_mfunc(dev)) &&
2474             !(dev->flags & MLX4_FLAG_MSI_X)) {
2475                 err = -ENOSYS;
2476                 mlx4_err(dev, "INTx is not supported in multi-function mode,"
2477                          " aborting.\n");
2478                 goto err_free_eq;
2479         }
2480
2481         if (!mlx4_is_slave(dev)) {
2482                 err = mlx4_init_steering(dev);
2483                 if (err)
2484                         goto err_free_eq;
2485         }
2486
2487         err = mlx4_setup_hca(dev);
2488         if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
2489             !mlx4_is_mfunc(dev)) {
2490                 dev->flags &= ~MLX4_FLAG_MSI_X;
2491                 dev->caps.num_comp_vectors = 1;
2492                 dev->caps.comp_pool        = 0;
2493                 pci_disable_msix(pdev);
2494                 err = mlx4_setup_hca(dev);
2495         }
2496
2497         if (err)
2498                 goto err_steer;
2499
2500         mlx4_init_quotas(dev);
2501
2502         for (port = 1; port <= dev->caps.num_ports; port++) {
2503                 err = mlx4_init_port_info(dev, port);
2504                 if (err)
2505                         goto err_port;
2506         }
2507
2508         err = mlx4_register_device(dev);
2509         if (err)
2510                 goto err_port;
2511
2512         mlx4_request_modules(dev);
2513
2514         mlx4_sense_init(dev);
2515         mlx4_start_sense(dev);
2516
2517         priv->pci_dev_data = pci_dev_data;
2518         pci_set_drvdata(pdev, dev);
2519
2520         return 0;
2521
2522 err_port:
2523         for (--port; port >= 1; --port)
2524                 mlx4_cleanup_port_info(&priv->port[port]);
2525
2526         mlx4_cleanup_counters_table(dev);
2527         mlx4_cleanup_qp_table(dev);
2528         mlx4_cleanup_srq_table(dev);
2529         mlx4_cleanup_cq_table(dev);
2530         mlx4_cmd_use_polling(dev);
2531         mlx4_cleanup_eq_table(dev);
2532         mlx4_cleanup_mcg_table(dev);
2533         mlx4_cleanup_mr_table(dev);
2534         mlx4_cleanup_xrcd_table(dev);
2535         mlx4_cleanup_pd_table(dev);
2536         mlx4_cleanup_uar_table(dev);
2537
2538 err_steer:
2539         if (!mlx4_is_slave(dev))
2540                 mlx4_clear_steering(dev);
2541
2542 err_free_eq:
2543         mlx4_free_eq_table(dev);
2544
2545 err_master_mfunc:
2546         if (mlx4_is_master(dev))
2547                 mlx4_multi_func_cleanup(dev);
2548
2549 err_close:
2550         if (dev->flags & MLX4_FLAG_MSI_X)
2551                 pci_disable_msix(pdev);
2552
2553         mlx4_close_hca(dev);
2554
2555 err_mfunc:
2556         if (mlx4_is_slave(dev))
2557                 mlx4_multi_func_cleanup(dev);
2558
2559 err_cmd:
2560         mlx4_cmd_cleanup(dev);
2561
2562 err_sriov:
2563         if (dev->flags & MLX4_FLAG_SRIOV)
2564                 pci_disable_sriov(pdev);
2565
2566 err_rel_own:
2567         if (!mlx4_is_slave(dev))
2568                 mlx4_free_ownership(dev);
2569
2570         kfree(priv->dev.dev_vfs);
2571
2572 err_free_dev:
2573         kfree(priv);
2574
2575 err_release_regions:
2576         pci_release_regions(pdev);
2577
2578 err_disable_pdev:
2579         pci_disable_device(pdev);
2580         pci_set_drvdata(pdev, NULL);
2581         return err;
2582 }
2583
2584 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
2585 {
2586         printk_once(KERN_INFO "%s", mlx4_version);
2587
2588         return __mlx4_init_one(pdev, id->driver_data);
2589 }
2590
2591 static void mlx4_remove_one(struct pci_dev *pdev)
2592 {
2593         struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
2594         struct mlx4_priv *priv = mlx4_priv(dev);
2595         int p;
2596
2597         if (dev) {
2598                 /* In SR-IOV mode it is not allowed to unload the PF's
2599                  * driver while there are active VFs. */
2600                 if (mlx4_is_master(dev)) {
2601                         if (mlx4_how_many_lives_vf(dev))
2602                                 printk(KERN_ERR "Removing PF when there are assigned VFs!\n");
2603                 }
2604                 mlx4_stop_sense(dev);
2605                 mlx4_unregister_device(dev);
2606
2607                 for (p = 1; p <= dev->caps.num_ports; p++) {
2608                         mlx4_cleanup_port_info(&priv->port[p]);
2609                         mlx4_CLOSE_PORT(dev, p);
2610                 }
2611
2612                 if (mlx4_is_master(dev))
2613                         mlx4_free_resource_tracker(dev,
2614                                                    RES_TR_FREE_SLAVES_ONLY);
2615
2616                 mlx4_cleanup_counters_table(dev);
2617                 mlx4_cleanup_qp_table(dev);
2618                 mlx4_cleanup_srq_table(dev);
2619                 mlx4_cleanup_cq_table(dev);
2620                 mlx4_cmd_use_polling(dev);
2621                 mlx4_cleanup_eq_table(dev);
2622                 mlx4_cleanup_mcg_table(dev);
2623                 mlx4_cleanup_mr_table(dev);
2624                 mlx4_cleanup_xrcd_table(dev);
2625                 mlx4_cleanup_pd_table(dev);
2626
2627                 if (mlx4_is_master(dev))
2628                         mlx4_free_resource_tracker(dev,
2629                                                    RES_TR_FREE_STRUCTS_ONLY);
2630
2631                 iounmap(priv->kar);
2632                 mlx4_uar_free(dev, &priv->driver_uar);
2633                 mlx4_cleanup_uar_table(dev);
2634                 if (!mlx4_is_slave(dev))
2635                         mlx4_clear_steering(dev);
2636                 mlx4_free_eq_table(dev);
2637                 if (mlx4_is_master(dev))
2638                         mlx4_multi_func_cleanup(dev);
2639                 mlx4_close_hca(dev);
2640                 if (mlx4_is_slave(dev))
2641                         mlx4_multi_func_cleanup(dev);
2642                 mlx4_cmd_cleanup(dev);
2643
2644                 if (dev->flags & MLX4_FLAG_MSI_X)
2645                         pci_disable_msix(pdev);
2646                 if (dev->flags & MLX4_FLAG_SRIOV) {
2647                         mlx4_warn(dev, "Disabling SR-IOV\n");
2648                         pci_disable_sriov(pdev);
2649                 }
2650
2651                 if (!mlx4_is_slave(dev))
2652                         mlx4_free_ownership(dev);
2653
2654                 kfree(dev->caps.qp0_tunnel);
2655                 kfree(dev->caps.qp0_proxy);
2656                 kfree(dev->caps.qp1_tunnel);
2657                 kfree(dev->caps.qp1_proxy);
2658                 kfree(dev->dev_vfs);
2659
2660                 kfree(priv);
2661                 pci_release_regions(pdev);
2662                 pci_disable_device(pdev);
2663                 pci_set_drvdata(pdev, NULL);
2664         }
2665 }
2666
2667 int mlx4_restart_one(struct pci_dev *pdev)
2668 {
2669         struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
2670         struct mlx4_priv *priv = mlx4_priv(dev);
2671         int               pci_dev_data;
2672
2673         pci_dev_data = priv->pci_dev_data;
2674         mlx4_remove_one(pdev);
2675         return __mlx4_init_one(pdev, pci_dev_data);
2676 }
2677
static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
	/* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26468 ConnectX EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
	/* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
	/* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);

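/*
 * PCI error recovery: on a detected channel error the device is torn
 * down completely; if the slot can subsequently be reset, the device
 * is probed again from scratch.
 */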
static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	mlx4_remove_one(pdev);

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
	int ret = __mlx4_init_one(pdev, 0);

	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

static const struct pci_error_handlers mlx4_err_handler = {
	.error_detected = mlx4_pci_err_detected,
	.slot_reset     = mlx4_pci_slot_reset,
};

static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.shutdown	= mlx4_remove_one,
	.remove		= mlx4_remove_one,
	.err_handler	= &mlx4_err_handler,
};

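/*
 * Sanity-check the module parameters once at load time, before any
 * device is probed.
 */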
static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warning("mlx4_core: bad log_num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			   MLX4_LOG_NUM_VLANS);

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
		return -1;
	}

	/* Check that the port type module parameter is a legal combination */
	if (port_type_array[0] == false && port_type_array[1] == true) {
		pr_warning("mlx4_core: module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = true;
	}

	if (mlx4_log_num_mgm_entry_size != -1 &&
	    (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
	     mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
		pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n",
			   mlx4_log_num_mgm_entry_size,
			   MLX4_MIN_MGM_LOG_ENTRY_SIZE,
			   MLX4_MAX_MGM_LOG_ENTRY_SIZE);
		return -1;
	}

	return 0;
}

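/*
 * Module entry point: validate the parameters, initialize the
 * catastrophic-error handling machinery and the global workqueue,
 * then register with the PCI core.  The workqueue is destroyed again
 * if driver registration fails.
 */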
static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_catas_init();

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	if (ret < 0)
		destroy_workqueue(mlx4_wq);
	return ret < 0 ? ret : 0;
}

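/*
 * Module exit: unregister from the PCI core (which detaches any bound
 * devices) and destroy the global workqueue.
 */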
static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);