/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>
#include <linux/cpu_rmap.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_IRQNAME_SIZE	= 32
};

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};

struct mlx4_irq_notify {
	void *arg;
	struct irq_affinity_notify notify;
};

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD)		    | \
			       (1ull << MLX4_EVENT_TYPE_OP_REQUIRED)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)	    | \
			       (1ull << MLX4_EVENT_TYPE_FLR_EVENT)	    | \
			       (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))

static u64 get_async_ev_mask(struct mlx4_dev *dev)
{
	u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);

	return async_ev_mask;
}

static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

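/*
 * An explanatory aside, derived only from the code above: bits 0-23 of
 * the doorbell word carry the 24-bit consumer index and bit 31 is the
 * request-notification flag, so eq_set_ci(eq, 1) both acknowledges the
 * consumed entries and re-arms the EQ.
 */
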
static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor)
{
	/* (entry & (eq->nent - 1)) gives us a cyclic array */
	unsigned long offset = (entry & (eq->nent - 1)) * (MLX4_EQ_ENTRY_SIZE << eqe_factor);

	/* CX3 is capable of extending the EQE from 32 to 64 bytes.
	 * When this feature is enabled, the first (in the lower addresses)
	 * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes
	 * contain the legacy EQE information.
	 */
	return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
}

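/*
 * Worked example (illustrative numbers, assuming a 4K PAGE_SIZE): with
 * eqe_factor == 1 the stride is 0x20 << 1 = 64 bytes, so for entry 3 of
 * a 32-entry EQ:
 *
 *	offset = (3 & 31) * 64 = 0xc0
 *	eqe    = page_list[0].buf + (0xc0 + 0x20) % 4096
 *
 * where the extra 0x20 skips the reserved lower half of the extended EQE.
 */
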
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor);
	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

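/*
 * A note on the ownership test above: eq->nent is a power of two, so
 * (cons_index & nent) toggles on every wrap of the queue.  On even
 * passes software owns entries whose owner bit is clear, on odd passes
 * entries whose owner bit is set; this lets the HCA keep posting entries
 * without the driver ever having to re-initialize the ring.
 */
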
static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
{
	struct mlx4_eqe *eqe =
		&slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
	return (!!(eqe->owner & 0x80) ^
		!!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
		eqe : NULL;
}

void mlx4_gen_slave_eqe(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
	struct mlx4_eqe *eqe;
	u8 slave;
	int i;

	for (eqe = next_slave_event_eqe(slave_eq); eqe;
	      eqe = next_slave_event_eqe(slave_eq)) {
		slave = eqe->slave_id;

		/* All active slaves need to receive the event */
		if (slave == ALL_SLAVES) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (i != dev->caps.function &&
				    master->slave_state[i].active)
					if (mlx4_GEN_EQE(dev, i, eqe))
						mlx4_warn(dev, "Failed to generate event for slave %d\n",
							  i);
			}
		} else {
			if (mlx4_GEN_EQE(dev, slave, eqe))
				mlx4_warn(dev, "Failed to generate event for slave %d\n",
					  slave);
		}
		++slave_eq->cons;
	}
}

static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
	struct mlx4_eqe *s_eqe;
	unsigned long flags;

	spin_lock_irqsave(&slave_eq->event_lock, flags);
	s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
	if ((!!(s_eqe->owner & 0x80)) ^
	    (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
		mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
			  slave);
		spin_unlock_irqrestore(&slave_eq->event_lock, flags);
		return;
	}

	memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
	s_eqe->slave_id = slave;
	/* ensure all information is written before setting the ownership bit */
	wmb();
	s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
	++slave_eq->prod;

	queue_work(priv->mfunc.master.comm_wq,
		   &priv->mfunc.master.slave_event_work);
	spin_unlock_irqrestore(&slave_eq->event_lock, flags);
}

static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
			     struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave =
		&priv->mfunc.master.slave_state[slave];

	if (!s_slave->active) {
		/*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
		return;
	}

	slave_event(dev, slave, eqe);
}

int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_eqe eqe;

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave];

	if (!s_slave->active)
		return 0;

	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
	eqe.event.port_mgmt_change.port = port;

	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_pkey_eqe);

int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_eqe eqe;

	/* don't send if we don't have that slave */
	if (dev->num_vfs < slave)
		return 0;
	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
	eqe.event.port_mgmt_change.port = port;

	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_guid_change_eqe);

int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
				   u8 port_subtype_change)
{
	struct mlx4_eqe eqe;

	/* don't send if we don't have that slave */
	if (dev->num_vfs < slave)
		return 0;
	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
	eqe.subtype = port_subtype_change;
	eqe.event.port_change.port = cpu_to_be32(port << 28);

	mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
		 port_subtype_change, slave, port);
	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe);

enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return SLAVE_PORT_DOWN;
	}
	return s_state[slave].port_state[port];
}
EXPORT_SYMBOL(mlx4_get_slave_port_state);

static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
				     enum slave_port_state state)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return -1;
	}
	s_state[slave].port_state[port] = state;

	return 0;
}

static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
{
	int i;
	enum slave_port_gen_event gen_event;
	struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
									  port);

	for (i = 0; i < dev->num_vfs + 1; i++)
		if (test_bit(i, slaves_pport.slaves))
			set_and_calc_slave_port_state(dev, i, port,
						      event, &gen_event);
}

/**************************************************************************
	The function gets as input the new event for that port,
	and changes the slave's port state according to the previous state.
	The events are:
		MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
		MLX4_PORT_STATE_DEV_EVENT_PORT_UP
		MLX4_PORT_STATE_IB_EVENT_GID_VALID
		MLX4_PORT_STATE_IB_EVENT_GID_INVALID
***************************************************************************/
int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
				  u8 port, int event,
				  enum slave_port_gen_event *gen_event)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *ctx = NULL;
	unsigned long flags;
	int ret = 0;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	enum slave_port_state cur_state =
		mlx4_get_slave_port_state(dev, slave, port);

	*gen_event = SLAVE_PORT_GEN_EVENT_NONE;

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return ret;
	}

	ctx = &priv->mfunc.master.slave_state[slave];
	spin_lock_irqsave(&ctx->lock, flags);

	switch (cur_state) {
	case SLAVE_PORT_DOWN:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PENDING_UP);
		break;
	case SLAVE_PENDING_UP:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event)
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_DOWN);
		else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_UP);
			*gen_event = SLAVE_PORT_GEN_EVENT_UP;
		}
		break;
	case SLAVE_PORT_UP:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_DOWN);
			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
		} else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID ==
				event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PENDING_UP);
			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
		}
		break;
	default:
		pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
		       __func__, slave, port);
		goto out;
	}
	ret = mlx4_get_slave_port_state(dev, slave, port);

out:
	spin_unlock_irqrestore(&ctx->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_and_calc_slave_port_state);

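/*
 * Reader's aid (a summary of the switch above, not generated code):
 *
 *	SLAVE_PORT_DOWN  + DEV_EVENT_PORT_UP   -> SLAVE_PENDING_UP
 *	SLAVE_PENDING_UP + DEV_EVENT_PORT_DOWN -> SLAVE_PORT_DOWN
 *	SLAVE_PENDING_UP + GID_VALID           -> SLAVE_PORT_UP    (gen UP)
 *	SLAVE_PORT_UP    + DEV_EVENT_PORT_DOWN -> SLAVE_PORT_DOWN  (gen DOWN)
 *	SLAVE_PORT_UP    + GID_INVALID         -> SLAVE_PENDING_UP (gen DOWN)
 */
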
int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr)
{
	struct mlx4_eqe eqe;

	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO;
	eqe.event.port_mgmt_change.port = port;
	eqe.event.port_mgmt_change.params.port_info.changed_attr =
		cpu_to_be32((u32) attr);

	slave_event(dev, ALL_SLAVES, &eqe);
	return 0;
}
EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev);

void mlx4_master_handle_slave_flr(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_flr_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int i;
	int err;
	unsigned long flags;

	mlx4_dbg(dev, "mlx4_handle_slave_flr\n");

	for (i = 0 ; i < dev->num_slaves; i++) {

		if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
			mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
				 i);

			mlx4_delete_all_resources_for_slave(dev, i);
			/* return the slave to running mode */
			spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
			slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
			slave_state[i].is_slave_going_down = 0;
			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
			/* notify the FW */
			err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (err)
				mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
					  i);
		}
	}
}

static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_eqe *eqe;
	int cqn;
	int eqes_found = 0;
	int set_ci = 0;
	int port;
	int slave = 0;
	int ret;
	u32 flr_slave;
	u8 update_slave_state;
	int i;
	enum slave_port_gen_event gen_event;
	unsigned long flags;
	struct mlx4_vport_state *s_info;

	while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_dbg(dev, "event %d arrived\n", eqe->type);
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the QP */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_QP,
						be32_to_cpu(eqe->event.qp.qpn)
						& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}

			}
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
				      0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
				 __func__);
			/* fall through */
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the SRQ */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_SRQ,
						be32_to_cpu(eqe->event.srq.srqn)
						& 0xffffff,
						&slave);
				if (ret && ret != -ENOENT) {
					mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						  eqe->type, eqe->subtype,
						  eq->eqn, eq->cons_index, ret);
					break;
				}
				mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
					  __func__, slave,
					  be32_to_cpu(eqe->event.srq.srqn),
					  eqe->type, eqe->subtype);

				if (!ret && slave != dev->caps.function) {
					mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
						  __func__, eqe->type,
						  eqe->subtype, slave);
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
				       0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MLX4_EVENT_TYPE_PORT_CHANGE: {
			struct mlx4_slaves_pport slaves_port;
			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
			slaves_port = mlx4_phys_to_slaves_pport(dev, port);
			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
				if (!mlx4_is_master(dev))
					break;
				for (i = 0; i < dev->num_vfs + 1; i++) {
					if (!test_bit(i, slaves_port.slaves))
						continue;
					if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
						if (i == mlx4_master_func_num(dev))
							continue;
						mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
							 __func__, i, port);
						s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (mlx4_phys_to_slave_port(dev, i, port) << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					} else {  /* IB port */
						set_and_calc_slave_port_state(dev, i, port,
									      MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
									      &gen_event);
						/* we may be in pending state, in which case
						 * do not send a port_down event
						 */
						if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
							if (i == mlx4_master_func_num(dev))
								continue;
							mlx4_slave_event(dev, i, eqe);
						}
					}
				}
			} else {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port);

				mlx4_priv(dev)->sense.do_sense_port[port] = 0;

				if (!mlx4_is_master(dev))
					break;
				if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
					for (i = 0; i < dev->num_vfs + 1; i++) {
						if (!test_bit(i, slaves_port.slaves))
							continue;
						if (i == mlx4_master_func_num(dev))
							continue;
						s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (mlx4_phys_to_slave_port(dev, i, port) << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					}
				else
					/* port-up event will be sent to a slave when the
					 * slave's alias-guid is set. This is done in alias_GUID.c
					 */
					set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
			}
			break;
		}

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			if (mlx4_is_master(dev)) {
				ret = mlx4_get_slave_from_resource_id(dev,
					RES_CQ,
					be32_to_cpu(eqe->event.cq_err.cqn)
					& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_cq_event(dev,
				      be32_to_cpu(eqe->event.cq_err.cqn)
				      & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_OP_REQUIRED:
			atomic_inc(&priv->opreq_count);
			/* FW commands can't be executed from interrupt context;
			 * work in a deferred task
			 */
			queue_work(mlx4_wq, &priv->opreq_task);
			break;

		case MLX4_EVENT_TYPE_COMM_CHANNEL:
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Received comm channel event for non master device\n");
				break;
			}
			memcpy(&priv->mfunc.master.comm_arm_bit_vector,
			       eqe->event.comm_channel_arm.bit_vec,
			       sizeof eqe->event.comm_channel_arm.bit_vec);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.comm_work);
			break;

		case MLX4_EVENT_TYPE_FLR_EVENT:
			flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Non-master function received FLR event\n");
				break;
			}

			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);

			if (flr_slave >= dev->num_slaves) {
				mlx4_warn(dev,
					  "Got FLR for unknown function: %d\n",
					  flr_slave);
				update_slave_state = 0;
			} else
				update_slave_state = 1;

			spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
			if (update_slave_state) {
				priv->mfunc.master.slave_state[flr_slave].active = false;
				priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
				priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
			}
			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.slave_flr_event_work);
			break;

		case MLX4_EVENT_TYPE_FATAL_WARNING:
			if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
				if (mlx4_is_master(dev))
					for (i = 0; i < dev->num_slaves; i++) {
						mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
							 __func__, i);
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
				mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
					 be16_to_cpu(eqe->event.warming.warning_threshold),
					 be16_to_cpu(eqe->event.warming.current_temperature));
			} else
				mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
					  eqe->type, eqe->subtype, eq->eqn,
					  eq->cons_index, eqe->owner, eq->nent,
					  eqe->slave_id,
					  !!(eqe->owner & 0x80) ^
					  !!(eq->cons_index & eq->nent) ? "HW" : "SW");

			break;

		case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
			mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
					    (unsigned long) eqe);
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
				  eqe->type, eqe->subtype, eq->eqn,
				  eq->cons_index, eqe->owner, eq->nent,
				  eqe->slave_id,
				  !!(eqe->owner & 0x80) ^
				  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	return eqes_found;
}

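/*
 * A note on the spare-EQE arithmetic (explanatory only): every EQ is
 * created with MLX4_NUM_SPARE_EQE (0x80) entries beyond what the caller
 * asked for, and the loop above rings the doorbell with req_not == 0
 * after every 0x80 consumed entries, so the hardware should always see
 * free entries before it could conclude the queue has overflowed.
 */
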
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq  *eq  = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq =
		priv->mfunc.master.slave_state[slave].event_eq;
	u32 in_modifier = vhcr->in_modifier;
	u32 eqn = in_modifier & 0x3FF;
	u64 in_param = vhcr->in_param;
	int err = 0;
	int i;

	if (slave == dev->caps.function)
		err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
			       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			       MLX4_CMD_NATIVE);
	if (!err)
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
			if (in_param & (1LL << i))
				event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;
	return err;
}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
			int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

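/*
 * Encoding note (mirrors the decoding in mlx4_MAP_EQ_wrapper above):
 * the input parameter carries the 64-bit event-type mask, the low bits
 * of the input modifier select the EQ number, and bit 31 distinguishes
 * map (0) from unmap (1).
 */
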
static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
			MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num,
			    0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_WRAPPED);
}

static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
	 */
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
		dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
}

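/*
 * Worked example (illustrative numbers only): with 8 reserved EQs,
 * 16 completion vectors, one async EQ and a pool of 3, this computes
 * (16 + 1 + 8 + 3)/4 - 8/4 + 1 = 7 - 2 + 1 = 6 UAR pages, each holding
 * 4 EQ doorbells.
 */
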
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(pci_resource_start(dev->pdev, 2) +
				((eq->eqn / 4) << PAGE_SHIFT),
				PAGE_SIZE);
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

static void mlx4_unmap_uar(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		if (priv->eq_table.uar_map[i]) {
			iounmap(priv->eq_table.uar_map[i]);
			priv->eq_table.uar_map[i] = NULL;
		}
}

static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev   = dev;
	eq->nent  = roundup_pow_of_two(max(nent, 2));
	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
	npages = PAGE_ALIGN(eq->nent * (MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor)) / PAGE_SIZE;
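
	/*
	 * Sizing example (illustrative numbers, assuming 4K pages): a
	 * request for nent = 1000 on a device with 64-byte EQEs
	 * (eqe_factor == 1) rounds up to eq->nent = 1024, giving
	 * PAGE_ALIGN(1024 * 64) / 4096 = 16 pages.
	 */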

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK   |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}

static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	int i;
	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
	int npages = PAGE_ALIGN((MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor) * eq->nent) / PAGE_SIZE;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return;

	err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	if (0) {
		mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				pr_cont("[%02x] ", i * 4);
			pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				pr_cont("\n");
		}
	}

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
				  eq->page_list[i].buf,
				  eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
	mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, vec;

	if (eq_table->have_irq)
		free_irq(dev->pdev->irq, dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		if (eq_table->eq[i].have_irq) {
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
			eq_table->eq[i].have_irq = 0;
		}

	for (i = 0; i < dev->caps.comp_pool; i++) {
		/*
		 * Free the assigned IRQs: all bits should already be 0,
		 * but we need to validate.
		 */
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			/* no locking needed here */
			vec = dev->caps.num_comp_vectors + 1 + i;
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
		}
	}

	kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
		return -ENOMEM;
	}

	return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}

static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct mlx4_irq_notify *n = container_of(notify,
						 struct mlx4_irq_notify,
						 notify);
	struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
	struct radix_tree_iter iter;
	void **slot;

	radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
		struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);

		if (cq->irq == notify->irq)
			cq->irq_affinity_change = true;
	}
}

static void mlx4_release_irq_notifier(struct kref *ref)
{
	struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
						 notify.kref);
	kfree(n);
}

static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
				     struct mlx4_dev *dev, int irq)
{
	struct mlx4_irq_notify *irq_notifier = NULL;
	int err = 0;

	irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
	if (!irq_notifier) {
		mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
			  irq);
		return;
	}

	irq_notifier->notify.irq = irq;
	irq_notifier->notify.notify = mlx4_irq_notifier_notify;
	irq_notifier->notify.release = mlx4_release_irq_notifier;
	irq_notifier->arg = priv;
	err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
	if (err) {
		kfree(irq_notifier);
		irq_notifier = NULL;
		mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
	}
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
				    sizeof *priv->eq_table.eq, GFP_KERNEL);
	if (!priv->eq_table.eq)
		return -ENOMEM;

	return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
	kfree(mlx4_priv(dev)->eq_table.eq);
}

int mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
					 sizeof *priv->eq_table.uar_map,
					 GFP_KERNEL);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_map_clr_int(dev);
		if (err)
			goto err_out_bitmap;

		priv->eq_table.clr_mask =
			swab32(1 << (priv->eq_table.inta_pin & 31));
		priv->eq_table.clr_int  = priv->clr_base +
			(priv->eq_table.inta_pin < 32 ? 4 : 0);
	}

	priv->eq_table.irq_names =
		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
					     dev->caps.comp_pool),
			GFP_KERNEL);
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_bitmap;
	}

	for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
			     &priv->eq_table.eq[dev->caps.num_comp_vectors]);
	if (err)
		goto err_out_comp;

	/* if the additional completion vector pool size is 0, this loop will not run */
	for (i = dev->caps.num_comp_vectors + 1;
	      i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {

		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	if (dev->flags & MLX4_FLAG_MSI_X) {
		const char *eq_name;

		for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
			if (i < dev->caps.num_comp_vectors) {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-comp-%d@pci:%s", i,
					 pci_name(dev->pdev));
			} else {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-async@pci:%s",
					 pci_name(dev->pdev));
			}

			eq_name = priv->eq_table.irq_names +
				  i * MLX4_IRQNAME_SIZE;
			err = request_irq(priv->eq_table.eq[i].irq,
					  mlx4_msi_x_interrupt, 0, eq_name,
					  priv->eq_table.eq + i);
			if (err)
				goto err_out_async;

			priv->eq_table.eq[i].have_irq = 1;
		}
	} else {
		snprintf(priv->eq_table.irq_names,
			 MLX4_IRQNAME_SIZE,
			 DRV_NAME "@pci:%s",
			 pci_name(dev->pdev));
		err = request_irq(dev->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, priv->eq_table.irq_names, dev);
		if (err)
			goto err_out_async;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

	return 0;

err_out_async:
	mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);

err_out_comp:
	i = dev->caps.num_comp_vectors - 1;

err_out_unmap:
	while (i >= 0) {
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
		--i;
	}
	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);
	mlx4_free_irqs(dev);

err_out_bitmap:
	mlx4_unmap_uar(dev);
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	kfree(priv->eq_table.uar_map);

	return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);

	mlx4_free_irqs(dev);

	for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);

	mlx4_unmap_uar(dev);
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

	kfree(priv->eq_table.uar_map);
}

/* A test that verifies that we can accept interrupts on all
 * the IRQ vectors of the device.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupts(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int err;

	err = mlx4_NOP(dev);
	/* When not in MSI_X, there is only one irq to check */
	if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
		return err;

	/* A loop over all completion vectors: for each vector we check
	 * whether it works by mapping command completions to that vector
	 * and performing a NOP command.
	 */
	for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
		/* Temporarily use polling for command completions */
		mlx4_cmd_use_polling(dev);

		/* Map the new eq to handle all asynchronous events */
		err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
				  priv->eq_table.eq[i].eqn);
		if (err) {
			mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
			mlx4_cmd_use_events(dev);
			break;
		}

		/* Go back to using events */
		mlx4_cmd_use_events(dev);
		err = mlx4_NOP(dev);
	}

	/* Return to default */
	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);

int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
		   int *vector)
{

	struct mlx4_priv *priv = mlx4_priv(dev);
	int vec = 0, err = 0, i;

	mutex_lock(&priv->msix_ctl.pool_lock);
	for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
		if (~priv->msix_ctl.pool_bm & 1ULL << i) {
			priv->msix_ctl.pool_bm |= 1ULL << i;
			vec = dev->caps.num_comp_vectors + 1 + i;
			snprintf(priv->eq_table.irq_names +
					vec * MLX4_IRQNAME_SIZE,
					MLX4_IRQNAME_SIZE, "%s", name);
#ifdef CONFIG_RFS_ACCEL
			if (rmap) {
				err = irq_cpu_rmap_add(rmap,
						       priv->eq_table.eq[vec].irq);
				if (err)
					mlx4_warn(dev, "Failed adding irq rmap\n");
			}
#endif
			err = request_irq(priv->eq_table.eq[vec].irq,
					  mlx4_msi_x_interrupt, 0,
					  &priv->eq_table.irq_names[vec<<5],
					  priv->eq_table.eq + vec);
			if (err) {
				/* zero out the bit by flipping it */
				priv->msix_ctl.pool_bm ^= 1ULL << i;
				vec = 0;
				continue;
				/* we don't want to break here */
			}
			mlx4_assign_irq_notifier(priv, dev,
						 priv->eq_table.eq[vec].irq);

			eq_set_ci(&priv->eq_table.eq[vec], 1);
		}
	}
	mutex_unlock(&priv->msix_ctl.pool_lock);

	if (vec) {
		*vector = vec;
	} else {
		*vector = 0;
		err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
	}
	return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);

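/*
 * Hypothetical usage sketch (not part of this file): a consumer such as
 * a netdev RX queue asks the pool for a dedicated vector and returns it
 * when done; the name "my-rx-0" is illustrative only:
 *
 *	int vec;
 *
 *	err = mlx4_assign_eq(dev, "my-rx-0", NULL, &vec);
 *	if (!err) {
 *		... attach CQs to vec ...
 *		mlx4_release_eq(dev, vec);
 *	}
 */
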
void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	/* compute the index of this vector inside the comp pool */
	int i = vec - dev->caps.num_comp_vectors - 1;

	if (likely(i >= 0)) {
		/* sanity check, making sure we're not trying to free IRQs
		 * belonging to a legacy EQ
		 */
		mutex_lock(&priv->msix_ctl.pool_lock);
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			irq_set_affinity_notifier(
				priv->eq_table.eq[vec].irq,
				NULL);
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
			priv->msix_ctl.pool_bm &= ~(1ULL << i);
		}
		mutex_unlock(&priv->msix_ctl.pool_lock);
	}

}
EXPORT_SYMBOL(mlx4_release_eq);