mlx4: Use port management change event instead of smp_snoop
drivers/net/ethernet/mellanox/mlx4/eq.c
/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_IRQNAME_SIZE	= 32
};

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD)		    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)	    | \
			       (1ull << MLX4_EVENT_TYPE_FLR_EVENT)	    | \
			       (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))

static u64 get_async_ev_mask(struct mlx4_dev *dev)
{
	u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);

	return async_ev_mask;
}

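/*
 * Ring the EQ doorbell: publish the low 24 bits of the consumer index
 * and, when req_not is set, re-arm the EQ so the HCA raises an
 * interrupt on the next event.
 */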
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

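/*
 * The slave event queue is a software ring: ownership of each EQE is
 * tracked the same way as on a hardware EQ, by comparing the entry's
 * owner bit against the wrap-around parity of the consumer index.
 */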
static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
{
	struct mlx4_eqe *eqe =
		&slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
	return (!!(eqe->owner & 0x80) ^
		!!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
		eqe : NULL;
}

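/*
 * Work-queue handler: drain the master's software slave event queue
 * and deliver each EQE to the addressed slave (or to all active
 * slaves) via the GEN_EQE firmware command.
 */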
void mlx4_gen_slave_eqe(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
	struct mlx4_eqe *eqe;
	u8 slave;
	int i;

	for (eqe = next_slave_event_eqe(slave_eq); eqe;
	      eqe = next_slave_event_eqe(slave_eq)) {
		slave = eqe->slave_id;

		/* All active slaves need to receive the event */
		if (slave == ALL_SLAVES) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (i != dev->caps.function &&
				    master->slave_state[i].active)
					if (mlx4_GEN_EQE(dev, i, eqe))
						mlx4_warn(dev, "Failed to generate "
							  "event for slave %d\n", i);
			}
		} else {
			if (mlx4_GEN_EQE(dev, slave, eqe))
				mlx4_warn(dev, "Failed to generate event "
					  "for slave %d\n", slave);
		}
		++slave_eq->cons;
	}
}


static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
	struct mlx4_eqe *s_eqe =
		&slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];

	if ((!!(s_eqe->owner & 0x80)) ^
	    (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
		mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
			  "No free EQE on slave events queue\n", slave);
		return;
	}

	memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
	s_eqe->slave_id = slave;
	/* ensure all information is written before setting the ownership bit */
	wmb();
	s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
	++slave_eq->prod;

	queue_work(priv->mfunc.master.comm_wq,
		   &priv->mfunc.master.slave_event_work);
}

static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
			     struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave =
		&priv->mfunc.master.slave_state[slave];

	if (!s_slave->active) {
		/*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
		return;
	}

	slave_event(dev, slave, eqe);
}

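/*
 * Work-queue handler run after a Function Level Reset event: clean up
 * all resources owned by each slave whose last command was FLR, return
 * the slave to reset state, and acknowledge the FLR to the firmware.
 */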
void mlx4_master_handle_slave_flr(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_flr_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int i;
	int err;

	mlx4_dbg(dev, "mlx4_handle_slave_flr\n");

	for (i = 0 ; i < dev->num_slaves; i++) {

		if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
			mlx4_dbg(dev, "mlx4_handle_slave_flr: "
				 "clean slave: %d\n", i);

			mlx4_delete_all_resources_for_slave(dev, i);
			/* return the slave to running mode */
			spin_lock(&priv->mfunc.master.slave_state_lock);
			slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
			slave_state[i].is_slave_going_down = 0;
			spin_unlock(&priv->mfunc.master.slave_state_lock);
			/* notify the FW */
			err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (err)
				mlx4_warn(dev, "Failed to notify FW on "
					  "FLR done (slave:%d)\n", i);
		}
	}
}

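/*
 * Core EQ polling loop: consume event queue entries until the
 * ownership bit says the next one still belongs to hardware,
 * dispatching each event by type.  On a multi-function master, events
 * that belong to a slave's resource are forwarded to that slave
 * instead of being handled locally.
 */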
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_eqe *eqe;
	int cqn;
	int eqes_found = 0;
	int set_ci = 0;
	int port;
	int slave = 0;
	int ret;
	u32 flr_slave;
	u8 update_slave_state;
	int i;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_dbg(dev, "event %d arrived\n", eqe->type);
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the QP */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_QP,
						be32_to_cpu(eqe->event.qp.qpn)
						& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "QP event %02x(%02x) on "
						 "EQ %d at index %u: could "
						 "not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}

			}
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
				      0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
				  __func__);
			/* fall through */
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the SRQ */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_SRQ,
						be32_to_cpu(eqe->event.srq.srqn)
						& 0xffffff,
						&slave);
				if (ret && ret != -ENOENT) {
					mlx4_warn(dev, "SRQ event %02x(%02x) "
						  "on EQ %d at index %u: could"
						  " not get slave id (%d)\n",
						  eqe->type, eqe->subtype,
						  eq->eqn, eq->cons_index, ret);
					break;
				}
				mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
					  " event: %02x(%02x)\n", __func__,
					  slave,
					  be32_to_cpu(eqe->event.srq.srqn),
					  eqe->type, eqe->subtype);

				if (!ret && slave != dev->caps.function) {
					mlx4_warn(dev, "%s: sending event "
						  "%02x(%02x) to slave:%d\n",
						  __func__, eqe->type,
						  eqe->subtype, slave);
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
				       0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MLX4_EVENT_TYPE_PORT_CHANGE:
			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
				mlx4_dispatch_event(dev,
						    MLX4_DEV_EVENT_PORT_DOWN,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
				if (mlx4_is_master(dev))
					/* change the state of all slaves'
					 * ports to down */
					for (i = 0; i < dev->num_slaves; i++) {
						mlx4_dbg(dev, "%s: Sending "
							 "MLX4_PORT_CHANGE_SUBTYPE_DOWN"
							 " to slave: %d, port:%d\n",
							 __func__, i, port);
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
			} else {
				mlx4_dispatch_event(dev,
						    MLX4_DEV_EVENT_PORT_UP,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 0;

				if (mlx4_is_master(dev)) {
					for (i = 0; i < dev->num_slaves; i++) {
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
				}
			}
			break;

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			if (mlx4_is_master(dev)) {
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_CQ,
						be32_to_cpu(eqe->event.cq_err.cqn)
						& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "CQ event %02x(%02x) on "
						 "EQ %d at index %u: could "
						 "not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_cq_event(dev,
				      be32_to_cpu(eqe->event.cq_err.cqn)
				      & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_COMM_CHANNEL:
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Received comm channel event "
					  "for non master device\n");
				break;
			}
			memcpy(&priv->mfunc.master.comm_arm_bit_vector,
			       eqe->event.comm_channel_arm.bit_vec,
			       sizeof eqe->event.comm_channel_arm.bit_vec);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.comm_work);
			break;

		case MLX4_EVENT_TYPE_FLR_EVENT:
			flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Non-master function received "
					  "FLR event\n");
				break;
			}

			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);

			if (flr_slave >= dev->num_slaves) {
				mlx4_warn(dev,
					  "Got FLR for unknown function: %d\n",
					  flr_slave);
				update_slave_state = 0;
			} else
				update_slave_state = 1;

			spin_lock(&priv->mfunc.master.slave_state_lock);
			if (update_slave_state) {
				priv->mfunc.master.slave_state[flr_slave].active = false;
				priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
				priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
			}
			spin_unlock(&priv->mfunc.master.slave_state_lock);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.slave_flr_event_work);
			break;

		case MLX4_EVENT_TYPE_FATAL_WARNING:
			if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
				if (mlx4_is_master(dev))
					for (i = 0; i < dev->num_slaves; i++) {
						mlx4_dbg(dev, "%s: Sending "
							 "MLX4_FATAL_WARNING_SUBTYPE_WARMING"
							 " to slave: %d\n", __func__, i);
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
				mlx4_err(dev, "Temperature Threshold was reached! "
					 "Threshold: %d celsius degrees; "
					 "Current Temperature: %d\n",
					 be16_to_cpu(eqe->event.warming.warning_threshold),
					 be16_to_cpu(eqe->event.warming.current_temperature));
			} else
				mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), "
					  "subtype %02x on EQ %d at index %u. owner=%x, "
					  "nent=0x%x, slave=%x, ownership=%s\n",
					  eqe->type, eqe->subtype, eq->eqn,
					  eq->cons_index, eqe->owner, eq->nent,
					  eqe->slave_id,
					  !!(eqe->owner & 0x80) ^
					  !!(eq->cons_index & eq->nent) ? "HW" : "SW");

			break;

		case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
			mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
					    (unsigned long) eqe);
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
				  "index %u. owner=%x, nent=0x%x, slave=%x, "
				  "ownership=%s\n",
				  eqe->type, eqe->subtype, eq->eqn,
				  eq->cons_index, eqe->owner, eq->nent,
				  eqe->slave_id,
				  !!(eqe->owner & 0x80) ^
				  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	return eqes_found;
}

static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq  *eq  = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

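/*
 * Command wrapper for MAP_EQ: only when the caller is the PF itself is
 * the firmware MAP_EQ command actually executed; for a slave, the
 * master just records, per event type, which of the slave's EQs that
 * event should be delivered to, so it can be forwarded later through
 * GEN_EQE.
 */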
int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq =
		priv->mfunc.master.slave_state[slave].event_eq;
	u32 in_modifier = vhcr->in_modifier;
	u32 eqn = in_modifier & 0x1FF;
	u64 in_param = vhcr->in_param;
	int err = 0;
	int i;

	if (slave == dev->caps.function)
		err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
			       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			       MLX4_CMD_NATIVE);
	if (!err)
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
			if (in_param & (1LL << i))
				event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;

	return err;
}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
			int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
			MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num,
			    0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_WRAPPED);
}

static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
	 */
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
		dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
}

static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(pci_resource_start(dev->pdev, 2) +
				((eq->eqn / 4) << PAGE_SHIFT),
				PAGE_SIZE);
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

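/*
 * Allocate and wire up one EQ: round the requested number of entries
 * up to a power of two, allocate the EQE pages as DMA-coherent memory,
 * map them through the MTT, and hand the queue to firmware with
 * SW2HW_EQ.
 */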
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev   = dev;
	eq->nent  = roundup_pow_of_two(max(nent, 2));
	npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK   |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}

static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return;

	err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	if (0) {
		mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				pr_cont("[%02x] ", i * 4);
			pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				pr_cont("\n");
		}
	}

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
				  eq->page_list[i].buf,
				  eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, vec;

	if (eq_table->have_irq)
		free_irq(dev->pdev->irq, dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		if (eq_table->eq[i].have_irq) {
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
			eq_table->eq[i].have_irq = 0;
		}

	for (i = 0; i < dev->caps.comp_pool; i++) {
		/*
		 * Free the IRQs still assigned from the pool.
		 * All bits should already be 0, but validate anyway.
		 */
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			/* no need for locking here */
			vec = dev->caps.num_comp_vectors + 1 + i;
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
		}
	}


	kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
		return -ENOMEM;
	}

	return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
				    sizeof *priv->eq_table.eq, GFP_KERNEL);
	if (!priv->eq_table.eq)
		return -ENOMEM;

	return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
	kfree(mlx4_priv(dev)->eq_table.eq);
}

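/*
 * Bring up the whole EQ table: one EQ per completion vector, one async
 * EQ, plus the extra pool of completion EQs, then request the MSI-X or
 * INTx interrupts and map the async events onto the async EQ.
 */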
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
					 sizeof *priv->eq_table.uar_map,
					 GFP_KERNEL);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_map_clr_int(dev);
		if (err)
			goto err_out_bitmap;

		priv->eq_table.clr_mask =
			swab32(1 << (priv->eq_table.inta_pin & 31));
		priv->eq_table.clr_int  = priv->clr_base +
			(priv->eq_table.inta_pin < 32 ? 4 : 0);
	}

	priv->eq_table.irq_names =
		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
					     dev->caps.comp_pool),
			GFP_KERNEL);
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_bitmap;
	}

	for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
			     &priv->eq_table.eq[dev->caps.num_comp_vectors]);
	if (err)
		goto err_out_comp;

	/* if the additional completion vector pool size is 0, this loop will not run */
	for (i = dev->caps.num_comp_vectors + 1;
	      i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {

		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}


	if (dev->flags & MLX4_FLAG_MSI_X) {
		const char *eq_name;

		for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
			if (i < dev->caps.num_comp_vectors) {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-comp-%d@pci:%s", i,
					 pci_name(dev->pdev));
			} else {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-async@pci:%s",
					 pci_name(dev->pdev));
			}

			eq_name = priv->eq_table.irq_names +
				  i * MLX4_IRQNAME_SIZE;
			err = request_irq(priv->eq_table.eq[i].irq,
					  mlx4_msi_x_interrupt, 0, eq_name,
					  priv->eq_table.eq + i);
			if (err)
				goto err_out_async;

			priv->eq_table.eq[i].have_irq = 1;
		}
	} else {
		snprintf(priv->eq_table.irq_names,
			 MLX4_IRQNAME_SIZE,
			 DRV_NAME "@pci:%s",
			 pci_name(dev->pdev));
		err = request_irq(dev->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, priv->eq_table.irq_names, dev);
		if (err)
			goto err_out_async;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			   priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

	return 0;

err_out_async:
	mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);

err_out_comp:
	i = dev->caps.num_comp_vectors - 1;

err_out_unmap:
	while (i >= 0) {
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
		--i;
	}
	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);
	mlx4_free_irqs(dev);

err_out_bitmap:
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	kfree(priv->eq_table.uar_map);

	return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);

	mlx4_free_irqs(dev);

	for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		if (priv->eq_table.uar_map[i])
			iounmap(priv->eq_table.uar_map[i]);

	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

	kfree(priv->eq_table.uar_map);
}

/* A test that verifies that we can accept interrupts on all
 * the irq vectors of the device.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupts(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int err;

	err = mlx4_NOP(dev);
	/* When not in MSI_X, there is only one irq to check */
	if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
		return err;

	/* A loop over all completion vectors, for each vector we will check
	 * whether it works by mapping command completions to that vector
	 * and performing a NOP command
	 */
	for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
		/* Temporary use polling for command completions */
		mlx4_cmd_use_polling(dev);

		/* Map the new eq to handle all asynchronous events */
		err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
				  priv->eq_table.eq[i].eqn);
		if (err) {
			mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
			mlx4_cmd_use_events(dev);
			break;
		}

		/* Go back to using events */
		mlx4_cmd_use_events(dev);
		err = mlx4_NOP(dev);
	}

	/* Return to default */
	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);

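/*
 * Hand out one EQ from the extra completion-vector pool: find a free
 * slot in the pool bitmap, name and request its IRQ, and return the
 * vector number to the caller.
 */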
int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int vec = 0, err = 0, i;

	mutex_lock(&priv->msix_ctl.pool_lock);
	for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
		if (~priv->msix_ctl.pool_bm & 1ULL << i) {
			priv->msix_ctl.pool_bm |= 1ULL << i;
			vec = dev->caps.num_comp_vectors + 1 + i;
			snprintf(priv->eq_table.irq_names +
				 vec * MLX4_IRQNAME_SIZE,
				 MLX4_IRQNAME_SIZE, "%s", name);
			err = request_irq(priv->eq_table.eq[vec].irq,
					  mlx4_msi_x_interrupt, 0,
					  &priv->eq_table.irq_names[vec << 5],
					  priv->eq_table.eq + vec);
			if (err) {
				/* zero out the bit by flipping it back */
				priv->msix_ctl.pool_bm ^= 1ULL << i;
				vec = 0;
				/* keep looking for another free vector */
				continue;
			}
			eq_set_ci(&priv->eq_table.eq[vec], 1);
		}
	}
	mutex_unlock(&priv->msix_ctl.pool_lock);

	if (vec) {
		*vector = vec;
	} else {
		*vector = 0;
		err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
	}
	return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);

void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	/* bitmap index */
	int i = vec - dev->caps.num_comp_vectors - 1;

	if (likely(i >= 0)) {
		/* sanity check: make sure we're not trying to free IRQs
		 * belonging to a legacy EQ */
		mutex_lock(&priv->msix_ctl.pool_lock);
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
			priv->msix_ctl.pool_bm &= ~(1ULL << i);
		}
		mutex_unlock(&priv->msix_ctl.pool_lock);
	}

}
EXPORT_SYMBOL(mlx4_release_eq);