/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_eq.c 1382 2004-12-24 02:21:02Z roland $
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_config_reg.h"

enum {
        MTHCA_NUM_ASYNC_EQE = 0x80,
        MTHCA_NUM_CMD_EQE   = 0x80,
        MTHCA_EQ_ENTRY_SIZE = 0x20
};
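
/*
 * MTHCA_EQ_ENTRY_SIZE (0x20 == 32 bytes) matches sizeof (struct
 * mthca_eqe) below: a 4-byte header, a 24-byte event union and a
 * 4-byte trailer.  get_eqe() depends on this when turning a consumer
 * index into a byte offset within the EQ buffer.
 */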

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_eq_context {
        __be32 flags;
        __be64 start;
        __be32 logsize_usrpage;
        __be32 tavor_pd;        /* reserved for Arbel */
        u8     reserved1[3];
        u8     intr;
        __be32 arbel_pd;        /* lost_count for Tavor */
        __be32 lkey;
        u32    reserved2[2];
        __be32 consumer_index;
        __be32 producer_index;
        u32    reserved3[4];
} __attribute__((packed));

#define MTHCA_EQ_STATUS_OK          ( 0 << 28)
#define MTHCA_EQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_EQ_OWNER_SW           ( 0 << 24)
#define MTHCA_EQ_OWNER_HW           ( 1 << 24)
#define MTHCA_EQ_FLAG_TR            ( 1 << 18)
#define MTHCA_EQ_FLAG_OI            ( 1 << 17)
#define MTHCA_EQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_EQ_STATE_FIRED        ( 2 <<  8)
#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 <<  8)
#define MTHCA_EQ_STATE_ARBEL        ( 8 <<  8)

enum {
        MTHCA_EVENT_TYPE_COMP               = 0x00,
        MTHCA_EVENT_TYPE_PATH_MIG           = 0x01,
        MTHCA_EVENT_TYPE_COMM_EST           = 0x02,
        MTHCA_EVENT_TYPE_SQ_DRAINED         = 0x03,
        MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE    = 0x13,
        MTHCA_EVENT_TYPE_SRQ_LIMIT          = 0x14,
        MTHCA_EVENT_TYPE_CQ_ERROR           = 0x04,
        MTHCA_EVENT_TYPE_WQ_CATAS_ERROR     = 0x05,
        MTHCA_EVENT_TYPE_EEC_CATAS_ERROR    = 0x06,
        MTHCA_EVENT_TYPE_PATH_MIG_FAILED    = 0x07,
        MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
        MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR    = 0x11,
        MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR    = 0x12,
        MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
        MTHCA_EVENT_TYPE_PORT_CHANGE        = 0x09,
        MTHCA_EVENT_TYPE_EQ_OVERFLOW        = 0x0f,
        MTHCA_EVENT_TYPE_ECC_DETECT         = 0x0e,
        MTHCA_EVENT_TYPE_CMD                = 0x0a
};

#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG)           | \
                                (1ULL << MTHCA_EVENT_TYPE_COMM_EST)           | \
                                (1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \
                                (1ULL << MTHCA_EVENT_TYPE_CQ_ERROR)           | \
                                (1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                                (1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \
                                (1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \
                                (1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                                (1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                                (1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
                                (1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
                                (1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                                (1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
                                (1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
#define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)

#define MTHCA_EQ_DB_INC_CI     (1 << 24)
#define MTHCA_EQ_DB_REQ_NOT    (2 << 24)
#define MTHCA_EQ_DB_DISARM_CQ  (3 << 24)
#define MTHCA_EQ_DB_SET_CI     (4 << 24)
#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24)
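
/*
 * The EQ doorbell commands above go in bits 31:24 of the first
 * doorbell word, with the EQN in the low bits and a command-specific
 * argument (consumer index, CQN, ...) in the second word -- see
 * tavor_set_eq_ci(), tavor_eq_req_not() and disarm_cq() below.
 */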

struct mthca_eqe {
        u8 reserved1;
        u8 type;
        u8 reserved2;
        u8 subtype;
        union {
                u32 raw[6];
                struct {
                        __be32 cqn;
                } __attribute__((packed)) comp;
                struct {
                        u16    reserved1;
                        __be16 token;
                        u32    reserved2;
                        u8     reserved3[3];
                        u8     status;
                        __be64 out_param;
                } __attribute__((packed)) cmd;
                struct {
                        __be32 qpn;
                } __attribute__((packed)) qp;
                struct {
                        __be32 srqn;
                } __attribute__((packed)) srq;
                struct {
                        __be32 cqn;
                        u32    reserved1;
                        u8     reserved2[3];
                        u8     syndrome;
                } __attribute__((packed)) cq_err;
                struct {
                        u32    reserved1[2];
                        __be32 port;
                } __attribute__((packed)) port_change;
        } event;
        u8 reserved3[3];
        u8 owner;
} __attribute__((packed));

#define MTHCA_EQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_EQ_ENTRY_OWNER_HW      (1 << 7)
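
/*
 * Bit 7 of an EQE's owner byte is its ownership flag: set_eqe_hw()
 * hands an entry back to the HCA by setting the bit, and the HCA
 * writes the byte with the bit clear when it posts a new event, so
 * next_eqe_sw() treats a set bit as "no event yet".
 */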

static inline u64 async_mask(struct mthca_dev *dev)
{
        return dev->mthca_flags & MTHCA_FLAG_SRQ ?
                MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK :
                MTHCA_ASYNC_EVENT_MASK;
}

static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
        __be32 doorbell[2];

        doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_SET_CI | eq->eqn);
        doorbell[1] = cpu_to_be32(ci & (eq->nent - 1));

        /*
         * This barrier makes sure that all updates to ownership bits
         * done by set_eqe_hw() hit memory before the consumer index
         * is updated.  set_eq_ci() allows the HCA to possibly write
         * more EQ entries, and we want to avoid the exceedingly
         * unlikely possibility of the HCA writing an entry and then
         * having set_eqe_hw() overwrite the owner field.
         */
        wmb();
        mthca_write64(doorbell,
                      dev->kar + MTHCA_EQ_DOORBELL,
                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
        /* See comment in tavor_set_eq_ci() above. */
        wmb();
        __raw_writel((__force u32) cpu_to_be32(ci),
                     dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
        if (mthca_is_memfree(dev))
                arbel_set_eq_ci(dev, eq, ci);
        else
                tavor_set_eq_ci(dev, eq, ci);
}

static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
{
        __be32 doorbell[2];

        doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_REQ_NOT | eqn);
        doorbell[1] = 0;

        mthca_write64(doorbell,
                      dev->kar + MTHCA_EQ_DOORBELL,
                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
{
        writel(eqn_mask, dev->eq_regs.arbel.eq_arm);
}

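/*
 * Only Tavor needs an explicit EQ doorbell to disarm a CQ; on mem-free
 * (Arbel) HCAs CQ arming is presumably handled through the CQ doorbell
 * record mechanism instead, so this is a no-op there.
 */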
static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
{
        if (!mthca_is_memfree(dev)) {
                __be32 doorbell[2];

                doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_DISARM_CQ | eqn);
                doorbell[1] = cpu_to_be32(cqn);

                mthca_write64(doorbell,
                              dev->kar + MTHCA_EQ_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }
}

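/*
 * The EQ buffer is a list of PAGE_SIZE chunks rather than one
 * physically contiguous region, and eq->nent is always a power of two
 * (see mthca_create_eq()), so indices wrap with a simple mask.  For
 * example, entry 200 of a 256-entry EQ sits at byte offset 200 * 32 =
 * 6400, i.e. page 1, offset 2304, with 4 KB pages.
 */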
static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
{
        unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;
        return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq)
{
        struct mthca_eqe *eqe;
        eqe = get_eqe(eq, eq->cons_index);
        return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
}

static inline void set_eqe_hw(struct mthca_eqe *eqe)
{
        eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW;
}

static void port_change(struct mthca_dev *dev, int port, int active)
{
        struct ib_event record;

        mthca_dbg(dev, "Port change to %s for port %d\n",
                  active ? "active" : "down", port);

        record.device = &dev->ib_dev;
        record.event  = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
        record.element.port_num = port;

        ib_dispatch_event(&record);
}

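/*
 * Poll an EQ until no software-owned entries remain.  Returns nonzero
 * if any EQEs were consumed; except for the rare command-event case,
 * writing the final consumer index is left to the caller (see the
 * comment at the bottom of the loop).
 */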
static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
{
        struct mthca_eqe *eqe;
        int disarm_cqn;
        int eqes_found = 0;

        while ((eqe = next_eqe_sw(eq))) {
                int set_ci = 0;

                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                rmb();

                switch (eqe->type) {
                case MTHCA_EVENT_TYPE_COMP:
                        disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
                        disarm_cq(dev, eq->eqn, disarm_cqn);
                        mthca_cq_event(dev, disarm_cqn);
                        break;

                case MTHCA_EVENT_TYPE_PATH_MIG:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_PATH_MIG);
                        break;

                case MTHCA_EVENT_TYPE_COMM_EST:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_COMM_EST);
                        break;

                case MTHCA_EVENT_TYPE_SQ_DRAINED:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_SQ_DRAINED);
                        break;

                case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_LAST_WQE_REACHED);
                        break;

                case MTHCA_EVENT_TYPE_SRQ_LIMIT:
                        mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
                                        IB_EVENT_SRQ_LIMIT_REACHED);
                        break;

                case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_FATAL);
                        break;

                case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_PATH_MIG_ERR);
                        break;

                case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_REQ_ERR);
                        break;

                case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_ACCESS_ERR);
                        break;

                case MTHCA_EVENT_TYPE_CMD:
                        mthca_cmd_event(dev,
                                        be16_to_cpu(eqe->event.cmd.token),
                                        eqe->event.cmd.status,
                                        be64_to_cpu(eqe->event.cmd.out_param));
                        /*
                         * cmd_event() may add more commands.
                         * The card will think the queue has overflowed if
                         * we don't tell it we've been processing events.
                         */
                        set_ci = 1;
                        break;

                case MTHCA_EVENT_TYPE_PORT_CHANGE:
                        port_change(dev,
                                    (be32_to_cpu(eqe->event.port_change.port) >> 28) & 3,
                                    eqe->subtype == 0x4);
                        break;

                case MTHCA_EVENT_TYPE_CQ_ERROR:
                        mthca_warn(dev, "CQ %s on CQN %06x\n",
                                   eqe->event.cq_err.syndrome == 1 ?
                                   "overrun" : "access violation",
                                   be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
                        break;

                case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
                        mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
                        break;

                case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:
                case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
                case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:
                case MTHCA_EVENT_TYPE_ECC_DETECT:
                default:
                        mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
                                   eqe->type, eqe->subtype, eq->eqn);
                        break;
                }

                set_eqe_hw(eqe);
                ++eq->cons_index;
                eqes_found = 1;

                if (unlikely(set_ci)) {
                        /*
                         * Conditional on hca_type is OK here because
                         * this is a rare case, not the fast path.
                         */
                        set_eq_ci(dev, eq, eq->cons_index);
                        set_ci = 0;
                }
        }

        /*
         * Rely on caller to set consumer index so that we don't have
         * to test hca_type in our interrupt handling fast path.
         */
        return eqes_found;
}

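/*
 * On Tavor the pending-EQ bits live in the event cause register (ECR),
 * which is 64 bits wide and evidently big-endian: since the driver
 * limits itself to EQs 0..31, it only reads and clears the word at
 * offset 4 and matches it against the pre-swapped eqn_mask.
 */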
static irqreturn_t mthca_tavor_interrupt(int irq, void *dev_ptr, struct pt_regs *regs)
{
        struct mthca_dev *dev = dev_ptr;
        u32 ecr;
        int i;

        if (dev->eq_table.clr_mask)
                writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

        ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
        if (!ecr)
                return IRQ_NONE;

        writel(ecr, dev->eq_regs.tavor.ecr_base +
               MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);

        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                if (ecr & dev->eq_table.eq[i].eqn_mask) {
                        if (mthca_eq_int(dev, &dev->eq_table.eq[i]))
                                tavor_set_eq_ci(dev, &dev->eq_table.eq[i],
                                                dev->eq_table.eq[i].cons_index);
                        tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
                }

        return IRQ_HANDLED;
}

static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr,
                                               struct pt_regs *regs)
{
        struct mthca_eq *eq = eq_ptr;
        struct mthca_dev *dev = eq->dev;

        mthca_eq_int(dev, eq);
        tavor_set_eq_ci(dev, eq, eq->cons_index);
        tavor_eq_req_not(dev, eq->eqn);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

static irqreturn_t mthca_arbel_interrupt(int irq, void *dev_ptr, struct pt_regs *regs)
{
        struct mthca_dev *dev = dev_ptr;
        int work = 0;
        int i;

        if (dev->eq_table.clr_mask)
                writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                if (mthca_eq_int(dev, &dev->eq_table.eq[i])) {
                        work = 1;
                        arbel_set_eq_ci(dev, &dev->eq_table.eq[i],
                                        dev->eq_table.eq[i].cons_index);
                }

        arbel_eq_req_not(dev, dev->eq_table.arm_mask);

        return IRQ_RETVAL(work);
}

static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr,
                                               struct pt_regs *regs)
{
        struct mthca_eq *eq = eq_ptr;
        struct mthca_dev *dev = eq->dev;

        mthca_eq_int(dev, eq);
        arbel_set_eq_ci(dev, eq, eq->cons_index);
        arbel_eq_req_not(dev, eq->eqn_mask);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

static int __devinit mthca_create_eq(struct mthca_dev *dev,
                                     int nent,
                                     u8 intr,
                                     struct mthca_eq *eq)
{
        int npages;
        u64 *dma_list = NULL;
        dma_addr_t t;
        struct mthca_mailbox *mailbox;
        struct mthca_eq_context *eq_context;
        int err = -ENOMEM;
        int i;
        u8 status;

        eq->dev  = dev;
        eq->nent = roundup_pow_of_two(max(nent, 2));

        /*
         * Compute npages from the rounded-up eq->nent, not the raw
         * nent, so that the buffer covers every possible entry.
         */
        npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;

        eq->page_list = kmalloc(npages * sizeof *eq->page_list,
                                GFP_KERNEL);
        if (!eq->page_list)
                goto err_out;

        for (i = 0; i < npages; ++i)
                eq->page_list[i].buf = NULL;

        dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
        if (!dma_list)
                goto err_out_free;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                goto err_out_free;
        eq_context = mailbox->buf;

        for (i = 0; i < npages; ++i) {
                eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
                                                          PAGE_SIZE, &t, GFP_KERNEL);
                if (!eq->page_list[i].buf)
                        goto err_out_free_pages;

                dma_list[i] = t;
                pci_unmap_addr_set(&eq->page_list[i], mapping, t);

                memset(eq->page_list[i].buf, 0, PAGE_SIZE);
        }

        for (i = 0; i < eq->nent; ++i)
                set_eqe_hw(get_eqe(eq, i));

        eq->eqn = mthca_alloc(&dev->eq_table.alloc);
        if (eq->eqn == -1)
                goto err_out_free_pages;

        err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
                                  dma_list, PAGE_SHIFT, npages,
                                  0, npages * PAGE_SIZE,
                                  MTHCA_MPT_FLAG_LOCAL_WRITE |
                                  MTHCA_MPT_FLAG_LOCAL_READ,
                                  &eq->mr);
        if (err)
                goto err_out_free_eq;

        memset(eq_context, 0, sizeof *eq_context);
        eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK   |
                                        MTHCA_EQ_OWNER_HW    |
                                        MTHCA_EQ_STATE_ARMED |
                                        MTHCA_EQ_FLAG_TR);
        if (mthca_is_memfree(dev))
                eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);

        eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
        if (mthca_is_memfree(dev)) {
                eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
        } else {
                eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
                eq_context->tavor_pd         = cpu_to_be32(dev->driver_pd.pd_num);
        }
        eq_context->intr = intr;
        eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey);

        err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
        if (err) {
                mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
                goto err_out_free_mr;
        }
        if (status) {
                mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n",
                           status);
                err = -EINVAL;
                goto err_out_free_mr;
        }

        kfree(dma_list);
        mthca_free_mailbox(dev, mailbox);

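        /*
         * The EQN mask is stored pre-swapped to big-endian so the
         * interrupt fast path can match it against ECR reads and write
         * it to the EQ arm register without swabbing each time.
         */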
        eq->eqn_mask   = swab32(1 << eq->eqn);
        eq->cons_index = 0;

        dev->eq_table.arm_mask |= eq->eqn_mask;

        mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
                  eq->eqn, eq->nent);

        return err;

err_out_free_mr:
        mthca_free_mr(dev, &eq->mr);

err_out_free_eq:
        mthca_free(&dev->eq_table.alloc, eq->eqn);

err_out_free_pages:
        for (i = 0; i < npages; ++i)
                if (eq->page_list[i].buf)
                        dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                          eq->page_list[i].buf,
                                          pci_unmap_addr(&eq->page_list[i],
                                                         mapping));

        mthca_free_mailbox(dev, mailbox);

err_out_free:
        kfree(eq->page_list);
        kfree(dma_list);

err_out:
        return err;
}

static void mthca_free_eq(struct mthca_dev *dev,
                          struct mthca_eq *eq)
{
        struct mthca_mailbox *mailbox;
        int err;
        u8 status;
        int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
                PAGE_SIZE;
        int i;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return;

        err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
        if (err)
                mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
        if (status)
                mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status);

        dev->eq_table.arm_mask &= ~eq->eqn_mask;

        if (0) {
                mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
                for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
                        if (i % 4 == 0)
                                printk("[%02x] ", i * 4);
                        printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
                        if ((i + 1) % 4 == 0)
                                printk("\n");
                }
        }

        mthca_free_mr(dev, &eq->mr);
        for (i = 0; i < npages; ++i)
                dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                  eq->page_list[i].buf,
                                  pci_unmap_addr(&eq->page_list[i], mapping));

        kfree(eq->page_list);
        mthca_free_mailbox(dev, mailbox);
}

static void mthca_free_irqs(struct mthca_dev *dev)
{
        int i;

        if (dev->eq_table.have_irq)
                free_irq(dev->pdev->irq, dev);
        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                if (dev->eq_table.eq[i].have_irq)
                        free_irq(dev->eq_table.eq[i].msi_x_vector,
                                 dev->eq_table.eq + i);
}

static int __devinit mthca_map_reg(struct mthca_dev *dev,
                                   unsigned long offset, unsigned long size,
                                   void __iomem **map)
{
        unsigned long base = pci_resource_start(dev->pdev, 0);

        if (!request_mem_region(base + offset, size, DRV_NAME))
                return -EBUSY;

        *map = ioremap(base + offset, size);
        if (!*map) {
                release_mem_region(base + offset, size);
                return -ENOMEM;
        }

        return 0;
}

static void mthca_unmap_reg(struct mthca_dev *dev, unsigned long offset,
                            unsigned long size, void __iomem *map)
{
        unsigned long base = pci_resource_start(dev->pdev, 0);

        release_mem_region(base + offset, size);
        iounmap(map);
}

static int __devinit mthca_map_eq_regs(struct mthca_dev *dev)
{
        if (mthca_is_memfree(dev)) {
                /*
                 * We assume that the EQ arm and EQ set CI registers
                 * fall within the first BAR.  We can't trust the
                 * values firmware gives us, since those addresses are
                 * valid on the HCA's side of the PCI bus but not
                 * necessarily the host side.
                 */
                if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                  dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
                                  &dev->clr_base)) {
                        mthca_err(dev, "Couldn't map interrupt clear register, "
                                  "aborting.\n");
                        return -ENOMEM;
                }

                /*
                 * Add 4 because we limit ourselves to EQs 0 ... 31,
                 * so we only need the low word of the register.
                 */
                if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
                                        dev->fw.arbel.eq_arm_base) + 4, 4,
                                  &dev->eq_regs.arbel.eq_arm)) {
                        mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
                        mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                        dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
                                        dev->clr_base);
                        return -ENOMEM;
                }

                if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                  dev->fw.arbel.eq_set_ci_base,
                                  MTHCA_EQ_SET_CI_SIZE,
                                  &dev->eq_regs.arbel.eq_set_ci_base)) {
                        mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
                        mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
                                              dev->fw.arbel.eq_arm_base) + 4, 4,
                                        dev->eq_regs.arbel.eq_arm);
                        mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                        dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
                                        dev->clr_base);
                        return -ENOMEM;
                }
        } else {
                if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
                                  &dev->clr_base)) {
                        mthca_err(dev, "Couldn't map interrupt clear register, "
                                  "aborting.\n");
                        return -ENOMEM;
                }

                if (mthca_map_reg(dev, MTHCA_ECR_BASE,
                                  MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
                                  &dev->eq_regs.tavor.ecr_base)) {
                        mthca_err(dev, "Couldn't map ecr register, "
                                  "aborting.\n");
                        mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
                                        dev->clr_base);
                        return -ENOMEM;
                }
        }

        return 0;
}

static void __devexit mthca_unmap_eq_regs(struct mthca_dev *dev)
{
        if (mthca_is_memfree(dev)) {
                mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                dev->fw.arbel.eq_set_ci_base,
                                MTHCA_EQ_SET_CI_SIZE,
                                dev->eq_regs.arbel.eq_set_ci_base);
                mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
                                      dev->fw.arbel.eq_arm_base) + 4, 4,
                                dev->eq_regs.arbel.eq_arm);
                mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
                                dev->clr_base);
        } else {
                mthca_unmap_reg(dev, MTHCA_ECR_BASE,
                                MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
                                dev->eq_regs.tavor.ecr_base);
                mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
                                dev->clr_base);
        }
}

int __devinit mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
{
        int ret;
        u8 status;

        /*
         * We assume that mapping one page is enough for the whole EQ
         * context table.  This is fine with all current HCAs, because
         * we only use 32 EQs and each EQ uses 32 bytes of context
         * memory, or 1 KB total.
         */
        dev->eq_table.icm_virt = icm_virt;
        dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
        if (!dev->eq_table.icm_page)
                return -ENOMEM;
        dev->eq_table.icm_dma  = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
                                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(dev->eq_table.icm_dma)) {
                __free_page(dev->eq_table.icm_page);
                return -ENOMEM;
        }

        ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status);
        if (!ret && status)
                ret = -EINVAL;
        if (ret) {
                pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
                               PCI_DMA_BIDIRECTIONAL);
                __free_page(dev->eq_table.icm_page);
        }

        return ret;
}

void __devexit mthca_unmap_eq_icm(struct mthca_dev *dev)
{
        u8 status;

        mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, PAGE_SIZE / 4096, &status);
        pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
                       PCI_DMA_BIDIRECTIONAL);
        __free_page(dev->eq_table.icm_page);
}

int __devinit mthca_init_eq_table(struct mthca_dev *dev)
{
        int err;
        u8 status;
        u8 intr;
        int i;

        err = mthca_alloc_init(&dev->eq_table.alloc,
                               dev->limits.num_eqs,
                               dev->limits.num_eqs - 1,
                               dev->limits.reserved_eqs);
        if (err)
                return err;

        err = mthca_map_eq_regs(dev);
        if (err)
                goto err_out_free;

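        /*
         * With a legacy INTA interrupt the driver clears the interrupt
         * itself; the clear register is 64 bits and evidently
         * big-endian, so pin n maps to swab32(1 << (n & 31)) in the
         * word that holds it (offset 4 for pins 0..31).  With MSI or
         * MSI-X no clearing is needed, hence clr_mask is left 0.
         */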
        if (dev->mthca_flags & MTHCA_FLAG_MSI ||
            dev->mthca_flags & MTHCA_FLAG_MSI_X) {
                dev->eq_table.clr_mask = 0;
        } else {
                dev->eq_table.clr_mask =
                        swab32(1 << (dev->eq_table.inta_pin & 31));
                dev->eq_table.clr_int  = dev->clr_base +
                        (dev->eq_table.inta_pin < 32 ? 4 : 0);
        }

        dev->eq_table.arm_mask = 0;

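        /*
         * The intr value programmed into each EQ context selects the
         * interrupt the HCA raises for that EQ: the INTA pin for
         * legacy interrupts, or -- seemingly by convention, with the
         * high bit set -- 128..130 for the MSI/MSI-X message vectors
         * used below.
         */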
        intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?
                128 : dev->eq_table.inta_pin;

        err = mthca_create_eq(dev, dev->limits.num_cqs,
                              (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
                              &dev->eq_table.eq[MTHCA_EQ_COMP]);
        if (err)
                goto err_out_unmap;

        err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE,
                              (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
                              &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
        if (err)
                goto err_out_comp;

        err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE,
                              (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
                              &dev->eq_table.eq[MTHCA_EQ_CMD]);
        if (err)
                goto err_out_async;

        if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
                static const char *eq_name[] = {
                        [MTHCA_EQ_COMP]  = DRV_NAME " (comp)",
                        [MTHCA_EQ_ASYNC] = DRV_NAME " (async)",
                        [MTHCA_EQ_CMD]   = DRV_NAME " (cmd)"
                };

                for (i = 0; i < MTHCA_NUM_EQ; ++i) {
                        err = request_irq(dev->eq_table.eq[i].msi_x_vector,
                                          mthca_is_memfree(dev) ?
                                          mthca_arbel_msi_x_interrupt :
                                          mthca_tavor_msi_x_interrupt,
                                          0, eq_name[i], dev->eq_table.eq + i);
                        if (err)
                                goto err_out_cmd;
                        dev->eq_table.eq[i].have_irq = 1;
                }
        } else {
                err = request_irq(dev->pdev->irq,
                                  mthca_is_memfree(dev) ?
                                  mthca_arbel_interrupt :
                                  mthca_tavor_interrupt,
                                  SA_SHIRQ, DRV_NAME, dev);
                if (err)
                        goto err_out_cmd;
                dev->eq_table.have_irq = 1;
        }

        err = mthca_MAP_EQ(dev, async_mask(dev),
                           0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
        if (err)
                mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
                           dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);
        if (status)
                mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n",
                           dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status);

        err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
                           0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
        if (err)
                mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
                           dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
        if (status)
                mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
                           dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);

        for (i = 0; i < MTHCA_EQ_CMD; ++i)
                if (mthca_is_memfree(dev))
                        arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
                else
                        tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);

        return 0;

err_out_cmd:
        mthca_free_irqs(dev);
        mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);

err_out_async:
        mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);

err_out_comp:
        mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);

err_out_unmap:
        mthca_unmap_eq_regs(dev);

err_out_free:
        mthca_alloc_cleanup(&dev->eq_table.alloc);
        return err;
}

void __devexit mthca_cleanup_eq_table(struct mthca_dev *dev)
{
        u8 status;
        int i;

        mthca_free_irqs(dev);

        mthca_MAP_EQ(dev, async_mask(dev),
                     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
        mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
                     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);

        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                mthca_free_eq(dev, &dev->eq_table.eq[i]);

        mthca_unmap_eq_regs(dev);

        mthca_alloc_cleanup(&dev->eq_table.alloc);
}