/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mlx4_eq_context {
	__be32			flags;
	u16			reserved1[3];
	__be16			page_offset;
	u8			log_eq_size;
	u8			reserved2[4];
	u8			eq_period;
	u8			reserved3;
	u8			eq_max_count;
	u8			reserved4[3];
	u8			intr;
	u8			log_page_size;
	u8			reserved5[2];
	u8			mtt_base_addr_h;
	__be32			mtt_base_addr_l;
	u32			reserved6[2];
	__be32			consumer_index;
	__be32			producer_index;
	u32			reserved7[4];
};

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD))

struct mlx4_eqe {
	u8			reserved1;
	u8			type;
	u8			reserved2;
	u8			subtype;
	union {
		u32		raw[6];
		struct {
			__be32	cqn;
		} __attribute__((packed)) comp;
		struct {
			u16	reserved1;
			__be16	token;
			u32	reserved2;
			u8	reserved3[3];
			u8	status;
			__be64	out_param;
		} __attribute__((packed)) cmd;
		struct {
			__be32	qpn;
		} __attribute__((packed)) qp;
		struct {
			__be32	srqn;
		} __attribute__((packed)) srq;
		struct {
			__be32	cqn;
			u32	reserved1;
			u8	reserved2[3];
			u8	syndrome;
		} __attribute__((packed)) cq_err;
		struct {
			u32	reserved1[2];
			__be32	port;
		} __attribute__((packed)) port_change;
	} event;
	u8			reserved3[3];
	u8			owner;
} __attribute__((packed));

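/*
 * Ring the EQ doorbell: write the low 24 bits of the consumer index,
 * with bit 31 set when the caller (req_not, "request notification")
 * also wants the HCA to raise another interrupt when new events land.
 */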
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

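/*
 * eq->nent is always a power of two, so "entry & (eq->nent - 1)" wraps
 * the index around the ring; the byte offset is then resolved into the
 * (possibly non-contiguous) pages that back the EQ buffer.
 */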
static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

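/*
 * The ownership bit changes polarity on every lap around the ring:
 * "cons_index & eq->nent" gives the polarity that means "software owns
 * this EQE" on the current lap.  When the owner bit and the lap bit
 * differ we have caught up with hardware and return NULL.
 */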
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

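/*
 * Poll one EQ: walk the ring while software owns the next EQE,
 * dispatch each event to the CQ/QP/SRQ/command handlers, and keep the
 * consumer index current.  Returns nonzero if any EQEs were consumed.
 */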
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe;
	int cqn;
	int eqes_found = 0;
	int set_ci = 0;
	int port;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
				       eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MLX4_EVENT_TYPE_PORT_CHANGE:
			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
			} else {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 0;
			}
			break;

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
				  eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	return eqes_found;
}

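/*
 * Legacy (INTA) interrupt handler: acknowledge the interrupt via the
 * clear register, then poll every EQ, since a single shared pin gives
 * no indication of which EQ fired.
 */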
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}

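/*
 * MSI-X handler: each vector is bound to exactly one EQ, so we poll
 * just that EQ and always report the interrupt as handled.
 */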
static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq *eq  = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

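/*
 * Thin wrappers around the firmware commands that bind an event mask
 * to an EQ (MAP_EQ) and pass EQ context ownership between software and
 * hardware (SW2HW_EQ/HW2SW_EQ).
 */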
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
			int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
			MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
			    MLX4_CMD_TIME_CLASS_A);
}

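/*
 * Worked example (hypothetical numbers): with reserved_eqs = 16 and
 * num_comp_vectors = 4, the highest EQ index we use is 20, so we need
 * (4 + 1 + 16)/4 - 16/4 + 1 = 2 UAR pages: one holding the doorbells
 * for EQs 16-19 and one for EQ 20.
 */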
static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
	 */
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
		dev->caps.reserved_eqs / 4 + 1;
}

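/*
 * Each UAR page holds the doorbells for four consecutive EQs, spaced
 * 8 bytes apart starting at offset 0x800; map the page lazily on first
 * use and return a pointer to this EQ's doorbell within it.
 */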
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(pci_resource_start(dev->pdev, 2) +
				((eq->eqn / 4) << PAGE_SHIFT),
				PAGE_SIZE);
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

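/*
 * Create one EQ: allocate coherent pages for the ring, record their
 * DMA addresses in an MTT, fill in the EQ context (size, interrupt
 * vector, MTT base), and hand the context to firmware with SW2HW_EQ.
 */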
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev  = dev;
	eq->nent = roundup_pow_of_two(max(nent, 2));
	npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}

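/*
 * Tear down an EQ in the reverse order of mlx4_create_eq: reclaim the
 * context from firmware with HW2SW_EQ, then free the MTT, the ring
 * pages, and finally the EQ number.
 */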
static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return;

	err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	if (0) {
		mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		pci_free_consistent(dev->pdev, PAGE_SIZE,
				    eq->page_list[i].buf,
				    eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	int i;

	if (eq_table->have_irq)
		free_irq(dev->pdev->irq, dev);
	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		if (eq_table->eq[i].have_irq) {
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
			eq_table->eq[i].have_irq = 0;
		}

	kfree(eq_table->irq_names);
}

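/*
 * The interrupt-clear register lives at a firmware-specified BAR and
 * offset (priv->fw.clr_int_bar/clr_int_base); map it so the INTA
 * handler can acknowledge interrupts.
 */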
static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
		return -ENOMEM;
	}

	return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}

int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int ret;

	/*
	 * We assume that mapping one page is enough for the whole EQ
	 * context table.  This is fine with all current HCAs, because
	 * we only use 32 EQs and each EQ uses 64 bytes of context
	 * memory, or 1 KB total.
	 */
	priv->eq_table.icm_virt = icm_virt;
	priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
	if (!priv->eq_table.icm_page)
		return -ENOMEM;
	priv->eq_table.icm_dma  = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
		__free_page(priv->eq_table.icm_page);
		return -ENOMEM;
	}

	ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
	if (ret) {
		pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		__free_page(priv->eq_table.icm_page);
	}

	return ret;
}

void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
	pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
		       PCI_DMA_BIDIRECTIONAL);
	__free_page(priv->eq_table.icm_page);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
				    sizeof *priv->eq_table.eq, GFP_KERNEL);
	if (!priv->eq_table.eq)
		return -ENOMEM;

	return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
	kfree(mlx4_priv(dev)->eq_table.eq);
}

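/*
 * Set up the whole EQ table: one completion EQ per interrupt vector
 * plus a final EQ for async events, wired either to individual MSI-X
 * vectors or to the single shared INTA pin.
 */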
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
					 sizeof *priv->eq_table.uar_map,
					 GFP_KERNEL);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	err = mlx4_map_clr_int(dev);
	if (err)
		goto err_out_bitmap;

	priv->eq_table.clr_mask =
		swab32(1 << (priv->eq_table.inta_pin & 31));
	priv->eq_table.clr_int  = priv->clr_base +
		(priv->eq_table.inta_pin < 32 ? 4 : 0);

	priv->eq_table.irq_names = kmalloc(16 * dev->caps.num_comp_vectors, GFP_KERNEL);
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_bitmap;
	}

	for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
		err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err)
			goto err_out_unmap;
	}

	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
			     &priv->eq_table.eq[dev->caps.num_comp_vectors]);
	if (err)
		goto err_out_comp;

	if (dev->flags & MLX4_FLAG_MSI_X) {
		static const char async_eq_name[] = "mlx4-async";
		const char *eq_name;

		for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
			if (i < dev->caps.num_comp_vectors) {
				snprintf(priv->eq_table.irq_names + i * 16, 16,
					 "mlx4-comp-%d", i);
				eq_name = priv->eq_table.irq_names + i * 16;
			} else
				eq_name = async_eq_name;

			err = request_irq(priv->eq_table.eq[i].irq,
					  mlx4_msi_x_interrupt, 0, eq_name,
					  priv->eq_table.eq + i);
			if (err)
				goto err_out_async;

			priv->eq_table.eq[i].have_irq = 1;
		}
	} else {
		err = request_irq(dev->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, DRV_NAME, dev);
		if (err)
			goto err_out_async;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

	return 0;

err_out_async:
	mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);

err_out_comp:
	i = dev->caps.num_comp_vectors - 1;

err_out_unmap:
	while (i >= 0) {
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
		--i;
	}
	mlx4_unmap_clr_int(dev);
	mlx4_free_irqs(dev);

err_out_bitmap:
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	kfree(priv->eq_table.uar_map);

	return err;
}

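/*
 * Undo mlx4_init_eq_table: unmap the async event mask, release IRQs,
 * destroy all EQs, and unmap the doorbell and interrupt-clear pages.
 */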
void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);

	mlx4_free_irqs(dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

	mlx4_unmap_clr_int(dev);

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		if (priv->eq_table.uar_map[i])
			iounmap(priv->eq_table.uar_map[i]);

	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

	kfree(priv->eq_table.uar_map);
}