/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "fw.h"

enum {
        MLX4_NUM_ASYNC_EQE = 0x100,
        MLX4_NUM_SPARE_EQE = 0x80,
        MLX4_EQ_ENTRY_SIZE = 0x20
};

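/*
 * Sizing note (assuming 4 KB pages): each EQE is MLX4_EQ_ENTRY_SIZE =
 * 32 bytes, so one page holds 128 entries.  The async EQ is created
 * with 0x100 + 0x80 = 384 entries, which mlx4_create_eq() rounds up to
 * the next power of two (512), i.e. 16 KB or four pages.
 */
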
/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mlx4_eq_context {
        __be32          flags;
        u16             reserved1[3];
        __be16          page_offset;
        u8              log_eq_size;
        u8              reserved2[4];
        u8              eq_period;
        u8              reserved3;
        u8              eq_max_count;
        u8              reserved4[3];
        u8              intr;
        u8              log_page_size;
        u8              reserved5[2];
        u8              mtt_base_addr_h;
        __be32          mtt_base_addr_l;
        u32             reserved6[2];
        __be32          consumer_index;
        __be32          producer_index;
        u32             reserved7[4];
};

#define MLX4_EQ_STATUS_OK          ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW           ( 0 << 24)
#define MLX4_EQ_OWNER_HW           ( 1 << 24)
#define MLX4_EQ_FLAG_EC            ( 1 << 18)
#define MLX4_EQ_FLAG_OI            ( 1 << 17)
#define MLX4_EQ_STATE_ARMED        ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED        (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

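/*
 * Layout note (as implied by the shifts above): these values all land
 * in the EQ context "flags" word -- status in bits 31:28, ownership in
 * bit 24, the EC and OI flags in bits 18 and 17, and the EQ state in
 * bits 11:8.
 */
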
#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)           | \
                               (1ull << MLX4_EVENT_TYPE_COMM_EST)           | \
                               (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)         | \
                               (1ull << MLX4_EVENT_TYPE_CQ_ERROR)           | \
                               (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                               (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
                               (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)        | \
                               (1ull << MLX4_EVENT_TYPE_ECC_DETECT)         | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)          | \
                               (1ull << MLX4_EVENT_TYPE_CMD))

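/*
 * This mask is handed to the MAP_EQ firmware command in
 * mlx4_init_eq_table() below, subscribing the single async EQ to all
 * of the events listed; completion events are delivered to the
 * per-vector completion EQs instead.
 */
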
struct mlx4_eqe {
        u8                      reserved1;
        u8                      type;
        u8                      reserved2;
        u8                      subtype;
        union {
                u32             raw[6];
                struct {
                        __be32  cqn;
                } __attribute__((packed)) comp;
                struct {
                        u16     reserved1;
                        __be16  token;
                        u32     reserved2;
                        u8      reserved3[3];
                        u8      status;
                        __be64  out_param;
                } __attribute__((packed)) cmd;
                struct {
                        __be32  qpn;
                } __attribute__((packed)) qp;
                struct {
                        __be32  srqn;
                } __attribute__((packed)) srq;
                struct {
                        __be32  cqn;
                        u32     reserved1;
                        u8      reserved2[3];
                        u8      syndrome;
                } __attribute__((packed)) cq_err;
                struct {
                        u32     reserved1[2];
                        __be32  port;
                } __attribute__((packed)) port_change;
        }                       event;
        u8                      reserved3[3];
        u8                      owner;
} __attribute__((packed));

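/*
 * Layout note: each 32-byte EQE carries a type/subtype pair, a 24-byte
 * event-specific payload decoded through the union above, and an owner
 * byte as its final byte; only bit 7 of the owner byte participates in
 * the ownership check in next_eqe_sw() below.
 */
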
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
        __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
                                               req_not << 31),
                     eq->doorbell);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

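/*
 * Doorbell format note: the 32-bit word written above carries the
 * consumer index in its low 24 bits and a "request notification" flag
 * in bit 31, so eq_set_ci(eq, 1) both acknowledges consumed EQEs and
 * re-arms the EQ to generate another interrupt.
 */
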
static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
        /* nent is a power of two, so the mask handles queue wrap-around */
        unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;

        return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
        struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);

        return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

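/*
 * Ownership sketch: hardware toggles the polarity of the owner bit it
 * writes on each full pass through the queue, while !!(cons_index & nent)
 * tracks the software-side pass parity (nent is a power of two and
 * cons_index counts up without wrapping).  An EQE is ours only while
 * the two agree, so entries left over from the previous pass fail the
 * test and terminate the poll loop in mlx4_eq_int() below.
 */
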
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
        struct mlx4_eqe *eqe;
        int cqn;
        int eqes_found = 0;
        int set_ci = 0;
        int port;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                rmb();

                switch (eqe->type) {
                case MLX4_EVENT_TYPE_COMP:
                        cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
                        mlx4_cq_completion(dev, cqn);
                        break;

                case MLX4_EVENT_TYPE_PATH_MIG:
                case MLX4_EVENT_TYPE_COMM_EST:
                case MLX4_EVENT_TYPE_SQ_DRAINED:
                case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
                case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
                case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
                        mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                      eqe->type);
                        break;

                case MLX4_EVENT_TYPE_SRQ_LIMIT:
                case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
                        mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
                                       eqe->type);
                        break;

                case MLX4_EVENT_TYPE_CMD:
                        mlx4_cmd_event(dev,
                                       be16_to_cpu(eqe->event.cmd.token),
                                       eqe->event.cmd.status,
                                       be64_to_cpu(eqe->event.cmd.out_param));
                        break;

                case MLX4_EVENT_TYPE_PORT_CHANGE:
                        port = be32_to_cpu(eqe->event.port_change.port) >> 28;
                        if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
                                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
                                                    port);
                                mlx4_priv(dev)->sense.do_sense_port[port] = 1;
                        } else {
                                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
                                                    port);
                                mlx4_priv(dev)->sense.do_sense_port[port] = 0;
                        }
                        break;

                case MLX4_EVENT_TYPE_CQ_ERROR:
                        mlx4_warn(dev, "CQ %s on CQN %06x\n",
                                  eqe->event.cq_err.syndrome == 1 ?
                                  "overrun" : "access violation",
                                  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
                        mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
                                      eqe->type);
                        break;

                case MLX4_EVENT_TYPE_EQ_OVERFLOW:
                        mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
                        break;

                case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
                case MLX4_EVENT_TYPE_ECC_DETECT:
                default:
                        mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
                                  eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
                        break;
                }

                ++eq->cons_index;
                eqes_found = 1;
                ++set_ci;

                /*
                 * The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events.  We
                 * create our EQs with MLX4_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
                        eq_set_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_set_ci(eq, 1);

        return eqes_found;
}

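/*
 * Cadence note: with MLX4_NUM_SPARE_EQE = 0x80, the loop above pushes
 * the consumer index to hardware at least once every 128 events,
 * matching the spare entries the EQs were created with, and the final
 * eq_set_ci(eq, 1) acknowledges the remainder and re-arms the EQ.
 */
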
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
        struct mlx4_dev *dev = dev_ptr;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int work = 0;
        int i;

        writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

        return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
        struct mlx4_eq  *eq  = eq_ptr;
        struct mlx4_dev *dev = eq->dev;

        mlx4_eq_int(dev, eq);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
                       int eq_num)
{
        return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
                        0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
{
        return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
                        MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
{
        return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
                            MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
        /*
         * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
         * we need to map, take the difference of highest index and
         * the lowest index we'll use and add 1.
         */
        return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
                dev->caps.reserved_eqs / 4 + 1;
}

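/*
 * Worked example (illustrative numbers, not from any particular HCA):
 * with reserved_eqs = 2 and num_comp_vectors = 4, the EQNs used are
 * 2..6, so the formula gives (4 + 1 + 2) / 4 - 2 / 4 + 1 = 1 - 0 + 1 =
 * 2 UAR pages.
 */
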
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int index;

        index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

        if (!priv->eq_table.uar_map[index]) {
                priv->eq_table.uar_map[index] =
                        ioremap(pci_resource_start(dev->pdev, 2) +
                                ((eq->eqn / 4) << PAGE_SHIFT),
                                PAGE_SIZE);
                if (!priv->eq_table.uar_map[index]) {
                        mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
                                 eq->eqn);
                        return NULL;
                }
        }

        return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

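/*
 * Address sketch for the return value above: EQN 6 lives in UAR page
 * 6 / 4 = 1, and its doorbell sits at offset 0x800 + 8 * (6 % 4) =
 * 0x810 within that page.  UAR pages are mapped lazily, one ioremap()
 * per page, and each is shared by the up to four EQs that live in it.
 */
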
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
                          u8 intr, struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_eq_context *eq_context;
        int npages;
        u64 *dma_list = NULL;
        dma_addr_t t;
        u64 mtt_addr;
        int err = -ENOMEM;
        int i;

        eq->dev  = dev;
        eq->nent = roundup_pow_of_two(max(nent, 2));
        npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

        eq->page_list = kmalloc(npages * sizeof *eq->page_list,
                                GFP_KERNEL);
        if (!eq->page_list)
                goto err_out;

        for (i = 0; i < npages; ++i)
                eq->page_list[i].buf = NULL;

        dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
        if (!dma_list)
                goto err_out_free;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                goto err_out_free;
        eq_context = mailbox->buf;

        for (i = 0; i < npages; ++i) {
                eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
                                                          PAGE_SIZE, &t, GFP_KERNEL);
                if (!eq->page_list[i].buf)
                        goto err_out_free_pages;

                dma_list[i] = t;
                eq->page_list[i].map = t;

                memset(eq->page_list[i].buf, 0, PAGE_SIZE);
        }

        eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
        if (eq->eqn == -1)
                goto err_out_free_pages;

        eq->doorbell = mlx4_get_eq_uar(dev, eq);
        if (!eq->doorbell) {
                err = -ENOMEM;
                goto err_out_free_eq;
        }

        err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
        if (err)
                goto err_out_free_eq;

        err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
        if (err)
                goto err_out_free_mtt;

        memset(eq_context, 0, sizeof *eq_context);
        eq_context->flags         = cpu_to_be32(MLX4_EQ_STATUS_OK |
                                                MLX4_EQ_STATE_ARMED);
        eq_context->log_eq_size   = ilog2(eq->nent);
        eq_context->intr          = intr;
        eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

        mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
        eq_context->mtt_base_addr_h = mtt_addr >> 32;
        eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

        err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
        if (err) {
                mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
                goto err_out_free_mtt;
        }

        kfree(dma_list);
        mlx4_free_cmd_mailbox(dev, mailbox);

        eq->cons_index = 0;

        return err;

err_out_free_mtt:
        mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
        mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
        for (i = 0; i < npages; ++i)
                if (eq->page_list[i].buf)
                        dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                          eq->page_list[i].buf,
                                          eq->page_list[i].map);

        mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
        kfree(eq->page_list);
        kfree(dma_list);

err_out:
        return err;
}

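/*
 * Creation sequence recap: allocate the page list, DMA-map one
 * coherent page at a time, reserve an EQN from the bitmap, resolve the
 * doorbell UAR, build an MTT over the pages, then hand the context to
 * firmware with SW2HW_EQ.  Teardown (mlx4_free_eq below) runs the same
 * steps in reverse, starting with HW2SW_EQ.
 */
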
static void mlx4_free_eq(struct mlx4_dev *dev,
                         struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        int err;
        int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
        int i;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return;

        err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
        if (err)
                mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

        if (0) {
                mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
                for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
                        if (i % 4 == 0)
                                printk("[%02x] ", i * 4);
                        printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
                        if ((i + 1) % 4 == 0)
                                printk("\n");
                }
        }

        mlx4_mtt_cleanup(dev, &eq->mtt);
        for (i = 0; i < npages; ++i)
                pci_free_consistent(dev->pdev, PAGE_SIZE,
                                    eq->page_list[i].buf,
                                    eq->page_list[i].map);

        kfree(eq->page_list);
        mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
        mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
        struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
        int i;

        if (eq_table->have_irq)
                free_irq(dev->pdev->irq, dev);
        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                if (eq_table->eq[i].have_irq) {
                        free_irq(eq_table->eq[i].irq, eq_table->eq + i);
                        eq_table->eq[i].have_irq = 0;
                }

        kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
                                 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
        if (!priv->clr_base) {
                mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
                return -ENOMEM;
        }

        return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        iounmap(priv->clr_base);
}

int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int ret;

        /*
         * We assume that mapping one page is enough for the whole EQ
         * context table.  This is fine with all current HCAs, because
         * we only use 32 EQs and each EQ uses 64 bytes of context
         * memory, or 2 KB total.
         */
        priv->eq_table.icm_virt = icm_virt;
        priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
        if (!priv->eq_table.icm_page)
                return -ENOMEM;
        priv->eq_table.icm_dma  = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
                __free_page(priv->eq_table.icm_page);
                return -ENOMEM;
        }

        ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
        if (ret) {
                pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
                               PCI_DMA_BIDIRECTIONAL);
                __free_page(priv->eq_table.icm_page);
        }

        return ret;
}

void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
        pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
                       PCI_DMA_BIDIRECTIONAL);
        __free_page(priv->eq_table.icm_page);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
                                    sizeof *priv->eq_table.eq, GFP_KERNEL);
        if (!priv->eq_table.eq)
                return -ENOMEM;

        return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
        kfree(mlx4_priv(dev)->eq_table.eq);
}

int mlx4_init_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;
        int i;

        /* kcalloc takes (count, size); keep the arguments in that order */
        priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
                                         sizeof *priv->eq_table.uar_map, GFP_KERNEL);
        if (!priv->eq_table.uar_map) {
                err = -ENOMEM;
                goto err_out_free;
        }

        err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
                               dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
        if (err)
                goto err_out_free;

        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                priv->eq_table.uar_map[i] = NULL;

        err = mlx4_map_clr_int(dev);
        if (err)
                goto err_out_bitmap;

        priv->eq_table.clr_mask =
                swab32(1 << (priv->eq_table.inta_pin & 31));
        priv->eq_table.clr_int  = priv->clr_base +
                (priv->eq_table.inta_pin < 32 ? 4 : 0);

        priv->eq_table.irq_names = kmalloc(16 * dev->caps.num_comp_vectors, GFP_KERNEL);
        if (!priv->eq_table.irq_names) {
                err = -ENOMEM;
                goto err_out_bitmap;
        }

        for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
                err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
                                     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
                                     &priv->eq_table.eq[i]);
                if (err) {
                        --i;
                        goto err_out_unmap;
                }
        }

        err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
                             (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
                             &priv->eq_table.eq[dev->caps.num_comp_vectors]);
        if (err)
                goto err_out_comp;

        if (dev->flags & MLX4_FLAG_MSI_X) {
                static const char async_eq_name[] = "mlx4-async";
                const char *eq_name;

                for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
                        if (i < dev->caps.num_comp_vectors) {
                                snprintf(priv->eq_table.irq_names + i * 16, 16,
                                         "mlx4-comp-%d", i);
                                eq_name = priv->eq_table.irq_names + i * 16;
                        } else
                                eq_name = async_eq_name;

                        err = request_irq(priv->eq_table.eq[i].irq,
                                          mlx4_msi_x_interrupt, 0, eq_name,
                                          priv->eq_table.eq + i);
                        if (err)
                                goto err_out_async;

                        priv->eq_table.eq[i].have_irq = 1;
                }
        } else {
                err = request_irq(dev->pdev->irq, mlx4_interrupt,
                                  IRQF_SHARED, DRV_NAME, dev);
                if (err)
                        goto err_out_async;

                priv->eq_table.have_irq = 1;
        }

        err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
                          priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
        if (err)
                mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
                          priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);

        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                eq_set_ci(&priv->eq_table.eq[i], 1);

        return 0;

err_out_async:
        mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);

err_out_comp:
        i = dev->caps.num_comp_vectors - 1;

err_out_unmap:
        while (i >= 0) {
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);
                --i;
        }

        mlx4_unmap_clr_int(dev);
        mlx4_free_irqs(dev);

err_out_bitmap:
        mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
        kfree(priv->eq_table.uar_map);

        return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
                    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);

        mlx4_free_irqs(dev);

        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);

        mlx4_unmap_clr_int(dev);

        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                if (priv->eq_table.uar_map[i])
                        iounmap(priv->eq_table.uar_map[i]);

        mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

        kfree(priv->eq_table.uar_map);
}