/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <asm/delay.h>

#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

#include "cxio_resource.h"
#include "cxio_hal.h"
#include "cxgb3_offload.h"
46 static LIST_HEAD(rdev_list);
47 static cxio_hal_ev_callback_func_t cxio_ev_cb = NULL;
49 static struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
51 struct cxio_rdev *rdev;
53 list_for_each_entry(rdev, &rdev_list, entry)
54 if (!strcmp(rdev->dev_name, dev_name))
59 static struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev *tdev)
61 struct cxio_rdev *rdev;
63 list_for_each_entry(rdev, &rdev_list, entry)
64 if (rdev->t3cdev_p == tdev)
69 int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
70 enum t3_cq_opcode op, u32 credit)
76 struct rdma_cq_op setup;
78 setup.credits = (op == CQ_CREDIT_UPDATE) ? credit : 0;
80 ret = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_OP, &setup);
82 if ((ret < 0) || (op == CQ_CREDIT_UPDATE))
/*
 * If the rearm returned an index other than our current index,
 * then there might be CQEs in flight (being DMA'd).  We must wait
 * here for them to complete or the consumer can miss a notification.
 */
90 if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) {
96 * Keep the generation correct by bumping rptr until it
97 * matches the index returned by the rearm - 1.
99 while (Q_PTR2IDX((rptr+1), cq->size_log2) != ret)
103 * Now rptr is the index for the (last) cqe that was
104 * in-flight at the time the HW rearmed the CQ. We
105 * spin until that CQE is valid.
107 cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
108 while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
112 printk(KERN_ERR "%s: stalled rnic\n",
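/*
 * Disable a CQ context in hardware: judging by the setup fields below, an
 * RDMA_CQ_SETUP control call with size 0 and a NULL base address tells the
 * SGE to tear the CQ down.
 */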
121 static int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
123 struct rdma_cq_setup setup;
125 setup.base_addr = 0; /* NULL address */
setup.size = 0;	/* disable the CQ */
128 setup.credit_thres = 0;
130 return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
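/*
 * Reset a QP context by posting a T3_WR_QP_MOD work request on the
 * control channel (a CPL_PRIORITY_CONTROL skb sent via cxgb3_ofld_send).
 */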
133 static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
136 struct t3_modify_qp_wr *wqe;
137 struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
139 PDBG("%s alloc_skb failed\n", __FUNCTION__);
142 wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
143 memset(wqe, 0, sizeof(*wqe));
144 build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 3, 1, qpid, 7);
145 wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
146 sge_cmd = qpid << 8 | 3;
147 wqe->sge_cmd = cpu_to_be64(sge_cmd);
148 skb->priority = CPL_PRIORITY_CONTROL;
149 return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
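/*
 * Create a completion queue: allocate a CQ id and a host software queue
 * (used for flushed and reordered entries), carve out a DMA-coherent ring
 * for the hardware, and program the CQ context with an RDMA_CQ_SETUP call.
 */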
152 int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
154 struct rdma_cq_setup setup;
155 int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
157 cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
160 cq->sw_queue = kzalloc(size, GFP_KERNEL);
163 cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
164 (1UL << (cq->size_log2)) *
165 sizeof(struct t3_cqe),
166 &(cq->dma_addr), GFP_KERNEL);
171 pci_unmap_addr_set(cq, mapping, cq->dma_addr);
172 memset(cq->queue, 0, size);
174 setup.base_addr = (u64) (cq->dma_addr);
175 setup.size = 1UL << cq->size_log2;
176 setup.credits = 65535;
177 setup.credit_thres = 1;
178 if (rdev_p->t3cdev_p->type == T3B)
182 return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
185 int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
187 struct rdma_cq_setup setup;
189 setup.base_addr = (u64) (cq->dma_addr);
190 setup.size = 1UL << cq->size_log2;
191 setup.credits = setup.size;
192 setup.credit_thres = setup.size; /* TBD: overflow recovery */
194 return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
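/*
 * QP ids are handed out in blocks of (qpmask + 1): when the per-context
 * cache is empty, a new id is drawn from the resource pool and the other
 * ids in that block are queued on uctx->qpids for later reuse.
 */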
197 static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
199 struct cxio_qpid_list *entry;
203 mutex_lock(&uctx->lock);
204 if (!list_empty(&uctx->qpids)) {
205 entry = list_entry(uctx->qpids.next, struct cxio_qpid_list,
207 list_del(&entry->entry);
211 qpid = cxio_hal_get_qpid(rdev_p->rscp);
214 for (i = qpid+1; i & rdev_p->qpmask; i++) {
215 entry = kmalloc(sizeof *entry, GFP_KERNEL);
219 list_add_tail(&entry->entry, &uctx->qpids);
223 mutex_unlock(&uctx->lock);
224 PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
228 static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid,
229 struct cxio_ucontext *uctx)
231 struct cxio_qpid_list *entry;
233 entry = kmalloc(sizeof *entry, GFP_KERNEL);
236 PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
238 mutex_lock(&uctx->lock);
239 list_add_tail(&entry->entry, &uctx->qpids);
240 mutex_unlock(&uctx->lock);
243 void cxio_release_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
245 struct list_head *pos, *nxt;
246 struct cxio_qpid_list *entry;
248 mutex_lock(&uctx->lock);
249 list_for_each_safe(pos, nxt, &uctx->qpids) {
250 entry = list_entry(pos, struct cxio_qpid_list, entry);
251 list_del_init(&entry->entry);
252 if (!(entry->qpid & rdev_p->qpmask))
253 cxio_hal_put_qpid(rdev_p->rscp, entry->qpid);
256 mutex_unlock(&uctx->lock);
259 void cxio_init_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
261 INIT_LIST_HEAD(&uctx->qpids);
262 mutex_init(&uctx->lock);
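/*
 * Build a work queue: a shadow array of RQ wr_ids, an RQT allocation for
 * the hardware receive queue, a software SQ, and one DMA-coherent ring
 * shared by SQ and RQ work requests.  The user doorbell (udb) address is
 * derived from the qpid via qpshift; the kernel doorbell is kdb_addr.
 */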
265 int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
266 struct t3_wq *wq, struct cxio_ucontext *uctx)
268 int depth = 1UL << wq->size_log2;
269 int rqsize = 1UL << wq->rq_size_log2;
271 wq->qpid = get_qpid(rdev_p, uctx);
275 wq->rq = kzalloc(depth * sizeof(u64), GFP_KERNEL);
279 wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize);
283 wq->sq = kzalloc(depth * sizeof(struct t3_swsq), GFP_KERNEL);
287 wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
288 depth * sizeof(union t3_wr),
289 &(wq->dma_addr), GFP_KERNEL);
293 memset(wq->queue, 0, depth * sizeof(union t3_wr));
294 pci_unmap_addr_set(wq, mapping, wq->dma_addr);
295 wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
297 wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
298 (wq->qpid << rdev_p->qpshift);
299 PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __FUNCTION__,
300 wq->qpid, wq->doorbell, (unsigned long long) wq->udb);
305 cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, rqsize);
309 put_qpid(rdev_p, wq->qpid, uctx);
313 int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
316 err = cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
318 dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
319 (1UL << (cq->size_log2))
320 * sizeof(struct t3_cqe), cq->queue,
321 pci_unmap_addr(cq, mapping));
322 cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
326 int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
327 struct cxio_ucontext *uctx)
329 dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
330 (1UL << (wq->size_log2))
331 * sizeof(union t3_wr), wq->queue,
332 pci_unmap_addr(wq, mapping));
334 cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
336 put_qpid(rdev_p, wq->qpid, uctx);
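/*
 * Flush support: the routines below synthesize TPT_ERR_SWFLUSH completions
 * into the software CQ for work requests still outstanding when a QP is
 * flushed, so they can be reaped without further hardware involvement.
 */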
340 static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
344 PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
345 wq, cq, cq->sw_rptr, cq->sw_wptr);
346 memset(&cqe, 0, sizeof(cqe));
347 cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
348 V_CQE_OPCODE(T3_SEND) |
351 V_CQE_QPID(wq->qpid) |
352 V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
354 *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
358 void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
362 PDBG("%s wq %p cq %p\n", __FUNCTION__, wq, cq);
365 PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __FUNCTION__,
366 wq->rq_rptr, wq->rq_wptr, count);
367 ptr = wq->rq_rptr + count;
368 while (ptr++ != wq->rq_wptr)
369 insert_recv_cqe(wq, cq);
372 static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
377 PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
378 wq, cq, cq->sw_rptr, cq->sw_wptr);
379 memset(&cqe, 0, sizeof(cqe));
380 cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
381 V_CQE_OPCODE(sqp->opcode) |
384 V_CQE_QPID(wq->qpid) |
385 V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
387 cqe.u.scqe.wrid_hi = sqp->sq_wptr;
389 *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
393 void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
396 struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);
398 ptr = wq->sq_rptr + count;
400 while (ptr != wq->sq_wptr) {
401 insert_sq_cqe(wq, cq, sqp);
408 * Move all CQEs from the HWCQ into the SWCQ.
410 void cxio_flush_hw_cq(struct t3_cq *cq)
412 struct t3_cqe *cqe, *swcqe;
414 PDBG("%s cq %p cqid 0x%x\n", __FUNCTION__, cq, cq->cqid);
415 cqe = cxio_next_hw_cqe(cq);
417 PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
418 __FUNCTION__, cq->rptr, cq->sw_wptr);
419 swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
421 swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
424 cqe = cxio_next_hw_cqe(cq);
428 static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
430 if (CQE_OPCODE(*cqe) == T3_TERMINATE)
433 if ((CQE_OPCODE(*cqe) == T3_RDMA_WRITE) && RQ_TYPE(*cqe))
436 if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
439 if ((CQE_OPCODE(*cqe) == T3_SEND) && RQ_TYPE(*cqe) &&
440 Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
446 void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
453 while (!Q_EMPTY(ptr, cq->sw_wptr)) {
454 cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
455 if ((SQ_TYPE(*cqe) || (CQE_OPCODE(*cqe) == T3_READ_RESP)) &&
456 (CQE_QPID(*cqe) == wq->qpid))
460 PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
463 void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
469 PDBG("%s count zero %d\n", __FUNCTION__, *count);
471 while (!Q_EMPTY(ptr, cq->sw_wptr)) {
472 cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
473 if (RQ_TYPE(*cqe) && (CQE_OPCODE(*cqe) != T3_READ_RESP) &&
474 (CQE_QPID(*cqe) == wq->qpid) && cqe_completes_wr(cqe, wq))
478 PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
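/*
 * Set up CQ 0, which the control QP completes into.  Judging by the NULL
 * base address and the RspQ comment below, its entries are not DMA'd to a
 * host ring but delivered via the offload response queue.
 */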
481 static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)
483 struct rdma_cq_setup setup;
485 setup.base_addr = 0; /* NULL address */
486 setup.size = 1; /* enable the CQ */
489 /* force SGE to redirect to RspQ and interrupt */
490 setup.credit_thres = 0;
492 return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
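/*
 * Bring up the control QP: allocate its DMA-coherent work queue, then
 * program the hardware egress context (base address, size, credits, gen
 * bit, up-token) with the T3_WR_QP_MOD work request built below.
 */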
495 static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
498 u64 sge_cmd, ctx0, ctx1;
500 struct t3_modify_qp_wr *wqe;
503 skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
505 PDBG("%s alloc_skb failed\n", __FUNCTION__);
508 err = cxio_hal_init_ctrl_cq(rdev_p);
510 PDBG("%s err %d initializing ctrl_cq\n", __FUNCTION__, err);
513 rdev_p->ctrl_qp.workq = dma_alloc_coherent(
514 &(rdev_p->rnic_info.pdev->dev),
515 (1 << T3_CTRL_QP_SIZE_LOG2) *
517 &(rdev_p->ctrl_qp.dma_addr),
519 if (!rdev_p->ctrl_qp.workq) {
520 PDBG("%s dma_alloc_coherent failed\n", __FUNCTION__);
524 pci_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
525 rdev_p->ctrl_qp.dma_addr);
526 rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
527 memset(rdev_p->ctrl_qp.workq, 0,
528 (1 << T3_CTRL_QP_SIZE_LOG2) * sizeof(union t3_wr));
530 mutex_init(&rdev_p->ctrl_qp.lock);
531 init_waitqueue_head(&rdev_p->ctrl_qp.waitq);
533 /* update HW Ctrl QP context */
534 base_addr = rdev_p->ctrl_qp.dma_addr;
536 ctx0 = (V_EC_SIZE((1 << T3_CTRL_QP_SIZE_LOG2)) |
537 V_EC_BASE_LO((u32) base_addr & 0xffff));
539 ctx0 |= V_EC_CREDITS(FW_WR_NUM);
541 ctx1 = (u32) base_addr;
543 ctx1 |= ((u64) (V_EC_BASE_HI((u32) base_addr & 0xf) | V_EC_RESPQ(0) |
544 V_EC_TYPE(0) | V_EC_GEN(1) |
545 V_EC_UP_TOKEN(T3_CTL_QP_TID) | F_EC_VALID)) << 32;
546 wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
547 memset(wqe, 0, sizeof(*wqe));
548 build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 1,
550 wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
551 sge_cmd = (3ULL << 56) | FW_RI_SGEEC_START << 8 | 3;
552 wqe->sge_cmd = cpu_to_be64(sge_cmd);
553 wqe->ctx1 = cpu_to_be64(ctx1);
554 wqe->ctx0 = cpu_to_be64(ctx0);
555 PDBG("CtrlQP dma_addr 0x%llx workq %p size %d\n",
556 (unsigned long long) rdev_p->ctrl_qp.dma_addr,
557 rdev_p->ctrl_qp.workq, 1 << T3_CTRL_QP_SIZE_LOG2);
558 skb->priority = CPL_PRIORITY_CONTROL;
559 return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
565 static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
567 dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
568 (1UL << T3_CTRL_QP_SIZE_LOG2)
569 * sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
570 pci_unmap_addr(&rdev_p->ctrl_qp, mapping));
571 return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
/* write len bytes of data into addr (32B aligned address)
 * If data is NULL, clear len bytes of memory to zero.
 * caller acquires the ctrl_qp lock before the call
 */
578 static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
579 u32 len, void *data, int completion)
581 u32 i, nr_wqe, copy_len;
u8 wr_len, utx_len;	/* length in 8 byte flits */
584 enum t3_wr_flags flag;
588 nr_wqe = len % 96 ? len / 96 + 1 : len / 96; /* 96B max per WQE */
589 PDBG("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",
590 __FUNCTION__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
utx_len = 3;	/* in 32B units */
593 for (i = 0; i < nr_wqe; i++) {
594 if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr,
595 T3_CTRL_QP_SIZE_LOG2)) {
PDBG("%s ctrl_qp full wptr 0x%0x rptr 0x%0x, "
597 "wait for more space i %d\n", __FUNCTION__,
598 rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i);
599 if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
600 !Q_FULL(rdev_p->ctrl_qp.rptr,
601 rdev_p->ctrl_qp.wptr,
602 T3_CTRL_QP_SIZE_LOG2))) {
603 PDBG("%s ctrl_qp workq interrupted\n",
607 PDBG("%s ctrl_qp wakeup, continue posting work request "
608 "i %d\n", __FUNCTION__, i);
610 wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
611 (1 << T3_CTRL_QP_SIZE_LOG2)));
613 if (i == (nr_wqe - 1)) {
615 flag = completion ? T3_COMPLETION_FLAG : 0;
617 utx_len = len / 32 + 1;
623 * Force a CQE to return the credit to the workq in case
624 * we posted more than half the max QP size of WRs
627 (i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) {
628 flag = T3_COMPLETION_FLAG;
629 PDBG("%s force completion at i %d\n", __FUNCTION__, i);
632 /* build the utx mem command */
633 wqe += (sizeof(struct t3_bypass_wr) >> 3);
634 utx_cmd = (T3_UTX_MEM_WRITE << 28) | (addr + i * 3);
636 utx_cmd |= (utx_len << 28) | ((utx_len << 2) + 1);
637 *wqe = cpu_to_be64(utx_cmd);
639 copy_data = (u8 *) data + i * 96;
640 copy_len = len > 96 ? 96 : len;
642 /* clear memory content if data is NULL */
644 memcpy(wqe, copy_data, copy_len);
646 memset(wqe, 0, copy_len);
648 memset(((u8 *) wqe) + copy_len, 0,
649 32 - (copy_len % 32));
650 wr_len = ((sizeof(struct t3_bypass_wr)) >> 3) + 1 +
652 wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
653 (1 << T3_CTRL_QP_SIZE_LOG2)));
655 /* wptr in the WRID[31:0] */
656 ((union t3_wrid *)(wqe+1))->id0.low = rdev_p->ctrl_qp.wptr;
/*
 * This must be the last write with a memory barrier
 * for the genbit.
 */
662 build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_BP, flag,
663 Q_GENBIT(rdev_p->ctrl_qp.wptr,
664 T3_CTRL_QP_SIZE_LOG2), T3_CTRL_QP_ID,
666 if (flag == T3_COMPLETION_FLAG)
667 ring_doorbell(rdev_p->ctrl_qp.doorbell, T3_CTRL_QP_ID);
669 rdev_p->ctrl_qp.wptr++;
674 /* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl, and pbl_size
675 * OUT: stag index, actual pbl_size, pbl_addr allocated.
676 * TBD: shared memory region support
678 static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
679 u32 *stag, u8 stag_state, u32 pdid,
680 enum tpt_mem_type type, enum tpt_mem_perm perm,
681 u32 zbva, u64 to, u32 len, u8 page_size, __be64 *pbl,
682 u32 *pbl_size, u32 *pbl_addr)
685 struct tpt_entry tpt;
688 int rereg = (*stag != T3_STAG_UNSET);
690 stag_state = stag_state > 0;
691 stag_idx = (*stag) >> 8;
if (!reset_tpt_entry && *stag == T3_STAG_UNSET) {
694 stag_idx = cxio_hal_get_stag(rdev_p->rscp);
697 *stag = (stag_idx << 8) | ((*stag) & 0xFF);
699 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
700 __FUNCTION__, stag_state, type, pdid, stag_idx);
703 cxio_hal_pblpool_free(rdev_p, *pbl_addr, *pbl_size << 3);
705 *pbl_addr = cxio_hal_pblpool_alloc(rdev_p, *pbl_size << 3);
711 mutex_lock(&rdev_p->ctrl_qp.lock);
/* write PBL first if any - update pbl only if pbl list exists */
PDBG("%s *pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
717 __FUNCTION__, *pbl_addr, rdev_p->rnic_info.pbl_base,
719 err = cxio_hal_ctrl_qp_write_mem(rdev_p,
721 (*pbl_size << 3), pbl, 0);
726 /* write TPT entry */
728 memset(&tpt, 0, sizeof(tpt));
730 tpt.valid_stag_pdid = cpu_to_be32(F_TPT_VALID |
731 V_TPT_STAG_KEY((*stag) & M_TPT_STAG_KEY) |
732 V_TPT_STAG_STATE(stag_state) |
733 V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid));
734 BUG_ON(page_size >= 28);
735 tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) |
736 F_TPT_MW_BIND_ENABLE |
737 V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
738 V_TPT_PAGE_SIZE(page_size));
739 tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :
740 cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, *pbl_addr)>>3));
741 tpt.len = cpu_to_be32(len);
742 tpt.va_hi = cpu_to_be32((u32) (to >> 32));
743 tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
744 tpt.rsvd_bind_cnt_or_pstag = 0;
745 tpt.rsvd_pbl_size = reset_tpt_entry ? 0 :
746 cpu_to_be32(V_TPT_PBL_SIZE((*pbl_size) >> 2));
748 err = cxio_hal_ctrl_qp_write_mem(rdev_p,
750 (rdev_p->rnic_info.tpt_base >> 5),
751 sizeof(tpt), &tpt, 1);
753 /* release the stag index to free pool */
755 cxio_hal_put_stag(rdev_p->rscp, stag_idx);
757 wptr = rdev_p->ctrl_qp.wptr;
758 mutex_unlock(&rdev_p->ctrl_qp.lock);
760 if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
761 SEQ32_GE(rdev_p->ctrl_qp.rptr,
767 int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
768 enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
769 u8 page_size, __be64 *pbl, u32 *pbl_size,
772 *stag = T3_STAG_UNSET;
773 return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
774 zbva, to, len, page_size, pbl, pbl_size, pbl_addr);
777 int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
778 enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
779 u8 page_size, __be64 *pbl, u32 *pbl_size,
782 return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
783 zbva, to, len, page_size, pbl, pbl_size, pbl_addr);
786 int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size,
789 return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL,
790 &pbl_size, &pbl_addr);
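/*
 * Memory windows get a TPT entry of type TPT_MW with no PBL; presumably
 * the actual translation is supplied later by an MW bind work request.
 */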
793 int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid)
796 *stag = T3_STAG_UNSET;
797 return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0,
798 NULL, &pbl_size, NULL);
801 int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag)
803 return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL,
807 int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
809 struct t3_rdma_init_wr *wqe;
810 struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);
813 PDBG("%s rdev_p %p\n", __FUNCTION__, rdev_p);
814 wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe));
815 wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
816 wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
817 V_FW_RIWR_LEN(sizeof(*wqe) >> 3));
819 wqe->qpid = cpu_to_be32(attr->qpid);
820 wqe->pdid = cpu_to_be32(attr->pdid);
821 wqe->scqid = cpu_to_be32(attr->scqid);
822 wqe->rcqid = cpu_to_be32(attr->rcqid);
823 wqe->rq_addr = cpu_to_be32(attr->rq_addr - rdev_p->rnic_info.rqt_base);
824 wqe->rq_size = cpu_to_be32(attr->rq_size);
825 wqe->mpaattrs = attr->mpaattrs;
826 wqe->qpcaps = attr->qpcaps;
827 wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
828 wqe->flags = cpu_to_be32(attr->flags);
829 wqe->ord = cpu_to_be32(attr->ord);
830 wqe->ird = cpu_to_be32(attr->ird);
831 wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
832 wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
834 skb->priority = 0; /* 0=>ToeQ; 1=>CtrlQ */
835 return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
838 void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
843 void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
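/*
 * CPL_ASYNC_NOTIF handler: completions for the control QP advance
 * ctrl_qp.rptr and wake any waiters; everything else is passed to the
 * registered event callback, or freed if no callback is registered.
 */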
848 static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
851 struct cxio_rdev *rdev_p = NULL;
852 struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
853 PDBG("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x"
854 " se %0x notify %0x cqbranch %0x creditth %0x\n",
855 cnt, __FUNCTION__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
856 RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg),
857 RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg),
858 RSPQ_CREDIT_THRESH(rsp_msg));
859 PDBG("CQE: QPID 0x%0x genbit %0x type 0x%0x status 0x%0x opcode %d "
860 "len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
861 CQE_QPID(rsp_msg->cqe), CQE_GENBIT(rsp_msg->cqe),
862 CQE_TYPE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
863 CQE_OPCODE(rsp_msg->cqe), CQE_LEN(rsp_msg->cqe),
864 CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
865 rdev_p = (struct cxio_rdev *)t3cdev_p->ulp;
867 PDBG("%s called by t3cdev %p with null ulp\n", __FUNCTION__,
871 if (CQE_QPID(rsp_msg->cqe) == T3_CTRL_QP_ID) {
872 rdev_p->ctrl_qp.rptr = CQE_WRID_LOW(rsp_msg->cqe) + 1;
873 wake_up_interruptible(&rdev_p->ctrl_qp.waitq);
874 dev_kfree_skb_irq(skb);
875 } else if (CQE_QPID(rsp_msg->cqe) == 0xfff8)
876 dev_kfree_skb_irq(skb);
878 (*cxio_ev_cb) (rdev_p, skb);
880 dev_kfree_skb_irq(skb);
885 /* Caller takes care of locking if needed */
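/*
 * An rdev may be opened by device name or by an existing t3cdev.  On
 * success it is added to rdev_list, RNIC parameters are queried from the
 * LLD, and the control QP, stag/qpid/cqid resources, and PBL/RQT memory
 * pools are initialized.
 */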
886 int cxio_rdev_open(struct cxio_rdev *rdev_p)
888 struct net_device *netdev_p = NULL;
890 if (strlen(rdev_p->dev_name)) {
891 if (cxio_hal_find_rdev_by_name(rdev_p->dev_name)) {
894 netdev_p = dev_get_by_name(rdev_p->dev_name);
899 } else if (rdev_p->t3cdev_p) {
900 if (cxio_hal_find_rdev_by_t3cdev(rdev_p->t3cdev_p)) {
903 netdev_p = rdev_p->t3cdev_p->lldev;
904 strncpy(rdev_p->dev_name, rdev_p->t3cdev_p->name,
905 T3_MAX_DEV_NAME_LEN);
907 PDBG("%s t3cdev_p or dev_name must be set\n", __FUNCTION__);
911 list_add_tail(&rdev_p->entry, &rdev_list);
913 PDBG("%s opening rnic dev %s\n", __FUNCTION__, rdev_p->dev_name);
914 memset(&rdev_p->ctrl_qp, 0, sizeof(rdev_p->ctrl_qp));
915 if (!rdev_p->t3cdev_p)
916 rdev_p->t3cdev_p = T3CDEV(netdev_p);
917 rdev_p->t3cdev_p->ulp = (void *) rdev_p;
918 err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS,
919 &(rdev_p->rnic_info));
921 printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
922 __FUNCTION__, rdev_p->t3cdev_p, err);
925 err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_PORTS,
926 &(rdev_p->port_info));
928 printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
929 __FUNCTION__, rdev_p->t3cdev_p, err);
934 * qpshift is the number of bits to shift the qpid left in order
935 * to get the correct address of the doorbell for that qp.
937 cxio_init_ucontext(rdev_p, &rdev_p->uctx);
938 rdev_p->qpshift = PAGE_SHIFT -
940 ilog2(rdev_p->rnic_info.udbell_len >>
942 rdev_p->qpnr = rdev_p->rnic_info.udbell_len >> PAGE_SHIFT;
943 rdev_p->qpmask = (65536 >> ilog2(rdev_p->qpnr)) - 1;
944 PDBG("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d "
945 "pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n",
946 __FUNCTION__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
947 rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p),
948 rdev_p->rnic_info.pbl_base,
949 rdev_p->rnic_info.pbl_top, rdev_p->rnic_info.rqt_base,
950 rdev_p->rnic_info.rqt_top);
951 PDBG("udbell_len 0x%0x udbell_physbase 0x%lx kdb_addr %p qpshift %lu "
952 "qpnr %d qpmask 0x%x\n",
953 rdev_p->rnic_info.udbell_len,
954 rdev_p->rnic_info.udbell_physbase, rdev_p->rnic_info.kdb_addr,
955 rdev_p->qpshift, rdev_p->qpnr, rdev_p->qpmask);
957 err = cxio_hal_init_ctrl_qp(rdev_p);
959 printk(KERN_ERR "%s error %d initializing ctrl_qp.\n",
963 err = cxio_hal_init_resource(rdev_p, cxio_num_stags(rdev_p), 0,
964 0, T3_MAX_NUM_QP, T3_MAX_NUM_CQ,
967 printk(KERN_ERR "%s error %d initializing hal resources.\n",
971 err = cxio_hal_pblpool_create(rdev_p);
973 printk(KERN_ERR "%s error %d initializing pbl mem pool.\n",
977 err = cxio_hal_rqtpool_create(rdev_p);
979 printk(KERN_ERR "%s error %d initializing rqt mem pool.\n",
985 cxio_hal_pblpool_destroy(rdev_p);
987 cxio_hal_destroy_resource(rdev_p->rscp);
989 cxio_hal_destroy_ctrl_qp(rdev_p);
991 list_del(&rdev_p->entry);
995 void cxio_rdev_close(struct cxio_rdev *rdev_p)
998 cxio_hal_pblpool_destroy(rdev_p);
999 cxio_hal_rqtpool_destroy(rdev_p);
1000 list_del(&rdev_p->entry);
1001 rdev_p->t3cdev_p->ulp = NULL;
1002 cxio_hal_destroy_ctrl_qp(rdev_p);
1003 cxio_hal_destroy_resource(rdev_p->rscp);
1007 int __init cxio_hal_init(void)
1009 if (cxio_hal_init_rhdl_resource(T3_MAX_NUM_RI))
1011 t3_register_cpl_handler(CPL_ASYNC_NOTIF, cxio_hal_ev_handler);
1015 void __exit cxio_hal_exit(void)
1017 struct cxio_rdev *rdev, *tmp;
1019 t3_register_cpl_handler(CPL_ASYNC_NOTIF, NULL);
1020 list_for_each_entry_safe(rdev, tmp, &rdev_list, entry)
1021 cxio_rdev_close(rdev);
1022 cxio_hal_destroy_rhdl_resource();
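/*
 * Walk the software SQ from sq_rptr: completed unsignaled WRs are skipped
 * over, completed signaled WRs have their saved CQE copied into the
 * software CQ, and the walk stops at the first WR that has not completed,
 * keeping the software CQ in order.
 */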
1025 static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
1027 struct t3_swsq *sqp;
1028 __u32 ptr = wq->sq_rptr;
1029 int count = Q_COUNT(wq->sq_rptr, wq->sq_wptr);
1031 sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
1033 if (!sqp->signaled) {
1035 sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
1036 } else if (sqp->complete) {
1039 * Insert this completed cqe into the swcq.
1041 PDBG("%s moving cqe into swcq sq idx %ld cq idx %ld\n",
1042 __FUNCTION__, Q_PTR2IDX(ptr, wq->sq_size_log2),
1043 Q_PTR2IDX(cq->sw_wptr, cq->size_log2));
1044 sqp->cqe.header |= htonl(V_CQE_SWCQE(1));
1045 *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
1054 static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe,
1055 struct t3_cqe *read_cqe)
1057 read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr;
1058 read_cqe->len = wq->oldest_read->read_len;
1059 read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(*hw_cqe)) |
1060 V_CQE_SWCQE(SW_CQE(*hw_cqe)) |
1061 V_CQE_OPCODE(T3_READ_REQ) |
1066 * Return a ptr to the next read wr in the SWSQ or NULL.
1068 static void advance_oldest_read(struct t3_wq *wq)
1071 u32 rptr = wq->oldest_read - wq->sq + 1;
1072 u32 wptr = Q_PTR2IDX(wq->sq_wptr, wq->sq_size_log2);
1074 while (Q_PTR2IDX(rptr, wq->sq_size_log2) != wptr) {
1075 wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);
1077 if (wq->oldest_read->opcode == T3_READ_REQ)
1081 wq->oldest_read = NULL;
1088 * check the validity of the first CQE,
 * supply the wq associated with the qpid.
1091 * credit: cq credit to return to sge.
1092 * cqe_flushed: 1 iff the CQE is flushed.
1093 * cqe: copy of the polled CQE.
1097 * -1 CQE skipped, try again.
1099 int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
1100 u8 *cqe_flushed, u64 *cookie, u32 *credit)
1103 struct t3_cqe *hw_cqe, read_cqe;
1107 hw_cqe = cxio_next_cqe(cq);
1109 PDBG("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x"
1110 " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
1111 __FUNCTION__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
1112 CQE_GENBIT(*hw_cqe), CQE_TYPE(*hw_cqe), CQE_STATUS(*hw_cqe),
1113 CQE_OPCODE(*hw_cqe), CQE_LEN(*hw_cqe), CQE_WRID_HI(*hw_cqe),
1114 CQE_WRID_LOW(*hw_cqe));
 * skip CQEs not affiliated with a QP.
1125 * Gotta tweak READ completions:
1126 * 1) the cqe doesn't contain the sq_wptr from the wr.
1127 * 2) opcode not reflected from the wr.
1128 * 3) read_len not reflected from the wr.
1129 * 4) cq_type is RQ_TYPE not SQ_TYPE.
1131 if (RQ_TYPE(*hw_cqe) && (CQE_OPCODE(*hw_cqe) == T3_READ_RESP)) {
1134 * Don't write to the HWCQ, so create a new read req CQE
1137 create_read_req_cqe(wq, hw_cqe, &read_cqe);
1139 advance_oldest_read(wq);
1143 * T3A: Discard TERMINATE CQEs.
1145 if (CQE_OPCODE(*hw_cqe) == T3_TERMINATE) {
1151 if (CQE_STATUS(*hw_cqe) || wq->error) {
1152 *cqe_flushed = wq->error;
1156 * T3A inserts errors into the CQE. We cannot return
1157 * these as work completions.
1159 /* incoming write failures */
1160 if ((CQE_OPCODE(*hw_cqe) == T3_RDMA_WRITE)
1161 && RQ_TYPE(*hw_cqe)) {
1165 /* incoming read request failures */
1166 if ((CQE_OPCODE(*hw_cqe) == T3_READ_RESP) && SQ_TYPE(*hw_cqe)) {
1171 /* incoming SEND with no receive posted failures */
1172 if ((CQE_OPCODE(*hw_cqe) == T3_SEND) && RQ_TYPE(*hw_cqe) &&
1173 Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
1183 if (RQ_TYPE(*hw_cqe)) {
1186 * HW only validates 4 bits of MSN. So we must validate that
1187 * the MSN in the SEND is the next expected MSN. If its not,
 * then we complete this with TPT_ERR_MSN and mark the wq in
 * error.
 */
1191 if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
1193 hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN));
 * If we get here it's a send completion.
1202 * Handle out of order completion. These get stuffed
1203 * in the SW SQ. Then the SW SQ is walked to move any
1204 * now in-order completions into the SW CQ. This handles
1206 * 1) reaping unsignaled WRs when the first subsequent
1207 * signaled WR is completed.
1208 * 2) out of order read completions.
1210 if (!SW_CQE(*hw_cqe) && (CQE_WRID_SQ_WPTR(*hw_cqe) != wq->sq_rptr)) {
1211 struct t3_swsq *sqp;
1213 PDBG("%s out of order completion going in swsq at idx %ld\n",
1215 Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2));
1217 Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
 * Reap the associated WR(s) that are freed up with this
 * completion.
 */
1231 if (SQ_TYPE(*hw_cqe)) {
1232 wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
1233 PDBG("%s completing sq idx %ld\n", __FUNCTION__,
1234 Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2));
1236 Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2))->wr_id;
1239 PDBG("%s completing rq idx %ld\n", __FUNCTION__,
1240 Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
1241 *cookie = *(wq->rq + Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
1247 * Flush any completed cqes that are now in-order.
1249 flush_completed_wrs(wq, cq);
1252 if (SW_CQE(*hw_cqe)) {
1253 PDBG("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",
1254 __FUNCTION__, cq, cq->cqid, cq->sw_rptr);
1257 PDBG("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n",
1258 __FUNCTION__, cq, cq->cqid, cq->rptr);
1262 * T3A: compute credits.
1264 if (((cq->rptr - cq->wptr) > (1 << (cq->size_log2 - 1)))
1265 || ((cq->rptr - cq->wptr) >= 128)) {
1266 *credit = cq->rptr - cq->wptr;
1267 cq->wptr = cq->rptr;