/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) CNA Adapters.               *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"

enum mbx_status {
        OCRDMA_MBX_STATUS_FAILED = 1,
        OCRDMA_MBX_STATUS_ILLEGAL_FIELD = 3,
        OCRDMA_MBX_STATUS_OOR = 100,
        OCRDMA_MBX_STATUS_INVALID_PD = 101,
        OCRDMA_MBX_STATUS_PD_INUSE = 102,
        OCRDMA_MBX_STATUS_INVALID_CQ = 103,
        OCRDMA_MBX_STATUS_INVALID_QP = 104,
        OCRDMA_MBX_STATUS_INVALID_LKEY = 105,
        OCRDMA_MBX_STATUS_ORD_EXCEEDS = 106,
        OCRDMA_MBX_STATUS_IRD_EXCEEDS = 107,
        OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS = 108,
        OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS = 109,
        OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS = 110,
        OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS = 111,
        OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS = 112,
        OCRDMA_MBX_STATUS_INVALID_STATE_CHANGE = 113,
        OCRDMA_MBX_STATUS_MW_BOUND = 114,
        OCRDMA_MBX_STATUS_INVALID_VA = 115,
        OCRDMA_MBX_STATUS_INVALID_LENGTH = 116,
        OCRDMA_MBX_STATUS_INVALID_FBO = 117,
        OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS = 118,
        OCRDMA_MBX_STATUS_INVALID_PBE_SIZE = 119,
        OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY = 120,
        OCRDMA_MBX_STATUS_INVALID_PBL_SHIFT = 121,
        OCRDMA_MBX_STATUS_INVALID_SRQ_ID = 129,
        OCRDMA_MBX_STATUS_SRQ_ERROR = 133,
        OCRDMA_MBX_STATUS_RQE_EXCEEDS = 134,
        OCRDMA_MBX_STATUS_MTU_EXCEEDS = 135,
        OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS = 136,
        OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS = 137,
        OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS = 138,
        OCRDMA_MBX_STATUS_QP_BOUND = 130,
        OCRDMA_MBX_STATUS_INVALID_CHANGE = 139,
        OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP = 140,
        OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER = 141,
        OCRDMA_MBX_STATUS_MW_STILL_BOUND = 142,
        OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID = 143,
        OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS = 144
};

enum additional_status {
        OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES = 22
};

enum cqe_status {
        OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES = 1,
        OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER = 2,
        OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES = 3,
        OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING = 4,
        OCRDMA_MBX_CQE_STATUS_DMA_FAILED = 5
};

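/* Two layers of mailbox error reporting exist: the MCQE completion
 * carries a CQE-level status (enum cqe_status above), while the command
 * response itself carries a mailbox status plus an additional status
 * byte (enum mbx_status / enum additional_status). The helpers
 * ocrdma_get_mbx_cqe_errno() and ocrdma_get_mbx_errno() below map each
 * layer to a Linux errno.
 */
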
static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
{
        return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
}

static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
{
        eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1);
}

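/* The queue lengths (OCRDMA_EQ_LEN, OCRDMA_MQ_CQ_LEN, OCRDMA_MQ_LEN) are
 * powers of two, so masking with (len - 1) wraps the ring index without
 * a divide: e.g. with a length of 16, index 15 + 1 = 16 masks back to 0.
 * The same pattern is used by the inc_tail/inc_head helpers below.
 */
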
static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)
{
        struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
            (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));

        if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))
                return NULL;
        return cqe;
}

static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
{
        dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1);
}

static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)
{
        return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe));
}

static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
{
        dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1);
}

static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
{
        return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe));
}

enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
{
        switch (qps) {
        case OCRDMA_QPS_RST:
                return IB_QPS_RESET;
        case OCRDMA_QPS_INIT:
                return IB_QPS_INIT;
        case OCRDMA_QPS_RTR:
                return IB_QPS_RTR;
        case OCRDMA_QPS_RTS:
                return IB_QPS_RTS;
        case OCRDMA_QPS_SQD:
        case OCRDMA_QPS_SQ_DRAINING:
                return IB_QPS_SQD;
        case OCRDMA_QPS_SQE:
                return IB_QPS_SQE;
        case OCRDMA_QPS_ERR:
                return IB_QPS_ERR;
        }
        return IB_QPS_ERR;
}

static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
{
        switch (qps) {
        case IB_QPS_RESET:
                return OCRDMA_QPS_RST;
        case IB_QPS_INIT:
                return OCRDMA_QPS_INIT;
        case IB_QPS_RTR:
                return OCRDMA_QPS_RTR;
        case IB_QPS_RTS:
                return OCRDMA_QPS_RTS;
        case IB_QPS_SQD:
                return OCRDMA_QPS_SQD;
        case IB_QPS_SQE:
                return OCRDMA_QPS_SQE;
        case IB_QPS_ERR:
                return OCRDMA_QPS_ERR;
        }
        return OCRDMA_QPS_ERR;
}

static int ocrdma_get_mbx_errno(u32 status)
{
        int err_num;
        u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>
                                        OCRDMA_MBX_RSP_STATUS_SHIFT;
        u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
                                        OCRDMA_MBX_RSP_ASTATUS_SHIFT;

        switch (mbox_status) {
        case OCRDMA_MBX_STATUS_OOR:
        case OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS:
                err_num = -EAGAIN;
                break;

        case OCRDMA_MBX_STATUS_INVALID_PD:
        case OCRDMA_MBX_STATUS_INVALID_CQ:
        case OCRDMA_MBX_STATUS_INVALID_SRQ_ID:
        case OCRDMA_MBX_STATUS_INVALID_QP:
        case OCRDMA_MBX_STATUS_INVALID_CHANGE:
        case OCRDMA_MBX_STATUS_MTU_EXCEEDS:
        case OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER:
        case OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID:
        case OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS:
        case OCRDMA_MBX_STATUS_ILLEGAL_FIELD:
        case OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY:
        case OCRDMA_MBX_STATUS_INVALID_LKEY:
        case OCRDMA_MBX_STATUS_INVALID_VA:
        case OCRDMA_MBX_STATUS_INVALID_LENGTH:
        case OCRDMA_MBX_STATUS_INVALID_FBO:
        case OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS:
        case OCRDMA_MBX_STATUS_INVALID_PBE_SIZE:
        case OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP:
        case OCRDMA_MBX_STATUS_SRQ_ERROR:
        case OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS:
                err_num = -EINVAL;
                break;

        case OCRDMA_MBX_STATUS_PD_INUSE:
        case OCRDMA_MBX_STATUS_QP_BOUND:
        case OCRDMA_MBX_STATUS_MW_STILL_BOUND:
        case OCRDMA_MBX_STATUS_MW_BOUND:
                err_num = -EBUSY;
                break;

        case OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS:
        case OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS:
        case OCRDMA_MBX_STATUS_RQE_EXCEEDS:
        case OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS:
        case OCRDMA_MBX_STATUS_ORD_EXCEEDS:
        case OCRDMA_MBX_STATUS_IRD_EXCEEDS:
        case OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS:
        case OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS:
        case OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS:
                err_num = -ENOBUFS;
                break;

        case OCRDMA_MBX_STATUS_FAILED:
                switch (add_status) {
                case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES:
                        err_num = -EAGAIN;
                        break;
                default:
                        err_num = -EFAULT;
                }
                break;
        default:
                err_num = -EFAULT;
        }
        return err_num;
}

static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
{
        int err_num = -EINVAL;

        switch (cqe_status) {
        case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES:
                err_num = -EPERM;
                break;
        case OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER:
                err_num = -EINVAL;
                break;
        case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES:
        case OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING:
                err_num = -EAGAIN;
                break;
        case OCRDMA_MBX_CQE_STATUS_DMA_FAILED:
                err_num = -EIO;
                break;
        }
        return err_num;
}

void ocrdma_ring_cq_db(struct ocrdma_dev *dev, u16 cq_id, bool armed,
                       bool solicited, u16 cqe_popped)
{
        u32 val = cq_id & OCRDMA_DB_CQ_RING_ID_MASK;

        val |= ((cq_id & OCRDMA_DB_CQ_RING_ID_EXT_MASK) <<
                OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (armed)
                val |= (1 << OCRDMA_DB_CQ_REARM_SHIFT);
        if (solicited)
                val |= (1 << OCRDMA_DB_CQ_SOLICIT_SHIFT);
        val |= (cqe_popped << OCRDMA_DB_CQ_NUM_POPPED_SHIFT);
        iowrite32(val, dev->nic_info.db + OCRDMA_DB_CQ_OFFSET);
}

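/* Typical usage, as in the handlers below:
 * ocrdma_ring_cq_db(dev, cq_id, false, false, n) acks n popped CQEs
 * without re-arming, while ocrdma_ring_cq_db(dev, cq_id, true, false, 0)
 * re-arms the CQ so the next CQE raises an event.
 */
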
static void ocrdma_ring_mq_db(struct ocrdma_dev *dev)
{
        u32 val = 0;

        val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK;
        val |= 1 << OCRDMA_MQ_NUM_MQE_SHIFT;
        iowrite32(val, dev->nic_info.db + OCRDMA_DB_MQ_OFFSET);
}

static void ocrdma_ring_eq_db(struct ocrdma_dev *dev, u16 eq_id,
                              bool arm, bool clear_int, u16 num_eqe)
{
        u32 val = 0;

        val |= eq_id & OCRDMA_EQ_ID_MASK;
        val |= ((eq_id & OCRDMA_EQ_ID_EXT_MASK) << OCRDMA_EQ_ID_EXT_MASK_SHIFT);

        if (arm)
                val |= (1 << OCRDMA_REARM_SHIFT);
        if (clear_int)
                val |= (1 << OCRDMA_EQ_CLR_SHIFT);
        val |= (1 << OCRDMA_EQ_TYPE_SHIFT);
        val |= (num_eqe << OCRDMA_NUM_EQE_SHIFT);
        iowrite32(val, dev->nic_info.db + OCRDMA_DB_EQ_OFFSET);
}

static void ocrdma_init_mch(struct ocrdma_mbx_hdr *cmd_hdr,
                            u8 opcode, u8 subsys, u32 cmd_len)
{
        cmd_hdr->subsys_op = (opcode | (subsys << OCRDMA_MCH_SUBSYS_SHIFT));
        cmd_hdr->timeout = 20; /* seconds */
        cmd_hdr->cmd_len = cmd_len - sizeof(struct ocrdma_mbx_hdr);
}

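/* Note: the cmd_len passed in includes the mailbox header itself, so the
 * length written to the wire (cmd_hdr->cmd_len) covers the payload only.
 */
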
static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
{
        struct ocrdma_mqe *mqe;

        mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
        if (!mqe)
                return NULL;
        mqe->hdr.spcl_sge_cnt_emb |=
                (OCRDMA_MQE_EMBEDDED << OCRDMA_MQE_HDR_EMB_SHIFT) &
                OCRDMA_MQE_HDR_EMB_MASK;
        mqe->hdr.pyld_len = cmd_len - sizeof(struct ocrdma_mqe_hdr);

        ocrdma_init_mch(&mqe->u.emb_req.mch, opcode, OCRDMA_SUBSYS_ROCE,
                        cmd_len);
        return mqe;
}

static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
{
        dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
}

static int ocrdma_alloc_q(struct ocrdma_dev *dev,
                          struct ocrdma_queue_info *q, u16 len, u16 entry_size)
{
        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        q->size = len * entry_size;
        q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size,
                                   &q->dma, GFP_KERNEL);
        if (!q->va)
                return -ENOMEM;
        memset(q->va, 0, q->size);
        return 0;
}

static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
                                 dma_addr_t host_pa, int hw_page_size)
{
        int i;

        for (i = 0; i < cnt; i++) {
                q_pa[i].lo = (u32) (host_pa & 0xffffffff);
                q_pa[i].hi = (u32) upper_32_bits(host_pa);
                host_pa += hw_page_size;
        }
}

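/* Example: a queue of 4 hardware pages of 4K each starting at host_pa
 * 0x1_0000_0000 produces PBEs {lo=0x00000000, hi=0x1}, {lo=0x00001000,
 * hi=0x1}, and so on, each entry advanced by hw_page_size.
 */
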
static void ocrdma_assign_eq_vect_gen2(struct ocrdma_dev *dev,
                                       struct ocrdma_eq *eq)
{
        /* assign vector and update vector id for next EQ */
        eq->vector = dev->nic_info.msix.start_vector;
        dev->nic_info.msix.start_vector += 1;
}

static void ocrdma_free_eq_vect_gen2(struct ocrdma_dev *dev)
{
        /* this assumes that EQs are freed in exactly reverse order
         * of their allocation.
         */
        dev->nic_info.msix.start_vector -= 1;
}

static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q,
                               int queue_type)
{
        u8 opcode = 0;
        int status;
        struct ocrdma_delete_q_req *cmd = dev->mbx_cmd;

        switch (queue_type) {
        case QTYPE_MCCQ:
                opcode = OCRDMA_CMD_DELETE_MQ;
                break;
        case QTYPE_CQ:
                opcode = OCRDMA_CMD_DELETE_CQ;
                break;
        case QTYPE_EQ:
                opcode = OCRDMA_CMD_DELETE_EQ;
                break;
        default:
                BUG();
        }
        memset(cmd, 0, sizeof(*cmd));
        ocrdma_init_mch(&cmd->req, opcode, OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
        cmd->id = q->id;

        status = be_roce_mcc_cmd(dev->nic_info.netdev,
                                 cmd, sizeof(*cmd), NULL, NULL);
        if (!status)
                q->created = false;
        return status;
}

static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
        int status;
        struct ocrdma_create_eq_req *cmd = dev->mbx_cmd;
        struct ocrdma_create_eq_rsp *rsp = dev->mbx_cmd;

        memset(cmd, 0, sizeof(*cmd));
        ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,
                        sizeof(*cmd));
        if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
                cmd->req.rsvd_version = 0;
        else
                cmd->req.rsvd_version = 2;

        cmd->num_pages = 4;
        cmd->valid = OCRDMA_CREATE_EQ_VALID;
        cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT;

        ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,
                             PAGE_SIZE_4K);
        status = be_roce_mcc_cmd(dev->nic_info.netdev, cmd, sizeof(*cmd), NULL,
                                 NULL);
        if (!status) {
                eq->q.id = rsp->vector_eqid & 0xffff;
                if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
                        ocrdma_assign_eq_vect_gen2(dev, eq);
                } else {
                        eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
                        dev->nic_info.msix.start_vector += 1;
                }
                eq->q.created = true;
        }
        return status;
}

static int ocrdma_create_eq(struct ocrdma_dev *dev,
                            struct ocrdma_eq *eq, u16 q_len)
{
        int status;

        status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN,
                                sizeof(struct ocrdma_eqe));
        if (status)
                return status;

        status = ocrdma_mbx_create_eq(dev, eq);
        if (status)
                goto mbx_err;
        eq->dev = dev;
        ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);

        return 0;
mbx_err:
        ocrdma_free_q(dev, &eq->q);
        return status;
}

static int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
        int irq;

        if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
                irq = dev->nic_info.pdev->irq;
        else
                irq = dev->nic_info.msix.vector_list[eq->vector];
        return irq;
}

static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
        if (eq->q.created) {
                ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
                if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
                        ocrdma_free_eq_vect_gen2(dev);
                ocrdma_free_q(dev, &eq->q);
        }
}

static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
        int irq;

        /* disarm the EQ so that no interrupts are generated while freeing
         * is underway and the EQ delete is in progress.
         */
        ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0);

        irq = ocrdma_get_irq(dev, eq);
        free_irq(irq, eq);
        _ocrdma_destroy_eq(dev, eq);
}

static void ocrdma_destroy_qp_eqs(struct ocrdma_dev *dev)
{
        int i;

        /* deallocate the data path eqs */
        for (i = 0; i < dev->eq_cnt; i++)
                ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
}

static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
                                   struct ocrdma_queue_info *cq,
                                   struct ocrdma_queue_info *eq)
{
        struct ocrdma_create_cq_cmd *cmd = dev->mbx_cmd;
        struct ocrdma_create_cq_cmd_rsp *rsp = dev->mbx_cmd;
        int status;

        memset(cmd, 0, sizeof(*cmd));
        ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,
                        OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

        cmd->pgsz_pgcnt = PAGES_4K_SPANNED(cq->va, cq->size);
        cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
        cmd->eqn = (eq->id << OCRDMA_CREATE_CQ_EQID_SHIFT);

        ocrdma_build_q_pages(&cmd->pa[0], cmd->pgsz_pgcnt,
                             cq->dma, PAGE_SIZE_4K);
        status = be_roce_mcc_cmd(dev->nic_info.netdev,
                                 cmd, sizeof(*cmd), NULL, NULL);
        if (!status) {
                cq->id = (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
                cq->created = true;
        }
        return status;
}

static u32 ocrdma_encoded_q_len(int q_len)
{
        u32 len_encoded = fls(q_len); /* log2(len) + 1 */

        if (len_encoded == 16)
                len_encoded = 0;
        return len_encoded;
}

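/* Example: a ring of 128 entries encodes as fls(128) = 8 (log2 + 1).
 * A 32768-entry ring would give fls() = 16, which is wrapped to 0 here,
 * presumably because the hardware ring-size field is only 4 bits wide.
 */
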
static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
                                struct ocrdma_queue_info *mq,
                                struct ocrdma_queue_info *cq)
{
        int num_pages, status;
        struct ocrdma_create_mq_req *cmd = dev->mbx_cmd;
        struct ocrdma_create_mq_rsp *rsp = dev->mbx_cmd;
        struct ocrdma_pa *pa;

        memset(cmd, 0, sizeof(*cmd));
        num_pages = PAGES_4K_SPANNED(mq->va, mq->size);

        ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT,
                        OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
        cmd->req.rsvd_version = 1;
        cmd->cqid_pages = num_pages;
        cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
        cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
        cmd->async_event_bitmap = Bit(20);
        cmd->async_cqid_ringsize = cq->id;
        cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
                                     OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
        cmd->valid = OCRDMA_CREATE_MQ_VALID;
        pa = &cmd->pa[0];

        ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K);
        status = be_roce_mcc_cmd(dev->nic_info.netdev,
                                 cmd, sizeof(*cmd), NULL, NULL);
        if (!status) {
                mq->id = rsp->id;
                mq->created = true;
        }
        return status;
}

static int ocrdma_create_mq(struct ocrdma_dev *dev)
{
        int status;

        /* Alloc completion queue for Mailbox queue */
        status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN,
                                sizeof(struct ocrdma_mcqe));
        if (status)
                goto alloc_err;

        status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->meq.q);
        if (status)
                goto mbx_cq_free;

        memset(&dev->mqe_ctx, 0, sizeof(dev->mqe_ctx));
        init_waitqueue_head(&dev->mqe_ctx.cmd_wait);
        mutex_init(&dev->mqe_ctx.lock);

        /* Alloc Mailbox queue */
        status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN,
                                sizeof(struct ocrdma_mqe));
        if (status)
                goto mbx_cq_destroy;
        status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq);
        if (status)
                goto mbx_q_free;
        ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0);
        return 0;

mbx_q_free:
        ocrdma_free_q(dev, &dev->mq.sq);
mbx_cq_destroy:
        ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ);
mbx_cq_free:
        ocrdma_free_q(dev, &dev->mq.cq);
alloc_err:
        return status;
}

static void ocrdma_destroy_mq(struct ocrdma_dev *dev)
{
        struct ocrdma_queue_info *mbxq, *cq;

        /* mqe_ctx lock synchronizes with any other pending cmds. */
        mutex_lock(&dev->mqe_ctx.lock);
        mbxq = &dev->mq.sq;
        if (mbxq->created) {
                ocrdma_mbx_delete_q(dev, mbxq, QTYPE_MCCQ);
                ocrdma_free_q(dev, mbxq);
        }
        mutex_unlock(&dev->mqe_ctx.lock);

        cq = &dev->mq.cq;
        if (cq->created) {
                ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ);
                ocrdma_free_q(dev, cq);
        }
}

static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
                                       struct ocrdma_qp *qp)
{
        enum ib_qp_state new_ib_qps = IB_QPS_ERR;
        enum ib_qp_state old_ib_qps;

        if (qp == NULL)
                BUG();
        ocrdma_qp_state_machine(qp, new_ib_qps, &old_ib_qps);
}

static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
                                    struct ocrdma_ae_mcqe *cqe)
{
        struct ocrdma_qp *qp = NULL;
        struct ocrdma_cq *cq = NULL;
        struct ib_event ib_evt;
        int cq_event = 0;
        int qp_event = 1;
        int srq_event = 0;
        int dev_event = 0;
        int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
            OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;

        if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID)
                qp = dev->qp_tbl[cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK];
        if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID)
                cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK];

        ib_evt.device = &dev->ibdev;

        switch (type) {
        case OCRDMA_CQ_ERROR:
                ib_evt.element.cq = &cq->ibcq;
                ib_evt.event = IB_EVENT_CQ_ERR;
                cq_event = 1;
                qp_event = 0;
                break;
        case OCRDMA_CQ_OVERRUN_ERROR:
                ib_evt.element.cq = &cq->ibcq;
                ib_evt.event = IB_EVENT_CQ_ERR;
                break;
        case OCRDMA_CQ_QPCAT_ERROR:
                ib_evt.element.qp = &qp->ibqp;
                ib_evt.event = IB_EVENT_QP_FATAL;
                ocrdma_process_qpcat_error(dev, qp);
                break;
        case OCRDMA_QP_ACCESS_ERROR:
                ib_evt.element.qp = &qp->ibqp;
                ib_evt.event = IB_EVENT_QP_ACCESS_ERR;
                break;
        case OCRDMA_QP_COMM_EST_EVENT:
                ib_evt.element.qp = &qp->ibqp;
                ib_evt.event = IB_EVENT_COMM_EST;
                break;
        case OCRDMA_SQ_DRAINED_EVENT:
                ib_evt.element.qp = &qp->ibqp;
                ib_evt.event = IB_EVENT_SQ_DRAINED;
                break;
        case OCRDMA_DEVICE_FATAL_EVENT:
                ib_evt.element.port_num = 1;
                ib_evt.event = IB_EVENT_DEVICE_FATAL;
                qp_event = 0;
                dev_event = 1;
                break;
        case OCRDMA_SRQCAT_ERROR:
                ib_evt.element.srq = &qp->srq->ibsrq;
                ib_evt.event = IB_EVENT_SRQ_ERR;
                srq_event = 1;
                qp_event = 0;
                break;
        case OCRDMA_SRQ_LIMIT_EVENT:
                ib_evt.element.srq = &qp->srq->ibsrq;
                ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
                srq_event = 1;
                qp_event = 0;
                break;
        case OCRDMA_QP_LAST_WQE_EVENT:
                ib_evt.element.qp = &qp->ibqp;
                ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
                break;
        default:
                cq_event = 0;
                qp_event = 0;
                srq_event = 0;
                dev_event = 0;
                pr_err("%s() unknown type=0x%x\n", __func__, type);
                break;
        }

        if (qp_event) {
                if (qp->ibqp.event_handler)
                        qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
        } else if (cq_event) {
                if (cq->ibcq.event_handler)
                        cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context);
        } else if (srq_event) {
                if (qp->srq->ibsrq.event_handler)
                        qp->srq->ibsrq.event_handler(&ib_evt,
                                                     qp->srq->ibsrq.srq_context);
        } else if (dev_event) {
                ib_dispatch_event(&ib_evt);
        }
}

static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
{
        /* async CQE processing */
        struct ocrdma_ae_mcqe *cqe = ae_cqe;
        u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
                        OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;

        if (evt_code == OCRDMA_ASYNC_EVE_CODE)
                ocrdma_dispatch_ibevent(dev, cqe);
        else
                pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
                       dev->id, evt_code);
}

static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
{
        if (dev->mqe_ctx.tag == cqe->tag_lo && dev->mqe_ctx.cmd_done == false) {
                dev->mqe_ctx.cqe_status = (cqe->status &
                     OCRDMA_MCQE_STATUS_MASK) >> OCRDMA_MCQE_STATUS_SHIFT;
                dev->mqe_ctx.ext_status =
                    (cqe->status & OCRDMA_MCQE_ESTATUS_MASK)
                    >> OCRDMA_MCQE_ESTATUS_SHIFT;
                dev->mqe_ctx.cmd_done = true;
                wake_up(&dev->mqe_ctx.cmd_wait);
        } else
                pr_err("%s() cqe for invalid tag0x%x.expected=0x%x\n",
                       __func__, cqe->tag_lo, dev->mqe_ctx.tag);
}

static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
{
        u16 cqe_popped = 0;
        struct ocrdma_mcqe *cqe;

        while (1) {
                cqe = ocrdma_get_mcqe(dev);
                if (cqe == NULL)
                        break;
                ocrdma_le32_to_cpu(cqe, sizeof(*cqe));
                cqe_popped += 1;
                if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_AE_MASK)
                        ocrdma_process_acqe(dev, cqe);
                else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
                        ocrdma_process_mcqe(dev, cqe);
                else
                        pr_err("%s() cqe->compl is not set.\n", __func__);
                memset(cqe, 0, sizeof(struct ocrdma_mcqe));
                ocrdma_mcq_inc_tail(dev);
        }
        ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped);
        return 0;
}

static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
                                       struct ocrdma_cq *cq)
{
        unsigned long flags;
        struct ocrdma_qp *qp;
        bool buddy_cq_found = false;
        /* Go through the list of QPs in error state which are using this CQ
         * and invoke their callback handlers to trigger CQE processing for
         * error/flushed CQEs. It is rare to find more than a few entries in
         * this list, as most consumers stop after getting an error CQE.
         * The list is traversed only once, when a matching buddy CQ is found
         * for a QP.
         */
        spin_lock_irqsave(&dev->flush_q_lock, flags);
        list_for_each_entry(qp, &cq->sq_head, sq_entry) {
                if (qp->srq)
                        continue;
                /* if wq and rq share the same cq, then comp_handler
                 * is already invoked.
                 */
                if (qp->sq_cq == qp->rq_cq)
                        continue;
                /* if completion came on sq, rq's cq is buddy cq.
                 * if completion came on rq, sq's cq is buddy cq.
                 */
                if (qp->sq_cq == cq)
                        cq = qp->rq_cq;
                else
                        cq = qp->sq_cq;
                buddy_cq_found = true;
                break;
        }
        spin_unlock_irqrestore(&dev->flush_q_lock, flags);
        if (buddy_cq_found == false)
                return;
        if (cq->ibcq.comp_handler) {
                spin_lock_irqsave(&cq->comp_handler_lock, flags);
                (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
                spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
        }
}

static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
{
        unsigned long flags;
        struct ocrdma_cq *cq;

        if (cq_idx >= OCRDMA_MAX_CQ)
                BUG();

        cq = dev->cq_tbl[cq_idx];
        if (cq == NULL) {
                pr_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx);
                return;
        }
        spin_lock_irqsave(&cq->cq_lock, flags);
        cq->armed = false;
        cq->solicited = false;
        spin_unlock_irqrestore(&cq->cq_lock, flags);

        ocrdma_ring_cq_db(dev, cq->id, false, false, 0);

        if (cq->ibcq.comp_handler) {
                spin_lock_irqsave(&cq->comp_handler_lock, flags);
                (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
                spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
        }
        ocrdma_qp_buddy_cq_handler(dev, cq);
}

static void ocrdma_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
{
        /* process the MQ-CQE. */
        if (cq_id == dev->mq.cq.id)
                ocrdma_mq_cq_handler(dev, cq_id);
        else
                ocrdma_qp_cq_handler(dev, cq_id);
}

static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
{
        struct ocrdma_eq *eq = handle;
        struct ocrdma_dev *dev = eq->dev;
        struct ocrdma_eqe eqe;
        struct ocrdma_eqe *ptr;
        u16 eqe_popped = 0;
        u16 cq_id;

        while (1) {
                ptr = ocrdma_get_eqe(eq);
                eqe = *ptr;
                ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
                if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
                        break;
                eqe_popped += 1;
                ptr->id_valid = 0;
                /* check whether it's a CQE or not. */
                if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {
                        cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;
                        ocrdma_cq_handler(dev, cq_id);
                }
                ocrdma_eq_inc_tail(eq);
        }
        ocrdma_ring_eq_db(dev, eq->q.id, true, true, eqe_popped);
        /* Ring EQ doorbell with num_popped set to 0 to enable interrupts again. */
        if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
                ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
        return IRQ_HANDLED;
}

static void ocrdma_post_mqe(struct ocrdma_dev *dev, struct ocrdma_mqe *cmd)
{
        struct ocrdma_mqe *mqe;

        dev->mqe_ctx.tag = dev->mq.sq.head;
        dev->mqe_ctx.cmd_done = false;
        mqe = ocrdma_get_mqe(dev);
        cmd->hdr.tag_lo = dev->mq.sq.head;
        ocrdma_copy_cpu_to_le32(mqe, cmd, sizeof(*mqe));
        /* make sure descriptor is written before ringing doorbell */
        wmb();
        ocrdma_mq_inc_head(dev);
        ocrdma_ring_mq_db(dev);
}

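/* Producer side of the mailbox protocol: the command is staged in the MQ
 * slot at sq.head, tagged with the head index so its completion can be
 * matched in ocrdma_process_mcqe() above, made visible with wmb(), and
 * only then is the MQ doorbell rung. ocrdma_process_mcqe() completes the
 * exchange by recording the status and waking the waiter.
 */
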
static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
{
        long status;

        /* 30 second timeout */
        status = wait_event_timeout(dev->mqe_ctx.cmd_wait,
                                    (dev->mqe_ctx.cmd_done != false),
                                    msecs_to_jiffies(30000));
        if (status)
                return 0;
        else
                return -1;
}

/* issue a mailbox command on the MQ */
static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
{
        int status = 0;
        u16 cqe_status, ext_status;
        struct ocrdma_mqe *rsp;

        mutex_lock(&dev->mqe_ctx.lock);
        ocrdma_post_mqe(dev, mqe);
        status = ocrdma_wait_mqe_cmpl(dev);
        if (status)
                goto mbx_err;
        cqe_status = dev->mqe_ctx.cqe_status;
        ext_status = dev->mqe_ctx.ext_status;
        rsp = ocrdma_get_mqe_rsp(dev);
        ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe)));
        if (cqe_status || ext_status) {
                pr_err("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
                       __func__,
                       (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
                       OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status);
                status = ocrdma_get_mbx_cqe_errno(cqe_status);
                goto mbx_err;
        }
        if (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK)
                status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
mbx_err:
        mutex_unlock(&dev->mqe_ctx.lock);
        return status;
}

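/* Callers follow a common pattern (see the mbx_* helpers below), e.g.:
 *
 *      cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd));
 *      if (!cmd)
 *              return -ENOMEM;
 *      status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
 *      ...
 *      kfree(cmd);
 *
 * The response is copied back over the command buffer, so on success the
 * same allocation is simply cast to the response structure.
 */
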
static void ocrdma_get_attr(struct ocrdma_dev *dev,
                            struct ocrdma_dev_attr *attr,
                            struct ocrdma_mbx_query_config *rsp)
{
        attr->max_pd =
            (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
        attr->max_qp =
            (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
        attr->max_send_sge = ((rsp->max_write_send_sge &
                               OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
                              OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
        attr->max_recv_sge = (rsp->max_write_send_sge &
                              OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
        attr->max_srq_sge = (rsp->max_srq_rqe_sge &
                             OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
        attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
                                OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
        attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
                                OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
        attr->cq_overflow_detect = (rsp->qp_srq_cq_ird_ord &
                                    OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK) >>
            OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT;
        attr->srq_supported = (rsp->qp_srq_cq_ird_ord &
                               OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK) >>
            OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT;
        attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
                                    OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >>
            OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
        attr->max_mr = rsp->max_mr;
        attr->max_mr_size = ~0ull;

        attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
        attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
        attr->max_cqe = rsp->max_cq_cqes_per_cq &
            OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK;
        attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
                           OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >>
                          OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) *
            OCRDMA_WQE_STRIDE;
        attr->rqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
                           OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK) >>
                          OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET) *
            OCRDMA_WQE_STRIDE;
        attr->max_inline_data =
            attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
                              sizeof(struct ocrdma_sge));
        if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
                attr->ird = 1;
                attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
                attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
        }
        dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
            OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
        dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
            OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
}

static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
                                  struct ocrdma_fw_conf_rsp *conf)
{
        u32 fn_mode;

        fn_mode = conf->fn_mode & OCRDMA_FN_MODE_RDMA;
        if (fn_mode != OCRDMA_FN_MODE_RDMA)
                return -EBUSY;
        dev->base_eqid = conf->base_eqid;
        dev->max_eq = conf->max_eq;
        dev->attr.max_cq = OCRDMA_MAX_CQ - 1;
        return 0;
}

/* can be issued only during init time. */
static int ocrdma_mbx_query_fw_ver(struct ocrdma_dev *dev)
{
        int status = -ENOMEM;
        struct ocrdma_mqe *cmd;
        struct ocrdma_fw_ver_rsp *rsp;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_VER, sizeof(*cmd));
        if (!cmd)
                return -ENOMEM;
        ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
                        OCRDMA_CMD_GET_FW_VER,
                        OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;
        rsp = (struct ocrdma_fw_ver_rsp *)cmd;
        memset(&dev->attr.fw_ver[0], 0, sizeof(dev->attr.fw_ver));
        memcpy(&dev->attr.fw_ver[0], &rsp->running_ver[0],
               sizeof(rsp->running_ver));
        ocrdma_le32_to_cpu(dev->attr.fw_ver, sizeof(rsp->running_ver));
mbx_err:
        kfree(cmd);
        return status;
}

/* can be issued only during init time. */
static int ocrdma_mbx_query_fw_config(struct ocrdma_dev *dev)
{
        int status = -ENOMEM;
        struct ocrdma_mqe *cmd;
        struct ocrdma_fw_conf_rsp *rsp;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_CONFIG, sizeof(*cmd));
        if (!cmd)
                return -ENOMEM;
        ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
                        OCRDMA_CMD_GET_FW_CONFIG,
                        OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;
        rsp = (struct ocrdma_fw_conf_rsp *)cmd;
        status = ocrdma_check_fw_config(dev, rsp);
mbx_err:
        kfree(cmd);
        return status;
}

static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
{
        int status = -ENOMEM;
        struct ocrdma_mbx_query_config *rsp;
        struct ocrdma_mqe *cmd;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd));
        if (!cmd)
                return status;
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;
        rsp = (struct ocrdma_mbx_query_config *)cmd;
        ocrdma_get_attr(dev, &dev->attr, rsp);
mbx_err:
        kfree(cmd);
        return status;
}

int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
        int status = -ENOMEM;
        struct ocrdma_alloc_pd *cmd;
        struct ocrdma_alloc_pd_rsp *rsp;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD, sizeof(*cmd));
        if (!cmd)
                return status;
        if (pd->dpp_enabled)
                cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;
        rsp = (struct ocrdma_alloc_pd_rsp *)cmd;
        pd->id = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_PDID_MASK;
        if (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) {
                pd->dpp_enabled = true;
                pd->dpp_page = rsp->dpp_page_pdid >>
                    OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
        } else {
                pd->dpp_enabled = false;
                pd->num_dpp_qp = 0;
        }
mbx_err:
        kfree(cmd);
        return status;
}

int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
        int status = -ENOMEM;
        struct ocrdma_dealloc_pd *cmd;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD, sizeof(*cmd));
        if (!cmd)
                return status;
        cmd->id = pd->id;
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        kfree(cmd);
        return status;
}

static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
                               int *num_pages, int *page_size)
{
        int i;
        int mem_size;

        *num_entries = roundup_pow_of_two(*num_entries);
        mem_size = *num_entries * entry_size;
        /* find the lowest page-size multiplier that fits */
        for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
                if (mem_size <= (OCRDMA_Q_PAGE_BASE_SIZE << i))
                        break;
        }
        if (i >= OCRDMA_MAX_Q_PAGE_SIZE_CNT)
                return -EINVAL;
        mem_size = roundup(mem_size,
                ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES));
        *num_pages =
            mem_size / ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
        *page_size = ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
        *num_entries = mem_size / entry_size;
        return 0;
}

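/* Worked example, assuming OCRDMA_Q_PAGE_BASE_SIZE = 4096 and
 * OCRDMA_MAX_Q_PAGES = 8 (the actual values live in the SLI header):
 * a request for 100 entries of 64 bytes rounds up to 128 entries =
 * 8192 bytes, which fits in (4096 << 1); the multiplier then gives a
 * 1024-byte hardware page, so *num_pages = 8, *page_size = 1024 and
 * *num_entries = 128.
 */
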
static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
{
        int i;
        int status = 0;
        int max_ah;
        struct ocrdma_create_ah_tbl *cmd;
        struct ocrdma_create_ah_tbl_rsp *rsp;
        struct pci_dev *pdev = dev->nic_info.pdev;
        dma_addr_t pa;
        struct ocrdma_pbe *pbes;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_AH_TBL, sizeof(*cmd));
        if (!cmd)
                return status;

        max_ah = OCRDMA_MAX_AH;
        dev->av_tbl.size = sizeof(struct ocrdma_av) * max_ah;

        /* number of PBEs in PBL */
        cmd->ah_conf = (OCRDMA_AH_TBL_PAGES <<
                        OCRDMA_CREATE_AH_NUM_PAGES_SHIFT) &
                        OCRDMA_CREATE_AH_NUM_PAGES_MASK;

        /* page size */
        for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
                if (PAGE_SIZE == (OCRDMA_MIN_Q_PAGE_SIZE << i))
                        break;
        }
        cmd->ah_conf |= (i << OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT) &
                        OCRDMA_CREATE_AH_PAGE_SIZE_MASK;

        /* ah_entry size */
        cmd->ah_conf |= (sizeof(struct ocrdma_av) <<
                         OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT) &
                        OCRDMA_CREATE_AH_ENTRY_SIZE_MASK;

        dev->av_tbl.pbl.va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
                                                &dev->av_tbl.pbl.pa,
                                                GFP_KERNEL);
        if (dev->av_tbl.pbl.va == NULL)
                goto mem_err;

        dev->av_tbl.va = dma_alloc_coherent(&pdev->dev, dev->av_tbl.size,
                                            &pa, GFP_KERNEL);
        if (dev->av_tbl.va == NULL)
                goto mem_err_ah;
        dev->av_tbl.pa = pa;
        dev->av_tbl.num_ah = max_ah;
        memset(dev->av_tbl.va, 0, dev->av_tbl.size);

        pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;
        for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {
                pbes[i].pa_lo = (u32) (pa & 0xffffffff);
                pbes[i].pa_hi = (u32) upper_32_bits(pa);
                pa += PAGE_SIZE;
        }
        cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF);
        cmd->tbl_addr[0].hi = (u32)upper_32_bits(dev->av_tbl.pbl.pa);
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;
        rsp = (struct ocrdma_create_ah_tbl_rsp *)cmd;
        dev->av_tbl.ahid = rsp->ahid & 0xFFFF;
        kfree(cmd);
        return 0;

mbx_err:
        dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
                          dev->av_tbl.pa);
        dev->av_tbl.va = NULL;
mem_err_ah:
        dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
                          dev->av_tbl.pbl.pa);
        dev->av_tbl.pbl.va = NULL;
        dev->av_tbl.size = 0;
mem_err:
        kfree(cmd);
        return status;
}

static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)
{
        struct ocrdma_delete_ah_tbl *cmd;
        struct pci_dev *pdev = dev->nic_info.pdev;

        if (dev->av_tbl.va == NULL)
                return;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_AH_TBL, sizeof(*cmd));
        if (!cmd)
                return;
        cmd->ahid = dev->av_tbl.ahid;

        ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
                          dev->av_tbl.pa);
        dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
                          dev->av_tbl.pbl.pa);
        kfree(cmd);
}

/* Multiple CQs share an EQ. This routine returns the least-used EQ to
 * associate with a CQ, distributing interrupt processing and CPU load
 * across the available EQs, vectors, and hence CPUs.
 */
static u16 ocrdma_bind_eq(struct ocrdma_dev *dev)
{
        int i, selected_eq = 0, cq_cnt = 0;
        u16 eq_id;

        mutex_lock(&dev->dev_lock);
        cq_cnt = dev->qp_eq_tbl[0].cq_cnt;
        eq_id = dev->qp_eq_tbl[0].q.id;
        /* find the EQ which has the least number of
         * CQs associated with it.
         */
        for (i = 0; i < dev->eq_cnt; i++) {
                if (dev->qp_eq_tbl[i].cq_cnt < cq_cnt) {
                        cq_cnt = dev->qp_eq_tbl[i].cq_cnt;
                        eq_id = dev->qp_eq_tbl[i].q.id;
                        selected_eq = i;
                }
        }
        dev->qp_eq_tbl[selected_eq].cq_cnt += 1;
        mutex_unlock(&dev->dev_lock);
        return eq_id;
}

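/* ocrdma_bind_eq()/ocrdma_unbind_eq() are paired over the CQ lifetime:
 * ocrdma_mbx_create_cq() binds the least-loaded EQ and records it in
 * cq->eqn, and ocrdma_mbx_destroy_cq() (or the create error path) drops
 * the reference again via ocrdma_unbind_eq(dev, cq->eqn).
 */
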
static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
{
        int i;

        mutex_lock(&dev->dev_lock);
        for (i = 0; i < dev->eq_cnt; i++) {
                if (dev->qp_eq_tbl[i].q.id != eq_id)
                        continue;
                dev->qp_eq_tbl[i].cq_cnt -= 1;
                break;
        }
        mutex_unlock(&dev->dev_lock);
}

int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
                         int entries, int dpp_cq)
{
        int status = -ENOMEM;
        int max_hw_cqe;
        struct pci_dev *pdev = dev->nic_info.pdev;
        struct ocrdma_create_cq *cmd;
        struct ocrdma_create_cq_rsp *rsp;
        u32 hw_pages, cqe_size, page_size, cqe_count;

        if (entries > dev->attr.max_cqe) {
                pr_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
                       __func__, dev->id, dev->attr.max_cqe, entries);
                return -EINVAL;
        }
        if (dpp_cq && (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY))
                return -EINVAL;

        if (dpp_cq) {
                cq->max_hw_cqe = 1;
                max_hw_cqe = 1;
                cqe_size = OCRDMA_DPP_CQE_SIZE;
                hw_pages = 1;
        } else {
                cq->max_hw_cqe = dev->attr.max_cqe;
                max_hw_cqe = dev->attr.max_cqe;
                cqe_size = sizeof(struct ocrdma_cqe);
                hw_pages = OCRDMA_CREATE_CQ_MAX_PAGES;
        }

        cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE);

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_CQ, sizeof(*cmd));
        if (!cmd)
                return -ENOMEM;
        ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
                        OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
        cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
        if (!cq->va) {
                status = -ENOMEM;
                goto mem_err;
        }
        memset(cq->va, 0, cq->len);
        page_size = cq->len / hw_pages;
        cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
            OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
        cmd->cmd.pgsz_pgcnt |= hw_pages;
        cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;

        cq->eqn = ocrdma_bind_eq(dev);
        cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
        cqe_count = cq->len / cqe_size;
        if (cqe_count > 1024) {
                /* Set cnt to 3 to indicate more than 1024 cq entries */
                cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
        } else {
                u8 count = 0;
                switch (cqe_count) {
                case 256:
                        count = 0;
                        break;
                case 512:
                        count = 1;
                        break;
                case 1024:
                        count = 2;
                        break;
                default:
                        goto mbx_err;
                }
                cmd->cmd.ev_cnt_flags |= (count << OCRDMA_CREATE_CQ_CNT_SHIFT);
        }
        /* shared eq between all the consumer cqs. */
        cmd->cmd.eqn = cq->eqn;
        if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
                if (dpp_cq)
                        cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
                            OCRDMA_CREATE_CQ_TYPE_SHIFT;
                cq->phase_change = false;
                cmd->cmd.cqe_count = (cq->len / cqe_size);
        } else {
                cmd->cmd.cqe_count = (cq->len / cqe_size) - 1;
                cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
                cq->phase_change = true;
        }

        ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;

        rsp = (struct ocrdma_create_cq_rsp *)cmd;
        cq->id = (u16) (rsp->rsp.cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
        kfree(cmd);
        return 0;
mbx_err:
        ocrdma_unbind_eq(dev, cq->eqn);
        dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
mem_err:
        kfree(cmd);
        return status;
}

int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
{
        int status = -ENOMEM;
        struct ocrdma_destroy_cq *cmd;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd));
        if (!cmd)
                return status;
        ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ,
                        OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

        cmd->bypass_flush_qid |=
            (cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
            OCRDMA_DESTROY_CQ_QID_MASK;

        ocrdma_unbind_eq(dev, cq->eqn);
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;
        dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
mbx_err:
        kfree(cmd);
        return status;
}

int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
                          u32 pdid, int addr_check)
{
        int status = -ENOMEM;
        struct ocrdma_alloc_lkey *cmd;
        struct ocrdma_alloc_lkey_rsp *rsp;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_LKEY, sizeof(*cmd));
        if (!cmd)
                return status;
        cmd->pdid = pdid;
        cmd->pbl_sz_flags |= addr_check;
        cmd->pbl_sz_flags |= (hwmr->fr_mr << OCRDMA_ALLOC_LKEY_FMR_SHIFT);
        cmd->pbl_sz_flags |=
            (hwmr->remote_wr << OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT);
        cmd->pbl_sz_flags |=
            (hwmr->remote_rd << OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT);
        cmd->pbl_sz_flags |=
            (hwmr->local_wr << OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT);
        cmd->pbl_sz_flags |=
            (hwmr->remote_atomic << OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT);
        cmd->pbl_sz_flags |=
            (hwmr->num_pbls << OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT);

        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;
        rsp = (struct ocrdma_alloc_lkey_rsp *)cmd;
        hwmr->lkey = rsp->lrkey;
mbx_err:
        kfree(cmd);
        return status;
}

int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
{
        int status = -ENOMEM;
        struct ocrdma_dealloc_lkey *cmd;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd));
        if (!cmd)
                return -ENOMEM;
        cmd->lkey = lkey;
        cmd->rsvd_frmr = fr_mr ? 1 : 0;
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        kfree(cmd);
        return status;
}

static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
                             u32 pdid, u32 pbl_cnt, u32 pbe_size, u32 last)
{
        int status = -ENOMEM;
        int i;
        struct ocrdma_reg_nsmr *cmd;
        struct ocrdma_reg_nsmr_rsp *rsp;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd));
        if (!cmd)
                return -ENOMEM;
        cmd->num_pbl_pdid =
            pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT);

        cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr <<
                                    OCRDMA_REG_NSMR_REMOTE_WR_SHIFT);
        cmd->flags_hpage_pbe_sz |= (hwmr->remote_rd <<
                                    OCRDMA_REG_NSMR_REMOTE_RD_SHIFT);
        cmd->flags_hpage_pbe_sz |= (hwmr->local_wr <<
                                    OCRDMA_REG_NSMR_LOCAL_WR_SHIFT);
        cmd->flags_hpage_pbe_sz |= (hwmr->remote_atomic <<
                                    OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT);
        cmd->flags_hpage_pbe_sz |= (hwmr->mw_bind <<
                                    OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT);
        cmd->flags_hpage_pbe_sz |= (last << OCRDMA_REG_NSMR_LAST_SHIFT);

        cmd->flags_hpage_pbe_sz |= (hwmr->pbe_size / OCRDMA_MIN_HPAGE_SIZE);
        cmd->flags_hpage_pbe_sz |= (hwmr->pbl_size / OCRDMA_MIN_HPAGE_SIZE) <<
            OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT;
        cmd->totlen_low = hwmr->len;
        cmd->totlen_high = upper_32_bits(hwmr->len);
        cmd->fbo_low = (u32) (hwmr->fbo & 0xffffffff);
        cmd->fbo_high = (u32) upper_32_bits(hwmr->fbo);
        cmd->va_loaddr = (u32) hwmr->va;
        cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va);

        for (i = 0; i < pbl_cnt; i++) {
                cmd->pbl[i].lo = (u32) (hwmr->pbl_table[i].pa & 0xffffffff);
                cmd->pbl[i].hi = upper_32_bits(hwmr->pbl_table[i].pa);
        }
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;
        rsp = (struct ocrdma_reg_nsmr_rsp *)cmd;
        hwmr->lkey = rsp->lrkey;
mbx_err:
        kfree(cmd);
        return status;
}

static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev,
                                  struct ocrdma_hw_mr *hwmr, u32 pbl_cnt,
                                  u32 pbl_offset, u32 last)
{
        int status = -ENOMEM;
        int i;
        struct ocrdma_reg_nsmr_cont *cmd;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR_CONT, sizeof(*cmd));
        if (!cmd)
                return -ENOMEM;
        cmd->lrkey = hwmr->lkey;
        cmd->num_pbl_offset = (pbl_cnt << OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT) |
            (pbl_offset & OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK);
        cmd->last = last << OCRDMA_REG_NSMR_CONT_LAST_SHIFT;

        for (i = 0; i < pbl_cnt; i++) {
                cmd->pbl[i].lo =
                    (u32) (hwmr->pbl_table[i + pbl_offset].pa & 0xffffffff);
                cmd->pbl[i].hi =
                    upper_32_bits(hwmr->pbl_table[i + pbl_offset].pa);
        }
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        kfree(cmd);
        return status;
}

int ocrdma_reg_mr(struct ocrdma_dev *dev,
                  struct ocrdma_hw_mr *hwmr, u32 pdid, int acc)
{
        int status;
        u32 last = 0;
        u32 cur_pbl_cnt, pbl_offset;
        u32 pending_pbl_cnt = hwmr->num_pbls;

        pbl_offset = 0;
        cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
        if (cur_pbl_cnt == pending_pbl_cnt)
                last = 1;

        status = ocrdma_mbx_reg_mr(dev, hwmr, pdid,
                                   cur_pbl_cnt, hwmr->pbe_size, last);
        if (status) {
                pr_err("%s() status=%d\n", __func__, status);
                return status;
        }
        /* if there are no more PBLs to register, exit. */
        if (last)
                return 0;

        while (!last) {
                pbl_offset += cur_pbl_cnt;
                pending_pbl_cnt -= cur_pbl_cnt;
                cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
                /* if we reach the end of the PBLs, we need to set the last
                 * bit, indicating no more PBLs to register for this memory key.
                 */
                if (cur_pbl_cnt == pending_pbl_cnt)
                        last = 1;

                status = ocrdma_mbx_reg_mr_cont(dev, hwmr, cur_pbl_cnt,
                                                pbl_offset, last);
                if (status)
                        break;
        }
        if (status)
                pr_err("%s() err. status=%d\n", __func__, status);

        return status;
}

bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
{
        struct ocrdma_qp *tmp;
        bool found = false;

        list_for_each_entry(tmp, &cq->sq_head, sq_entry) {
                if (qp == tmp) {
                        found = true;
                        break;
                }
        }
        return found;
}

bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
{
        struct ocrdma_qp *tmp;
        bool found = false;

        list_for_each_entry(tmp, &cq->rq_head, rq_entry) {
                if (qp == tmp) {
                        found = true;
                        break;
                }
        }
        return found;
}

void ocrdma_flush_qp(struct ocrdma_qp *qp)
{
        bool found;
        unsigned long flags;

        spin_lock_irqsave(&qp->dev->flush_q_lock, flags);
        found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
        if (!found)
                list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
        if (!qp->srq) {
                found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
                if (!found)
                        list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
        }
        spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
}

int ocrdma_qp_state_machine(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
                            enum ib_qp_state *old_ib_state)
{
        unsigned long flags;
        int status = 0;
        enum ocrdma_qp_state new_state;
        new_state = get_ocrdma_qp_state(new_ib_state);

        /* sync with wqe and rqe posting */
        spin_lock_irqsave(&qp->q_lock, flags);

        if (old_ib_state)
                *old_ib_state = get_ibqp_state(qp->state);
        if (new_state == qp->state) {
                spin_unlock_irqrestore(&qp->q_lock, flags);
                return 1;
        }

        switch (qp->state) {
        case OCRDMA_QPS_RST:
                switch (new_state) {
                case OCRDMA_QPS_RST:
                case OCRDMA_QPS_INIT:
                        break;
                default:
                        status = -EINVAL;
                        break;
                }
                break;
        case OCRDMA_QPS_INIT:
                /* qps: INIT->XXX */
                switch (new_state) {
                case OCRDMA_QPS_INIT:
                case OCRDMA_QPS_RTR:
                        break;
                case OCRDMA_QPS_ERR:
                        ocrdma_flush_qp(qp);
                        break;
                default:
                        status = -EINVAL;
                        break;
                }
                break;
        case OCRDMA_QPS_RTR:
                /* qps: RTR->XXX */
                switch (new_state) {
                case OCRDMA_QPS_RTS:
                        break;
                case OCRDMA_QPS_ERR:
                        ocrdma_flush_qp(qp);
                        break;
                default:
                        status = -EINVAL;
                        break;
                }
                break;
        case OCRDMA_QPS_RTS:
                /* qps: RTS->XXX */
                switch (new_state) {
                case OCRDMA_QPS_SQD:
                case OCRDMA_QPS_SQE:
                        break;
                case OCRDMA_QPS_ERR:
                        ocrdma_flush_qp(qp);
                        break;
                default:
                        status = -EINVAL;
                        break;
                }
                break;
        case OCRDMA_QPS_SQD:
                /* qps: SQD->XXX */
                switch (new_state) {
                case OCRDMA_QPS_RTS:
                case OCRDMA_QPS_SQE:
                case OCRDMA_QPS_ERR:
                        break;
                default:
                        status = -EINVAL;
                        break;
                }
                break;
        case OCRDMA_QPS_SQE:
                switch (new_state) {
                case OCRDMA_QPS_RTS:
                case OCRDMA_QPS_ERR:
                        break;
                default:
                        status = -EINVAL;
                        break;
                }
                break;
        case OCRDMA_QPS_ERR:
                /* qps: ERR->XXX */
                switch (new_state) {
                case OCRDMA_QPS_RST:
                        break;
                default:
                        status = -EINVAL;
                        break;
                }
                break;
        default:
                status = -EINVAL;
                break;
        }
        if (!status)
                qp->state = new_state;

        spin_unlock_irqrestore(&qp->q_lock, flags);
        return status;
}

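/* Summary of the transitions accepted above (anything else fails with
 * -EINVAL; entering ERR from INIT/RTR/RTS also flushes the QP):
 *      RST  -> RST, INIT
 *      INIT -> INIT, RTR, ERR
 *      RTR  -> RTS, ERR
 *      RTS  -> SQD, SQE, ERR
 *      SQD  -> RTS, SQE, ERR
 *      SQE  -> RTS, ERR
 *      ERR  -> RST
 */
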
static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
{
        u32 flags = 0;

        if (qp->cap_flags & OCRDMA_QP_INB_RD)
                flags |= OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK;
        if (qp->cap_flags & OCRDMA_QP_INB_WR)
                flags |= OCRDMA_CREATE_QP_REQ_INB_WREN_MASK;
        if (qp->cap_flags & OCRDMA_QP_MW_BIND)
                flags |= OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK;
        if (qp->cap_flags & OCRDMA_QP_LKEY0)
                flags |= OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK;
        if (qp->cap_flags & OCRDMA_QP_FAST_REG)
                flags |= OCRDMA_CREATE_QP_REQ_FMR_EN_MASK;
        return flags;
}

static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
                                       struct ib_qp_init_attr *attrs,
                                       struct ocrdma_qp *qp)
{
        int status;
        u32 len, hw_pages, hw_page_size;
        dma_addr_t pa;
        struct ocrdma_dev *dev = qp->dev;
        struct pci_dev *pdev = dev->nic_info.pdev;
        u32 max_wqe_allocated;
        u32 max_sges = attrs->cap.max_send_sge;

        max_wqe_allocated = attrs->cap.max_send_wr;
        /* need to allocate one extra WQE for the GEN1 family */
        if (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY)
                max_wqe_allocated += 1;

        status = ocrdma_build_q_conf(&max_wqe_allocated,
                dev->attr.wqe_size, &hw_pages, &hw_page_size);
        if (status) {
                pr_err("%s() req. max_send_wr=0x%x\n", __func__,
                       max_wqe_allocated);
                return -EINVAL;
        }
        qp->sq.max_cnt = max_wqe_allocated;
        len = (hw_pages * hw_page_size);

        qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
        if (!qp->sq.va)
                return -EINVAL;
        memset(qp->sq.va, 0, len);
        qp->sq.len = len;
        qp->sq.pa = pa;
        qp->sq.entry_size = dev->attr.wqe_size;
        ocrdma_build_q_pages(&cmd->wq_addr[0], hw_pages, pa, hw_page_size);

        cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
                               << OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT);
        cmd->num_wq_rq_pages |= (hw_pages <<
                                 OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT) &
            OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK;
        cmd->max_sge_send_write |= (max_sges <<
                                    OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT) &
            OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK;
        cmd->max_sge_send_write |= (max_sges <<
                                    OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT) &
            OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK;
        cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) <<
                             OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT) &
            OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK;
        cmd->wqe_rqe_size |= (dev->attr.wqe_size <<
                              OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT) &
            OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK;
        return 0;
}

static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
                                       struct ib_qp_init_attr *attrs,
                                       struct ocrdma_qp *qp)
{
        int status;
        u32 len, hw_pages, hw_page_size;
        dma_addr_t pa = 0;
        struct ocrdma_dev *dev = qp->dev;
        struct pci_dev *pdev = dev->nic_info.pdev;
        u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;

        status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size,
                                     &hw_pages, &hw_page_size);
        if (status) {
                pr_err("%s() req. max_recv_wr=0x%x\n", __func__,
                       attrs->cap.max_recv_wr + 1);
                return -EINVAL;
        }
        qp->rq.max_cnt = max_rqe_allocated;
        len = (hw_pages * hw_page_size);

        qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
        if (!qp->rq.va)
                return -ENOMEM;
        memset(qp->rq.va, 0, len);
        qp->rq.pa = pa;
        qp->rq.len = len;
        qp->rq.entry_size = dev->attr.rqe_size;

        ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
        cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
                               OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT);
        cmd->num_wq_rq_pages |=
            (hw_pages << OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT) &
            OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK;
        cmd->max_sge_recv_flags |= (attrs->cap.max_recv_sge <<
                                    OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT) &
            OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK;
        cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) <<
                             OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT) &
            OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK;
        cmd->wqe_rqe_size |= (dev->attr.rqe_size <<
                              OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT) &
            OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK;
        return 0;
}

static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
                                         struct ocrdma_pd *pd,
                                         struct ocrdma_qp *qp,
                                         u8 enable_dpp_cq, u16 dpp_cq_id)
{
        pd->num_dpp_qp--;
        qp->dpp_enabled = true;
        cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
        if (!enable_dpp_cq)
                return;
        cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
        cmd->dpp_credits_cqid = dpp_cq_id;
        cmd->dpp_credits_cqid |= OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT <<
            OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT;
}

static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
                                        struct ocrdma_qp *qp)
{
        struct ocrdma_dev *dev = qp->dev;
        struct pci_dev *pdev = dev->nic_info.pdev;
        dma_addr_t pa = 0;
        int ird_page_size = dev->attr.ird_page_size;
        int ird_q_len = dev->attr.num_ird_pages * ird_page_size;

        if (dev->attr.ird == 0)
                return 0;

        qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len,
                                          &pa, GFP_KERNEL);
        if (!qp->ird_q_va)
                return -ENOMEM;
        memset(qp->ird_q_va, 0, ird_q_len);
        ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
                             pa, ird_page_size);
        return 0;
}

static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
                                     struct ocrdma_qp *qp,
                                     struct ib_qp_init_attr *attrs,
                                     u16 *dpp_offset, u16 *dpp_credit_lmt)
{
        u32 max_wqe_allocated, max_rqe_allocated;
        qp->id = rsp->qp_id & OCRDMA_CREATE_QP_RSP_QP_ID_MASK;
        qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK;
        qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT;
        qp->max_ird = rsp->max_ord_ird & OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK;
        qp->max_ord = (rsp->max_ord_ird >> OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT);
        qp->dpp_enabled = false;
        if (rsp->dpp_response & OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK) {
                qp->dpp_enabled = true;
                *dpp_credit_lmt = (rsp->dpp_response &
                                   OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK) >>
                    OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT;
                *dpp_offset = (rsp->dpp_response &
                               OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK) >>
                    OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT;
        }
        max_wqe_allocated =
            rsp->max_wqe_rqe >> OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT;
        max_wqe_allocated = 1 << max_wqe_allocated;
        max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);

        qp->sq.max_cnt = max_wqe_allocated;
        qp->sq.max_wqe_idx = max_wqe_allocated - 1;

        if (!attrs->srq) {
                qp->rq.max_cnt = max_rqe_allocated;
                qp->rq.max_wqe_idx = max_rqe_allocated - 1;
        }
}

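/* The firmware returns the WQE/RQE ring depths log2-encoded in
 * max_wqe_rqe (SQ depth in the high half, RQ depth in the low half),
 * hence the 1 << value conversions above to recover the actual sizes.
 */
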
int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
                         u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
                         u16 *dpp_credit_lmt)
{
        int status = -ENOMEM;
        u32 flags = 0;
        struct ocrdma_dev *dev = qp->dev;
        struct ocrdma_pd *pd = qp->pd;
        struct pci_dev *pdev = dev->nic_info.pdev;
        struct ocrdma_cq *cq;
        struct ocrdma_create_qp_req *cmd;
        struct ocrdma_create_qp_rsp *rsp;
        int qptype;

        switch (attrs->qp_type) {
        case IB_QPT_GSI:
                qptype = OCRDMA_QPT_GSI;
                break;
        case IB_QPT_RC:
                qptype = OCRDMA_QPT_RC;
                break;
        case IB_QPT_UD:
                qptype = OCRDMA_QPT_UD;
                break;
        default:
                return -EINVAL;
        }

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
        if (!cmd)
                return status;
        cmd->type_pgsz_pdn |= (qptype << OCRDMA_CREATE_QP_REQ_QPT_SHIFT) &
            OCRDMA_CREATE_QP_REQ_QPT_MASK;
        status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp);
        if (status)
                goto sq_err;

        if (attrs->srq) {
                struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq);
                cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK;
                cmd->rq_addr[0].lo = srq->id;
                qp->srq = srq;
        } else {
                status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp);
                if (status)
                        goto rq_err;
        }

        status = ocrdma_set_create_qp_ird_cmd(cmd, qp);
        if (status)
                goto mbx_err;

        cmd->type_pgsz_pdn |= (pd->id << OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT) &
            OCRDMA_CREATE_QP_REQ_PD_ID_MASK;

        flags = ocrdma_set_create_qp_mbx_access_flags(qp);

        cmd->max_sge_recv_flags |= flags;
        cmd->max_ord_ird |= (dev->attr.max_ord_per_qp <<
                             OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT) &
            OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK;
        cmd->max_ord_ird |= (dev->attr.max_ird_per_qp <<
                             OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT) &
            OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK;
        cq = get_ocrdma_cq(attrs->send_cq);
        cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT) &
            OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK;
        qp->sq_cq = cq;
        cq = get_ocrdma_cq(attrs->recv_cq);
        cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT) &
            OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
        qp->rq_cq = cq;

        if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
            (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
                ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
                                             dpp_cq_id);
        }

        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;
        rsp = (struct ocrdma_create_qp_rsp *)cmd;
        ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt);
        qp->state = OCRDMA_QPS_RST;
        kfree(cmd);
        return 0;
mbx_err:
        if (qp->rq.va)
                dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
rq_err:
        pr_err("%s(%d) rq_err\n", __func__, dev->id);
        dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
sq_err:
        pr_err("%s(%d) sq_err\n", __func__, dev->id);
        kfree(cmd);
        return status;
}

int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
                        struct ocrdma_qp_params *param)
{
        int status = -ENOMEM;
        struct ocrdma_query_qp *cmd;
        struct ocrdma_query_qp_rsp *rsp;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*cmd));
        if (!cmd)
                return status;
        cmd->qp_id = qp->id;
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;
        rsp = (struct ocrdma_query_qp_rsp *)cmd;
        memcpy(param, &rsp->params, sizeof(struct ocrdma_qp_params));
mbx_err:
        kfree(cmd);
        return status;
}

int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid,
                        u8 *mac_addr)
{
        struct in6_addr in6;

        memcpy(&in6, dgid, sizeof(in6));
        if (rdma_is_multicast_addr(&in6)) {
                rdma_get_mcast_mac(&in6, mac_addr);
        } else if (rdma_link_local_addr(&in6)) {
                rdma_get_ll_mac(&in6, mac_addr);
        } else {
                pr_err("%s() fail to resolve mac_addr.\n", __func__);
                return -EINVAL;
        }
        return 0;
}

static int ocrdma_set_av_params(struct ocrdma_qp *qp,
                                struct ocrdma_modify_qp *cmd,
                                struct ib_qp_attr *attrs)
{
        int status;
        struct ib_ah_attr *ah_attr = &attrs->ah_attr;
        union ib_gid sgid;
        u32 vlan_id;
        u8 mac_addr[6];

        if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
                return -EINVAL;
        cmd->params.tclass_sq_psn |=
            (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
        cmd->params.rnt_rc_sl_fl |=
            (ah_attr->grh.flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
        cmd->params.hop_lmt_rq_psn |=
            (ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
        cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
        memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
               sizeof(cmd->params.dgid));
        status = ocrdma_query_gid(&qp->dev->ibdev, 1,
                                  ah_attr->grh.sgid_index, &sgid);
        if (status)
                return status;
        qp->sgid_idx = ah_attr->grh.sgid_index;
        memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
        ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]);
        cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
            (mac_addr[2] << 16) | (mac_addr[3] << 24);
        /* convert them to LE format. */
        ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
        ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
        cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
        vlan_id = rdma_get_vlan_id(&sgid);
        if (vlan_id && (vlan_id < 0x1000)) {
                cmd->params.vlan_dmac_b4_to_b5 |=
                    vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
                cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
        }
        return 0;
}

static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
				struct ocrdma_modify_qp *cmd,
				struct ib_qp_attr *attrs, int attr_mask,
				enum ib_qp_state old_qps)
{
	int status = 0;
	struct net_device *netdev = qp->dev->nic_info.netdev;
	int eth_mtu = iboe_get_mtu(netdev->mtu);

	if (attr_mask & IB_QP_PKEY_INDEX) {
		cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
					    OCRDMA_QP_PARAMS_PKEY_INDEX_MASK);
		cmd->flags |= OCRDMA_QP_PARA_PKEY_VALID;
	}
	if (attr_mask & IB_QP_QKEY) {
		qp->qkey = attrs->qkey;
		cmd->params.qkey = attrs->qkey;
		cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
	}
	if (attr_mask & IB_QP_AV) {
		status = ocrdma_set_av_params(qp, cmd, attrs);
		if (status)
			return status;
	} else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
		/* set the default mac address for UD, GSI QPs */
		cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
			(qp->dev->nic_info.mac_addr[1] << 8) |
			(qp->dev->nic_info.mac_addr[2] << 16) |
			(qp->dev->nic_info.mac_addr[3] << 24);
		cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
			(qp->dev->nic_info.mac_addr[5] << 8);
	}
	if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
	    attrs->en_sqd_async_notify) {
		cmd->params.max_sge_recv_flags |=
			OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC;
		cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
	}
	if (attr_mask & IB_QP_DEST_QPN) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->dest_qp_num &
			OCRDMA_QP_PARAMS_DEST_QPN_MASK);
		cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
	}
	if (attr_mask & IB_QP_PATH_MTU) {
		if (ib_mtu_enum_to_int(eth_mtu) <
		    ib_mtu_enum_to_int(attrs->path_mtu)) {
			status = -EINVAL;
			goto pmtu_err;
		}
		cmd->params.path_mtu_pkey_indx |=
			(ib_mtu_enum_to_int(attrs->path_mtu) <<
			 OCRDMA_QP_PARAMS_PATH_MTU_SHIFT) &
			OCRDMA_QP_PARAMS_PATH_MTU_MASK;
		cmd->flags |= OCRDMA_QP_PARA_PMTU_VALID;
	}
	if (attr_mask & IB_QP_TIMEOUT) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= attrs->timeout <<
			OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
		cmd->flags |= OCRDMA_QP_PARA_ACK_TO_VALID;
	}
	if (attr_mask & IB_QP_RETRY_CNT) {
		cmd->params.rnt_rc_sl_fl |= (attrs->retry_cnt <<
			OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT) &
			OCRDMA_QP_PARAMS_RETRY_CNT_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RETRY_CNT_VALID;
	}
	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		cmd->params.rnt_rc_sl_fl |= (attrs->min_rnr_timer <<
			OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT) &
			OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RNT_VALID;
	}
	if (attr_mask & IB_QP_RNR_RETRY) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->rnr_retry <<
			OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT) &
			OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RRC_VALID;
	}
	if (attr_mask & IB_QP_SQ_PSN) {
		cmd->params.tclass_sq_psn |= (attrs->sq_psn & 0x00ffffff);
		cmd->flags |= OCRDMA_QP_PARA_SQPSN_VALID;
	}
	if (attr_mask & IB_QP_RQ_PSN) {
		cmd->params.hop_lmt_rq_psn |= (attrs->rq_psn & 0x00ffffff);
		cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
	}
	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) {
			status = -EINVAL;
			goto pmtu_err;
		}
		qp->max_ord = attrs->max_rd_atomic;
		cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
	}
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) {
			status = -EINVAL;
			goto pmtu_err;
		}
		qp->max_ird = attrs->max_dest_rd_atomic;
		cmd->flags |= OCRDMA_QP_PARA_MAX_IRD_VALID;
	}
	cmd->params.max_ord_ird = (qp->max_ord <<
				OCRDMA_QP_PARAMS_MAX_ORD_SHIFT) |
				(qp->max_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK);
pmtu_err:
	return status;
}

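/* Post a MODIFY_QP mailbox command. The target state comes from
 * attrs->qp_state when IB_QP_STATE is set in attr_mask; otherwise the
 * currently cached qp->state is re-posted unchanged.
 */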
int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			 struct ib_qp_attr *attrs, int attr_mask,
			 enum ib_qp_state old_qps)
{
	int status = -ENOMEM;
	struct ocrdma_modify_qp *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_QP, sizeof(*cmd));
	if (!cmd)
		return status;

	cmd->params.id = qp->id;
	if (attr_mask & IB_QP_STATE) {
		cmd->params.max_sge_recv_flags |=
			(get_ocrdma_qp_state(attrs->qp_state) <<
			 OCRDMA_QP_PARAMS_STATE_SHIFT) &
			OCRDMA_QP_PARAMS_STATE_MASK;
		cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
	} else {
		cmd->params.max_sge_recv_flags |=
			(qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
			OCRDMA_QP_PARAMS_STATE_MASK;
	}
	status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps);
	if (status)
		goto mbx_err;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
mbx_err:
	kfree(cmd);
	return status;
}

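/* Destroy the QP in firmware, then release the SQ/RQ DMA memory.
 * The RQ buffer is skipped when the QP is attached to an SRQ, and a
 * DPP slot is returned to the PD's pool if one was in use.
 */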
int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -ENOMEM;
	struct ocrdma_destroy_qp *cmd;
	struct pci_dev *pdev = dev->nic_info.pdev;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_QP, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->qp_id = qp->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	kfree(cmd);
	if (qp->sq.va)
		dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
	if (!qp->srq && qp->rq.va)
		dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
	if (qp->dpp_enabled)
		qp->pd->num_dpp_qp++;
	return status;
}

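/* Create an SRQ: size the receive queue from srq_attr, allocate the
 * queue pages, and post a CREATE_SRQ mailbox command. The response
 * reports the RQE and SGE counts actually allocated by firmware,
 * which are cached back into the srq.
 */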
int ocrdma_mbx_create_srq(struct ocrdma_srq *srq,
			  struct ib_srq_init_attr *srq_attr,
			  struct ocrdma_pd *pd)
{
	int status = -ENOMEM;
	int hw_pages, hw_page_size;
	int len;
	struct ocrdma_create_srq_rsp *rsp;
	struct ocrdma_create_srq *cmd;
	dma_addr_t pa;
	struct ocrdma_dev *dev = srq->dev;
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 max_rqe_allocated;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;

	cmd->pgsz_pdid = pd->id & OCRDMA_CREATE_SRQ_PD_ID_MASK;
	max_rqe_allocated = srq_attr->attr.max_wr + 1;
	status = ocrdma_build_q_conf(&max_rqe_allocated,
				     dev->attr.rqe_size,
				     &hw_pages, &hw_page_size);
	if (status) {
		pr_err("%s() req. max_wr=0x%x\n", __func__,
		       srq_attr->attr.max_wr);
		status = -EINVAL;
		goto ret;
	}
	len = hw_pages * hw_page_size;
	srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
	if (!srq->rq.va) {
		status = -ENOMEM;
		goto ret;
	}
	ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);

	srq->rq.entry_size = dev->attr.rqe_size;
	srq->rq.pa = pa;
	srq->rq.len = len;
	srq->rq.max_cnt = max_rqe_allocated;

	cmd->max_sge_rqe = ilog2(max_rqe_allocated);
	cmd->max_sge_rqe |= srq_attr->attr.max_sge <<
				OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT;

	cmd->pgsz_pdid |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
				<< OCRDMA_CREATE_SRQ_PG_SZ_SHIFT);
	cmd->pages_rqe_sz |= (dev->attr.rqe_size
				<< OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT)
				& OCRDMA_CREATE_SRQ_RQE_SIZE_MASK;
	cmd->pages_rqe_sz |= hw_pages << OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT;

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_create_srq_rsp *)cmd;
	srq->id = rsp->id;
	srq->rq.dbid = rsp->id;
	max_rqe_allocated = ((rsp->max_sge_rqe_allocated &
			      OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK) >>
			     OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT);
	max_rqe_allocated = (1 << max_rqe_allocated);
	srq->rq.max_cnt = max_rqe_allocated;
	srq->rq.max_wqe_idx = max_rqe_allocated - 1;
	srq->rq.max_sges = (rsp->max_sge_rqe_allocated &
			OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK) >>
			OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT;
	goto ret;
mbx_err:
	dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa);
ret:
	kfree(cmd);
	return status;
}

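/* Update the SRQ limit (the receive-queue watermark below which the
 * SRQ limit-reached event is generated) via a mailbox command.
 */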
int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
{
	int status = -ENOMEM;
	struct ocrdma_modify_srq *cmd;
	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->id = srq->id;
	cmd->limit_max_rqe |= srq_attr->srq_limit <<
				OCRDMA_MODIFY_SRQ_LIMIT_SHIFT;
	status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
	kfree(cmd);
	return status;
}

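/* Query firmware for the current SRQ attributes and translate the
 * response fields into the caller's ib_srq_attr.
 */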
int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
{
	int status = -ENOMEM;
	struct ocrdma_query_srq *cmd;
	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->id = srq->rq.dbid;
	status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
	if (status == 0) {
		struct ocrdma_query_srq_rsp *rsp =
			(struct ocrdma_query_srq_rsp *)cmd;
		srq_attr->max_sge =
			rsp->srq_lmt_max_sge &
			OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK;
		srq_attr->max_wr =
			rsp->max_rqe_pdid >> OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT;
		srq_attr->srq_limit = rsp->srq_lmt_max_sge >>
			OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT;
	}
	kfree(cmd);
	return status;
}

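/* Tear down the SRQ in firmware and free its receive-queue DMA memory. */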
int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
{
	int status = -ENOMEM;
	struct ocrdma_destroy_srq *cmd;
	struct pci_dev *pdev = dev->nic_info.pdev;
	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->id = srq->id;
	status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
	if (srq->rq.va)
		dma_free_coherent(&pdev->dev, srq->rq.len,
				  srq->rq.va, srq->rq.pa);
	kfree(cmd);
	return status;
}

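/* Allocate an address handle entry by scanning the device AV table for
 * the first slot not marked OCRDMA_AV_VALID; the table lock prevents
 * concurrent allocations from claiming the same slot.
 */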
int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
{
	int i;
	int status = -EINVAL;
	struct ocrdma_av *av;
	unsigned long flags;

	av = dev->av_tbl.va;
	spin_lock_irqsave(&dev->av_tbl.lock, flags);
	for (i = 0; i < dev->av_tbl.num_ah; i++) {
		if (av->valid == 0) {
			av->valid = OCRDMA_AV_VALID;
			ah->av = av;
			ah->id = i;
			status = 0;
			break;
		}
		av++;
	}
	if (i == dev->av_tbl.num_ah)
		status = -EAGAIN;
	spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
	return status;
}

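/* Return an AV table slot to the pool by clearing its valid flag. */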
int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->av_tbl.lock, flags);
	ah->av->valid = 0;
	spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
	return 0;
}

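/* Set up the control-path (mailbox) event queue and request its
 * interrupt. Under INTx the IRQ line is shared, so IRQF_SHARED is set.
 */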
static int ocrdma_create_mq_eq(struct ocrdma_dev *dev)
{
	int status;
	int irq;
	unsigned long flags = 0;
	int num_eq = 0;

	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
		flags = IRQF_SHARED;
	} else {
		num_eq = dev->nic_info.msix.num_vectors -
				dev->nic_info.msix.start_vector;
		/* a minimum of two vectors/eqs is required for rdma to
		 * work: one for the control path and one for the data path.
		 */
		if (num_eq < 1)
			return -EBUSY;
	}

	status = ocrdma_create_eq(dev, &dev->meq, OCRDMA_EQ_LEN);
	if (status)
		return status;
	sprintf(dev->meq.irq_name, "ocrdma_mq%d", dev->id);
	irq = ocrdma_get_irq(dev, &dev->meq);
	status = request_irq(irq, ocrdma_irq_handler, flags, dev->meq.irq_name,
			     &dev->meq);
	if (status)
		_ocrdma_destroy_eq(dev, &dev->meq);
	return status;
}

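/* Create the data-path event queues, one per available MSI-X vector,
 * capped at the number of online CPUs. Partial success is tolerated:
 * a single EQ is enough for the data path to function.
 */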
static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev)
{
	int num_eq, i, status = 0;
	int irq;
	unsigned long flags = 0;

	num_eq = dev->nic_info.msix.num_vectors -
			dev->nic_info.msix.start_vector;
	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
		num_eq = 1;
		flags = IRQF_SHARED;
	} else {
		num_eq = min_t(u32, num_eq, num_online_cpus());
	}

	dev->qp_eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL);
	if (!dev->qp_eq_tbl)
		return -ENOMEM;

	for (i = 0; i < num_eq; i++) {
		status = ocrdma_create_eq(dev, &dev->qp_eq_tbl[i],
					  OCRDMA_EQ_LEN);
		if (status) {
			status = -EINVAL;
			break;
		}
		sprintf(dev->qp_eq_tbl[i].irq_name, "ocrdma_qp%d-%d",
			dev->id, i);
		irq = ocrdma_get_irq(dev, &dev->qp_eq_tbl[i]);
		status = request_irq(irq, ocrdma_irq_handler, flags,
				     dev->qp_eq_tbl[i].irq_name,
				     &dev->qp_eq_tbl[i]);
		if (status) {
			_ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
			status = -EINVAL;
			break;
		}
		dev->eq_cnt += 1;
	}
	/* one eq is sufficient for data path to work */
	if (dev->eq_cnt >= 1)
		return 0;
	ocrdma_destroy_qp_eqs(dev);
	return status;
}

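/* Bring-up sequence: control-path EQ, data-path EQs, mailbox queue,
 * then the firmware queries and the AH table. Each failure unwinds
 * everything set up before it.
 */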
int ocrdma_init_hw(struct ocrdma_dev *dev)
{
	int status;
	/* set up control path eq */
	status = ocrdma_create_mq_eq(dev);
	if (status)
		return status;
	/* set up data path eq */
	status = ocrdma_create_qp_eqs(dev);
	if (status)
		goto qpeq_err;
	status = ocrdma_create_mq(dev);
	if (status)
		goto mq_err;
	status = ocrdma_mbx_query_fw_config(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_query_dev(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_query_fw_ver(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_create_ah_tbl(dev);
	if (status)
		goto conf_err;
	return 0;

conf_err:
	ocrdma_destroy_mq(dev);
mq_err:
	ocrdma_destroy_qp_eqs(dev);
qpeq_err:
	ocrdma_destroy_eq(dev, &dev->meq);
	pr_err("%s() status=%d\n", __func__, status);
	return status;
}

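/* Tear down in the reverse order of ocrdma_init_hw(). */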
void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
{
	ocrdma_mbx_delete_ah_tbl(dev);

	/* cleanup the data path eqs */
	ocrdma_destroy_qp_eqs(dev);

	/* cleanup the control path */
	ocrdma_destroy_mq(dev);
	ocrdma_destroy_eq(dev, &dev->meq);
}