/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *                                                                 *
 * Contact Information:                                            *
 * linux-drivers@emulex.com                                        *
 *                                                                 *
 * Costa Mesa, CA 92626                                            *
 *******************************************************************/

#ifndef __OCRDMA_H__
#define __OCRDMA_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "be_roce.h"
#include "ocrdma_sli.h"

#define OCRDMA_ROCE_DRV_VERSION "10.2.145.0u"

#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"

#define OCRDMA_MAX_AH 512

#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)

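/*
 * OCRDMA_UVERBS() turns a user-verbs command name into its bit in
 * ib_device->uverbs_cmd_mask. A hedged usage sketch (an illustrative
 * subset, not the driver's actual mask):
 *
 *	dev->ibdev.uverbs_cmd_mask = OCRDMA_UVERBS(GET_CONTEXT) |
 *				     OCRDMA_UVERBS(QUERY_DEVICE) |
 *				     OCRDMA_UVERBS(CREATE_CQ) |
 *				     OCRDMA_UVERBS(CREATE_QP);
 */
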
struct ocrdma_dev_attr {
	int max_pages_per_frmr;
	u8 cq_overflow_detect;
	u8 local_ca_ack_delay;
};

struct ocrdma_queue_info {
	u16 entry_size;		/* size of an element in the queue */
	u16 id;			/* qid, where to ring the doorbell */
};

struct ocrdma_eq {
	struct ocrdma_queue_info q;
	struct ocrdma_dev *dev;
};

struct ocrdma_mq {
	struct ocrdma_queue_info sq;
	struct ocrdma_queue_info cq;
};

struct mqe_ctx {
	struct mutex lock;	/* serializes mailbox commands on the MQ */
	wait_queue_head_t cmd_wait;
};

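/*
 * Mailbox commands are serialized: lock allows only one outstanding MQE
 * at a time, and cmd_wait lets the submitter sleep until the MQ
 * completion handler signals it. A hedged sketch of the pattern
 * (post_mqe() and the cmd_done flag are hypothetical, for illustration
 * only):
 *
 *	mutex_lock(&dev->mqe_ctx.lock);
 *	post_mqe(dev, mqe);
 *	wait_event(dev->mqe_ctx.cmd_wait, dev->mqe_ctx.cmd_done);
 *	mutex_unlock(&dev->mqe_ctx.lock);
 */
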
struct ocrdma_hw_mr {
	struct ocrdma_pbl *pbl_table;
};

struct ocrdma_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct ocrdma_hw_mr hwmr;
};

struct ocrdma_dev {
	struct ib_device ibdev;
	struct ocrdma_dev_attr attr;

	struct mutex dev_lock;	/* provides synchronized access to device data */
	spinlock_t flush_q_lock ____cacheline_aligned;

	struct ocrdma_cq **cq_tbl;
	struct ocrdma_qp **qp_tbl;

	struct ocrdma_eq *eq_tbl;
	int eq_cnt;

	union ib_gid *sgid_tbl;
	/* provides synchronization to the sgid table for
	 * updating gid entries triggered by notifier.
	 */
	spinlock_t sgid_lock;

	struct ocrdma_cq *gsi_sqcq;
	struct ocrdma_cq *gsi_rqcq;

	struct {
		struct ocrdma_av *va;
		/* provides synchronization for av
		 * table access.
		 */
		spinlock_t lock;
		struct ocrdma_pbl pbl;
	} av_tbl;

	struct mqe_ctx mqe_ctx;

	struct be_dev_info nic_info;

	struct list_head entry;
	struct ocrdma_mr *stag_arr[OCRDMA_MAX_STAG];
	u32 asic_id;
};

struct ocrdma_cq {
	struct ib_cq ibcq;

	struct ocrdma_cqe *va;
	u32 phase;
	u32 getp;	/* pointer to pending wrs to
			 * return to stack; wraps around.
			 */
	bool deferred_arm, deferred_sol;

	spinlock_t cq_lock ____cacheline_aligned;	/* provides synchronization
							 * to cq polling
							 */
	/* synchronizes the cq completion handler invoked from multiple contexts */
	spinlock_t comp_handler_lock ____cacheline_aligned;

	struct ocrdma_ucontext *ucontext;

	/* head of all qps' sq and rq for which cqes need to be
	 * flushed by this cq.
	 */
	struct list_head sq_head, rq_head;
};

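/*
 * When a QP moves to an error state its pending work requests must be
 * completed in software. sq_head/rq_head above anchor the per-CQ lists
 * of such QPs (linked through ocrdma_qp.sq_entry/rq_entry below) and are
 * walked under ocrdma_dev.flush_q_lock. A hedged sketch of the traversal
 * (hypothetical, illustrative only):
 *
 *	struct ocrdma_qp *qp;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&dev->flush_q_lock, flags);
 *	list_for_each_entry(qp, &cq->sq_head, sq_entry) {
 *		... flush the qp's pending sq wqes ...
 *	}
 *	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
 */
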
struct ocrdma_pd {
	struct ib_pd ibpd;
	struct ocrdma_dev *dev;
	struct ocrdma_ucontext *uctx;
};

struct ocrdma_ah {
	struct ib_ah ibah;
	struct ocrdma_av *av;
};

struct ocrdma_qp_hwq_info {
	u8 *va;			/* virtual address */
	u16 dbid;		/* qid, where to ring the doorbell */
};

struct ocrdma_srq {
	struct ib_srq ibsrq;

	struct ocrdma_qp_hwq_info rq;
	/* provides synchronization to multiple contexts posting rqes */
	spinlock_t q_lock ____cacheline_aligned;

	struct ocrdma_pd *pd;
};

struct ocrdma_qp {
	struct ib_qp ibqp;
	struct ocrdma_dev *dev;

	struct ocrdma_qp_hwq_info sq;
	u16 dpp_wqe_idx;
	/* provides synchronization to multiple contexts posting wqes and rqes */
	spinlock_t q_lock ____cacheline_aligned;
	struct ocrdma_cq *sq_cq;
	/* list maintained per CQ to flush SQ errors */
	struct list_head sq_entry;

	struct ocrdma_qp_hwq_info rq;
	struct ocrdma_cq *rq_cq;
	struct ocrdma_srq *srq;
	/* list maintained per CQ to flush RQ errors */
	struct list_head rq_entry;

	enum ocrdma_qp_state state;	/* QP state */
	u32 max_ord, max_ird;

	struct ocrdma_pd *pd;

	enum ib_qp_type qp_type;
};

struct ocrdma_ucontext {
	struct ib_ucontext ibucontext;

	struct list_head mm_head;
	struct mutex mm_list_lock;	/* protects list entries of mm type */
	struct ocrdma_pd *cntxt_pd;
};

struct ocrdma_mm {
	struct list_head entry;
};

static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct ocrdma_dev, ibdev);
}

static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext
							  *ibucontext)
{
	return container_of(ibucontext, struct ocrdma_ucontext, ibucontext);
}

static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct ocrdma_pd, ibpd);
}

static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ocrdma_cq, ibcq);
}

static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct ocrdma_qp, ibqp);
}

static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct ocrdma_mr, ibmr);
}

static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct ocrdma_ah, ibah);
}

static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct ocrdma_srq, ibsrq);
}

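/*
 * All of the get_ocrdma_*() helpers above rely on the core ib_* object
 * being embedded in the driver structure, so container_of() converts a
 * pointer handed back by the IB core into the enclosing driver object
 * with no table lookup. A hedged sketch of a typical caller
 * (hypothetical verbs handler, illustrative only):
 *
 *	static int example_handler(struct ib_pd *ibpd)
 *	{
 *		struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
 *		struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
 *		...
 *	}
 */
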
static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
{
	int cqe_valid;

	cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
	return (cqe_valid == cq->phase);
}

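/*
 * CQE validity is tracked with a phase bit rather than by zeroing
 * consumed entries: the hardware flips the phase it writes each time it
 * wraps the CQ ring, and the driver flips cq->phase when its getp index
 * wraps, so entries left over from the previous pass compare unequal.
 * A hedged poll-loop sketch (max_hw_cqe is a hypothetical ring size,
 * illustrative only):
 *
 *	struct ocrdma_cqe *cqe = &cq->va[cq->getp];
 *
 *	while (is_cqe_valid(cq, cqe)) {
 *		... consume cqe ...
 *		cq->getp = (cq->getp + 1) % max_hw_cqe;
 *		if (cq->getp == 0)
 *			cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
 *		cqe = &cq->va[cq->getp];
 *	}
 */
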
static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_QTYPE) ? 0 : 1;
}

static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_INVALIDATE) ? 1 : 0;
}

static inline int is_cqe_imm(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_IMM) ? 1 : 0;
}

static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_WRITE_IMM) ? 1 : 0;
}

static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
				      struct ib_ah_attr *ah_attr, u8 *mac_addr)
{
	struct in6_addr in6;

	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
	if (rdma_is_multicast_addr(&in6))
		rdma_get_mcast_mac(&in6, mac_addr);
	else
		memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
	return 0;
}

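/*
 * ocrdma_resolve_dmac() above needs no address resolution for a
 * multicast destination GID: an IPv6 multicast address maps
 * algorithmically onto the 33:33:xx:xx:xx:xx ethernet multicast range
 * (RFC 2464), which is what rdma_get_mcast_mac() computes. Unicast
 * destinations simply reuse the dmac that the core stack has already
 * resolved into ah_attr.
 */
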
static inline int ocrdma_get_eq_table_index(struct ocrdma_dev *dev,
					    int eqid)
{
	int indx;

	for (indx = 0; indx < dev->eq_cnt; indx++) {
		if (dev->eq_tbl[indx].q.id == eqid)
			return indx;
	}

	return -EINVAL;
}

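/*
 * Hardware events report the raw EQ id, not the driver's table slot; a
 * hedged sketch of a caller recovering the slot (hypothetical,
 * illustrative only):
 *
 *	struct ocrdma_eq *eq;
 *	int indx = ocrdma_get_eq_table_index(dev, eqid);
 *
 *	if (indx < 0)
 *		return;
 *	eq = &dev->eq_tbl[indx];
 *
 * ocrdma_get_asic_type() below decodes the ASIC generation from the
 * adapter's ASIC id register, reading it through PCI config space once
 * and caching it in dev->asic_id. A hedged usage sketch (the
 * OCRDMA_ASIC_GEN_SKH_R constant is assumed to come from ocrdma_sli.h;
 * illustrative only):
 *
 *	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R)
 *		... generation-specific setup ...
 */
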
static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev)
{
	if (dev->nic_info.dev_family == 0xF && !dev->asic_id) {
		pci_read_config_dword(dev->nic_info.pdev,
				      OCRDMA_SLI_ASIC_ID_OFFSET,
				      &dev->asic_id);
	}

	return (dev->asic_id & OCRDMA_SLI_ASIC_GEN_NUM_MASK) >>
				OCRDMA_SLI_ASIC_GEN_NUM_SHIFT;
}

#endif /* __OCRDMA_H__ */