/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
#include "user.h"

/* not supported currently */
static int wq_signature;
enum {
	MLX5_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX5_IB_LINK_TYPE_IB		= 0,
	MLX5_IB_LINK_TYPE_ETH		= 1
};

enum {
	MLX5_IB_SQ_STRIDE	= 6,
	MLX5_IB_CACHE_LINE_SIZE	= 64,
};
static const u32 mlx5_ib_opcode[] = {
	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
	[IB_WR_FAST_REG_MR]			= MLX5_OPCODE_UMR,
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
};
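/* QP0 (SMI) and QP1 (GSI) are the special per-port QPs; is_sqp() covers both. */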
static int is_qp0(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_SMI;
}

static int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_GSI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
	return is_qp0(qp_type) || is_qp1(qp_type);
}

static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
	return mlx5_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}
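/*
 * Translate a firmware QP event into the corresponding ib_event and
 * dispatch it to the consumer's event handler, if one is registered.
 */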
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct ib_event event;

	if (type == MLX5_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX5_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX5_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX5_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
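/*
 * Compute the receive queue geometry (WQE count, stride and max SGEs),
 * either from the user command (userspace QPs) or from the requested caps.
 */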
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
	struct mlx5_general_caps *gen;
	int wqe_size;
	int wq_size;

	gen = &dev->mdev->caps.gen;
	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > gen->max_wqes)
		return -EINVAL;

	if (!has_rq) {
		qp->rq.max_gs = 0;
		qp->rq.wqe_cnt = 0;
		qp->rq.wqe_shift = 0;
	} else {
		if (ucmd) {
			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		} else {
			wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
			wqe_size = roundup_pow_of_two(wqe_size);
			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
			qp->rq.wqe_cnt = wq_size / wqe_size;
			if (wqe_size > gen->max_rq_desc_sz) {
				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
					    wqe_size,
					    gen->max_rq_desc_sz);
				return -EINVAL;
			}
			qp->rq.wqe_shift = ilog2(wqe_size);
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		}
	}

	return 0;
}
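/* Per-WQE overhead on the send queue, which depends on the transport type. */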
static int sq_overhead(enum ib_qp_type qp_type)
		size += sizeof(struct mlx5_wqe_xrc_seg);
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_atomic_seg) +
			sizeof(struct mlx5_wqe_raddr_seg);
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_raddr_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_datagram_seg);
	case MLX5_IB_QPT_REG_UMR:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
	int inl_size = 0;
	int size;

	size = sq_overhead(attr->qp_type);
	if (size < 0)
		return size;

	if (attr->cap.max_inline_data) {
		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
			attr->cap.max_inline_data;
	}

	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
	    ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
		return MLX5_SIG_WQE_SIZE;
	else
		return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
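/*
 * Work out the send queue size in WQE basic blocks from the requested
 * capabilities and update attr->cap with what was actually granted.
 */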
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
{
	struct mlx5_general_caps *gen;

	gen = &dev->mdev->caps.gen;
	if (!attr->cap.max_send_wr)

	wqe_size = calc_send_wqe(attr);
	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);

	if (wqe_size > gen->max_sq_desc_sz) {
		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
			    wqe_size, gen->max_sq_desc_sz);

	qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
			      sizeof(struct mlx5_wqe_inline_seg);
	attr->cap.max_inline_data = qp->max_inline_data;

	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
		qp->signature_en = true;

	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	if (qp->sq.wqe_cnt > gen->max_wqes) {
		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
			    qp->sq.wqe_cnt, gen->max_wqes);

	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.max_gs = attr->cap.max_send_sge;
	qp->sq.max_post = wq_size / wqe_size;
	attr->cap.max_send_wr = qp->sq.max_post;
static int set_user_buf_size(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_qp *qp,
			     struct mlx5_ib_create_qp *ucmd)
{
	struct mlx5_general_caps *gen;
	int desc_sz = 1 << qp->sq.wqe_shift;

	gen = &dev->mdev->caps.gen;
	if (desc_sz > gen->max_sq_desc_sz) {
		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
			     desc_sz, gen->max_sq_desc_sz);

	if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
		mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
			     ucmd->sq_wqe_count);

	qp->sq.wqe_cnt = ucmd->sq_wqe_count;

	if (qp->sq.wqe_cnt > gen->max_wqes) {
		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
			     qp->sq.wqe_cnt, gen->max_wqes);

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		       (qp->sq.wqe_cnt << 6);
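/*
 * A QP owns a receive queue only if it is neither an XRC QP, nor a UMR QP,
 * nor attached to an SRQ, and actually asked for receive WRs.  The helpers
 * below manage UUAR (UAR/blue flame register) allocation for send doorbells.
 */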
static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
	    !attr->cap.max_recv_wr)

static int first_med_uuar(void)

static int next_uuar(int n)
{
	while (((n % 4) & 2))

static int num_med_uuar(struct mlx5_uuar_info *uuari)
{
	n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
		uuari->num_low_latency_uuars - 1;

	return n >= 0 ? n : 0;

static int max_uuari(struct mlx5_uuar_info *uuari)
{
	return uuari->num_uars * 4;

static int first_hi_uuar(struct mlx5_uuar_info *uuari)
{
	med = num_med_uuar(uuari);
	for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {

static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
{
	for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
		if (!test_bit(i, uuari->bitmap)) {
			set_bit(i, uuari->bitmap);

static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
{
	int minidx = first_med_uuar();

	for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
		if (uuari->count[i] < uuari->count[minidx])

	uuari->count[minidx]++;
static int alloc_uuar(struct mlx5_uuar_info *uuari,
		      enum mlx5_ib_latency_class lat)
{
	mutex_lock(&uuari->lock);
	case MLX5_IB_LATENCY_CLASS_LOW:
		uuari->count[uuarn]++;
	case MLX5_IB_LATENCY_CLASS_MEDIUM:
		uuarn = alloc_med_class_uuar(uuari);
	case MLX5_IB_LATENCY_CLASS_HIGH:
		uuarn = alloc_high_class_uuar(uuari);
	case MLX5_IB_LATENCY_CLASS_FAST_PATH:
	mutex_unlock(&uuari->lock);

static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];

static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];

static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
	int high_uuar = nuuars - uuari->num_low_latency_uuars;

	mutex_lock(&uuari->lock);
		--uuari->count[uuarn];
	if (uuarn < high_uuar) {
		free_med_class_uuar(uuari, uuarn);
	free_high_class_uuar(uuari, uuarn);
	mutex_unlock(&uuari->lock);

static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;

static int to_mlx5_st(enum ib_qp_type type)
{
	case IB_QPT_RC:			return MLX5_QP_ST_RC;
	case IB_QPT_UC:			return MLX5_QP_ST_UC;
	case IB_QPT_UD:			return MLX5_QP_ST_UD;
	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
	case IB_QPT_GSI:		return MLX5_QP_ST_QP1;
	case IB_QPT_RAW_IPV6:		return MLX5_QP_ST_RAW_IPV6;
	case IB_QPT_RAW_ETHERTYPE:	return MLX5_QP_ST_RAW_ETHERTYPE;
	case IB_QPT_RAW_PACKET:
	default:			return -EINVAL;

static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
{
	return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
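/*
 * Create the userspace-backed part of a QP: pick a UUAR (blue flame
 * register), pin the user buffer, build the page list and map the doorbell.
 */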
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			  struct mlx5_ib_qp *qp, struct ib_udata *udata,
			  struct mlx5_create_qp_mbox_in **in,
			  struct mlx5_ib_create_qp_resp *resp, int *inlen)
{
	struct mlx5_ib_ucontext *context;
	struct mlx5_ib_create_qp ucmd;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		mlx5_ib_dbg(dev, "copy failed\n");

	context = to_mucontext(pd->uobject->context);
	/* TBD: should come from the verbs when we have the API */
	uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
		mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
		mlx5_ib_dbg(dev, "reverting to medium latency\n");
		uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
			mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
			mlx5_ib_dbg(dev, "reverting to high latency\n");
			uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
				mlx5_ib_warn(dev, "uuar allocation failed\n");

	uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
	mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);

	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;

	err = set_user_buf_size(dev, qp, &ucmd);

	if (ucmd.buf_addr && qp->buf_size) {
		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
		if (IS_ERR(qp->umem)) {
			mlx5_ib_dbg(dev, "umem_get failed\n");
			err = PTR_ERR(qp->umem);

	mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
		mlx5_ib_warn(dev, "bad offset\n");

	mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
		    ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);

	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
	*in = mlx5_vzalloc(*inlen);

	mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	(*in)->ctx.params2 = cpu_to_be32(offset << 6);

	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	resp->uuar_index = uuarn;

	err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
		mlx5_ib_dbg(dev, "map failed\n");

	err = ib_copy_to_udata(udata, resp, sizeof(*resp));
		mlx5_ib_dbg(dev, "copy failed\n");

	qp->create_type = MLX5_QP_USER;

	mlx5_ib_db_unmap_user(context, &qp->db);

	ib_umem_release(qp->umem);

	free_uuar(&context->uuari, uuarn);
static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_ucontext *context;

	context = to_mucontext(pd->uobject->context);
	mlx5_ib_db_unmap_user(context, &qp->db);
	if (qp->umem)
		ib_umem_release(qp->umem);
	free_uuar(&context->uuari, qp->uuarn);
}
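/*
 * Kernel QPs use driver-allocated buffers and doorbells instead of user
 * memory; UMR QPs additionally get a fast-path UUAR for their doorbells.
 */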
static int create_kernel_qp(struct mlx5_ib_dev *dev,
			    struct ib_qp_init_attr *init_attr,
			    struct mlx5_ib_qp *qp,
			    struct mlx5_create_qp_mbox_in **in, int *inlen)
{
	enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
	struct mlx5_uuar_info *uuari;

	uuari = &dev->mdev->priv.uuari;
	if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))

	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
		lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;

	uuarn = alloc_uuar(uuari, lc);
		mlx5_ib_dbg(dev, "\n");

	qp->bf = &uuari->bfs[uuarn];
	uar_index = qp->bf->uar->index;

	err = calc_sq_size(dev, init_attr, qp);
		mlx5_ib_dbg(dev, "err %d\n", err);

	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

	err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
		mlx5_ib_dbg(dev, "err %d\n", err);

	qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
	*in = mlx5_vzalloc(*inlen);

	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	/* Set "fast registration enabled" for all kernel QPs */
	(*in)->ctx.params1 |= cpu_to_be32(1 << 11);
	(*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);

	mlx5_fill_page_array(&qp->buf, (*in)->pas);

	err = mlx5_db_alloc(dev->mdev, &qp->db);
		mlx5_ib_dbg(dev, "err %d\n", err);

	qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
	qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
	qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
	qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
	qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);

	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
	    !qp->sq.w_list || !qp->sq.wqe_head) {

	qp->create_type = MLX5_QP_KERNEL;

	mlx5_db_free(dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wr_data);

	mlx5_buf_free(dev->mdev, &qp->buf);

	free_uuar(&dev->mdev->priv.uuari, uuarn);

static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	mlx5_db_free(dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wr_data);
	mlx5_buf_free(dev->mdev, &qp->buf);
	free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
	if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
	    (attr->qp_type == IB_QPT_XRC_INI))
		return cpu_to_be32(MLX5_SRQ_RQ);
	else if (!qp->has_rq)
		return cpu_to_be32(MLX5_ZERO_LEN_RQ);
		return cpu_to_be32(MLX5_NON_ZERO_RQ);

static int is_connected(enum ib_qp_type qp_type)
{
	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
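/*
 * Common QP creation path for kernel and user QPs: size the work queues,
 * build the firmware mailbox and issue the create QP command.
 */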
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	struct mlx5_ib_create_qp_resp resp;
	struct mlx5_create_qp_mbox_in *in;
	struct mlx5_general_caps *gen;
	struct mlx5_ib_create_qp ucmd;
	int inlen = sizeof(*in);

	gen = &dev->mdev->caps.gen;
	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
		if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
		qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;

	if (pd && pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			mlx5_ib_dbg(dev, "copy failed\n");

		qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
		qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
		qp->wq_sig = !!wq_signature;

	qp->has_rq = qp_has_rq(init_attr);
	err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
			  qp, (pd && pd->uobject) ? &ucmd : NULL);
		mlx5_ib_dbg(dev, "err %d\n", err);

		mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
		if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
		    ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
			mlx5_ib_dbg(dev, "invalid rq params\n");
		if (ucmd.sq_wqe_count > gen->max_wqes) {
			mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
				    ucmd.sq_wqe_count, gen->max_wqes);
		err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
			mlx5_ib_dbg(dev, "err %d\n", err);
		err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
			mlx5_ib_dbg(dev, "err %d\n", err);
		qp->pa_lkey = to_mpd(pd)->pa_lkey;

		in = mlx5_vzalloc(sizeof(*in));

		qp->create_type = MLX5_QP_EMPTY;

	if (is_sqp(init_attr->qp_type))
		qp->port = init_attr->port_num;
	in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
				    MLX5_QP_PM_MIGRATED << 11);

	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
		in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
		in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);

		in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);

	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);

	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
		rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
		scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
			in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
			in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;
		if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
				in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
				in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;

	if (qp->rq.wqe_cnt) {
		in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
		in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;

	in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);

		in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
		in->ctx.sq_crq_size |= cpu_to_be16(0x8000);

	/* Set default resources */
	switch (init_attr->qp_type) {
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		if (init_attr->srq) {
			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);

	if (init_attr->send_cq)
		in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);

	if (init_attr->recv_cq)
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);

	in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);

	err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
		mlx5_ib_dbg(dev, "create qp failed\n");

	/* Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx5_ib_qp_event;

	if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(pd, qp);
	else if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
		if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
			spin_lock_irq(&send_cq->lock);
			spin_lock_nested(&recv_cq->lock,
					 SINGLE_DEPTH_NESTING);
		} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
			spin_lock_irq(&send_cq->lock);
			__acquire(&recv_cq->lock);
			spin_lock_irq(&recv_cq->lock);
			spin_lock_nested(&send_cq->lock,
					 SINGLE_DEPTH_NESTING);
			spin_lock_irq(&send_cq->lock);
			__acquire(&recv_cq->lock);
	} else if (recv_cq) {
		spin_lock_irq(&recv_cq->lock);
		__acquire(&send_cq->lock);
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);

static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
		if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
			spin_unlock(&recv_cq->lock);
			spin_unlock_irq(&send_cq->lock);
		} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
			__release(&recv_cq->lock);
			spin_unlock_irq(&send_cq->lock);
			spin_unlock(&send_cq->lock);
			spin_unlock_irq(&recv_cq->lock);
			__release(&recv_cq->lock);
			spin_unlock_irq(&send_cq->lock);
	} else if (recv_cq) {
		__release(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
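/*
 * Resolve the PD and the send/recv CQs a QP uses; XRC and UMR QP types
 * use only the send CQ (or, for XRC target QPs, neither).
 */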
static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
	return to_mpd(qp->ibqp.pd);

static void get_cqs(struct mlx5_ib_qp *qp,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
	case MLX5_IB_QPT_REG_UMR:
	case IB_QPT_XRC_INI:
		*send_cq = to_mcq(qp->ibqp.send_cq);
	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = to_mcq(qp->ibqp.recv_cq);
	case IB_QPT_RAW_PACKET:

static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_modify_qp_mbox_in *in;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (qp->state != IB_QPS_RESET)
		if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
					MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
			mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",

	get_cqs(qp, &send_cq, &recv_cq);

	if (qp->create_type == MLX5_QP_KERNEL) {
		mlx5_ib_lock_cqs(send_cq, recv_cq);
		__mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
		mlx5_ib_unlock_cqs(send_cq, recv_cq);

	err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
		mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);

	if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);
	else if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(&get_pd(qp)->ibpd, qp);
static const char *ib_qp_type_str(enum ib_qp_type type)
{
		return "IB_QPT_SMI";
		return "IB_QPT_GSI";
	case IB_QPT_RAW_IPV6:
		return "IB_QPT_RAW_IPV6";
	case IB_QPT_RAW_ETHERTYPE:
		return "IB_QPT_RAW_ETHERTYPE";
	case IB_QPT_XRC_INI:
		return "IB_QPT_XRC_INI";
	case IB_QPT_XRC_TGT:
		return "IB_QPT_XRC_TGT";
	case IB_QPT_RAW_PACKET:
		return "IB_QPT_RAW_PACKET";
	case MLX5_IB_QPT_REG_UMR:
		return "MLX5_IB_QPT_REG_UMR";
		return "Invalid QP type";
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx5_general_caps *gen;
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_qp *qp;

		dev = to_mdev(pd->device);

		/* being cautious here */
		if (init_attr->qp_type != IB_QPT_XRC_TGT &&
		    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
			pr_warn("%s: no PD for transport %s\n", __func__,
				ib_qp_type_str(init_attr->qp_type));
			return ERR_PTR(-EINVAL);
		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);

	gen = &dev->mdev->caps.gen;

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
	case IB_QPT_XRC_INI:
		if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) {
			mlx5_ib_dbg(dev, "XRC not supported\n");
			return ERR_PTR(-ENOSYS);
		init_attr->recv_cq = NULL;
		if (init_attr->qp_type == IB_QPT_XRC_TGT) {
			xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
			init_attr->send_cq = NULL;

	case MLX5_IB_QPT_REG_UMR:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, qp);
			mlx5_ib_dbg(dev, "create_qp_common failed\n");
			return ERR_PTR(err);

		if (is_qp0(init_attr->qp_type))
			qp->ibqp.qp_num = 0;
		else if (is_qp1(init_attr->qp_type))
			qp->ibqp.qp_num = 1;
			qp->ibqp.qp_num = qp->mqp.qpn;

		mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
			    qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
			    to_mcq(init_attr->send_cq)->mcq.cqn);

	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	case IB_QPT_RAW_PACKET:
		mlx5_ib_dbg(dev, "unsupported qp type %d\n",
			    init_attr->qp_type);
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);

int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	destroy_qp_common(dev, mqp);
static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX5_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX5_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
	struct mlx5_general_caps *gen;

	gen = &dev->mdev->caps.gen;
	if (rate == IB_RATE_PORT_CURRENT) {
	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
		while (rate != IB_RATE_2_5_GBPS &&
		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
			 gen->stat_rate_support))

	return rate + MLX5_STAT_RATE_OFFSET;
static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
			 struct mlx5_qp_path *path, u8 port, int attr_mask,
			 u32 path_flags, const struct ib_qp_attr *attr)
{
	struct mlx5_general_caps *gen;

	gen = &dev->mdev->caps.gen;
	path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
	path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;

	if (attr_mask & IB_QP_PKEY_INDEX)
		path->pkey_index = attr->pkey_index;

	path->grh_mlid	= ah->src_path_bits & 0x7f;
	path->rlid	= cpu_to_be16(ah->dlid);

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
		path->grh_mlid	|= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);

	err = ib_rate_to_mlx5(dev, ah->static_rate);
	path->static_rate = err;

	if (attr_mask & IB_QP_TIMEOUT)
		path->ackto_lt = attr->timeout << 3;

	path->sl = ah->sl & 0xf;
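/*
 * Table of the optional parameters the firmware accepts for each
 * (current state, new state, transport) combination during modify QP.
 */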
static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
	[MLX5_QP_STATE_INIT] = {
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_PRI_PORT,
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					   MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					   MLX5_QP_OPTPAR_RRE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_PKEY_INDEX,
	[MLX5_QP_STATE_RTR] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
	[MLX5_QP_STATE_RTS] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_SRQN		|
					  MLX5_QP_OPTPAR_CQN_RCV,
	[MLX5_QP_STATE_SQER] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_RAE		|
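/* Map a single IB_QP_* attribute mask bit to the matching MLX5_QP_OPTPAR_* bits. */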
static int ib_nr_to_mlx5_nr(int ib_mask)
{
	case IB_QP_CUR_STATE:
	case IB_QP_EN_SQD_ASYNC_NOTIFY:
	case IB_QP_ACCESS_FLAGS:
		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
	case IB_QP_PKEY_INDEX:
		return MLX5_QP_OPTPAR_PKEY_INDEX;
		return MLX5_QP_OPTPAR_PRI_PORT;
		return MLX5_QP_OPTPAR_Q_KEY;
		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
			MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_PATH_MTU:
		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
	case IB_QP_RETRY_CNT:
		return MLX5_QP_OPTPAR_RETRY_COUNT;
	case IB_QP_RNR_RETRY:
		return MLX5_QP_OPTPAR_RNR_RETRY;
	case IB_QP_MAX_QP_RD_ATOMIC:
		return MLX5_QP_OPTPAR_SRA_MAX;
	case IB_QP_ALT_PATH:
		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
	case IB_QP_MIN_RNR_TIMER:
		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
	case IB_QP_MAX_DEST_RD_ATOMIC:
		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
	case IB_QP_PATH_MIG_STATE:
		return MLX5_QP_OPTPAR_PM_STATE;
	case IB_QP_DEST_QPN:

static int ib_mask_to_mlx5_opt(int ib_mask)
{
	for (i = 0; i < 8 * sizeof(int); i++) {
		if ((1 << i) & ib_mask)
			result |= ib_nr_to_mlx5_nr(1 << i);
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_qp_context *context;
	struct mlx5_general_caps *gen;
	struct mlx5_modify_qp_mbox_in *in;
	struct mlx5_ib_pd *pd;
	enum mlx5_qp_state mlx5_cur, mlx5_new;
	enum mlx5_qp_optpar optpar;

	gen = &dev->mdev->caps.gen;
	in = kzalloc(sizeof(*in), GFP_KERNEL);

	err = to_mlx5_st(ibqp->qp_type);

	context->flags = cpu_to_be32(err << 16);

	if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
		context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
			context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
			context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);

	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
		context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
	} else if (ibqp->qp_type == IB_QPT_UD ||
		   ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > IB_MTU_4096) {
			mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
		context->mtu_msgmax = (attr->path_mtu << 5) | gen->log_max_msg;

	if (attr_mask & IB_QP_DEST_QPN)
		context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PKEY_INDEX)
		context->pri_path.pkey_index = attr->pkey_index;

	/* todo implement counter_index functionality */

	if (is_sqp(ibqp->qp_type))
		context->pri_path.port = qp->port;

	if (attr_mask & IB_QP_PORT)
		context->pri_path.port = attr->port_num;

	if (attr_mask & IB_QP_AV) {
		err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
				    attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
				    attr_mask, 0, attr);

	if (attr_mask & IB_QP_TIMEOUT)
		context->pri_path.ackto_lt |= attr->timeout << 3;

	if (attr_mask & IB_QP_ALT_PATH) {
		err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
				    attr->alt_port_num, attr_mask, 0, attr);
	get_cqs(qp, &send_cq, &recv_cq);

	context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
	context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
	context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
	context->params1  = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);

	if (attr_mask & IB_QP_RNR_RETRY)
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);

	if (attr_mask & IB_QP_RETRY_CNT)
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
		context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);

	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	if (attr_mask & IB_QP_QKEY)
		context->qkey = cpu_to_be32(attr->qkey);

	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->sq_crq_size |= cpu_to_be16(1 << 4);

	mlx5_cur = to_mlx5_state(cur_state);
	mlx5_new = to_mlx5_state(new_state);
	mlx5_st = to_mlx5_st(ibqp->qp_type);

	optpar = ib_mask_to_mlx5_opt(attr_mask);
	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
	in->optparam = cpu_to_be32(optpar);
	err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
				  to_mlx5_state(new_state), in, sqd_event,

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
		if (send_cq != recv_cq)
			mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);

		qp->sq.cur_post = 0;
		qp->sq.last_poll = 0;
		qp->db.db[MLX5_RCV_DBR] = 0;
		qp->db.db[MLX5_SND_DBR] = 0;
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct mlx5_general_caps *gen;

	gen = &dev->mdev->caps.gen;
	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
	    !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_UNSPECIFIED))

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > gen->num_ports))

	if (attr_mask & IB_QP_PKEY_INDEX) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >= gen->port[port - 1].pkey_table_len)

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp))

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > (1 << gen->log_max_ra_req_qp))

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {

	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

	mutex_unlock(&qp->mutex);
static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))

	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;

static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);

static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);

static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);

static __be16 get_klm_octo(int npages)
{
	return cpu_to_be16(ALIGN(npages, 8) / 2);
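/*
 * Bitmasks of the mkey fields a fast-register (and, below, a signature)
 * work request is allowed to update.
 */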
static __be64 frwr_mkey_mask(void)
{
	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);

static __be64 sig_mkey_mask(void)
{
	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_SIGERR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE		|
		MLX5_MKEY_MASK_BSF_EN;

	return cpu_to_be64(result);

static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				 struct ib_send_wr *wr, int li)
{
	memset(umr, 0, sizeof(*umr));

		umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
		umr->flags = 1 << 7;

	umr->flags = (1 << 5); /* fail if not free */
	umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len);
	umr->mkey_mask = frwr_mkey_mask();

static __be64 get_umr_reg_mr_mask(void)
{
	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);

static __be64 get_umr_unreg_mr_mask(void)
{
	result = MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);

static __be64 get_umr_update_mtt_mask(void)
{
	result = MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				struct ib_send_wr *wr)
{
	struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;

	memset(umr, 0, sizeof(*umr));

	if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
		umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
		umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */

	if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
		umr->klm_octowords = get_klm_octo(umrwr->npages);
		if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
			umr->mkey_mask = get_umr_update_mtt_mask();
			umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
			umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
			umr->mkey_mask = get_umr_reg_mr_mask();
		umr->mkey_mask = get_umr_unreg_mr_mask();

		umr->flags |= MLX5_UMR_INLINE;
static u8 get_umr_flags(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
		MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
}
static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
	memset(seg, 0, sizeof(*seg));
		seg->status = MLX5_MKEY_STATUS_FREE;

	seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) |
		     MLX5_ACCESS_MODE_MTT;
	*writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
	seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
	seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
	seg->len = cpu_to_be64(wr->wr.fast_reg.length);
	seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
	seg->log2_page_size = wr->wr.fast_reg.page_shift;

static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
{
	struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;

	memset(seg, 0, sizeof(*seg));
	if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
		seg->status = MLX5_MKEY_STATUS_FREE;

	seg->flags = convert_access(umrwr->access_flags);
	if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
		seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
		seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
	seg->len = cpu_to_be64(umrwr->length);
	seg->log2_page_size = umrwr->page_shift;
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
				       mlx5_mkey_variant(umrwr->mkey));

static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
			   struct ib_send_wr *wr,
			   struct mlx5_core_dev *mdev,
			   struct mlx5_ib_pd *pd,
	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
	u64 *page_list = wr->wr.fast_reg.page_list->page_list;
	u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);

	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
		mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
	dseg->addr = cpu_to_be64(mfrpl->map);
	dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
	dseg->lkey = cpu_to_be32(pd->pa_lkey);
static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

static u8 calc_sig(void *wqe, int size)
{
	for (i = 0; i < size; i++)

static u8 wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
	struct mlx5_wqe_inline_seg *seg;
	void *qend = qp->sq.qend;

	wqe += sizeof(*seg);
	for (i = 0; i < wr->num_sge; i++) {
		addr = (void *)(unsigned long)(wr->sg_list[i].addr);
		len  = wr->sg_list[i].length;

		if (unlikely(inl > qp->max_inline_data))

		if (unlikely(wqe + len > qend)) {
			memcpy(wqe, addr, copy);

			wqe = mlx5_get_send_wqe(qp, 0);

		memcpy(wqe, addr, len);

	seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);

	*sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;

static u16 prot_field_size(enum ib_signature_type type)
{
	case IB_SIG_TYPE_T10_DIF:
		return MLX5_DIF_SIZE;

static u8 bs_selector(int block_size)
{
	switch (block_size) {
	case 512:	    return 0x1;
	case 520:	    return 0x2;
	case 4096:	    return 0x3;
	case 4160:	    return 0x4;
	case 1073741824:    return 0x5;
static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
			      struct mlx5_bsf_inl *inl)
{
	/* Valid inline section and allow BSF refresh */
	inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
				       MLX5_BSF_REFRESH_DIF);
	inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
	inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
	/* repeating block */
	inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
	inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
			MLX5_DIF_CRC : MLX5_DIF_IPCS;

	if (domain->sig.dif.ref_remap)
		inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;

	if (domain->sig.dif.app_escape) {
		if (domain->sig.dif.ref_escape)
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;

	inl->dif_app_bitmask_check =
		cpu_to_be16(domain->sig.dif.apptag_check_mask);
static int mlx5_set_bsf(struct ib_mr *sig_mr,
			struct ib_sig_attrs *sig_attrs,
			struct mlx5_bsf *bsf, u32 data_size)
{
	struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
	struct mlx5_bsf_basic *basic = &bsf->basic;
	struct ib_sig_domain *mem = &sig_attrs->mem;
	struct ib_sig_domain *wire = &sig_attrs->wire;

	memset(bsf, 0, sizeof(*bsf));

	/* Basic + Extended + Inline */
	basic->bsf_size_sbs = 1 << 7;
	/* Input domain check byte mask */
	basic->check_byte_mask = sig_attrs->check_mask;
	basic->raw_data_size = cpu_to_be32(data_size);

	switch (sig_attrs->mem.sig_type) {
	case IB_SIG_TYPE_NONE:
	case IB_SIG_TYPE_T10_DIF:
		basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
		basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
		mlx5_fill_inl_bsf(mem, &bsf->m_inl);

	switch (sig_attrs->wire.sig_type) {
	case IB_SIG_TYPE_NONE:
	case IB_SIG_TYPE_T10_DIF:
		if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
		    mem->sig_type == wire->sig_type) {
			/* Same block structure */
			basic->bsf_size_sbs |= 1 << 4;
			if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
				basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
			if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
			if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
			basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);

		basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
		mlx5_fill_inl_bsf(wire, &bsf->w_inl);
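/*
 * Build the data segment(s) for a signature handover WQE: a single KLM when
 * data and protection are interleaved (or protection is absent), otherwise a
 * strided block layout pairing data blocks with protection fields, then the BSF.
 */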
static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
				void **seg, int *size)
{
	struct ib_sig_attrs *sig_attrs = wr->wr.sig_handover.sig_attrs;
	struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
	struct mlx5_bsf *bsf;
	u32 data_len = wr->sg_list->length;
	u32 data_key = wr->sg_list->lkey;
	u64 data_va = wr->sg_list->addr;

	if (!wr->wr.sig_handover.prot ||
	    (data_key == wr->wr.sig_handover.prot->lkey &&
	     data_va == wr->wr.sig_handover.prot->addr &&
	     data_len == wr->wr.sig_handover.prot->length)) {
		/*
		 * Source domain doesn't contain signature information
		 * or data and protection are interleaved in memory.
		 * So need construct: a data KLM entry followed by the BSF.
		 */
		struct mlx5_klm *data_klm = *seg;

		data_klm->bcount = cpu_to_be32(data_len);
		data_klm->key = cpu_to_be32(data_key);
		data_klm->va = cpu_to_be64(data_va);
		wqe_size = ALIGN(sizeof(*data_klm), 64);
		/*
		 * Source domain contains signature information
		 * So need construct a strided block format:
		 * a stride_block_ctrl entry, a data entry, a protection
		 * entry and the BSF.
		 */
		struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
		struct mlx5_stride_block_entry *data_sentry;
		struct mlx5_stride_block_entry *prot_sentry;
		u32 prot_key = wr->wr.sig_handover.prot->lkey;
		u64 prot_va = wr->wr.sig_handover.prot->addr;
		u16 block_size = sig_attrs->mem.sig.dif.pi_interval;

		data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
		prot_sentry = (void *)data_sentry + sizeof(*data_sentry);

		prot_size = prot_field_size(sig_attrs->mem.sig_type);
			pr_err("Bad block size given: %u\n", block_size);

		sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
		sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
		sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
		sblock_ctrl->num_entries = cpu_to_be16(2);

		data_sentry->bcount = cpu_to_be16(block_size);
		data_sentry->key = cpu_to_be32(data_key);
		data_sentry->va = cpu_to_be64(data_va);
		data_sentry->stride = cpu_to_be16(block_size);

		prot_sentry->bcount = cpu_to_be16(prot_size);
		prot_sentry->key = cpu_to_be32(prot_key);
		prot_sentry->va = cpu_to_be64(prot_va);
		prot_sentry->stride = cpu_to_be16(prot_size);

		wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
				 sizeof(*prot_sentry), 64);

	*size += wqe_size / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);

	*seg += sizeof(*bsf);
	*size += sizeof(*bsf) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);
static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
				 struct ib_send_wr *wr, u32 nelements,
				 u32 length, u32 pdn)
{
	struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
	u32 sig_key = sig_mr->rkey;
	u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;

	memset(seg, 0, sizeof(*seg));

	seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) |
		     MLX5_ACCESS_MODE_KLM;
	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
				    MLX5_MKEY_BSF_EN | pdn);
	seg->len = cpu_to_be64(length);
	seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
	seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);

static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				struct ib_send_wr *wr, u32 nelements)
{
	memset(umr, 0, sizeof(*umr));

	umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
	umr->klm_octowords = get_klm_octo(nelements);
	umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
	umr->mkey_mask = sig_mkey_mask();

static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
			  void **seg, int *size)
{
	struct mlx5_ib_mr *sig_mr = to_mmr(wr->wr.sig_handover.sig_mr);
	u32 pdn = get_pd(qp)->pdn;
	int region_len, ret;

	if (unlikely(wr->num_sge != 1) ||
	    unlikely(wr->wr.sig_handover.access_flags &
		     IB_ACCESS_REMOTE_ATOMIC) ||
	    unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
	    unlikely(!sig_mr->sig->sig_status_checked))

	/* length of the protected region, data + protection */
	region_len = wr->sg_list->length;
	if (wr->wr.sig_handover.prot &&
	    (wr->wr.sig_handover.prot->lkey != wr->sg_list->lkey  ||
	     wr->wr.sig_handover.prot->addr != wr->sg_list->addr  ||
	     wr->wr.sig_handover.prot->length != wr->sg_list->length))
		region_len += wr->wr.sig_handover.prot->length;

	/*
	 * KLM octoword size - if protection was provided
	 * then we use strided block format (3 octowords),
	 * else we use single KLM (1 octoword)
	 */
	klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1;

	set_sig_umr_segment(*seg, wr, klm_oct_size);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	ret = set_sig_data_segment(wr, qp, seg, size);

	sig_mr->sig->sig_status_checked = false;
2337 static int set_psv_wr(struct ib_sig_domain *domain,
2338 u32 psv_idx, void **seg, int *size)
2340 struct mlx5_seg_set_psv *psv_seg = *seg;
2342 memset(psv_seg, 0, sizeof(*psv_seg));
2343 psv_seg->psv_num = cpu_to_be32(psv_idx);
2344 switch (domain->sig_type) {
2345 case IB_SIG_TYPE_NONE:
2347 case IB_SIG_TYPE_T10_DIF:
2348 psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
2349 domain->sig.dif.app_tag);
2350 psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
2353 pr_err("Bad signature type given.\n");
2357 *seg += sizeof(*psv_seg);
2358 *size += sizeof(*psv_seg) / 16;
2363 static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
2364 struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
int writ = 0;
int li;
2369 li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
2370 if (unlikely(wr->send_flags & IB_SEND_INLINE))
return -EINVAL;
2373 set_frwr_umr_segment(*seg, wr, li);
2374 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2375 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2376 if (unlikely((*seg == qp->sq.qend)))
2377 *seg = mlx5_get_send_wqe(qp, 0);
2378 set_mkey_segment(*seg, wr, li, &writ);
2379 *seg += sizeof(struct mlx5_mkey_seg);
2380 *size += sizeof(struct mlx5_mkey_seg) / 16;
2381 if (unlikely((*seg == qp->sq.qend)))
2382 *seg = mlx5_get_send_wqe(qp, 0);
2384 if (unlikely(wr->wr.fast_reg.page_list_len >
2385 wr->wr.fast_reg.page_list->max_page_list_len))
return -ENOMEM;
2388 set_frwr_pages(*seg, wr, mdev, pd, writ);
2389 *seg += sizeof(struct mlx5_wqe_data_seg);
2390 *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
return 0;
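/*
 * Debug helper: walk a WQE one 16-byte chunk at a time, following the send
 * queue wrap-around, and print it as big-endian 32-bit words.
 */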
2395 static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
__be32 *p = NULL;
int tidx = idx;
int i, j;
2401 pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
2402 for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
2403 if ((i & 0xf) == 0) {
2404 void *buf = mlx5_get_send_wqe(qp, tidx);
2405 tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
p = buf;
j = 0;
}
2409 pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
2410 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
2411 be32_to_cpu(p[j + 3]));
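/*
 * Copy a WQE into the BlueFlame register 64 bytes at a time, wrapping back
 * to the start of the SQ buffer when the source reaches the queue end.
 */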
2415 static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
2416 unsigned bytecnt, struct mlx5_ib_qp *qp)
2418 while (bytecnt > 0) {
2419 __iowrite64_copy(dst++, src++, 8);
2420 __iowrite64_copy(dst++, src++, 8);
2421 __iowrite64_copy(dst++, src++, 8);
2422 __iowrite64_copy(dst++, src++, 8);
2423 __iowrite64_copy(dst++, src++, 8);
2424 __iowrite64_copy(dst++, src++, 8);
2425 __iowrite64_copy(dst++, src++, 8);
2426 __iowrite64_copy(dst++, src++, 8);
bytecnt -= 64;
2428 if (unlikely(src == qp->sq.qend))
2429 src = mlx5_get_send_wqe(qp, 0);
2433 static u8 get_fence(u8 fence, struct ib_send_wr *wr)
2435 if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
2436 wr->send_flags & IB_SEND_FENCE))
2437 return MLX5_FENCE_MODE_STRONG_ORDERING;
2439 if (unlikely(fence)) {
2440 if (wr->send_flags & IB_SEND_FENCE)
2441 return MLX5_FENCE_MODE_SMALL_AND_FENCE;
else
return fence;
}
return 0;
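/*
 * Reserve the next send-queue slot: fail if posting nreq more WQEs would
 * overflow the SQ relative to its send CQ, otherwise return the WQE pointer
 * and its control segment with the signalled/solicited flags prefilled.
 * From here on *size counts 16-byte units.
 */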
2450 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
2451 struct mlx5_wqe_ctrl_seg **ctrl,
2452 struct ib_send_wr *wr, unsigned *idx,
2453 int *size, int nreq)
2457 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
return -ENOMEM;
}
2462 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
2463 *seg = mlx5_get_send_wqe(qp, *idx);
*ctrl = *seg;
2465 *(uint32_t *)(*seg + 8) = 0;
2466 (*ctrl)->imm = send_ieth(wr);
2467 (*ctrl)->fm_ce_se = qp->sq_signal_bits |
2468 (wr->send_flags & IB_SEND_SIGNALED ?
2469 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
2470 (wr->send_flags & IB_SEND_SOLICITED ?
2471 MLX5_WQE_CTRL_SOLICITED : 0);
2473 *seg += sizeof(**ctrl);
2474 *size = sizeof(**ctrl) / 16;
return 0;
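/*
 * Close a WQE: stamp the control segment with the opcode and producer
 * index, record the wrid/opcode/head for completion processing, and advance
 * cur_post by the WQE size rounded up to whole basic blocks
 * (MLX5_SEND_WQE_BB).
 */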
2479 static void finish_wqe(struct mlx5_ib_qp *qp,
2480 struct mlx5_wqe_ctrl_seg *ctrl,
2481 u8 size, unsigned idx, u64 wr_id,
2482 int nreq, u8 fence, u8 next_fence,
u32 mlx5_opcode)
u8 opmod = 0;
2487 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
2488 mlx5_opcode | ((u32)opmod << 24));
2489 ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
2490 ctrl->fm_ce_se |= fence;
2491 qp->fm_cache = next_fence;
2492 if (unlikely(qp->wq_sig))
2493 ctrl->signature = wq_sig(ctrl);
2495 qp->sq.wrid[idx] = wr_id;
2496 qp->sq.w_list[idx].opcode = mlx5_opcode;
2497 qp->sq.wqe_head[idx] = qp->sq.head + nreq;
2498 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
2499 qp->sq.w_list[idx].next = qp->sq.cur_post;
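/*
 * Post a chain of send work requests: for each WR reserve an SQ slot
 * (begin_wqe), emit the transport- and opcode-specific segments, then the
 * data segments (inline or gather), and close the WQE (finish_wqe).  Once
 * the chain is built, the doorbell record is updated and the doorbell is
 * rung, with barriers so the HCA never observes a doorbell ahead of the
 * WQEs it refers to.
 *
 * Illustrative consumer sketch (variable names are examples only):
 *
 *	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
 *	struct ib_send_wr wr = {}, *bad;
 *	wr.opcode = IB_WR_RDMA_WRITE;
 *	wr.sg_list = &sge;
 *	wr.num_sge = 1;
 *	wr.send_flags = IB_SEND_SIGNALED;
 *	wr.wr.rdma.remote_addr = remote_addr;
 *	wr.wr.rdma.rkey = rkey;
 *	if (ib_post_send(qp, &wr, &bad))
 *		...
 */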
2503 int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2504 struct ib_send_wr **bad_wr)
2506 struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
2507 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2508 struct mlx5_core_dev *mdev = dev->mdev;
2509 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2510 struct mlx5_ib_mr *mr;
2511 struct mlx5_wqe_data_seg *dpseg;
2512 struct mlx5_wqe_xrc_seg *xrc;
2513 struct mlx5_bf *bf = qp->bf;
2514 int uninitialized_var(size);
2515 void *qend = qp->sq.qend;
2516 unsigned long flags;
unsigned idx;
int err = 0;
int inl = 0;
int num_sge;
void *seg;
int nreq;
int i;
u8 next_fence = 0;
u8 fence;
2527 spin_lock_irqsave(&qp->sq.lock, flags);
2529 for (nreq = 0; wr; nreq++, wr = wr->next) {
2530 if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
2531 mlx5_ib_warn(dev, "\n");
err = -EINVAL;
*bad_wr = wr;
goto out;
}
2537 fence = qp->fm_cache;
2538 num_sge = wr->num_sge;
2539 if (unlikely(num_sge > qp->sq.max_gs)) {
2540 mlx5_ib_warn(dev, "\n");
err = -ENOMEM;
*bad_wr = wr;
goto out;
}
2546 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
if (err) {
2548 mlx5_ib_warn(dev, "\n");
err = -ENOMEM;
*bad_wr = wr;
goto out;
}
2554 switch (ibqp->qp_type) {
2555 case IB_QPT_XRC_INI:
xrc = seg;
2557 xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
2558 seg += sizeof(*xrc);
2559 size += sizeof(*xrc) / 16;
/* fall through */
case IB_QPT_RC:
2562 switch (wr->opcode) {
2563 case IB_WR_RDMA_READ:
2564 case IB_WR_RDMA_WRITE:
2565 case IB_WR_RDMA_WRITE_WITH_IMM:
2566 set_raddr_seg(seg, wr->wr.rdma.remote_addr,
wr->wr.rdma.rkey);
2568 seg += sizeof(struct mlx5_wqe_raddr_seg);
2569 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
break;
2572 case IB_WR_ATOMIC_CMP_AND_SWP:
2573 case IB_WR_ATOMIC_FETCH_AND_ADD:
2574 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
2575 mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
err = -ENOSYS;
*bad_wr = wr;
goto out;
2580 case IB_WR_LOCAL_INV:
2581 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2582 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
2583 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
2584 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
if (err) {
2586 mlx5_ib_warn(dev, "\n");
*bad_wr = wr;
goto out;
}
num_sge = 0;
break;
2593 case IB_WR_FAST_REG_MR:
2594 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2595 qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
2596 ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
2597 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
if (err) {
2599 mlx5_ib_warn(dev, "\n");
*bad_wr = wr;
goto out;
}
num_sge = 0;
break;
2606 case IB_WR_REG_SIG_MR:
2607 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
2608 mr = to_mmr(wr->wr.sig_handover.sig_mr);
2610 ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
2611 err = set_sig_umr_wr(wr, qp, &seg, &size);
if (err) {
2613 mlx5_ib_warn(dev, "\n");
*bad_wr = wr;
goto out;
}
2618 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2619 nreq, get_fence(fence, wr),
2620 next_fence, MLX5_OPCODE_UMR);
/*
2622 * SET_PSV WQEs are not signaled and solicited
 * on error
 */
2625 wr->send_flags &= ~IB_SEND_SIGNALED;
2626 wr->send_flags |= IB_SEND_SOLICITED;
2627 err = begin_wqe(qp, &seg, &ctrl, wr,
&idx, &size, nreq);
if (err) {
2630 mlx5_ib_warn(dev, "\n");
err = -ENOMEM;
*bad_wr = wr;
goto out;
}
2636 err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->mem,
2637 mr->sig->psv_memory.psv_idx, &seg,
&size);
if (err) {
2640 mlx5_ib_warn(dev, "\n");
*bad_wr = wr;
goto out;
}
2645 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2646 nreq, get_fence(fence, wr),
2647 next_fence, MLX5_OPCODE_SET_PSV);
2648 err = begin_wqe(qp, &seg, &ctrl, wr,
&idx, &size, nreq);
if (err) {
2651 mlx5_ib_warn(dev, "\n");
err = -ENOMEM;
*bad_wr = wr;
goto out;
}
2657 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2658 err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->wire,
2659 mr->sig->psv_wire.psv_idx, &seg,
&size);
if (err) {
2662 mlx5_ib_warn(dev, "\n");
*bad_wr = wr;
goto out;
}
2667 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2668 nreq, get_fence(fence, wr),
2669 next_fence, MLX5_OPCODE_SET_PSV);
num_sge = 0;
goto skip_psv;
default:
break;
}
break;
case IB_QPT_UC:
2679 switch (wr->opcode) {
2680 case IB_WR_RDMA_WRITE:
2681 case IB_WR_RDMA_WRITE_WITH_IMM:
2682 set_raddr_seg(seg, wr->wr.rdma.remote_addr,
wr->wr.rdma.rkey);
2684 seg += sizeof(struct mlx5_wqe_raddr_seg);
2685 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
break;
default:
break;
}
break;
case IB_QPT_SMI:
case IB_QPT_GSI:
case IB_QPT_UD:
2696 set_datagram_seg(seg, wr);
2697 seg += sizeof(struct mlx5_wqe_datagram_seg);
2698 size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
2699 if (unlikely((seg == qend)))
2700 seg = mlx5_get_send_wqe(qp, 0);
break;
2703 case MLX5_IB_QPT_REG_UMR:
2704 if (wr->opcode != MLX5_IB_WR_UMR) {
err = -EINVAL;
2706 mlx5_ib_warn(dev, "bad opcode\n");
goto out;
}
2709 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
2710 ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
2711 set_reg_umr_segment(seg, wr);
2712 seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2713 size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2714 if (unlikely((seg == qend)))
2715 seg = mlx5_get_send_wqe(qp, 0);
2716 set_reg_mkey_segment(seg, wr);
2717 seg += sizeof(struct mlx5_mkey_seg);
2718 size += sizeof(struct mlx5_mkey_seg) / 16;
2719 if (unlikely((seg == qend)))
2720 seg = mlx5_get_send_wqe(qp, 0);
break;
default:
break;
}
2727 if (wr->send_flags & IB_SEND_INLINE && num_sge) {
2728 int uninitialized_var(sz);
2730 err = set_data_inl_seg(qp, wr, seg, &sz);
2731 if (unlikely(err)) {
2732 mlx5_ib_warn(dev, "\n");
*bad_wr = wr;
goto out;
}
inl = 1;
size += sz;
} else {
dpseg = seg;
2740 for (i = 0; i < num_sge; i++) {
2741 if (unlikely(dpseg == qend)) {
2742 seg = mlx5_get_send_wqe(qp, 0);
dpseg = seg;
}
2745 if (likely(wr->sg_list[i].length)) {
2746 set_data_ptr_seg(dpseg, wr->sg_list + i);
2747 size += sizeof(struct mlx5_wqe_data_seg) / 16;
dpseg++;
}
}
}
2753 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
2754 get_fence(fence, wr), next_fence,
2755 mlx5_ib_opcode[wr->opcode]);
skip_psv:
if (0)
2758 dump_wqe(qp, idx, size);
}
out:
if (likely(nreq)) {
2763 qp->sq.head += nreq;
2765 /* Make sure that descriptors are written before
2766 * updating doorbell record and ringing the doorbell
 */
wmb();
2770 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
2772 /* Make sure doorbell record is visible to the HCA before
2773 * we hit doorbell */
wmb();
if (bf->need_lock)
2777 spin_lock(&bf->lock);
else
2779 __acquire(&bf->lock);
2782 if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
2783 mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
} else {
2786 mlx5_write64((__be32 *)ctrl, bf->reg + bf->offset,
2787 MLX5_GET_DOORBELL_LOCK(&bf->lock32));
2788 /* Make sure doorbells don't leak out of SQ spinlock
2789 * and reach the HCA out of order.
 */
mmiowb();
}
2793 bf->offset ^= bf->buf_size;
if (bf->need_lock)
2795 spin_unlock(&bf->lock);
else
2797 __release(&bf->lock);
}
2800 spin_unlock_irqrestore(&qp->sq.lock, flags);
return err;
2805 static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
2807 sig->signature = calc_sig(sig, size);
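/*
 * Post receive work requests: each WR consumes one RQ WQE whose scatter
 * list mirrors the caller's SGEs; when fewer than max_gs entries are used,
 * the list is terminated with a zero-length entry carrying
 * MLX5_INVALID_LKEY.  If WQE signatures are enabled the first slot is
 * reserved for the signature.  The RQ doorbell record is updated after the
 * loop.
 */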
2810 int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2811 struct ib_recv_wr **bad_wr)
2813 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2814 struct mlx5_wqe_data_seg *scat;
2815 struct mlx5_rwqe_sig *sig;
2816 unsigned long flags;
int err = 0;
int nreq;
int ind;
int i;
2822 spin_lock_irqsave(&qp->rq.lock, flags);
2824 ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
2826 for (nreq = 0; wr; nreq++, wr = wr->next) {
2827 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
err = -ENOMEM;
*bad_wr = wr;
goto out;
}
2833 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
err = -EINVAL;
*bad_wr = wr;
goto out;
}
2839 scat = get_recv_wqe(qp, ind);
if (qp->wq_sig)
scat++;
2843 for (i = 0; i < wr->num_sge; i++)
2844 set_data_ptr_seg(scat + i, wr->sg_list + i);
2846 if (i < qp->rq.max_gs) {
2847 scat[i].byte_count = 0;
2848 scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
}
if (qp->wq_sig) {
2853 sig = (struct mlx5_rwqe_sig *)scat;
2854 set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
}
2857 qp->rq.wrid[ind] = wr->wr_id;
2859 ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
}
out:
if (likely(nreq)) {
2864 qp->rq.head += nreq;
2866 /* Make sure that descriptors are written before
 * updating the doorbell record.
 */
wmb();
2871 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
}
2874 spin_unlock_irqrestore(&qp->rq.lock, flags);
return err;
2879 static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
2881 switch (mlx5_state) {
2882 case MLX5_QP_STATE_RST: return IB_QPS_RESET;
2883 case MLX5_QP_STATE_INIT: return IB_QPS_INIT;
2884 case MLX5_QP_STATE_RTR: return IB_QPS_RTR;
2885 case MLX5_QP_STATE_RTS: return IB_QPS_RTS;
2886 case MLX5_QP_STATE_SQ_DRAINING:
2887 case MLX5_QP_STATE_SQD: return IB_QPS_SQD;
2888 case MLX5_QP_STATE_SQER: return IB_QPS_SQE;
2889 case MLX5_QP_STATE_ERR: return IB_QPS_ERR;
default: return -1;
}
2894 static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
2896 switch (mlx5_mig_state) {
2897 case MLX5_QP_PM_ARMED: return IB_MIG_ARMED;
2898 case MLX5_QP_PM_REARM: return IB_MIG_REARM;
2899 case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED;
default: return -1;
}
2904 static int to_ib_qp_access_flags(int mlx5_flags)
int ib_flags = 0;
2908 if (mlx5_flags & MLX5_QP_BIT_RRE)
2909 ib_flags |= IB_ACCESS_REMOTE_READ;
2910 if (mlx5_flags & MLX5_QP_BIT_RWE)
2911 ib_flags |= IB_ACCESS_REMOTE_WRITE;
2912 if (mlx5_flags & MLX5_QP_BIT_RAE)
2913 ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
return ib_flags;
2918 static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
2919 struct mlx5_qp_path *path)
2921 struct mlx5_core_dev *dev = ibdev->mdev;
2923 memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
2924 ib_ah_attr->port_num = path->port;
2926 if (ib_ah_attr->port_num == 0 ||
2927 ib_ah_attr->port_num > dev->caps.gen.num_ports)
return;
2930 ib_ah_attr->sl = path->sl & 0xf;
2932 ib_ah_attr->dlid = be16_to_cpu(path->rlid);
2933 ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
2934 ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
2935 ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
2936 if (ib_ah_attr->ah_flags) {
2937 ib_ah_attr->grh.sgid_index = path->mgid_index;
2938 ib_ah_attr->grh.hop_limit = path->hop_limit;
2939 ib_ah_attr->grh.traffic_class =
2940 (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
2941 ib_ah_attr->grh.flow_label =
2942 be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
2943 memcpy(ib_ah_attr->grh.dgid.raw,
2944 path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
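/*
 * Query the QP context from firmware and translate it back into
 * ib_qp_attr: state, path MTU, PSNs, access flags, primary/alternate path
 * attributes, timeouts and retry counts.  max_inline_data is reported as
 * zero since the value is not tracked for user QPs.
 */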
2948 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
2949 struct ib_qp_init_attr *qp_init_attr)
2951 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2952 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2953 struct mlx5_query_qp_mbox_out *outb;
2954 struct mlx5_qp_context *context;
int mlx5_state;
int err = 0;
2958 mutex_lock(&qp->mutex);
2959 outb = kzalloc(sizeof(*outb), GFP_KERNEL);
if (!outb) {
err = -ENOMEM;
goto out;
}
2964 context = &outb->ctx;
2965 err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
if (err)
goto out_free;
2969 mlx5_state = be32_to_cpu(context->flags) >> 28;
2971 qp->state = to_ib_qp_state(mlx5_state);
2972 qp_attr->qp_state = qp->state;
2973 qp_attr->path_mtu = context->mtu_msgmax >> 5;
2974 qp_attr->path_mig_state =
2975 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
2976 qp_attr->qkey = be32_to_cpu(context->qkey);
2977 qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
2978 qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
2979 qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
2980 qp_attr->qp_access_flags =
2981 to_ib_qp_access_flags(be32_to_cpu(context->params2));
2983 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
2984 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
2985 to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
2986 qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
2987 qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
2990 qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
2991 qp_attr->port_num = context->pri_path.port;
2993 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
2994 qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;
2996 qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
2998 qp_attr->max_dest_rd_atomic =
2999 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
3000 qp_attr->min_rnr_timer =
3001 (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
3002 qp_attr->timeout = context->pri_path.ackto_lt >> 3;
3003 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
3004 qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
3005 qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
3006 qp_attr->cur_qp_state = qp_attr->qp_state;
3007 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
3008 qp_attr->cap.max_recv_sge = qp->rq.max_gs;
3010 if (!ibqp->uobject) {
3011 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
3012 qp_attr->cap.max_send_sge = qp->sq.max_gs;
3014 qp_attr->cap.max_send_wr = 0;
3015 qp_attr->cap.max_send_sge = 0;
3018 /* We don't support inline sends for kernel QPs (yet), and we
3019 * don't know what userspace's value should be.
 */
3021 qp_attr->cap.max_inline_data = 0;
3023 qp_init_attr->cap = qp_attr->cap;
3025 qp_init_attr->create_flags = 0;
3026 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
3027 qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
3029 qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
3030 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
out_free:
kfree(outb);
out:
3036 mutex_unlock(&qp->mutex);
return err;
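/*
 * XRC domain allocation is a thin wrapper around the core xrcd commands;
 * it is only offered when the device reports XRC support.
 */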
3040 struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
3041 struct ib_ucontext *context,
3042 struct ib_udata *udata)
3044 struct mlx5_ib_dev *dev = to_mdev(ibdev);
3045 struct mlx5_general_caps *gen;
3046 struct mlx5_ib_xrcd *xrcd;
int err;
3049 gen = &dev->mdev->caps.gen;
3050 if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC))
3051 return ERR_PTR(-ENOSYS);
3053 xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
if (!xrcd)
3055 return ERR_PTR(-ENOMEM);
3057 err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
if (err) {
kfree(xrcd);
3060 return ERR_PTR(-ENOMEM);
}
3063 return &xrcd->ibxrcd;
3066 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
3068 struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
3069 u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
int err;
3072 err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
if (err) {
3074 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
return err;
}
kfree(xrcd);
return 0;