/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
#include "user.h"
38 /* not supported currently */
39 static int wq_signature;
enum {
	MLX5_IB_ACK_REQ_FREQ	= 8,
};
enum {
	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX5_IB_LINK_TYPE_IB		= 0,
	MLX5_IB_LINK_TYPE_ETH		= 1
};
enum {
	MLX5_IB_SQ_STRIDE	= 6,
	MLX5_IB_CACHE_LINE_SIZE	= 64,
};
57 static const u32 mlx5_ib_opcode[] = {
58 [IB_WR_SEND] = MLX5_OPCODE_SEND,
59 [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
60 [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
61 [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
62 [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
63 [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
64 [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
65 [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
66 [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
67 [IB_WR_FAST_REG_MR] = MLX5_OPCODE_UMR,
68 [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
69 [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
70 [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
76 unsigned int page_shift;
83 static int is_qp0(enum ib_qp_type qp_type)
85 return qp_type == IB_QPT_SMI;
88 static int is_qp1(enum ib_qp_type qp_type)
90 return qp_type == IB_QPT_GSI;
93 static int is_sqp(enum ib_qp_type qp_type)
95 return is_qp0(qp_type) || is_qp1(qp_type);
98 static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
100 return mlx5_buf_offset(&qp->buf, offset);
103 static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
105 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
108 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
110 return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
113 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
115 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
116 struct ib_event event;
118 if (type == MLX5_EVENT_TYPE_PATH_MIG)
119 to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
121 if (ibqp->event_handler) {
122 event.device = ibqp->device;
123 event.element.qp = ibqp;
125 case MLX5_EVENT_TYPE_PATH_MIG:
126 event.event = IB_EVENT_PATH_MIG;
128 case MLX5_EVENT_TYPE_COMM_EST:
129 event.event = IB_EVENT_COMM_EST;
131 case MLX5_EVENT_TYPE_SQ_DRAINED:
132 event.event = IB_EVENT_SQ_DRAINED;
134 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
135 event.event = IB_EVENT_QP_LAST_WQE_REACHED;
137 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
138 event.event = IB_EVENT_QP_FATAL;
140 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
141 event.event = IB_EVENT_PATH_MIG_ERR;
143 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
144 event.event = IB_EVENT_QP_REQ_ERR;
146 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
147 event.event = IB_EVENT_QP_ACCESS_ERR;
150 pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
154 ibqp->event_handler(&event, ibqp->qp_context);
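/*
 * Work out the receive queue geometry.  For user QPs the WQE count and
 * stride come straight from the create_qp command; otherwise they are
 * derived from max_recv_wr/max_recv_sge and rounded up to powers of two.
 * When WQ signatures are enabled, one scatter entry is reserved for the
 * signature segment (hence the "- qp->wq_sig" in max_gs).
 */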
158 static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
159 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
161 struct mlx5_general_caps *gen;
165 gen = &dev->mdev->caps.gen;
166 /* Sanity check RQ size before proceeding */
167 if (cap->max_recv_wr > gen->max_wqes)
173 qp->rq.wqe_shift = 0;
176 qp->rq.wqe_cnt = ucmd->rq_wqe_count;
177 qp->rq.wqe_shift = ucmd->rq_wqe_shift;
178 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
179 qp->rq.max_post = qp->rq.wqe_cnt;
181 wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
182 wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
183 wqe_size = roundup_pow_of_two(wqe_size);
184 wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
185 wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
186 qp->rq.wqe_cnt = wq_size / wqe_size;
187 if (wqe_size > gen->max_rq_desc_sz) {
188 mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
190 gen->max_rq_desc_sz);
193 qp->rq.wqe_shift = ilog2(wqe_size);
194 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
195 qp->rq.max_post = qp->rq.wqe_cnt;
202 static int sq_overhead(enum ib_qp_type qp_type)
208 size += sizeof(struct mlx5_wqe_xrc_seg);
211 size += sizeof(struct mlx5_wqe_ctrl_seg) +
212 sizeof(struct mlx5_wqe_atomic_seg) +
213 sizeof(struct mlx5_wqe_raddr_seg);
220 size += sizeof(struct mlx5_wqe_ctrl_seg) +
221 sizeof(struct mlx5_wqe_raddr_seg) +
222 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
223 sizeof(struct mlx5_mkey_seg);
229 size += sizeof(struct mlx5_wqe_ctrl_seg) +
230 sizeof(struct mlx5_wqe_datagram_seg);
233 case MLX5_IB_QPT_REG_UMR:
234 size += sizeof(struct mlx5_wqe_ctrl_seg) +
235 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
236 sizeof(struct mlx5_mkey_seg);
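/*
 * Worst-case size of a single send WQE: the transport overhead plus the
 * larger of the inline data segment and the gather list, aligned to the
 * basic block size.  Signature-enabled QPs are padded up to
 * MLX5_SIG_WQE_SIZE so a signature handover WQE always fits.
 */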
246 static int calc_send_wqe(struct ib_qp_init_attr *attr)
251 size = sq_overhead(attr->qp_type);
255 if (attr->cap.max_inline_data) {
256 inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
257 attr->cap.max_inline_data;
260 size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
261 if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
262 ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
263 return MLX5_SIG_WQE_SIZE;
265 return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
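/*
 * Size the send queue for a kernel QP: compute the worst-case WQE, check
 * it against the device limit, derive max_inline_data from the leftover
 * space, and convert the requested max_send_wr into a basic-block count.
 */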
268 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
269 struct mlx5_ib_qp *qp)
271 struct mlx5_general_caps *gen;
275 gen = &dev->mdev->caps.gen;
276 if (!attr->cap.max_send_wr)
279 wqe_size = calc_send_wqe(attr);
280 mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
284 if (wqe_size > gen->max_sq_desc_sz) {
285 mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
286 wqe_size, gen->max_sq_desc_sz);
290 qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
291 sizeof(struct mlx5_wqe_inline_seg);
292 attr->cap.max_inline_data = qp->max_inline_data;
294 if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
295 qp->signature_en = true;
297 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
298 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
299 if (qp->sq.wqe_cnt > gen->max_wqes) {
300 mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
301 qp->sq.wqe_cnt, gen->max_wqes);
304 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
305 qp->sq.max_gs = attr->cap.max_send_sge;
306 qp->sq.max_post = wq_size / wqe_size;
307 attr->cap.max_send_wr = qp->sq.max_post;
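/*
 * Validate the user-supplied SQ geometry (descriptor size, power-of-two
 * WQE count, device limits) and compute the total buffer size for the QP.
 */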
312 static int set_user_buf_size(struct mlx5_ib_dev *dev,
313 struct mlx5_ib_qp *qp,
314 struct mlx5_ib_create_qp *ucmd)
316 struct mlx5_general_caps *gen;
317 int desc_sz = 1 << qp->sq.wqe_shift;
319 gen = &dev->mdev->caps.gen;
320 if (desc_sz > gen->max_sq_desc_sz) {
321 mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
322 desc_sz, gen->max_sq_desc_sz);
326 if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
		mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
			     ucmd->sq_wqe_count);
332 qp->sq.wqe_cnt = ucmd->sq_wqe_count;
334 if (qp->sq.wqe_cnt > gen->max_wqes) {
335 mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
336 qp->sq.wqe_cnt, gen->max_wqes);
340 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
341 (qp->sq.wqe_cnt << 6);
346 static int qp_has_rq(struct ib_qp_init_attr *attr)
348 if (attr->qp_type == IB_QPT_XRC_INI ||
349 attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
350 attr->qp_type == MLX5_IB_QPT_REG_UMR ||
351 !attr->cap.max_recv_wr)
357 static int first_med_uuar(void)
362 static int next_uuar(int n)
366 while (((n % 4) & 2))
372 static int num_med_uuar(struct mlx5_uuar_info *uuari)
376 n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
377 uuari->num_low_latency_uuars - 1;
379 return n >= 0 ? n : 0;
382 static int max_uuari(struct mlx5_uuar_info *uuari)
384 return uuari->num_uars * 4;
387 static int first_hi_uuar(struct mlx5_uuar_info *uuari)
393 med = num_med_uuar(uuari);
394 for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
403 static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
407 for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
408 if (!test_bit(i, uuari->bitmap)) {
409 set_bit(i, uuari->bitmap);
418 static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
420 int minidx = first_med_uuar();
423 for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
424 if (uuari->count[i] < uuari->count[minidx])
428 uuari->count[minidx]++;
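/*
 * UUAR (micro UAR) allocation by latency class: the medium class
 * round-robins over the least-used shared registers, while the high class
 * grabs a free dedicated (BlueFlame) register from the bitmap.  Callers
 * fall back to a lower class when allocation fails.
 */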
432 static int alloc_uuar(struct mlx5_uuar_info *uuari,
433 enum mlx5_ib_latency_class lat)
437 mutex_lock(&uuari->lock);
439 case MLX5_IB_LATENCY_CLASS_LOW:
441 uuari->count[uuarn]++;
444 case MLX5_IB_LATENCY_CLASS_MEDIUM:
448 uuarn = alloc_med_class_uuar(uuari);
451 case MLX5_IB_LATENCY_CLASS_HIGH:
455 uuarn = alloc_high_class_uuar(uuari);
458 case MLX5_IB_LATENCY_CLASS_FAST_PATH:
462 mutex_unlock(&uuari->lock);
467 static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
469 clear_bit(uuarn, uuari->bitmap);
470 --uuari->count[uuarn];
473 static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
475 clear_bit(uuarn, uuari->bitmap);
476 --uuari->count[uuarn];
479 static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
481 int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
482 int high_uuar = nuuars - uuari->num_low_latency_uuars;
484 mutex_lock(&uuari->lock);
486 --uuari->count[uuarn];
490 if (uuarn < high_uuar) {
491 free_med_class_uuar(uuari, uuarn);
495 free_high_class_uuar(uuari, uuarn);
498 mutex_unlock(&uuari->lock);
501 static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
504 case IB_QPS_RESET: return MLX5_QP_STATE_RST;
505 case IB_QPS_INIT: return MLX5_QP_STATE_INIT;
506 case IB_QPS_RTR: return MLX5_QP_STATE_RTR;
507 case IB_QPS_RTS: return MLX5_QP_STATE_RTS;
508 case IB_QPS_SQD: return MLX5_QP_STATE_SQD;
509 case IB_QPS_SQE: return MLX5_QP_STATE_SQER;
510 case IB_QPS_ERR: return MLX5_QP_STATE_ERR;
515 static int to_mlx5_st(enum ib_qp_type type)
518 case IB_QPT_RC: return MLX5_QP_ST_RC;
519 case IB_QPT_UC: return MLX5_QP_ST_UC;
520 case IB_QPT_UD: return MLX5_QP_ST_UD;
521 case MLX5_IB_QPT_REG_UMR: return MLX5_QP_ST_REG_UMR;
523 case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC;
524 case IB_QPT_SMI: return MLX5_QP_ST_QP0;
525 case IB_QPT_GSI: return MLX5_QP_ST_QP1;
526 case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
527 case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
528 case IB_QPT_RAW_PACKET:
530 default: return -EINVAL;
534 static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
536 return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
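/*
 * Create the userspace-backed part of a QP: copy the create command, pick
 * a UUAR (preferring a low-latency one and degrading gracefully), pin and
 * map the user buffer with ib_umem_get(), build the PAS list for firmware,
 * map the doorbell page and return the chosen UUAR index to userspace.
 */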
539 static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
540 struct mlx5_ib_qp *qp, struct ib_udata *udata,
541 struct mlx5_create_qp_mbox_in **in,
542 struct mlx5_ib_create_qp_resp *resp, int *inlen)
544 struct mlx5_ib_ucontext *context;
545 struct mlx5_ib_create_qp ucmd;
554 err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
556 mlx5_ib_dbg(dev, "copy failed\n");
560 context = to_mucontext(pd->uobject->context);
562 * TBD: should come from the verbs when we have the API
564 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
566 mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
567 mlx5_ib_dbg(dev, "reverting to medium latency\n");
568 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
570 mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
571 mlx5_ib_dbg(dev, "reverting to high latency\n");
572 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
574 mlx5_ib_warn(dev, "uuar allocation failed\n");
580 uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
581 mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);
584 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
585 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
587 err = set_user_buf_size(dev, qp, &ucmd);
591 if (ucmd.buf_addr && qp->buf_size) {
592 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
594 if (IS_ERR(qp->umem)) {
595 mlx5_ib_dbg(dev, "umem_get failed\n");
596 err = PTR_ERR(qp->umem);
604 mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
606 err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
608 mlx5_ib_warn(dev, "bad offset\n");
611 mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
612 ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
615 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
616 *in = mlx5_vzalloc(*inlen);
622 mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
623 (*in)->ctx.log_pg_sz_remote_qpn =
624 cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
625 (*in)->ctx.params2 = cpu_to_be32(offset << 6);
627 (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
628 resp->uuar_index = uuarn;
631 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
633 mlx5_ib_dbg(dev, "map failed\n");
637 err = ib_copy_to_udata(udata, resp, sizeof(*resp));
639 mlx5_ib_dbg(dev, "copy failed\n");
642 qp->create_type = MLX5_QP_USER;
647 mlx5_ib_db_unmap_user(context, &qp->db);
654 ib_umem_release(qp->umem);
657 free_uuar(&context->uuari, uuarn);
661 static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
663 struct mlx5_ib_ucontext *context;
665 context = to_mucontext(pd->uobject->context);
666 mlx5_ib_db_unmap_user(context, &qp->db);
668 ib_umem_release(qp->umem);
669 free_uuar(&context->uuari, qp->uuarn);
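/*
 * Create a kernel-owned QP: allocate a UUAR (fast path class for the UMR
 * QP), size and allocate the WQ buffer and doorbell record, fill the
 * mailbox context (fast registration is enabled for all kernel QPs) and
 * allocate the bookkeeping arrays used by post_send/post_recv.
 */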
672 static int create_kernel_qp(struct mlx5_ib_dev *dev,
673 struct ib_qp_init_attr *init_attr,
674 struct mlx5_ib_qp *qp,
675 struct mlx5_create_qp_mbox_in **in, int *inlen)
677 enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
678 struct mlx5_uuar_info *uuari;
683 uuari = &dev->mdev->priv.uuari;
684 if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
687 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
688 lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
690 uuarn = alloc_uuar(uuari, lc);
		mlx5_ib_dbg(dev, "failed to allocate UUAR for kernel QP\n");
696 qp->bf = &uuari->bfs[uuarn];
697 uar_index = qp->bf->uar->index;
699 err = calc_sq_size(dev, init_attr, qp);
701 mlx5_ib_dbg(dev, "err %d\n", err);
706 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
707 qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
709 err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
711 mlx5_ib_dbg(dev, "err %d\n", err);
715 qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
716 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
717 *in = mlx5_vzalloc(*inlen);
722 (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
723 (*in)->ctx.log_pg_sz_remote_qpn =
724 cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
725 /* Set "fast registration enabled" for all kernel QPs */
726 (*in)->ctx.params1 |= cpu_to_be32(1 << 11);
727 (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);
729 mlx5_fill_page_array(&qp->buf, (*in)->pas);
731 err = mlx5_db_alloc(dev->mdev, &qp->db);
733 mlx5_ib_dbg(dev, "err %d\n", err);
740 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
741 qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
742 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
743 qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
744 qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);
746 if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
747 !qp->sq.w_list || !qp->sq.wqe_head) {
751 qp->create_type = MLX5_QP_KERNEL;
756 mlx5_db_free(dev->mdev, &qp->db);
757 kfree(qp->sq.wqe_head);
758 kfree(qp->sq.w_list);
760 kfree(qp->sq.wr_data);
767 mlx5_buf_free(dev->mdev, &qp->buf);
770 free_uuar(&dev->mdev->priv.uuari, uuarn);
774 static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
776 mlx5_db_free(dev->mdev, &qp->db);
777 kfree(qp->sq.wqe_head);
778 kfree(qp->sq.w_list);
780 kfree(qp->sq.wr_data);
782 mlx5_buf_free(dev->mdev, &qp->buf);
783 free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
786 static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
788 if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
789 (attr->qp_type == IB_QPT_XRC_INI))
790 return cpu_to_be32(MLX5_SRQ_RQ);
791 else if (!qp->has_rq)
792 return cpu_to_be32(MLX5_ZERO_LEN_RQ);
794 return cpu_to_be32(MLX5_NON_ZERO_RQ);
797 static int is_connected(enum ib_qp_type qp_type)
799 if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
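/*
 * Common QP creation path for both user and kernel QPs: size the RQ/SQ,
 * build the create_qp mailbox (PD, CQs, SRQ/XRCD, scatter-to-CQE and
 * multicast-loopback-block flags) and execute the firmware command.
 */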
805 static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
806 struct ib_qp_init_attr *init_attr,
807 struct ib_udata *udata, struct mlx5_ib_qp *qp)
809 struct mlx5_ib_resources *devr = &dev->devr;
810 struct mlx5_ib_create_qp_resp resp;
811 struct mlx5_create_qp_mbox_in *in;
812 struct mlx5_general_caps *gen;
813 struct mlx5_ib_create_qp ucmd;
814 int inlen = sizeof(*in);
817 gen = &dev->mdev->caps.gen;
818 mutex_init(&qp->mutex);
819 spin_lock_init(&qp->sq.lock);
820 spin_lock_init(&qp->rq.lock);
822 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
823 if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
824 mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
827 qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
831 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
832 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
834 if (pd && pd->uobject) {
835 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
836 mlx5_ib_dbg(dev, "copy failed\n");
840 qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
841 qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
843 qp->wq_sig = !!wq_signature;
846 qp->has_rq = qp_has_rq(init_attr);
847 err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
848 qp, (pd && pd->uobject) ? &ucmd : NULL);
850 mlx5_ib_dbg(dev, "err %d\n", err);
856 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
857 if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
858 ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
859 mlx5_ib_dbg(dev, "invalid rq params\n");
862 if (ucmd.sq_wqe_count > gen->max_wqes) {
863 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
864 ucmd.sq_wqe_count, gen->max_wqes);
867 err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
869 mlx5_ib_dbg(dev, "err %d\n", err);
871 err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
873 mlx5_ib_dbg(dev, "err %d\n", err);
875 qp->pa_lkey = to_mpd(pd)->pa_lkey;
881 in = mlx5_vzalloc(sizeof(*in));
885 qp->create_type = MLX5_QP_EMPTY;
888 if (is_sqp(init_attr->qp_type))
889 qp->port = init_attr->port_num;
891 in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
892 MLX5_QP_PM_MIGRATED << 11);
894 if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
895 in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
897 in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);
900 in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);
902 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
903 in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);
905 if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
909 rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
910 scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
913 in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
915 in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;
917 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
919 in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
921 in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
925 if (qp->rq.wqe_cnt) {
926 in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
927 in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
930 in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);
933 in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
935 in->ctx.sq_crq_size |= cpu_to_be16(0x8000);
937 /* Set default resources */
938 switch (init_attr->qp_type) {
940 in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
941 in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
942 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
943 in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
946 in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
947 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
948 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
951 if (init_attr->srq) {
952 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
953 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
955 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
956 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
960 if (init_attr->send_cq)
961 in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);
963 if (init_attr->recv_cq)
964 in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);
966 in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
968 err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
970 mlx5_ib_dbg(dev, "create qp failed\n");
975 /* Hardware wants QPN written in big-endian order (after
976 * shifting) for send doorbell. Precompute this value to save
977 * a little bit when posting sends.
979 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
981 qp->mqp.event = mlx5_ib_qp_event;
986 if (qp->create_type == MLX5_QP_USER)
987 destroy_qp_user(pd, qp);
988 else if (qp->create_type == MLX5_QP_KERNEL)
989 destroy_qp_kernel(dev, qp);
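/*
 * Lock the send and receive CQs in a fixed order (lower CQN first) so that
 * QP destroy and CQ cleanup paths can never deadlock against each other;
 * the unlock helper below releases them in the reverse order.
 */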
995 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
996 __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
1000 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
1001 spin_lock_irq(&send_cq->lock);
1002 spin_lock_nested(&recv_cq->lock,
1003 SINGLE_DEPTH_NESTING);
1004 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
1005 spin_lock_irq(&send_cq->lock);
1006 __acquire(&recv_cq->lock);
1008 spin_lock_irq(&recv_cq->lock);
1009 spin_lock_nested(&send_cq->lock,
1010 SINGLE_DEPTH_NESTING);
1013 spin_lock_irq(&send_cq->lock);
1015 } else if (recv_cq) {
1016 spin_lock_irq(&recv_cq->lock);
1020 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
1021 __releases(&send_cq->lock) __releases(&recv_cq->lock)
1025 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
1026 spin_unlock(&recv_cq->lock);
1027 spin_unlock_irq(&send_cq->lock);
1028 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
1029 __release(&recv_cq->lock);
1030 spin_unlock_irq(&send_cq->lock);
1032 spin_unlock(&send_cq->lock);
1033 spin_unlock_irq(&recv_cq->lock);
1036 spin_unlock_irq(&send_cq->lock);
1038 } else if (recv_cq) {
1039 spin_unlock_irq(&recv_cq->lock);
1043 static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
1045 return to_mpd(qp->ibqp.pd);
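/*
 * Return the CQs attached to a QP.  XRC targets have neither; UMR and XRC
 * initiator QPs only have a send CQ.
 */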
1048 static void get_cqs(struct mlx5_ib_qp *qp,
1049 struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
1051 switch (qp->ibqp.qp_type) {
1052 case IB_QPT_XRC_TGT:
1056 case MLX5_IB_QPT_REG_UMR:
1057 case IB_QPT_XRC_INI:
1058 *send_cq = to_mcq(qp->ibqp.send_cq);
1067 case IB_QPT_RAW_IPV6:
1068 case IB_QPT_RAW_ETHERTYPE:
1069 *send_cq = to_mcq(qp->ibqp.send_cq);
1070 *recv_cq = to_mcq(qp->ibqp.recv_cq);
1073 case IB_QPT_RAW_PACKET:
1082 static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
1084 struct mlx5_ib_cq *send_cq, *recv_cq;
1085 struct mlx5_modify_qp_mbox_in *in;
1088 in = kzalloc(sizeof(*in), GFP_KERNEL);
1091 if (qp->state != IB_QPS_RESET)
1092 if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
1093 MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
1094 mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
1097 get_cqs(qp, &send_cq, &recv_cq);
1099 if (qp->create_type == MLX5_QP_KERNEL) {
1100 mlx5_ib_lock_cqs(send_cq, recv_cq);
1101 __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1102 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1103 if (send_cq != recv_cq)
1104 __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1105 mlx5_ib_unlock_cqs(send_cq, recv_cq);
1108 err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
1110 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
1114 if (qp->create_type == MLX5_QP_KERNEL)
1115 destroy_qp_kernel(dev, qp);
1116 else if (qp->create_type == MLX5_QP_USER)
1117 destroy_qp_user(&get_pd(qp)->ibpd, qp);
1120 static const char *ib_qp_type_str(enum ib_qp_type type)
1124 return "IB_QPT_SMI";
1126 return "IB_QPT_GSI";
1133 case IB_QPT_RAW_IPV6:
1134 return "IB_QPT_RAW_IPV6";
1135 case IB_QPT_RAW_ETHERTYPE:
1136 return "IB_QPT_RAW_ETHERTYPE";
1137 case IB_QPT_XRC_INI:
1138 return "IB_QPT_XRC_INI";
1139 case IB_QPT_XRC_TGT:
1140 return "IB_QPT_XRC_TGT";
1141 case IB_QPT_RAW_PACKET:
1142 return "IB_QPT_RAW_PACKET";
1143 case MLX5_IB_QPT_REG_UMR:
1144 return "MLX5_IB_QPT_REG_UMR";
1147 return "Invalid QP type";
1151 struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
1152 struct ib_qp_init_attr *init_attr,
1153 struct ib_udata *udata)
1155 struct mlx5_general_caps *gen;
1156 struct mlx5_ib_dev *dev;
1157 struct mlx5_ib_qp *qp;
1162 dev = to_mdev(pd->device);
1164 /* being cautious here */
1165 if (init_attr->qp_type != IB_QPT_XRC_TGT &&
1166 init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
1167 pr_warn("%s: no PD for transport %s\n", __func__,
1168 ib_qp_type_str(init_attr->qp_type));
1169 return ERR_PTR(-EINVAL);
1171 dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
1173 gen = &dev->mdev->caps.gen;
1175 switch (init_attr->qp_type) {
1176 case IB_QPT_XRC_TGT:
1177 case IB_QPT_XRC_INI:
1178 if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) {
1179 mlx5_ib_dbg(dev, "XRC not supported\n");
1180 return ERR_PTR(-ENOSYS);
1182 init_attr->recv_cq = NULL;
1183 if (init_attr->qp_type == IB_QPT_XRC_TGT) {
1184 xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
1185 init_attr->send_cq = NULL;
1194 case MLX5_IB_QPT_REG_UMR:
1195 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1197 return ERR_PTR(-ENOMEM);
1199 err = create_qp_common(dev, pd, init_attr, udata, qp);
1201 mlx5_ib_dbg(dev, "create_qp_common failed\n");
1203 return ERR_PTR(err);
1206 if (is_qp0(init_attr->qp_type))
1207 qp->ibqp.qp_num = 0;
1208 else if (is_qp1(init_attr->qp_type))
1209 qp->ibqp.qp_num = 1;
1211 qp->ibqp.qp_num = qp->mqp.qpn;
1213 mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
1214 qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
1215 to_mcq(init_attr->send_cq)->mcq.cqn);
1221 case IB_QPT_RAW_IPV6:
1222 case IB_QPT_RAW_ETHERTYPE:
1223 case IB_QPT_RAW_PACKET:
1226 mlx5_ib_dbg(dev, "unsupported qp type %d\n",
1227 init_attr->qp_type);
1228 /* Don't support raw QPs */
1229 return ERR_PTR(-EINVAL);
1235 int mlx5_ib_destroy_qp(struct ib_qp *qp)
1237 struct mlx5_ib_dev *dev = to_mdev(qp->device);
1238 struct mlx5_ib_qp *mqp = to_mqp(qp);
1240 destroy_qp_common(dev, mqp);
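/*
 * Translate IB access flags into the RRE/RAE/RWE bits of the QP context,
 * taking either the new attributes or the QP's cached values depending on
 * attr_mask.  With no responder resources only remote write stays enabled.
 */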
1247 static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
1250 u32 hw_access_flags = 0;
1254 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1255 dest_rd_atomic = attr->max_dest_rd_atomic;
1257 dest_rd_atomic = qp->resp_depth;
1259 if (attr_mask & IB_QP_ACCESS_FLAGS)
1260 access_flags = attr->qp_access_flags;
1262 access_flags = qp->atomic_rd_en;
1264 if (!dest_rd_atomic)
1265 access_flags &= IB_ACCESS_REMOTE_WRITE;
1267 if (access_flags & IB_ACCESS_REMOTE_READ)
1268 hw_access_flags |= MLX5_QP_BIT_RRE;
1269 if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
1270 hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
1271 if (access_flags & IB_ACCESS_REMOTE_WRITE)
1272 hw_access_flags |= MLX5_QP_BIT_RWE;
1274 return cpu_to_be32(hw_access_flags);
1278 MLX5_PATH_FLAG_FL = 1 << 0,
1279 MLX5_PATH_FLAG_FREE_AR = 1 << 1,
1280 MLX5_PATH_FLAG_COUNTER = 1 << 2,
1283 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
1285 struct mlx5_general_caps *gen;
1287 gen = &dev->mdev->caps.gen;
1288 if (rate == IB_RATE_PORT_CURRENT) {
1290 } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
1293 while (rate != IB_RATE_2_5_GBPS &&
1294 !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
1295 gen->stat_rate_support))
1299 return rate + MLX5_STAT_RATE_OFFSET;
1302 static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
1303 struct mlx5_qp_path *path, u8 port, int attr_mask,
1304 u32 path_flags, const struct ib_qp_attr *attr)
1306 struct mlx5_general_caps *gen;
1309 gen = &dev->mdev->caps.gen;
1310 path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
1311 path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
1313 if (attr_mask & IB_QP_PKEY_INDEX)
1314 path->pkey_index = attr->pkey_index;
1316 path->grh_mlid = ah->src_path_bits & 0x7f;
1317 path->rlid = cpu_to_be16(ah->dlid);
1319 if (ah->ah_flags & IB_AH_GRH) {
1320 path->grh_mlid |= 1 << 7;
1321 path->mgid_index = ah->grh.sgid_index;
1322 path->hop_limit = ah->grh.hop_limit;
1323 path->tclass_flowlabel =
1324 cpu_to_be32((ah->grh.traffic_class << 20) |
1325 (ah->grh.flow_label));
1326 memcpy(path->rgid, ah->grh.dgid.raw, 16);
1329 err = ib_rate_to_mlx5(dev, ah->static_rate);
1332 path->static_rate = err;
1335 if (ah->ah_flags & IB_AH_GRH) {
1336 if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index,
			       gen->port[port - 1].gid_table_len);
1342 path->grh_mlid |= 1 << 7;
1343 path->mgid_index = ah->grh.sgid_index;
1344 path->hop_limit = ah->grh.hop_limit;
1345 path->tclass_flowlabel =
1346 cpu_to_be32((ah->grh.traffic_class << 20) |
1347 (ah->grh.flow_label));
1348 memcpy(path->rgid, ah->grh.dgid.raw, 16);
1351 if (attr_mask & IB_QP_TIMEOUT)
1352 path->ackto_lt = attr->timeout << 3;
1354 path->sl = ah->sl & 0xf;
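/*
 * Optional-parameter mask the firmware accepts for each (current state,
 * new state, transport) combination; used to trim the bits derived from
 * the verbs attr_mask before issuing the modify_qp command.
 */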
1359 static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
1360 [MLX5_QP_STATE_INIT] = {
1361 [MLX5_QP_STATE_INIT] = {
1362 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
1363 MLX5_QP_OPTPAR_RAE |
1364 MLX5_QP_OPTPAR_RWE |
1365 MLX5_QP_OPTPAR_PKEY_INDEX |
1366 MLX5_QP_OPTPAR_PRI_PORT,
1367 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
1368 MLX5_QP_OPTPAR_PKEY_INDEX |
1369 MLX5_QP_OPTPAR_PRI_PORT,
1370 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
1371 MLX5_QP_OPTPAR_Q_KEY |
1372 MLX5_QP_OPTPAR_PRI_PORT,
1374 [MLX5_QP_STATE_RTR] = {
1375 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1376 MLX5_QP_OPTPAR_RRE |
1377 MLX5_QP_OPTPAR_RAE |
1378 MLX5_QP_OPTPAR_RWE |
1379 MLX5_QP_OPTPAR_PKEY_INDEX,
1380 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1381 MLX5_QP_OPTPAR_RWE |
1382 MLX5_QP_OPTPAR_PKEY_INDEX,
1383 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
1384 MLX5_QP_OPTPAR_Q_KEY,
1385 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
1386 MLX5_QP_OPTPAR_Q_KEY,
1387 [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1388 MLX5_QP_OPTPAR_RRE |
1389 MLX5_QP_OPTPAR_RAE |
1390 MLX5_QP_OPTPAR_RWE |
1391 MLX5_QP_OPTPAR_PKEY_INDEX,
1394 [MLX5_QP_STATE_RTR] = {
1395 [MLX5_QP_STATE_RTS] = {
1396 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1397 MLX5_QP_OPTPAR_RRE |
1398 MLX5_QP_OPTPAR_RAE |
1399 MLX5_QP_OPTPAR_RWE |
1400 MLX5_QP_OPTPAR_PM_STATE |
1401 MLX5_QP_OPTPAR_RNR_TIMEOUT,
1402 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1403 MLX5_QP_OPTPAR_RWE |
1404 MLX5_QP_OPTPAR_PM_STATE,
1405 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1408 [MLX5_QP_STATE_RTS] = {
1409 [MLX5_QP_STATE_RTS] = {
1410 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
1411 MLX5_QP_OPTPAR_RAE |
1412 MLX5_QP_OPTPAR_RWE |
1413 MLX5_QP_OPTPAR_RNR_TIMEOUT |
1414 MLX5_QP_OPTPAR_PM_STATE |
1415 MLX5_QP_OPTPAR_ALT_ADDR_PATH,
1416 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
1417 MLX5_QP_OPTPAR_PM_STATE |
1418 MLX5_QP_OPTPAR_ALT_ADDR_PATH,
1419 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
1420 MLX5_QP_OPTPAR_SRQN |
1421 MLX5_QP_OPTPAR_CQN_RCV,
1424 [MLX5_QP_STATE_SQER] = {
1425 [MLX5_QP_STATE_RTS] = {
1426 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1427 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
1428 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
1429 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
1430 MLX5_QP_OPTPAR_RWE |
1431 MLX5_QP_OPTPAR_RAE |
1437 static int ib_nr_to_mlx5_nr(int ib_mask)
1442 case IB_QP_CUR_STATE:
1444 case IB_QP_EN_SQD_ASYNC_NOTIFY:
1446 case IB_QP_ACCESS_FLAGS:
1447 return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
1449 case IB_QP_PKEY_INDEX:
1450 return MLX5_QP_OPTPAR_PKEY_INDEX;
1452 return MLX5_QP_OPTPAR_PRI_PORT;
1454 return MLX5_QP_OPTPAR_Q_KEY;
1456 return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
1457 MLX5_QP_OPTPAR_PRI_PORT;
1458 case IB_QP_PATH_MTU:
1461 return MLX5_QP_OPTPAR_ACK_TIMEOUT;
1462 case IB_QP_RETRY_CNT:
1463 return MLX5_QP_OPTPAR_RETRY_COUNT;
1464 case IB_QP_RNR_RETRY:
1465 return MLX5_QP_OPTPAR_RNR_RETRY;
1468 case IB_QP_MAX_QP_RD_ATOMIC:
1469 return MLX5_QP_OPTPAR_SRA_MAX;
1470 case IB_QP_ALT_PATH:
1471 return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
1472 case IB_QP_MIN_RNR_TIMER:
1473 return MLX5_QP_OPTPAR_RNR_TIMEOUT;
1476 case IB_QP_MAX_DEST_RD_ATOMIC:
1477 return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
1478 MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
1479 case IB_QP_PATH_MIG_STATE:
1480 return MLX5_QP_OPTPAR_PM_STATE;
1483 case IB_QP_DEST_QPN:
1489 static int ib_mask_to_mlx5_opt(int ib_mask)
1494 for (i = 0; i < 8 * sizeof(int); i++) {
1495 if ((1 << i) & ib_mask)
1496 result |= ib_nr_to_mlx5_nr(1 << i);
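/*
 * Build the modify_qp mailbox from the verbs attributes (paths, MTU, PSNs,
 * rd_atomic limits, access flags), mask the optional parameters against
 * opt_mask[] for this transition and execute the command; on success the
 * cached QP attributes are updated, and a kernel QP moved to RESET has its
 * CQs cleaned and its producer indices rewound.
 */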
1502 static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
1503 const struct ib_qp_attr *attr, int attr_mask,
1504 enum ib_qp_state cur_state, enum ib_qp_state new_state)
1506 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1507 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1508 struct mlx5_ib_cq *send_cq, *recv_cq;
1509 struct mlx5_qp_context *context;
1510 struct mlx5_general_caps *gen;
1511 struct mlx5_modify_qp_mbox_in *in;
1512 struct mlx5_ib_pd *pd;
1513 enum mlx5_qp_state mlx5_cur, mlx5_new;
1514 enum mlx5_qp_optpar optpar;
1519 gen = &dev->mdev->caps.gen;
1520 in = kzalloc(sizeof(*in), GFP_KERNEL);
1525 err = to_mlx5_st(ibqp->qp_type);
1529 context->flags = cpu_to_be32(err << 16);
1531 if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
1532 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1534 switch (attr->path_mig_state) {
1535 case IB_MIG_MIGRATED:
1536 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1539 context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
1542 context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
1547 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
1548 context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
1549 } else if (ibqp->qp_type == IB_QPT_UD ||
1550 ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
1551 context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
1552 } else if (attr_mask & IB_QP_PATH_MTU) {
1553 if (attr->path_mtu < IB_MTU_256 ||
1554 attr->path_mtu > IB_MTU_4096) {
1555 mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
1559 context->mtu_msgmax = (attr->path_mtu << 5) | gen->log_max_msg;
1562 if (attr_mask & IB_QP_DEST_QPN)
1563 context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
1565 if (attr_mask & IB_QP_PKEY_INDEX)
1566 context->pri_path.pkey_index = attr->pkey_index;
1568 /* todo implement counter_index functionality */
1570 if (is_sqp(ibqp->qp_type))
1571 context->pri_path.port = qp->port;
1573 if (attr_mask & IB_QP_PORT)
1574 context->pri_path.port = attr->port_num;
1576 if (attr_mask & IB_QP_AV) {
1577 err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
1578 attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
1579 attr_mask, 0, attr);
1584 if (attr_mask & IB_QP_TIMEOUT)
1585 context->pri_path.ackto_lt |= attr->timeout << 3;
1587 if (attr_mask & IB_QP_ALT_PATH) {
1588 err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
1589 attr->alt_port_num, attr_mask, 0, attr);
1595 get_cqs(qp, &send_cq, &recv_cq);
1597 context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
1598 context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
1599 context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
1600 context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
1602 if (attr_mask & IB_QP_RNR_RETRY)
1603 context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
1605 if (attr_mask & IB_QP_RETRY_CNT)
1606 context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
1608 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1609 if (attr->max_rd_atomic)
1611 cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
1614 if (attr_mask & IB_QP_SQ_PSN)
1615 context->next_send_psn = cpu_to_be32(attr->sq_psn);
1617 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1618 if (attr->max_dest_rd_atomic)
1620 cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
1623 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
1624 context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);
1626 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1627 context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
1629 if (attr_mask & IB_QP_RQ_PSN)
1630 context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
1632 if (attr_mask & IB_QP_QKEY)
1633 context->qkey = cpu_to_be32(attr->qkey);
1635 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1636 context->db_rec_addr = cpu_to_be64(qp->db.dma);
1638 if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
1639 attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
1644 if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1645 context->sq_crq_size |= cpu_to_be16(1 << 4);
1648 mlx5_cur = to_mlx5_state(cur_state);
1649 mlx5_new = to_mlx5_state(new_state);
1650 mlx5_st = to_mlx5_st(ibqp->qp_type);
1654 optpar = ib_mask_to_mlx5_opt(attr_mask);
1655 optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
1656 in->optparam = cpu_to_be32(optpar);
1657 err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
1658 to_mlx5_state(new_state), in, sqd_event,
1663 qp->state = new_state;
1665 if (attr_mask & IB_QP_ACCESS_FLAGS)
1666 qp->atomic_rd_en = attr->qp_access_flags;
1667 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1668 qp->resp_depth = attr->max_dest_rd_atomic;
1669 if (attr_mask & IB_QP_PORT)
1670 qp->port = attr->port_num;
1671 if (attr_mask & IB_QP_ALT_PATH)
1672 qp->alt_port = attr->alt_port_num;
1675 * If we moved a kernel QP to RESET, clean up all old CQ
1676 * entries and reinitialize the QP.
1678 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
1679 mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1680 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
1681 if (send_cq != recv_cq)
1682 mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1688 qp->sq.cur_post = 0;
1689 qp->sq.last_poll = 0;
1690 qp->db.db[MLX5_RCV_DBR] = 0;
1691 qp->db.db[MLX5_SND_DBR] = 0;
1699 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1700 int attr_mask, struct ib_udata *udata)
1702 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1703 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1704 enum ib_qp_state cur_state, new_state;
1705 struct mlx5_general_caps *gen;
1709 gen = &dev->mdev->caps.gen;
1710 mutex_lock(&qp->mutex);
1712 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
1713 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1715 if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
1716 !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
1717 IB_LINK_LAYER_UNSPECIFIED))
1720 if ((attr_mask & IB_QP_PORT) &&
1721 (attr->port_num == 0 || attr->port_num > gen->num_ports))
1724 if (attr_mask & IB_QP_PKEY_INDEX) {
1725 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1726 if (attr->pkey_index >= gen->port[port - 1].pkey_table_len)
1730 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
1731 attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp))
1734 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
1735 attr->max_dest_rd_atomic > (1 << gen->log_max_ra_req_qp))
1738 if (cur_state == new_state && cur_state == IB_QPS_RESET) {
1743 err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
1746 mutex_unlock(&qp->mutex);
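/*
 * Check whether posting nreq more WQEs would overflow the work queue; when
 * the quick check fails, recheck under the CQ lock so a concurrent
 * poll_cq() updating the tail is taken into account.
 */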
1750 static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
1752 struct mlx5_ib_cq *cq;
1755 cur = wq->head - wq->tail;
1756 if (likely(cur + nreq < wq->max_post))
1760 spin_lock(&cq->lock);
1761 cur = wq->head - wq->tail;
1762 spin_unlock(&cq->lock);
1764 return cur + nreq >= wq->max_post;
1767 static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
1768 u64 remote_addr, u32 rkey)
1770 rseg->raddr = cpu_to_be64(remote_addr);
1771 rseg->rkey = cpu_to_be32(rkey);
1775 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
1776 struct ib_send_wr *wr)
1778 memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
1779 dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
1780 dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
1783 static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
1785 dseg->byte_count = cpu_to_be32(sg->length);
1786 dseg->lkey = cpu_to_be32(sg->lkey);
1787 dseg->addr = cpu_to_be64(sg->addr);
1790 static __be16 get_klm_octo(int npages)
1792 return cpu_to_be16(ALIGN(npages, 8) / 2);
1795 static __be64 frwr_mkey_mask(void)
1799 result = MLX5_MKEY_MASK_LEN |
1800 MLX5_MKEY_MASK_PAGE_SIZE |
1801 MLX5_MKEY_MASK_START_ADDR |
1802 MLX5_MKEY_MASK_EN_RINVAL |
1803 MLX5_MKEY_MASK_KEY |
1809 MLX5_MKEY_MASK_SMALL_FENCE |
1810 MLX5_MKEY_MASK_FREE;
1812 return cpu_to_be64(result);
1815 static __be64 sig_mkey_mask(void)
1819 result = MLX5_MKEY_MASK_LEN |
1820 MLX5_MKEY_MASK_PAGE_SIZE |
1821 MLX5_MKEY_MASK_START_ADDR |
1822 MLX5_MKEY_MASK_EN_SIGERR |
1823 MLX5_MKEY_MASK_EN_RINVAL |
1824 MLX5_MKEY_MASK_KEY |
1829 MLX5_MKEY_MASK_SMALL_FENCE |
1830 MLX5_MKEY_MASK_FREE |
1831 MLX5_MKEY_MASK_BSF_EN;
1833 return cpu_to_be64(result);
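/*
 * The UMR control and mkey segment builders below are shared between fast
 * registration work requests, local invalidate and the driver's own
 * MLX5_IB_WR_UMR commands; "li" selects the local-invalidate layout.
 */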
1836 static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
1837 struct ib_send_wr *wr, int li)
1839 memset(umr, 0, sizeof(*umr));
1842 umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
1843 umr->flags = 1 << 7;
1847 umr->flags = (1 << 5); /* fail if not free */
1848 umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len);
1849 umr->mkey_mask = frwr_mkey_mask();
1852 static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
1853 struct ib_send_wr *wr)
1855 struct umr_wr *umrwr = (struct umr_wr *)&wr->wr.fast_reg;
1858 memset(umr, 0, sizeof(*umr));
1860 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
1861 umr->flags = 1 << 5; /* fail if not free */
1862 umr->klm_octowords = get_klm_octo(umrwr->npages);
1863 mask = MLX5_MKEY_MASK_LEN |
1864 MLX5_MKEY_MASK_PAGE_SIZE |
1865 MLX5_MKEY_MASK_START_ADDR |
1869 MLX5_MKEY_MASK_KEY |
1873 MLX5_MKEY_MASK_FREE;
1874 umr->mkey_mask = cpu_to_be64(mask);
1876 umr->flags = 2 << 5; /* fail if free */
1877 mask = MLX5_MKEY_MASK_FREE;
1878 umr->mkey_mask = cpu_to_be64(mask);
1882 umr->flags |= (1 << 7); /* inline */
1885 static u8 get_umr_flags(int acc)
1887 return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
1888 (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
1889 (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
1890 (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
1891 MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
1894 static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
1897 memset(seg, 0, sizeof(*seg));
1899 seg->status = 1 << 6;
1903 seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) |
1904 MLX5_ACCESS_MODE_MTT;
1905 *writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
1906 seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
1907 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
1908 seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
1909 seg->len = cpu_to_be64(wr->wr.fast_reg.length);
1910 seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
1911 seg->log2_page_size = wr->wr.fast_reg.page_shift;
1914 static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
1916 memset(seg, 0, sizeof(*seg));
1917 if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
1918 seg->status = 1 << 6;
1922 seg->flags = convert_access(wr->wr.fast_reg.access_flags);
1923 seg->flags_pd = cpu_to_be32(to_mpd((struct ib_pd *)wr->wr.fast_reg.page_list)->pdn);
1924 seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
1925 seg->len = cpu_to_be64(wr->wr.fast_reg.length);
1926 seg->log2_page_size = wr->wr.fast_reg.page_shift;
1927 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
1928 mlx5_mkey_variant(wr->wr.fast_reg.rkey));
1931 static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
1932 struct ib_send_wr *wr,
1933 struct mlx5_core_dev *mdev,
1934 struct mlx5_ib_pd *pd,
1937 struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
1938 u64 *page_list = wr->wr.fast_reg.page_list->page_list;
1939 u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
1942 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
1943 mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
1944 dseg->addr = cpu_to_be64(mfrpl->map);
1945 dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
1946 dseg->lkey = cpu_to_be32(pd->pa_lkey);
1949 static __be32 send_ieth(struct ib_send_wr *wr)
1951 switch (wr->opcode) {
1952 case IB_WR_SEND_WITH_IMM:
1953 case IB_WR_RDMA_WRITE_WITH_IMM:
1954 return wr->ex.imm_data;
1956 case IB_WR_SEND_WITH_INV:
1957 return cpu_to_be32(wr->ex.invalidate_rkey);
1964 static u8 calc_sig(void *wqe, int size)
1970 for (i = 0; i < size; i++)
1976 static u8 wq_sig(void *wqe)
1978 return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
1981 static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
1984 struct mlx5_wqe_inline_seg *seg;
1985 void *qend = qp->sq.qend;
1993 wqe += sizeof(*seg);
1994 for (i = 0; i < wr->num_sge; i++) {
1995 addr = (void *)(unsigned long)(wr->sg_list[i].addr);
1996 len = wr->sg_list[i].length;
1999 if (unlikely(inl > qp->max_inline_data))
2002 if (unlikely(wqe + len > qend)) {
2004 memcpy(wqe, addr, copy);
2007 wqe = mlx5_get_send_wqe(qp, 0);
2009 memcpy(wqe, addr, len);
2013 seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
2015 *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
2020 static u16 prot_field_size(enum ib_signature_type type)
2023 case IB_SIG_TYPE_T10_DIF:
2024 return MLX5_DIF_SIZE;
2030 static u8 bs_selector(int block_size)
2032 switch (block_size) {
2033 case 512: return 0x1;
2034 case 520: return 0x2;
2035 case 4096: return 0x3;
2036 case 4160: return 0x4;
2037 case 1073741824: return 0x5;
2042 static int format_selector(struct ib_sig_attrs *attr,
2043 struct ib_sig_domain *domain,
2047 #define FORMAT_DIF_NONE 0
2048 #define FORMAT_DIF_CRC_INC 8
2049 #define FORMAT_DIF_CRC_NO_INC 12
2050 #define FORMAT_DIF_CSUM_INC 13
2051 #define FORMAT_DIF_CSUM_NO_INC 14
2053 switch (domain->sig.dif.type) {
2054 case IB_T10DIF_NONE:
2056 *selector = FORMAT_DIF_NONE;
2058 case IB_T10DIF_TYPE1: /* Fall through */
2059 case IB_T10DIF_TYPE2:
2060 switch (domain->sig.dif.bg_type) {
2062 *selector = FORMAT_DIF_CRC_INC;
2064 case IB_T10DIF_CSUM:
2065 *selector = FORMAT_DIF_CSUM_INC;
2071 case IB_T10DIF_TYPE3:
2072 switch (domain->sig.dif.bg_type) {
2074 *selector = domain->sig.dif.type3_inc_reftag ?
2075 FORMAT_DIF_CRC_INC :
2076 FORMAT_DIF_CRC_NO_INC;
2078 case IB_T10DIF_CSUM:
2079 *selector = domain->sig.dif.type3_inc_reftag ?
2080 FORMAT_DIF_CSUM_INC :
2081 FORMAT_DIF_CSUM_NO_INC;
2094 static int mlx5_set_bsf(struct ib_mr *sig_mr,
2095 struct ib_sig_attrs *sig_attrs,
2096 struct mlx5_bsf *bsf, u32 data_size)
2098 struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
2099 struct mlx5_bsf_basic *basic = &bsf->basic;
2100 struct ib_sig_domain *mem = &sig_attrs->mem;
2101 struct ib_sig_domain *wire = &sig_attrs->wire;
2104 memset(bsf, 0, sizeof(*bsf));
2105 switch (sig_attrs->mem.sig_type) {
2106 case IB_SIG_TYPE_T10_DIF:
2107 if (sig_attrs->wire.sig_type != IB_SIG_TYPE_T10_DIF)
2110 /* Input domain check byte mask */
2111 basic->check_byte_mask = sig_attrs->check_mask;
2112 if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
2113 mem->sig.dif.type == wire->sig.dif.type) {
2114 /* Same block structure */
2115 basic->bsf_size_sbs = 1 << 4;
2116 if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
2117 basic->wire.copy_byte_mask |= 0xc0;
2118 if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
2119 basic->wire.copy_byte_mask |= 0x30;
2120 if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
2121 basic->wire.copy_byte_mask |= 0x0f;
2123 basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
2125 basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
2126 basic->raw_data_size = cpu_to_be32(data_size);
2128 ret = format_selector(sig_attrs, mem, &selector);
2131 basic->m_bfs_psv = cpu_to_be32(selector << 24 |
2132 msig->psv_memory.psv_idx);
2134 ret = format_selector(sig_attrs, wire, &selector);
2137 basic->w_bfs_psv = cpu_to_be32(selector << 24 |
2138 msig->psv_wire.psv_idx);
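/*
 * Build the data segment of a signature handover WQE.  When data and
 * protection are interleaved (or there is no protection buffer) a single
 * KLM describes the region; otherwise a strided block control segment
 * interleaves data blocks with their protection fields.  A BSF describing
 * the T10-DIF parameters always follows.
 */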
2148 static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
2149 void **seg, int *size)
2151 struct ib_sig_attrs *sig_attrs = wr->wr.sig_handover.sig_attrs;
2152 struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
2153 struct mlx5_bsf *bsf;
2154 u32 data_len = wr->sg_list->length;
2155 u32 data_key = wr->sg_list->lkey;
2156 u64 data_va = wr->sg_list->addr;
2160 if (!wr->wr.sig_handover.prot ||
2161 (data_key == wr->wr.sig_handover.prot->lkey &&
2162 data_va == wr->wr.sig_handover.prot->addr &&
2163 data_len == wr->wr.sig_handover.prot->length)) {
		/**
		 * Source domain doesn't contain signature information
		 * or data and protection are interleaved in memory.
		 * So need construct:
		 *                  ------------------
		 *                 |     data_klm     |
		 *                  ------------------
		 *                 |       BSF        |
		 *                  ------------------
		 **/
2174 struct mlx5_klm *data_klm = *seg;
2176 data_klm->bcount = cpu_to_be32(data_len);
2177 data_klm->key = cpu_to_be32(data_key);
2178 data_klm->va = cpu_to_be64(data_va);
2179 wqe_size = ALIGN(sizeof(*data_klm), 64);
	} else {
		/**
		 * Source domain contains signature information
		 * So need construct a strided block format:
		 *               ---------------------------
		 *              |     stride_block_ctrl     |
		 *               ---------------------------
		 *              |          data_klm         |
		 *               ---------------------------
		 *              |          prot_klm         |
		 *               ---------------------------
		 *              |             BSF           |
		 *               ---------------------------
		 **/
2194 struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
2195 struct mlx5_stride_block_entry *data_sentry;
2196 struct mlx5_stride_block_entry *prot_sentry;
2197 u32 prot_key = wr->wr.sig_handover.prot->lkey;
2198 u64 prot_va = wr->wr.sig_handover.prot->addr;
2199 u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
2203 data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
2204 prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
2206 prot_size = prot_field_size(sig_attrs->mem.sig_type);
2208 pr_err("Bad block size given: %u\n", block_size);
2211 sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
2213 sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
2214 sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
2215 sblock_ctrl->num_entries = cpu_to_be16(2);
2217 data_sentry->bcount = cpu_to_be16(block_size);
2218 data_sentry->key = cpu_to_be32(data_key);
2219 data_sentry->va = cpu_to_be64(data_va);
2220 data_sentry->stride = cpu_to_be16(block_size);
2222 prot_sentry->bcount = cpu_to_be16(prot_size);
2223 prot_sentry->key = cpu_to_be32(prot_key);
2224 prot_sentry->va = cpu_to_be64(prot_va);
2225 prot_sentry->stride = cpu_to_be16(prot_size);
2227 wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
2228 sizeof(*prot_sentry), 64);
2232 *size += wqe_size / 16;
2233 if (unlikely((*seg == qp->sq.qend)))
2234 *seg = mlx5_get_send_wqe(qp, 0);
2237 ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
2241 *seg += sizeof(*bsf);
2242 *size += sizeof(*bsf) / 16;
2243 if (unlikely((*seg == qp->sq.qend)))
2244 *seg = mlx5_get_send_wqe(qp, 0);
2249 static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
2250 struct ib_send_wr *wr, u32 nelements,
2251 u32 length, u32 pdn)
2253 struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
2254 u32 sig_key = sig_mr->rkey;
2255 u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
2257 memset(seg, 0, sizeof(*seg));
2259 seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) |
2260 MLX5_ACCESS_MODE_KLM;
2261 seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
2262 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
2263 MLX5_MKEY_BSF_EN | pdn);
2264 seg->len = cpu_to_be64(length);
2265 seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
2266 seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
2269 static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
2270 struct ib_send_wr *wr, u32 nelements)
2272 memset(umr, 0, sizeof(*umr));
2274 umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
2275 umr->klm_octowords = get_klm_octo(nelements);
2276 umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
2277 umr->mkey_mask = sig_mkey_mask();
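/*
 * Emit the UMR portion of a signature handover work request: a UMR control
 * segment followed by a signature-enabled mkey segment and the data/BSF
 * segments, each wrapping to the start of the SQ buffer when the queue end
 * is reached.
 */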
2281 static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
2282 void **seg, int *size)
2284 struct mlx5_ib_mr *sig_mr = to_mmr(wr->wr.sig_handover.sig_mr);
2285 u32 pdn = get_pd(qp)->pdn;
2287 int region_len, ret;
2289 if (unlikely(wr->num_sge != 1) ||
2290 unlikely(wr->wr.sig_handover.access_flags &
2291 IB_ACCESS_REMOTE_ATOMIC) ||
2292 unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
2293 unlikely(!sig_mr->sig->sig_status_checked))
2296 /* length of the protected region, data + protection */
2297 region_len = wr->sg_list->length;
2298 if (wr->wr.sig_handover.prot &&
2299 (wr->wr.sig_handover.prot->lkey != wr->sg_list->lkey ||
2300 wr->wr.sig_handover.prot->addr != wr->sg_list->addr ||
2301 wr->wr.sig_handover.prot->length != wr->sg_list->length))
2302 region_len += wr->wr.sig_handover.prot->length;
2305 * KLM octoword size - if protection was provided
2306 * then we use strided block format (3 octowords),
2307 * else we use single KLM (1 octoword)
2309 klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1;
2311 set_sig_umr_segment(*seg, wr, klm_oct_size);
2312 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2313 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2314 if (unlikely((*seg == qp->sq.qend)))
2315 *seg = mlx5_get_send_wqe(qp, 0);
2317 set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
2318 *seg += sizeof(struct mlx5_mkey_seg);
2319 *size += sizeof(struct mlx5_mkey_seg) / 16;
2320 if (unlikely((*seg == qp->sq.qend)))
2321 *seg = mlx5_get_send_wqe(qp, 0);
2323 ret = set_sig_data_segment(wr, qp, seg, size);
2327 sig_mr->sig->sig_status_checked = false;
2331 static int set_psv_wr(struct ib_sig_domain *domain,
2332 u32 psv_idx, void **seg, int *size)
2334 struct mlx5_seg_set_psv *psv_seg = *seg;
2336 memset(psv_seg, 0, sizeof(*psv_seg));
2337 psv_seg->psv_num = cpu_to_be32(psv_idx);
2338 switch (domain->sig_type) {
2339 case IB_SIG_TYPE_T10_DIF:
2340 psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
2341 domain->sig.dif.app_tag);
2342 psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
2344 *seg += sizeof(*psv_seg);
2345 *size += sizeof(*psv_seg) / 16;
2349 pr_err("Bad signature type given.\n");
2356 static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
2357 struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
2362 li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
2363 if (unlikely(wr->send_flags & IB_SEND_INLINE))
2366 set_frwr_umr_segment(*seg, wr, li);
2367 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2368 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2369 if (unlikely((*seg == qp->sq.qend)))
2370 *seg = mlx5_get_send_wqe(qp, 0);
2371 set_mkey_segment(*seg, wr, li, &writ);
2372 *seg += sizeof(struct mlx5_mkey_seg);
2373 *size += sizeof(struct mlx5_mkey_seg) / 16;
2374 if (unlikely((*seg == qp->sq.qend)))
2375 *seg = mlx5_get_send_wqe(qp, 0);
2377 if (unlikely(wr->wr.fast_reg.page_list_len >
2378 wr->wr.fast_reg.page_list->max_page_list_len))
2381 set_frwr_pages(*seg, wr, mdev, pd, writ);
2382 *seg += sizeof(struct mlx5_wqe_data_seg);
2383 *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
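/* Debug helper: dump a WQE as rows of four big-endian dwords, walking the
 * send queue one 64-byte basic block at a time and wrapping on wqe_cnt.
 */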
2388 static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
2394 pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
2395 for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
2396 if ((i & 0xf) == 0) {
2397 void *buf = mlx5_get_send_wqe(qp, tidx);
2398 tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
2402 pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
2403 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
2404 be32_to_cpu(p[j + 3]));
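/* Copy a WQE to the BlueFlame register 64 bytes at a time (eight 8-byte
 * __iowrite64_copy() writes per chunk), wrapping back to the start of the
 * send queue buffer when the source pointer reaches sq.qend.
 */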
2408 static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
2409 unsigned bytecnt, struct mlx5_ib_qp *qp)
2411 while (bytecnt > 0) {
2412 __iowrite64_copy(dst++, src++, 8);
2413 __iowrite64_copy(dst++, src++, 8);
2414 __iowrite64_copy(dst++, src++, 8);
2415 __iowrite64_copy(dst++, src++, 8);
2416 __iowrite64_copy(dst++, src++, 8);
2417 __iowrite64_copy(dst++, src++, 8);
2418 __iowrite64_copy(dst++, src++, 8);
2419 __iowrite64_copy(dst++, src++, 8);
2421 if (unlikely(src == qp->sq.qend))
2422 src = mlx5_get_send_wqe(qp, 0);
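/* Choose the fence mode for a WQE: a local invalidate posted with
 * IB_SEND_FENCE gets strong ordering; otherwise, if a fence is already
 * cached from a previous WQE, an explicit IB_SEND_FENCE escalates it to
 * small-and-fence, and the cached value is carried forward.
 */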
2426 static u8 get_fence(u8 fence, struct ib_send_wr *wr)
2428 if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
2429 wr->send_flags & IB_SEND_FENCE))
2430 return MLX5_FENCE_MODE_STRONG_ORDERING;
2432 if (unlikely(fence)) {
2433 if (wr->send_flags & IB_SEND_FENCE)
2434 return MLX5_FENCE_MODE_SMALL_AND_FENCE;
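/* Start a new send WQE: fail if posting nreq more requests would overflow
 * the SQ, locate the slot for the current producer index, zero the dword at
 * offset 8 of the ctrl segment, fill in the immediate data and the
 * completion/solicited bits, then advance *seg and *size past the ctrl
 * segment (*size is counted in 16-byte units).
 */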
2443 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
2444 struct mlx5_wqe_ctrl_seg **ctrl,
2445 struct ib_send_wr *wr, int *idx,
2446 int *size, int nreq)
2450 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
2455 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
2456 *seg = mlx5_get_send_wqe(qp, *idx);
2458 *(uint32_t *)(*seg + 8) = 0;
2459 (*ctrl)->imm = send_ieth(wr);
2460 (*ctrl)->fm_ce_se = qp->sq_signal_bits |
2461 (wr->send_flags & IB_SEND_SIGNALED ?
2462 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
2463 (wr->send_flags & IB_SEND_SOLICITED ?
2464 MLX5_WQE_CTRL_SOLICITED : 0);
2466 *seg += sizeof(**ctrl);
2467 *size = sizeof(**ctrl) / 16;
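/* Complete a WQE: write the opcode/index and qpn/ds words of the ctrl
 * segment, merge in the fence bits, cache the next fence mode, optionally
 * sign the WQE, and record wr_id, opcode and head position before advancing
 * cur_post by the number of send basic blocks the WQE occupies.
 */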
2472 static void finish_wqe(struct mlx5_ib_qp *qp,
2473 struct mlx5_wqe_ctrl_seg *ctrl,
2474 u8 size, unsigned idx, u64 wr_id,
2475 int nreq, u8 fence, u8 next_fence,
2480 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
2481 mlx5_opcode | ((u32)opmod << 24));
2482 ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
2483 ctrl->fm_ce_se |= fence;
2484 qp->fm_cache = next_fence;
2485 if (unlikely(qp->wq_sig))
2486 ctrl->signature = wq_sig(ctrl);
2488 qp->sq.wrid[idx] = wr_id;
2489 qp->sq.w_list[idx].opcode = mlx5_opcode;
2490 qp->sq.wqe_head[idx] = qp->sq.head + nreq;
2491 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
2492 qp->sq.w_list[idx].next = qp->sq.cur_post;
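/* Post a chain of send work requests. Under the SQ spinlock each request is
 * validated, a WQE is started with begin_wqe(), transport- and
 * opcode-specific segments are appended (RDMA, UMR, signature, datagram;
 * atomics are rejected as unsupported), the payload is added either inline
 * or as data pointer segments, and the WQE is closed with finish_wqe().
 * After the loop the doorbell record is updated and the doorbell is rung,
 * either by copying the WQE into the BlueFlame buffer or by a 64-bit write
 * to the UAR register.
 */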
2496 int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2497 struct ib_send_wr **bad_wr)
2499 struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
2500 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2501 struct mlx5_core_dev *mdev = dev->mdev;
2502 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2503 struct mlx5_ib_mr *mr;
2504 struct mlx5_wqe_data_seg *dpseg;
2505 struct mlx5_wqe_xrc_seg *xrc;
2506 struct mlx5_bf *bf = qp->bf;
2507 int uninitialized_var(size);
2508 void *qend = qp->sq.qend;
2509 unsigned long flags;
2520 spin_lock_irqsave(&qp->sq.lock, flags);
2522 for (nreq = 0; wr; nreq++, wr = wr->next) {
2523 if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
2524 mlx5_ib_warn(dev, "\n");
2530 fence = qp->fm_cache;
2531 num_sge = wr->num_sge;
2532 if (unlikely(num_sge > qp->sq.max_gs)) {
2533 mlx5_ib_warn(dev, "\n");
2539 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
2541 mlx5_ib_warn(dev, "\n");
2547 switch (ibqp->qp_type) {
2548 case IB_QPT_XRC_INI:
2550 xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
2551 seg += sizeof(*xrc);
2552 size += sizeof(*xrc) / 16;
2555 switch (wr->opcode) {
2556 case IB_WR_RDMA_READ:
2557 case IB_WR_RDMA_WRITE:
2558 case IB_WR_RDMA_WRITE_WITH_IMM:
2559 set_raddr_seg(seg, wr->wr.rdma.remote_addr,
2561 seg += sizeof(struct mlx5_wqe_raddr_seg);
2562 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2565 case IB_WR_ATOMIC_CMP_AND_SWP:
2566 case IB_WR_ATOMIC_FETCH_AND_ADD:
2567 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
2568 mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
2573 case IB_WR_LOCAL_INV:
2574 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2575 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
2576 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
2577 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
2579 mlx5_ib_warn(dev, "\n");
2586 case IB_WR_FAST_REG_MR:
2587 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2588 qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
2589 ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
2590 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
2592 mlx5_ib_warn(dev, "\n");
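/* IB_WR_REG_SIG_MR expands into three WQEs on the SQ: a UMR WQE that
 * programs the signature-enabled mkey, followed by two SET_PSV WQEs that
 * seed the memory-domain and wire-domain PSVs. Each of the three is begun
 * and finished independently below.
 */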
2599 case IB_WR_REG_SIG_MR:
2600 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
2601 mr = to_mmr(wr->wr.sig_handover.sig_mr);
2603 ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
2604 err = set_sig_umr_wr(wr, qp, &seg, &size);
2606 mlx5_ib_warn(dev, "\n");
2611 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2612 nreq, get_fence(fence, wr),
2613 next_fence, MLX5_OPCODE_UMR);
2615 /* SET_PSV WQEs are not signaled and solicited on error */
2618 wr->send_flags &= ~IB_SEND_SIGNALED;
2619 wr->send_flags |= IB_SEND_SOLICITED;
2620 err = begin_wqe(qp, &seg, &ctrl, wr,
2623 mlx5_ib_warn(dev, "\n");
2629 err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->mem,
2630 mr->sig->psv_memory.psv_idx, &seg,
2633 mlx5_ib_warn(dev, "\n");
2638 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2639 nreq, get_fence(fence, wr),
2640 next_fence, MLX5_OPCODE_SET_PSV);
2641 err = begin_wqe(qp, &seg, &ctrl, wr,
2644 mlx5_ib_warn(dev, "\n");
2650 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2651 err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->wire,
2652 mr->sig->psv_wire.psv_idx, &seg,
2655 mlx5_ib_warn(dev, "\n");
2660 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2661 nreq, get_fence(fence, wr),
2662 next_fence, MLX5_OPCODE_SET_PSV);
2672 switch (wr->opcode) {
2673 case IB_WR_RDMA_WRITE:
2674 case IB_WR_RDMA_WRITE_WITH_IMM:
2675 set_raddr_seg(seg, wr->wr.rdma.remote_addr,
2677 seg += sizeof(struct mlx5_wqe_raddr_seg);
2678 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2689 set_datagram_seg(seg, wr);
2690 seg += sizeof(struct mlx5_wqe_datagram_seg);
2691 size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
2692 if (unlikely((seg == qend)))
2693 seg = mlx5_get_send_wqe(qp, 0);
2696 case MLX5_IB_QPT_REG_UMR:
2697 if (wr->opcode != MLX5_IB_WR_UMR) {
2699 mlx5_ib_warn(dev, "bad opcode\n");
2702 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
2703 ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
2704 set_reg_umr_segment(seg, wr);
2705 seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2706 size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2707 if (unlikely((seg == qend)))
2708 seg = mlx5_get_send_wqe(qp, 0);
2709 set_reg_mkey_segment(seg, wr);
2710 seg += sizeof(struct mlx5_mkey_seg);
2711 size += sizeof(struct mlx5_mkey_seg) / 16;
2712 if (unlikely((seg == qend)))
2713 seg = mlx5_get_send_wqe(qp, 0);
2720 if (wr->send_flags & IB_SEND_INLINE && num_sge) {
2721 int uninitialized_var(sz);
2723 err = set_data_inl_seg(qp, wr, seg, &sz);
2724 if (unlikely(err)) {
2725 mlx5_ib_warn(dev, "\n");
2733 for (i = 0; i < num_sge; i++) {
2734 if (unlikely(dpseg == qend)) {
2735 seg = mlx5_get_send_wqe(qp, 0);
2738 if (likely(wr->sg_list[i].length)) {
2739 set_data_ptr_seg(dpseg, wr->sg_list + i);
2740 size += sizeof(struct mlx5_wqe_data_seg) / 16;
2746 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
2747 get_fence(fence, wr), next_fence,
2748 mlx5_ib_opcode[wr->opcode]);
2751 dump_wqe(qp, idx, size);
2756 qp->sq.head += nreq;
2758 /* Make sure that descriptors are written before
2759 * updating doorbell record and ringing the doorbell */
2763 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
2765 /* Make sure doorbell record is visible to the HCA before
2766 * we hit doorbell */
2770 spin_lock(&bf->lock);
2773 if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
2774 mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
2777 mlx5_write64((__be32 *)ctrl, bf->reg + bf->offset,
2778 MLX5_GET_DOORBELL_LOCK(&bf->lock32));
2779 /* Make sure doorbells don't leak out of SQ spinlock
2780 * and reach the HCA out of order. */
2784 bf->offset ^= bf->buf_size;
2786 spin_unlock(&bf->lock);
2789 spin_unlock_irqrestore(&qp->sq.lock, flags);
2794 static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
2796 sig->signature = calc_sig(sig, size);
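/* Post a chain of receive work requests: under the RQ spinlock each WQE is
 * filled with one data segment per SGE, any remaining slot is terminated
 * with a zero-length segment using the invalid lkey, and when WQE
 * signatures are enabled a signature segment covering the WQE is written.
 * The receive doorbell record is updated once after the loop.
 */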
2799 int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2800 struct ib_recv_wr **bad_wr)
2802 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2803 struct mlx5_wqe_data_seg *scat;
2804 struct mlx5_rwqe_sig *sig;
2805 unsigned long flags;
2811 spin_lock_irqsave(&qp->rq.lock, flags);
2813 ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
2815 for (nreq = 0; wr; nreq++, wr = wr->next) {
2816 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
2822 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
2828 scat = get_recv_wqe(qp, ind);
2832 for (i = 0; i < wr->num_sge; i++)
2833 set_data_ptr_seg(scat + i, wr->sg_list + i);
2835 if (i < qp->rq.max_gs) {
2836 scat[i].byte_count = 0;
2837 scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
2842 sig = (struct mlx5_rwqe_sig *)scat;
2843 set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
2846 qp->rq.wrid[ind] = wr->wr_id;
2848 ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
2853 qp->rq.head += nreq;
2855 /* Make sure that descriptors are written before the doorbell record is updated */
2860 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
2863 spin_unlock_irqrestore(&qp->rq.lock, flags);
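/* Translate firmware QP state, path-migration state and access-flag bits
 * into their IB verbs equivalents for mlx5_ib_query_qp() below.
 */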
2868 static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
2870 switch (mlx5_state) {
2871 case MLX5_QP_STATE_RST: return IB_QPS_RESET;
2872 case MLX5_QP_STATE_INIT: return IB_QPS_INIT;
2873 case MLX5_QP_STATE_RTR: return IB_QPS_RTR;
2874 case MLX5_QP_STATE_RTS: return IB_QPS_RTS;
2875 case MLX5_QP_STATE_SQ_DRAINING:
2876 case MLX5_QP_STATE_SQD: return IB_QPS_SQD;
2877 case MLX5_QP_STATE_SQER: return IB_QPS_SQE;
2878 case MLX5_QP_STATE_ERR: return IB_QPS_ERR;
2883 static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
2885 switch (mlx5_mig_state) {
2886 case MLX5_QP_PM_ARMED: return IB_MIG_ARMED;
2887 case MLX5_QP_PM_REARM: return IB_MIG_REARM;
2888 case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED;
2893 static int to_ib_qp_access_flags(int mlx5_flags)
2897 if (mlx5_flags & MLX5_QP_BIT_RRE)
2898 ib_flags |= IB_ACCESS_REMOTE_READ;
2899 if (mlx5_flags & MLX5_QP_BIT_RWE)
2900 ib_flags |= IB_ACCESS_REMOTE_WRITE;
2901 if (mlx5_flags & MLX5_QP_BIT_RAE)
2902 ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
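/* Convert a hardware path context into an ib_ah_attr: port, SL, DLID,
 * source path bits, static rate and, when the GRH bit is set in grh_mlid,
 * the GRH fields (SGID index, hop limit, traffic class, flow label, DGID).
 * An out-of-range port number leaves the attribute zeroed.
 */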
2907 static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
2908 struct mlx5_qp_path *path)
2910 struct mlx5_core_dev *dev = ibdev->mdev;
2912 memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
2913 ib_ah_attr->port_num = path->port;
2915 if (ib_ah_attr->port_num == 0 ||
2916 ib_ah_attr->port_num > dev->caps.gen.num_ports)
2919 ib_ah_attr->sl = path->sl & 0xf;
2921 ib_ah_attr->dlid = be16_to_cpu(path->rlid);
2922 ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
2923 ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
2924 ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
2925 if (ib_ah_attr->ah_flags) {
2926 ib_ah_attr->grh.sgid_index = path->mgid_index;
2927 ib_ah_attr->grh.hop_limit = path->hop_limit;
2928 ib_ah_attr->grh.traffic_class =
2929 (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
2930 ib_ah_attr->grh.flow_label =
2931 be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
2932 memcpy(ib_ah_attr->grh.dgid.raw,
2933 path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
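/* Query a QP: fetch the QP context from firmware with mlx5_core_qp_query()
 * and translate it into ib_qp_attr/ib_qp_init_attr, including path
 * attributes, atomic limits, timeouts and queue capabilities. Send queue
 * sizes are reported only for kernel QPs; inline data is not reported.
 */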
2937 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
2938 struct ib_qp_init_attr *qp_init_attr)
2940 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2941 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2942 struct mlx5_query_qp_mbox_out *outb;
2943 struct mlx5_qp_context *context;
2947 mutex_lock(&qp->mutex);
2948 outb = kzalloc(sizeof(*outb), GFP_KERNEL);
2953 context = &outb->ctx;
2954 err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
2958 mlx5_state = be32_to_cpu(context->flags) >> 28;
2960 qp->state = to_ib_qp_state(mlx5_state);
2961 qp_attr->qp_state = qp->state;
2962 qp_attr->path_mtu = context->mtu_msgmax >> 5;
2963 qp_attr->path_mig_state =
2964 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
2965 qp_attr->qkey = be32_to_cpu(context->qkey);
2966 qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
2967 qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
2968 qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
2969 qp_attr->qp_access_flags =
2970 to_ib_qp_access_flags(be32_to_cpu(context->params2));
2972 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
2973 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
2974 to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
2975 qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
2976 qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
2979 qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
2980 qp_attr->port_num = context->pri_path.port;
2982 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
2983 qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;
2985 qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
2987 qp_attr->max_dest_rd_atomic =
2988 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
2989 qp_attr->min_rnr_timer =
2990 (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
2991 qp_attr->timeout = context->pri_path.ackto_lt >> 3;
2992 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
2993 qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
2994 qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
2995 qp_attr->cur_qp_state = qp_attr->qp_state;
2996 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
2997 qp_attr->cap.max_recv_sge = qp->rq.max_gs;
2999 if (!ibqp->uobject) {
3000 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
3001 qp_attr->cap.max_send_sge = qp->sq.max_gs;
3003 qp_attr->cap.max_send_wr = 0;
3004 qp_attr->cap.max_send_sge = 0;
3007 /* We don't support inline sends for kernel QPs (yet), and we
3008 * don't know what userspace's value should be. */
3010 qp_attr->cap.max_inline_data = 0;
3012 qp_init_attr->cap = qp_attr->cap;
3014 qp_init_attr->create_flags = 0;
3015 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
3016 qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
3018 qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
3019 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
3025 mutex_unlock(&qp->mutex);
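/* Allocate and free XRC domains. Allocation is refused when the device does
 * not advertise MLX5_DEV_CAP_FLAG_XRC; otherwise an xrcdn is obtained from
 * mlx5_core and wrapped in an ib_xrcd for the core IB layer.
 */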
3029 struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
3030 struct ib_ucontext *context,
3031 struct ib_udata *udata)
3033 struct mlx5_ib_dev *dev = to_mdev(ibdev);
3034 struct mlx5_general_caps *gen;
3035 struct mlx5_ib_xrcd *xrcd;
3038 gen = &dev->mdev->caps.gen;
3039 if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC))
3040 return ERR_PTR(-ENOSYS);
3042 xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
3044 return ERR_PTR(-ENOMEM);
3046 err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
3049 return ERR_PTR(-ENOMEM);
3052 return &xrcd->ibxrcd;
3055 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
3057 struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
3058 u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
3061 err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
3063 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);