/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/module.h>
34 #include <rdma/ib_umem.h>
35 #include <rdma/ib_cache.h>
36 #include <rdma/ib_user_verbs.h>
40 /* not supported currently */
41 static int wq_signature;
44 MLX5_IB_ACK_REQ_FREQ = 8,
48 MLX5_IB_DEFAULT_SCHED_QUEUE = 0x83,
49 MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
50 MLX5_IB_LINK_TYPE_IB = 0,
51 MLX5_IB_LINK_TYPE_ETH = 1
55 MLX5_IB_SQ_STRIDE = 6,
56 MLX5_IB_CACHE_LINE_SIZE = 64,
59 static const u32 mlx5_ib_opcode[] = {
60 [IB_WR_SEND] = MLX5_OPCODE_SEND,
61 [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
62 [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
63 [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
64 [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
65 [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
66 [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
67 [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
68 [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
69 [IB_WR_REG_MR] = MLX5_OPCODE_UMR,
70 [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
71 [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
72 [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
76 static int is_qp0(enum ib_qp_type qp_type)
78 return qp_type == IB_QPT_SMI;
81 static int is_sqp(enum ib_qp_type qp_type)
83 return is_qp0(qp_type) || is_qp1(qp_type);
86 static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
88 return mlx5_buf_offset(&qp->buf, offset);
91 static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
93 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
96 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
98 return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
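/*
 * For illustration: send WQEs are addressed in fixed 64-byte basic blocks,
 * since MLX5_IB_SQ_STRIDE is 6 and 1 << 6 == 64 == MLX5_SEND_WQE_BB.
 * So mlx5_get_send_wqe(qp, 3) points at qp->sq.offset + 192 within the QP
 * buffer, while receive WQEs use the per-QP rq.wqe_shift chosen at create
 * time in set_rq_size() below.
 */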
/**
 * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
 *
 * @qp: QP to copy from.
 * @send: copy from the send queue when non-zero, use the receive queue
 *	  otherwise.
 * @wqe_index:  index to start copying from. For send work queues, the
 *		wqe_index is in units of MLX5_SEND_WQE_BB.
 *		For receive work queue, it is the number of work queue
 *		element in the queue.
 * @buffer: destination buffer.
 * @length: maximum number of bytes to copy.
 *
 * Copies at least a single WQE, but may copy more data.
 *
 * Return: the number of bytes copied, or an error code.
 */
118 int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
119 void *buffer, u32 length)
121 struct ib_device *ibdev = qp->ibqp.device;
122 struct mlx5_ib_dev *dev = to_mdev(ibdev);
123 struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
126 struct ib_umem *umem = qp->umem;
127 u32 first_copy_length;
131 if (wq->wqe_cnt == 0) {
132 mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
137 offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
138 wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);
140 if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
143 if (offset > umem->length ||
144 (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
147 first_copy_length = min_t(u32, offset + length, wq_end) - offset;
148 ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
153 struct mlx5_wqe_ctrl_seg *ctrl = buffer;
154 int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
156 wqe_length = ds * MLX5_WQE_DS_UNITS;
158 wqe_length = 1 << wq->wqe_shift;
161 if (wqe_length <= first_copy_length)
162 return first_copy_length;
164 ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
165 wqe_length - first_copy_length);
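/*
 * Illustrative, hypothetical use of mlx5_ib_read_user_wqe(): copy the send
 * WQE at basic-block index 0 into a local buffer (the buffer size here is
 * an arbitrary choice for the sketch; send-queue callers must pass at
 * least sizeof(struct mlx5_wqe_ctrl_seg)):
 *
 *	char buf[4 * MLX5_SEND_WQE_BB];
 *	int copied = mlx5_ib_read_user_wqe(qp, 1, 0, buf, sizeof(buf));
 *
 *	if (copied < 0)
 *		return copied;
 *	parse_wqe(buf, copied);	(hypothetical consumer of the copied WQE)
 *
 * At least one complete WQE ends up in buf, possibly followed by extra
 * bytes from the queue buffer.
 */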
172 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
174 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
175 struct ib_event event;
177 if (type == MLX5_EVENT_TYPE_PATH_MIG)
178 to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
180 if (ibqp->event_handler) {
181 event.device = ibqp->device;
182 event.element.qp = ibqp;
184 case MLX5_EVENT_TYPE_PATH_MIG:
185 event.event = IB_EVENT_PATH_MIG;
187 case MLX5_EVENT_TYPE_COMM_EST:
188 event.event = IB_EVENT_COMM_EST;
190 case MLX5_EVENT_TYPE_SQ_DRAINED:
191 event.event = IB_EVENT_SQ_DRAINED;
193 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
194 event.event = IB_EVENT_QP_LAST_WQE_REACHED;
196 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
197 event.event = IB_EVENT_QP_FATAL;
199 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
200 event.event = IB_EVENT_PATH_MIG_ERR;
202 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
203 event.event = IB_EVENT_QP_REQ_ERR;
205 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
206 event.event = IB_EVENT_QP_ACCESS_ERR;
209 pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
213 ibqp->event_handler(&event, ibqp->qp_context);
217 static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
218 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
223 /* Sanity check RQ size before proceeding */
224 if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
230 qp->rq.wqe_shift = 0;
233 qp->rq.wqe_cnt = ucmd->rq_wqe_count;
234 qp->rq.wqe_shift = ucmd->rq_wqe_shift;
235 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
236 qp->rq.max_post = qp->rq.wqe_cnt;
238 wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
239 wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
240 wqe_size = roundup_pow_of_two(wqe_size);
241 wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
242 wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
243 qp->rq.wqe_cnt = wq_size / wqe_size;
244 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
245 mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
247 MLX5_CAP_GEN(dev->mdev,
251 qp->rq.wqe_shift = ilog2(wqe_size);
252 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
253 qp->rq.max_post = qp->rq.wqe_cnt;
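/*
 * Worked example for the kernel-QP branch above (illustrative numbers):
 * with max_recv_wr = 100, max_recv_sge = 2 and no WQ signature,
 * wqe_size = 2 * sizeof(struct mlx5_wqe_data_seg) = 32 bytes (already a
 * power of two), wq_size = roundup_pow_of_two(100) * 32 = 4096, so
 * rq.wqe_cnt = 128, rq.wqe_shift = 5, rq.max_gs = 2 and rq.max_post = 128.
 * The caller ends up with more postable WRs than requested because the
 * WQE count is rounded up to a power of two.
 */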
260 static int sq_overhead(enum ib_qp_type qp_type)
266 size += sizeof(struct mlx5_wqe_xrc_seg);
269 size += sizeof(struct mlx5_wqe_ctrl_seg) +
270 sizeof(struct mlx5_wqe_atomic_seg) +
271 sizeof(struct mlx5_wqe_raddr_seg);
278 size += sizeof(struct mlx5_wqe_ctrl_seg) +
279 sizeof(struct mlx5_wqe_raddr_seg) +
280 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
281 sizeof(struct mlx5_mkey_seg);
287 size += sizeof(struct mlx5_wqe_ctrl_seg) +
288 sizeof(struct mlx5_wqe_datagram_seg);
291 case MLX5_IB_QPT_REG_UMR:
292 size += sizeof(struct mlx5_wqe_ctrl_seg) +
293 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
294 sizeof(struct mlx5_mkey_seg);
304 static int calc_send_wqe(struct ib_qp_init_attr *attr)
309 size = sq_overhead(attr->qp_type);
313 if (attr->cap.max_inline_data) {
314 inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
315 attr->cap.max_inline_data;
318 size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
319 if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
320 ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
321 return MLX5_SIG_WQE_SIZE;
323 return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
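/*
 * Worked example for calc_send_wqe() (illustrative numbers): an RC QP with
 * max_send_sge = 2 and max_inline_data = 64 has an overhead of
 * ctrl + atomic + raddr segments = 48 bytes, so size = 48 + 2 * 16 = 80 and
 * inl_size = 48 + 4 + 64 = 116. The WQE is therefore
 * ALIGN(max(80, 116), MLX5_SEND_WQE_BB) = 128 bytes, i.e. two 64-byte basic
 * blocks, assuming IB_QP_CREATE_SIGNATURE_EN is not set.
 */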
326 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
327 struct mlx5_ib_qp *qp)
332 if (!attr->cap.max_send_wr)
335 wqe_size = calc_send_wqe(attr);
336 mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
340 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
341 mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
342 wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
346 qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
347 sizeof(struct mlx5_wqe_inline_seg);
348 attr->cap.max_inline_data = qp->max_inline_data;
350 if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
351 qp->signature_en = true;
353 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
354 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
355 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
356 mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
358 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
361 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
362 qp->sq.max_gs = attr->cap.max_send_sge;
363 qp->sq.max_post = wq_size / wqe_size;
364 attr->cap.max_send_wr = qp->sq.max_post;
369 static int set_user_buf_size(struct mlx5_ib_dev *dev,
370 struct mlx5_ib_qp *qp,
371 struct mlx5_ib_create_qp *ucmd)
373 int desc_sz = 1 << qp->sq.wqe_shift;
375 if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
376 mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
377 desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
381 if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
		mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
			     ucmd->sq_wqe_count);
387 qp->sq.wqe_cnt = ucmd->sq_wqe_count;
389 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
390 mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
392 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
396 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
397 (qp->sq.wqe_cnt << 6);
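/*
 * Resulting user buffer layout (illustrative): the receive queue occupies
 * the first rq.wqe_cnt << rq.wqe_shift bytes and the send queue follows,
 * sized as sq.wqe_cnt 64-byte basic blocks. For example, 128 RQ WQEs of
 * 32 bytes plus 64 SQ basic blocks give buf_size = 4096 + 4096 = 8192.
 */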
402 static int qp_has_rq(struct ib_qp_init_attr *attr)
404 if (attr->qp_type == IB_QPT_XRC_INI ||
405 attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
406 attr->qp_type == MLX5_IB_QPT_REG_UMR ||
407 !attr->cap.max_recv_wr)
413 static int first_med_uuar(void)
418 static int next_uuar(int n)
422 while (((n % 4) & 2))
428 static int num_med_uuar(struct mlx5_uuar_info *uuari)
432 n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
433 uuari->num_low_latency_uuars - 1;
435 return n >= 0 ? n : 0;
438 static int max_uuari(struct mlx5_uuar_info *uuari)
440 return uuari->num_uars * 4;
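/*
 * UUAR layout as implied by the helpers above: each UAR page carries four
 * blueflame register slots, and next_uuar() skips the two fast-path slots
 * in every group of four. Slot 0 is the shared low-latency-class register,
 * the following num_med_uuar() usable slots form the medium class, and the
 * slots from first_hi_uuar() up to max_uuari() are handed out exclusively
 * to high-class QPs and tracked in the allocation bitmap.
 */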
443 static int first_hi_uuar(struct mlx5_uuar_info *uuari)
449 med = num_med_uuar(uuari);
450 for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
459 static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
463 for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
464 if (!test_bit(i, uuari->bitmap)) {
465 set_bit(i, uuari->bitmap);
474 static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
476 int minidx = first_med_uuar();
479 for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
480 if (uuari->count[i] < uuari->count[minidx])
484 uuari->count[minidx]++;
488 static int alloc_uuar(struct mlx5_uuar_info *uuari,
489 enum mlx5_ib_latency_class lat)
493 mutex_lock(&uuari->lock);
495 case MLX5_IB_LATENCY_CLASS_LOW:
497 uuari->count[uuarn]++;
500 case MLX5_IB_LATENCY_CLASS_MEDIUM:
504 uuarn = alloc_med_class_uuar(uuari);
507 case MLX5_IB_LATENCY_CLASS_HIGH:
511 uuarn = alloc_high_class_uuar(uuari);
514 case MLX5_IB_LATENCY_CLASS_FAST_PATH:
518 mutex_unlock(&uuari->lock);
523 static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
525 clear_bit(uuarn, uuari->bitmap);
526 --uuari->count[uuarn];
529 static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
531 clear_bit(uuarn, uuari->bitmap);
532 --uuari->count[uuarn];
535 static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
537 int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
538 int high_uuar = nuuars - uuari->num_low_latency_uuars;
540 mutex_lock(&uuari->lock);
542 --uuari->count[uuarn];
546 if (uuarn < high_uuar) {
547 free_med_class_uuar(uuari, uuarn);
551 free_high_class_uuar(uuari, uuarn);
554 mutex_unlock(&uuari->lock);
557 static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
560 case IB_QPS_RESET: return MLX5_QP_STATE_RST;
561 case IB_QPS_INIT: return MLX5_QP_STATE_INIT;
562 case IB_QPS_RTR: return MLX5_QP_STATE_RTR;
563 case IB_QPS_RTS: return MLX5_QP_STATE_RTS;
564 case IB_QPS_SQD: return MLX5_QP_STATE_SQD;
565 case IB_QPS_SQE: return MLX5_QP_STATE_SQER;
566 case IB_QPS_ERR: return MLX5_QP_STATE_ERR;
571 static int to_mlx5_st(enum ib_qp_type type)
574 case IB_QPT_RC: return MLX5_QP_ST_RC;
575 case IB_QPT_UC: return MLX5_QP_ST_UC;
576 case IB_QPT_UD: return MLX5_QP_ST_UD;
577 case MLX5_IB_QPT_REG_UMR: return MLX5_QP_ST_REG_UMR;
579 case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC;
580 case IB_QPT_SMI: return MLX5_QP_ST_QP0;
581 case IB_QPT_GSI: return MLX5_QP_ST_QP1;
582 case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
583 case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
584 case IB_QPT_RAW_PACKET:
586 default: return -EINVAL;
590 static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
592 return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
595 static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
596 struct mlx5_ib_qp *qp, struct ib_udata *udata,
597 struct mlx5_create_qp_mbox_in **in,
598 struct mlx5_ib_create_qp_resp *resp, int *inlen)
600 struct mlx5_ib_ucontext *context;
601 struct mlx5_ib_create_qp ucmd;
610 err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
612 mlx5_ib_dbg(dev, "copy failed\n");
616 context = to_mucontext(pd->uobject->context);
	/*
	 * TBD: should come from the verbs when we have the API
	 */
620 if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
621 /* In CROSS_CHANNEL CQ and QP must use the same UAR */
622 uuarn = MLX5_CROSS_CHANNEL_UUAR;
624 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
626 mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
627 mlx5_ib_dbg(dev, "reverting to medium latency\n");
628 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
630 mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
631 mlx5_ib_dbg(dev, "reverting to high latency\n");
632 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
634 mlx5_ib_warn(dev, "uuar allocation failed\n");
641 uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
642 mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);
645 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
646 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
648 err = set_user_buf_size(dev, qp, &ucmd);
652 if (ucmd.buf_addr && qp->buf_size) {
653 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
655 if (IS_ERR(qp->umem)) {
656 mlx5_ib_dbg(dev, "umem_get failed\n");
657 err = PTR_ERR(qp->umem);
665 mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
667 err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
669 mlx5_ib_warn(dev, "bad offset\n");
672 mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
673 ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
676 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
677 *in = mlx5_vzalloc(*inlen);
683 mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
684 (*in)->ctx.log_pg_sz_remote_qpn =
685 cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
686 (*in)->ctx.params2 = cpu_to_be32(offset << 6);
688 (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
689 resp->uuar_index = uuarn;
692 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
694 mlx5_ib_dbg(dev, "map failed\n");
698 err = ib_copy_to_udata(udata, resp, sizeof(*resp));
700 mlx5_ib_dbg(dev, "copy failed\n");
703 qp->create_type = MLX5_QP_USER;
708 mlx5_ib_db_unmap_user(context, &qp->db);
715 ib_umem_release(qp->umem);
718 free_uuar(&context->uuari, uuarn);
722 static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
724 struct mlx5_ib_ucontext *context;
726 context = to_mucontext(pd->uobject->context);
727 mlx5_ib_db_unmap_user(context, &qp->db);
729 ib_umem_release(qp->umem);
730 free_uuar(&context->uuari, qp->uuarn);
733 static int create_kernel_qp(struct mlx5_ib_dev *dev,
734 struct ib_qp_init_attr *init_attr,
735 struct mlx5_ib_qp *qp,
736 struct mlx5_create_qp_mbox_in **in, int *inlen)
738 enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
739 struct mlx5_uuar_info *uuari;
744 uuari = &dev->mdev->priv.uuari;
745 if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
748 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
749 lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
751 uuarn = alloc_uuar(uuari, lc);
753 mlx5_ib_dbg(dev, "\n");
757 qp->bf = &uuari->bfs[uuarn];
758 uar_index = qp->bf->uar->index;
760 err = calc_sq_size(dev, init_attr, qp);
762 mlx5_ib_dbg(dev, "err %d\n", err);
767 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
768 qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
770 err = mlx5_buf_alloc(dev->mdev, qp->buf_size, &qp->buf);
772 mlx5_ib_dbg(dev, "err %d\n", err);
776 qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
777 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
778 *in = mlx5_vzalloc(*inlen);
783 (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
784 (*in)->ctx.log_pg_sz_remote_qpn =
785 cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
786 /* Set "fast registration enabled" for all kernel QPs */
787 (*in)->ctx.params1 |= cpu_to_be32(1 << 11);
788 (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);
790 mlx5_fill_page_array(&qp->buf, (*in)->pas);
792 err = mlx5_db_alloc(dev->mdev, &qp->db);
794 mlx5_ib_dbg(dev, "err %d\n", err);
798 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
799 qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
800 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
801 qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
802 qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);
804 if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
805 !qp->sq.w_list || !qp->sq.wqe_head) {
809 qp->create_type = MLX5_QP_KERNEL;
814 mlx5_db_free(dev->mdev, &qp->db);
815 kfree(qp->sq.wqe_head);
816 kfree(qp->sq.w_list);
818 kfree(qp->sq.wr_data);
825 mlx5_buf_free(dev->mdev, &qp->buf);
828 free_uuar(&dev->mdev->priv.uuari, uuarn);
832 static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
834 mlx5_db_free(dev->mdev, &qp->db);
835 kfree(qp->sq.wqe_head);
836 kfree(qp->sq.w_list);
838 kfree(qp->sq.wr_data);
840 mlx5_buf_free(dev->mdev, &qp->buf);
841 free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
844 static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
846 if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
847 (attr->qp_type == IB_QPT_XRC_INI))
848 return cpu_to_be32(MLX5_SRQ_RQ);
849 else if (!qp->has_rq)
850 return cpu_to_be32(MLX5_ZERO_LEN_RQ);
852 return cpu_to_be32(MLX5_NON_ZERO_RQ);
855 static int is_connected(enum ib_qp_type qp_type)
857 if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
863 static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
864 struct ib_qp_init_attr *init_attr,
865 struct ib_udata *udata, struct mlx5_ib_qp *qp)
867 struct mlx5_ib_resources *devr = &dev->devr;
868 struct mlx5_core_dev *mdev = dev->mdev;
869 struct mlx5_ib_create_qp_resp resp;
870 struct mlx5_create_qp_mbox_in *in;
871 struct mlx5_ib_create_qp ucmd;
872 int inlen = sizeof(*in);
874 u32 uidx = MLX5_IB_DEFAULT_UIDX;
877 mlx5_ib_odp_create_qp(qp);
879 mutex_init(&qp->mutex);
880 spin_lock_init(&qp->sq.lock);
881 spin_lock_init(&qp->rq.lock);
883 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
884 if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
885 mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
888 qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
892 if (init_attr->create_flags &
893 (IB_QP_CREATE_CROSS_CHANNEL |
894 IB_QP_CREATE_MANAGED_SEND |
895 IB_QP_CREATE_MANAGED_RECV)) {
896 if (!MLX5_CAP_GEN(mdev, cd)) {
897 mlx5_ib_dbg(dev, "cross-channel isn't supported\n");
900 if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL)
901 qp->flags |= MLX5_IB_QP_CROSS_CHANNEL;
902 if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND)
903 qp->flags |= MLX5_IB_QP_MANAGED_SEND;
904 if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
905 qp->flags |= MLX5_IB_QP_MANAGED_RECV;
907 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
908 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
910 if (pd && pd->uobject) {
911 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
912 mlx5_ib_dbg(dev, "copy failed\n");
916 err = get_qp_user_index(to_mucontext(pd->uobject->context),
917 &ucmd, udata->inlen, &uidx);
921 qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
922 qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
924 qp->wq_sig = !!wq_signature;
927 qp->has_rq = qp_has_rq(init_attr);
928 err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
929 qp, (pd && pd->uobject) ? &ucmd : NULL);
931 mlx5_ib_dbg(dev, "err %d\n", err);
938 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
939 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
940 if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
941 ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
942 mlx5_ib_dbg(dev, "invalid rq params\n");
945 if (ucmd.sq_wqe_count > max_wqes) {
946 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
947 ucmd.sq_wqe_count, max_wqes);
950 err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
952 mlx5_ib_dbg(dev, "err %d\n", err);
954 err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
956 mlx5_ib_dbg(dev, "err %d\n", err);
962 in = mlx5_vzalloc(sizeof(*in));
966 qp->create_type = MLX5_QP_EMPTY;
969 if (is_sqp(init_attr->qp_type))
970 qp->port = init_attr->port_num;
972 in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
973 MLX5_QP_PM_MIGRATED << 11);
975 if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
976 in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
978 in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);
981 in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);
983 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
984 in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);
986 if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
987 in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_MASTER);
988 if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
989 in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_SEND);
990 if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
991 in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_RECV);
993 if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
997 rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
998 scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
1001 in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
1003 in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;
1005 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
1007 in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
1009 in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
1013 if (qp->rq.wqe_cnt) {
1014 in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
1015 in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
1018 in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);
1021 in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
1023 in->ctx.sq_crq_size |= cpu_to_be16(0x8000);
1025 /* Set default resources */
1026 switch (init_attr->qp_type) {
1027 case IB_QPT_XRC_TGT:
1028 in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
1029 in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
1030 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
1031 in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
1033 case IB_QPT_XRC_INI:
1034 in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
1035 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
1036 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
1039 if (init_attr->srq) {
1040 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
1041 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
1043 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
1044 in->ctx.rq_type_srqn |=
1045 cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
1049 if (init_attr->send_cq)
1050 in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);
1052 if (init_attr->recv_cq)
1053 in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);
1055 in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
1057 if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
1058 qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
		/* a user_index of 0xffffff (the default) asks the device to work with CQE version 0 */
1060 MLX5_SET(qpc, qpc, user_index, uidx);
1063 err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
1065 mlx5_ib_dbg(dev, "create qp failed\n");
	/* Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
1074 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
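	/*
	 * Example (illustrative): for qpn 0x000abc, qpn << 8 is 0x000abc00
	 * and swab32() of that is 0x00bc0a00, so the stored value already
	 * carries the 24-bit QPN in the byte order the doorbell expects.
	 */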
1076 qp->mqp.event = mlx5_ib_qp_event;
1081 if (qp->create_type == MLX5_QP_USER)
1082 destroy_qp_user(pd, qp);
1083 else if (qp->create_type == MLX5_QP_KERNEL)
1084 destroy_qp_kernel(dev, qp);
1090 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
1091 __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
1095 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
1096 spin_lock_irq(&send_cq->lock);
1097 spin_lock_nested(&recv_cq->lock,
1098 SINGLE_DEPTH_NESTING);
1099 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
1100 spin_lock_irq(&send_cq->lock);
1101 __acquire(&recv_cq->lock);
1103 spin_lock_irq(&recv_cq->lock);
1104 spin_lock_nested(&send_cq->lock,
1105 SINGLE_DEPTH_NESTING);
1108 spin_lock_irq(&send_cq->lock);
1109 __acquire(&recv_cq->lock);
1111 } else if (recv_cq) {
1112 spin_lock_irq(&recv_cq->lock);
1113 __acquire(&send_cq->lock);
1115 __acquire(&send_cq->lock);
1116 __acquire(&recv_cq->lock);
1120 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
1121 __releases(&send_cq->lock) __releases(&recv_cq->lock)
1125 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
1126 spin_unlock(&recv_cq->lock);
1127 spin_unlock_irq(&send_cq->lock);
1128 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
1129 __release(&recv_cq->lock);
1130 spin_unlock_irq(&send_cq->lock);
1132 spin_unlock(&send_cq->lock);
1133 spin_unlock_irq(&recv_cq->lock);
1136 __release(&recv_cq->lock);
1137 spin_unlock_irq(&send_cq->lock);
1139 } else if (recv_cq) {
1140 __release(&send_cq->lock);
1141 spin_unlock_irq(&recv_cq->lock);
1143 __release(&recv_cq->lock);
1144 __release(&send_cq->lock);
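/*
 * The lock/unlock helpers above always take the CQ with the lower CQN
 * first and release in the reverse order, so two code paths that lock the
 * same pair of CQs can never deadlock by acquiring them in opposite
 * orders. The __acquire()/__release() annotations cover the cases where
 * the two CQs are the same object, or where only one (or no) CQ exists,
 * so that sparse still sees balanced locking.
 */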
1148 static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
1150 return to_mpd(qp->ibqp.pd);
1153 static void get_cqs(struct mlx5_ib_qp *qp,
1154 struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
1156 switch (qp->ibqp.qp_type) {
1157 case IB_QPT_XRC_TGT:
1161 case MLX5_IB_QPT_REG_UMR:
1162 case IB_QPT_XRC_INI:
1163 *send_cq = to_mcq(qp->ibqp.send_cq);
1172 case IB_QPT_RAW_IPV6:
1173 case IB_QPT_RAW_ETHERTYPE:
1174 *send_cq = to_mcq(qp->ibqp.send_cq);
1175 *recv_cq = to_mcq(qp->ibqp.recv_cq);
1178 case IB_QPT_RAW_PACKET:
1187 static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
1189 struct mlx5_ib_cq *send_cq, *recv_cq;
1190 struct mlx5_modify_qp_mbox_in *in;
1193 in = kzalloc(sizeof(*in), GFP_KERNEL);
1197 if (qp->state != IB_QPS_RESET) {
1198 mlx5_ib_qp_disable_pagefaults(qp);
1199 if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
1200 MLX5_QP_STATE_RST, in, 0, &qp->mqp))
1201 mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
1205 get_cqs(qp, &send_cq, &recv_cq);
1207 if (qp->create_type == MLX5_QP_KERNEL) {
1208 mlx5_ib_lock_cqs(send_cq, recv_cq);
1209 __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1210 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1211 if (send_cq != recv_cq)
1212 __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1213 mlx5_ib_unlock_cqs(send_cq, recv_cq);
1216 err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
1218 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
1222 if (qp->create_type == MLX5_QP_KERNEL)
1223 destroy_qp_kernel(dev, qp);
1224 else if (qp->create_type == MLX5_QP_USER)
1225 destroy_qp_user(&get_pd(qp)->ibpd, qp);
1228 static const char *ib_qp_type_str(enum ib_qp_type type)
1232 return "IB_QPT_SMI";
1234 return "IB_QPT_GSI";
1241 case IB_QPT_RAW_IPV6:
1242 return "IB_QPT_RAW_IPV6";
1243 case IB_QPT_RAW_ETHERTYPE:
1244 return "IB_QPT_RAW_ETHERTYPE";
1245 case IB_QPT_XRC_INI:
1246 return "IB_QPT_XRC_INI";
1247 case IB_QPT_XRC_TGT:
1248 return "IB_QPT_XRC_TGT";
1249 case IB_QPT_RAW_PACKET:
1250 return "IB_QPT_RAW_PACKET";
1251 case MLX5_IB_QPT_REG_UMR:
1252 return "MLX5_IB_QPT_REG_UMR";
1255 return "Invalid QP type";
1259 struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
1260 struct ib_qp_init_attr *init_attr,
1261 struct ib_udata *udata)
1263 struct mlx5_ib_dev *dev;
1264 struct mlx5_ib_qp *qp;
1269 dev = to_mdev(pd->device);
1271 /* being cautious here */
1272 if (init_attr->qp_type != IB_QPT_XRC_TGT &&
1273 init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
1274 pr_warn("%s: no PD for transport %s\n", __func__,
1275 ib_qp_type_str(init_attr->qp_type));
1276 return ERR_PTR(-EINVAL);
1278 dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
1281 switch (init_attr->qp_type) {
1282 case IB_QPT_XRC_TGT:
1283 case IB_QPT_XRC_INI:
1284 if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
1285 mlx5_ib_dbg(dev, "XRC not supported\n");
1286 return ERR_PTR(-ENOSYS);
1288 init_attr->recv_cq = NULL;
1289 if (init_attr->qp_type == IB_QPT_XRC_TGT) {
1290 xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
1291 init_attr->send_cq = NULL;
1300 case MLX5_IB_QPT_REG_UMR:
1301 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1303 return ERR_PTR(-ENOMEM);
1305 err = create_qp_common(dev, pd, init_attr, udata, qp);
1307 mlx5_ib_dbg(dev, "create_qp_common failed\n");
1309 return ERR_PTR(err);
1312 if (is_qp0(init_attr->qp_type))
1313 qp->ibqp.qp_num = 0;
1314 else if (is_qp1(init_attr->qp_type))
1315 qp->ibqp.qp_num = 1;
1317 qp->ibqp.qp_num = qp->mqp.qpn;
1319 mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
1320 qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
1321 to_mcq(init_attr->send_cq)->mcq.cqn);
1327 case IB_QPT_RAW_IPV6:
1328 case IB_QPT_RAW_ETHERTYPE:
1329 case IB_QPT_RAW_PACKET:
1332 mlx5_ib_dbg(dev, "unsupported qp type %d\n",
1333 init_attr->qp_type);
1334 /* Don't support raw QPs */
1335 return ERR_PTR(-EINVAL);
1341 int mlx5_ib_destroy_qp(struct ib_qp *qp)
1343 struct mlx5_ib_dev *dev = to_mdev(qp->device);
1344 struct mlx5_ib_qp *mqp = to_mqp(qp);
1346 destroy_qp_common(dev, mqp);
1353 static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
1356 u32 hw_access_flags = 0;
1360 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1361 dest_rd_atomic = attr->max_dest_rd_atomic;
1363 dest_rd_atomic = qp->resp_depth;
1365 if (attr_mask & IB_QP_ACCESS_FLAGS)
1366 access_flags = attr->qp_access_flags;
1368 access_flags = qp->atomic_rd_en;
1370 if (!dest_rd_atomic)
1371 access_flags &= IB_ACCESS_REMOTE_WRITE;
1373 if (access_flags & IB_ACCESS_REMOTE_READ)
1374 hw_access_flags |= MLX5_QP_BIT_RRE;
1375 if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
1376 hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
1377 if (access_flags & IB_ACCESS_REMOTE_WRITE)
1378 hw_access_flags |= MLX5_QP_BIT_RWE;
1380 return cpu_to_be32(hw_access_flags);
1384 MLX5_PATH_FLAG_FL = 1 << 0,
1385 MLX5_PATH_FLAG_FREE_AR = 1 << 1,
1386 MLX5_PATH_FLAG_COUNTER = 1 << 2,
1389 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
1391 if (rate == IB_RATE_PORT_CURRENT) {
1393 } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
1396 while (rate != IB_RATE_2_5_GBPS &&
1397 !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
1398 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
1402 return rate + MLX5_STAT_RATE_OFFSET;
1405 static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
1406 struct mlx5_qp_path *path, u8 port, int attr_mask,
1407 u32 path_flags, const struct ib_qp_attr *attr)
1409 enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
1412 if (attr_mask & IB_QP_PKEY_INDEX)
1413 path->pkey_index = attr->pkey_index;
1415 if (ah->ah_flags & IB_AH_GRH) {
1416 if (ah->grh.sgid_index >=
1417 dev->mdev->port_caps[port - 1].gid_table_len) {
1418 pr_err("sgid_index (%u) too large. max is %d\n",
1420 dev->mdev->port_caps[port - 1].gid_table_len);
1425 if (ll == IB_LINK_LAYER_ETHERNET) {
1426 if (!(ah->ah_flags & IB_AH_GRH))
1428 memcpy(path->rmac, ah->dmac, sizeof(ah->dmac));
1429 path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
1430 ah->grh.sgid_index);
1431 path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
1433 path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
1434 path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 :
1436 path->rlid = cpu_to_be16(ah->dlid);
1437 path->grh_mlid = ah->src_path_bits & 0x7f;
1438 if (ah->ah_flags & IB_AH_GRH)
1439 path->grh_mlid |= 1 << 7;
1440 path->dci_cfi_prio_sl = ah->sl & 0xf;
1443 if (ah->ah_flags & IB_AH_GRH) {
1444 path->mgid_index = ah->grh.sgid_index;
1445 path->hop_limit = ah->grh.hop_limit;
1446 path->tclass_flowlabel =
1447 cpu_to_be32((ah->grh.traffic_class << 20) |
1448 (ah->grh.flow_label));
1449 memcpy(path->rgid, ah->grh.dgid.raw, 16);
1452 err = ib_rate_to_mlx5(dev, ah->static_rate);
1455 path->static_rate = err;
1458 if (attr_mask & IB_QP_TIMEOUT)
1459 path->ackto_lt = attr->timeout << 3;
1464 static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
1465 [MLX5_QP_STATE_INIT] = {
1466 [MLX5_QP_STATE_INIT] = {
1467 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
1468 MLX5_QP_OPTPAR_RAE |
1469 MLX5_QP_OPTPAR_RWE |
1470 MLX5_QP_OPTPAR_PKEY_INDEX |
1471 MLX5_QP_OPTPAR_PRI_PORT,
1472 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
1473 MLX5_QP_OPTPAR_PKEY_INDEX |
1474 MLX5_QP_OPTPAR_PRI_PORT,
1475 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
1476 MLX5_QP_OPTPAR_Q_KEY |
1477 MLX5_QP_OPTPAR_PRI_PORT,
1479 [MLX5_QP_STATE_RTR] = {
1480 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1481 MLX5_QP_OPTPAR_RRE |
1482 MLX5_QP_OPTPAR_RAE |
1483 MLX5_QP_OPTPAR_RWE |
1484 MLX5_QP_OPTPAR_PKEY_INDEX,
1485 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1486 MLX5_QP_OPTPAR_RWE |
1487 MLX5_QP_OPTPAR_PKEY_INDEX,
1488 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
1489 MLX5_QP_OPTPAR_Q_KEY,
1490 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
1491 MLX5_QP_OPTPAR_Q_KEY,
1492 [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1493 MLX5_QP_OPTPAR_RRE |
1494 MLX5_QP_OPTPAR_RAE |
1495 MLX5_QP_OPTPAR_RWE |
1496 MLX5_QP_OPTPAR_PKEY_INDEX,
1499 [MLX5_QP_STATE_RTR] = {
1500 [MLX5_QP_STATE_RTS] = {
1501 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1502 MLX5_QP_OPTPAR_RRE |
1503 MLX5_QP_OPTPAR_RAE |
1504 MLX5_QP_OPTPAR_RWE |
1505 MLX5_QP_OPTPAR_PM_STATE |
1506 MLX5_QP_OPTPAR_RNR_TIMEOUT,
1507 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1508 MLX5_QP_OPTPAR_RWE |
1509 MLX5_QP_OPTPAR_PM_STATE,
1510 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1513 [MLX5_QP_STATE_RTS] = {
1514 [MLX5_QP_STATE_RTS] = {
1515 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
1516 MLX5_QP_OPTPAR_RAE |
1517 MLX5_QP_OPTPAR_RWE |
1518 MLX5_QP_OPTPAR_RNR_TIMEOUT |
1519 MLX5_QP_OPTPAR_PM_STATE |
1520 MLX5_QP_OPTPAR_ALT_ADDR_PATH,
1521 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
1522 MLX5_QP_OPTPAR_PM_STATE |
1523 MLX5_QP_OPTPAR_ALT_ADDR_PATH,
1524 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
1525 MLX5_QP_OPTPAR_SRQN |
1526 MLX5_QP_OPTPAR_CQN_RCV,
1529 [MLX5_QP_STATE_SQER] = {
1530 [MLX5_QP_STATE_RTS] = {
1531 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1532 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
1533 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
1534 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
1535 MLX5_QP_OPTPAR_RWE |
1536 MLX5_QP_OPTPAR_RAE |
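/*
 * The opt_mask table above is indexed as
 * opt_mask[current mlx5 state][next mlx5 state][mlx5 transport] and gives
 * the optional parameters the firmware accepts for that transition. For
 * example, opt_mask[MLX5_QP_STATE_RTS][MLX5_QP_STATE_RTS][MLX5_QP_ST_UD]
 * only permits changing the Q_KEY, SRQN and receive CQN of a UD QP that is
 * already in RTS.
 */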
1542 static int ib_nr_to_mlx5_nr(int ib_mask)
1547 case IB_QP_CUR_STATE:
1549 case IB_QP_EN_SQD_ASYNC_NOTIFY:
1551 case IB_QP_ACCESS_FLAGS:
1552 return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
1554 case IB_QP_PKEY_INDEX:
1555 return MLX5_QP_OPTPAR_PKEY_INDEX;
1557 return MLX5_QP_OPTPAR_PRI_PORT;
1559 return MLX5_QP_OPTPAR_Q_KEY;
1561 return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
1562 MLX5_QP_OPTPAR_PRI_PORT;
1563 case IB_QP_PATH_MTU:
1566 return MLX5_QP_OPTPAR_ACK_TIMEOUT;
1567 case IB_QP_RETRY_CNT:
1568 return MLX5_QP_OPTPAR_RETRY_COUNT;
1569 case IB_QP_RNR_RETRY:
1570 return MLX5_QP_OPTPAR_RNR_RETRY;
1573 case IB_QP_MAX_QP_RD_ATOMIC:
1574 return MLX5_QP_OPTPAR_SRA_MAX;
1575 case IB_QP_ALT_PATH:
1576 return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
1577 case IB_QP_MIN_RNR_TIMER:
1578 return MLX5_QP_OPTPAR_RNR_TIMEOUT;
1581 case IB_QP_MAX_DEST_RD_ATOMIC:
1582 return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
1583 MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
1584 case IB_QP_PATH_MIG_STATE:
1585 return MLX5_QP_OPTPAR_PM_STATE;
1588 case IB_QP_DEST_QPN:
1594 static int ib_mask_to_mlx5_opt(int ib_mask)
1599 for (i = 0; i < 8 * sizeof(int); i++) {
1600 if ((1 << i) & ib_mask)
1601 result |= ib_nr_to_mlx5_nr(1 << i);
1607 static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
1608 const struct ib_qp_attr *attr, int attr_mask,
1609 enum ib_qp_state cur_state, enum ib_qp_state new_state)
1611 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1612 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1613 struct mlx5_ib_cq *send_cq, *recv_cq;
1614 struct mlx5_qp_context *context;
1615 struct mlx5_modify_qp_mbox_in *in;
1616 struct mlx5_ib_pd *pd;
1617 enum mlx5_qp_state mlx5_cur, mlx5_new;
1618 enum mlx5_qp_optpar optpar;
1623 in = kzalloc(sizeof(*in), GFP_KERNEL);
1628 err = to_mlx5_st(ibqp->qp_type);
1632 context->flags = cpu_to_be32(err << 16);
1634 if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
1635 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1637 switch (attr->path_mig_state) {
1638 case IB_MIG_MIGRATED:
1639 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1642 context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
1645 context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
1650 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
1651 context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
1652 } else if (ibqp->qp_type == IB_QPT_UD ||
1653 ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
1654 context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
1655 } else if (attr_mask & IB_QP_PATH_MTU) {
1656 if (attr->path_mtu < IB_MTU_256 ||
1657 attr->path_mtu > IB_MTU_4096) {
1658 mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
1662 context->mtu_msgmax = (attr->path_mtu << 5) |
1663 (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
1666 if (attr_mask & IB_QP_DEST_QPN)
1667 context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
1669 if (attr_mask & IB_QP_PKEY_INDEX)
1670 context->pri_path.pkey_index = attr->pkey_index;
1672 /* todo implement counter_index functionality */
1674 if (is_sqp(ibqp->qp_type))
1675 context->pri_path.port = qp->port;
1677 if (attr_mask & IB_QP_PORT)
1678 context->pri_path.port = attr->port_num;
1680 if (attr_mask & IB_QP_AV) {
1681 err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
1682 attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
1683 attr_mask, 0, attr);
1688 if (attr_mask & IB_QP_TIMEOUT)
1689 context->pri_path.ackto_lt |= attr->timeout << 3;
1691 if (attr_mask & IB_QP_ALT_PATH) {
1692 err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
1693 attr->alt_port_num, attr_mask, 0, attr);
1699 get_cqs(qp, &send_cq, &recv_cq);
1701 context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
1702 context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
1703 context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
1704 context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
1706 if (attr_mask & IB_QP_RNR_RETRY)
1707 context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
1709 if (attr_mask & IB_QP_RETRY_CNT)
1710 context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
1712 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1713 if (attr->max_rd_atomic)
1715 cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
1718 if (attr_mask & IB_QP_SQ_PSN)
1719 context->next_send_psn = cpu_to_be32(attr->sq_psn);
1721 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1722 if (attr->max_dest_rd_atomic)
1724 cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
1727 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
1728 context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);
1730 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1731 context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
1733 if (attr_mask & IB_QP_RQ_PSN)
1734 context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
1736 if (attr_mask & IB_QP_QKEY)
1737 context->qkey = cpu_to_be32(attr->qkey);
1739 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1740 context->db_rec_addr = cpu_to_be64(qp->db.dma);
1742 if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
1743 attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
1748 if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1749 context->sq_crq_size |= cpu_to_be16(1 << 4);
1752 mlx5_cur = to_mlx5_state(cur_state);
1753 mlx5_new = to_mlx5_state(new_state);
1754 mlx5_st = to_mlx5_st(ibqp->qp_type);
	/* If moving to a reset or error state, we must disable page faults on
	 * this QP and flush all current page faults. Otherwise a stale page
	 * fault may attempt to work on this QP after it is reset and moved
	 * again to RTS, and may cause the driver and the device to get out of
	 * sync.
	 */
1763 if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
1764 (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
1765 mlx5_ib_qp_disable_pagefaults(qp);
1767 optpar = ib_mask_to_mlx5_opt(attr_mask);
1768 optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
1769 in->optparam = cpu_to_be32(optpar);
1770 err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
1771 to_mlx5_state(new_state), in, sqd_event,
1776 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1777 mlx5_ib_qp_enable_pagefaults(qp);
1779 qp->state = new_state;
1781 if (attr_mask & IB_QP_ACCESS_FLAGS)
1782 qp->atomic_rd_en = attr->qp_access_flags;
1783 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1784 qp->resp_depth = attr->max_dest_rd_atomic;
1785 if (attr_mask & IB_QP_PORT)
1786 qp->port = attr->port_num;
1787 if (attr_mask & IB_QP_ALT_PATH)
1788 qp->alt_port = attr->alt_port_num;
	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
1794 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
1795 mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1796 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
1797 if (send_cq != recv_cq)
1798 mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1804 qp->sq.cur_post = 0;
1805 qp->sq.last_poll = 0;
1806 qp->db.db[MLX5_RCV_DBR] = 0;
1807 qp->db.db[MLX5_SND_DBR] = 0;
1815 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1816 int attr_mask, struct ib_udata *udata)
1818 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1819 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1820 enum ib_qp_state cur_state, new_state;
1823 enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
1825 mutex_lock(&qp->mutex);
1827 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
1828 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1830 if (!(cur_state == new_state && cur_state == IB_QPS_RESET)) {
1831 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1832 ll = dev->ib_dev.get_link_layer(&dev->ib_dev, port);
1835 if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
1836 !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
1840 if ((attr_mask & IB_QP_PORT) &&
1841 (attr->port_num == 0 ||
1842 attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)))
1845 if (attr_mask & IB_QP_PKEY_INDEX) {
1846 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1847 if (attr->pkey_index >=
1848 dev->mdev->port_caps[port - 1].pkey_table_len)
1852 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
1853 attr->max_rd_atomic >
1854 (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp)))
1857 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
1858 attr->max_dest_rd_atomic >
1859 (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp)))
1862 if (cur_state == new_state && cur_state == IB_QPS_RESET) {
1867 err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
1870 mutex_unlock(&qp->mutex);
1874 static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
1876 struct mlx5_ib_cq *cq;
1879 cur = wq->head - wq->tail;
1880 if (likely(cur + nreq < wq->max_post))
1884 spin_lock(&cq->lock);
1885 cur = wq->head - wq->tail;
1886 spin_unlock(&cq->lock);
1888 return cur + nreq >= wq->max_post;
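/*
 * Accounting sketch for mlx5_wq_overflow(): wq->head counts WQEs posted
 * and wq->tail counts WQEs completed, so head - tail is the number of
 * outstanding WQEs (unsigned wrap-around keeps the subtraction valid).
 * The CQ lock is only taken on the slow path, when the unlocked check
 * suggests the queue may be full, to read an up-to-date tail.
 */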
1891 static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
1892 u64 remote_addr, u32 rkey)
1894 rseg->raddr = cpu_to_be64(remote_addr);
1895 rseg->rkey = cpu_to_be32(rkey);
1899 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
1900 struct ib_send_wr *wr)
1902 memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
1903 dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
1904 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
1907 static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
1909 dseg->byte_count = cpu_to_be32(sg->length);
1910 dseg->lkey = cpu_to_be32(sg->lkey);
1911 dseg->addr = cpu_to_be64(sg->addr);
1914 static __be16 get_klm_octo(int npages)
1916 return cpu_to_be16(ALIGN(npages, 8) / 2);
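/*
 * get_klm_octo() example: translation entries are padded to a multiple of
 * eight and counted as two entries per octoword, so npages == 5 yields
 * ALIGN(5, 8) / 2 == 4. The same helper is reused for bsf_octowords in the
 * UMR control segment.
 */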
1919 static __be64 frwr_mkey_mask(void)
1923 result = MLX5_MKEY_MASK_LEN |
1924 MLX5_MKEY_MASK_PAGE_SIZE |
1925 MLX5_MKEY_MASK_START_ADDR |
1926 MLX5_MKEY_MASK_EN_RINVAL |
1927 MLX5_MKEY_MASK_KEY |
1933 MLX5_MKEY_MASK_SMALL_FENCE |
1934 MLX5_MKEY_MASK_FREE;
1936 return cpu_to_be64(result);
1939 static __be64 sig_mkey_mask(void)
1943 result = MLX5_MKEY_MASK_LEN |
1944 MLX5_MKEY_MASK_PAGE_SIZE |
1945 MLX5_MKEY_MASK_START_ADDR |
1946 MLX5_MKEY_MASK_EN_SIGERR |
1947 MLX5_MKEY_MASK_EN_RINVAL |
1948 MLX5_MKEY_MASK_KEY |
1953 MLX5_MKEY_MASK_SMALL_FENCE |
1954 MLX5_MKEY_MASK_FREE |
1955 MLX5_MKEY_MASK_BSF_EN;
1957 return cpu_to_be64(result);
1960 static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
1961 struct mlx5_ib_mr *mr)
1963 int ndescs = mr->ndescs;
1965 memset(umr, 0, sizeof(*umr));
1966 umr->flags = MLX5_UMR_CHECK_NOT_FREE;
1967 umr->klm_octowords = get_klm_octo(ndescs);
1968 umr->mkey_mask = frwr_mkey_mask();
1971 static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
1973 memset(umr, 0, sizeof(*umr));
1974 umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
1975 umr->flags = 1 << 7;
1978 static __be64 get_umr_reg_mr_mask(void)
1982 result = MLX5_MKEY_MASK_LEN |
1983 MLX5_MKEY_MASK_PAGE_SIZE |
1984 MLX5_MKEY_MASK_START_ADDR |
1988 MLX5_MKEY_MASK_KEY |
1992 MLX5_MKEY_MASK_FREE;
1994 return cpu_to_be64(result);
1997 static __be64 get_umr_unreg_mr_mask(void)
2001 result = MLX5_MKEY_MASK_FREE;
2003 return cpu_to_be64(result);
2006 static __be64 get_umr_update_mtt_mask(void)
2010 result = MLX5_MKEY_MASK_FREE;
2012 return cpu_to_be64(result);
2015 static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
2016 struct ib_send_wr *wr)
2018 struct mlx5_umr_wr *umrwr = umr_wr(wr);
2020 memset(umr, 0, sizeof(*umr));
2022 if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
2023 umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
2025 umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
2027 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
2028 umr->klm_octowords = get_klm_octo(umrwr->npages);
2029 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
2030 umr->mkey_mask = get_umr_update_mtt_mask();
2031 umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
2032 umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
2034 umr->mkey_mask = get_umr_reg_mr_mask();
2037 umr->mkey_mask = get_umr_unreg_mr_mask();
2041 umr->flags |= MLX5_UMR_INLINE;
2044 static u8 get_umr_flags(int acc)
2046 return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
2047 (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
2048 (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
2049 (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
2050 MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
2053 static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
2054 struct mlx5_ib_mr *mr,
2055 u32 key, int access)
2057 int ndescs = ALIGN(mr->ndescs, 8) >> 1;
2059 memset(seg, 0, sizeof(*seg));
2060 seg->flags = get_umr_flags(access) | MLX5_ACCESS_MODE_MTT;
2061 seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
2062 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
2063 seg->start_addr = cpu_to_be64(mr->ibmr.iova);
2064 seg->len = cpu_to_be64(mr->ibmr.length);
2065 seg->xlt_oct_size = cpu_to_be32(ndescs);
2066 seg->log2_page_size = ilog2(mr->ibmr.page_size);
2069 static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
2071 memset(seg, 0, sizeof(*seg));
2072 seg->status = MLX5_MKEY_STATUS_FREE;
2075 static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
2077 struct mlx5_umr_wr *umrwr = umr_wr(wr);
2079 memset(seg, 0, sizeof(*seg));
2080 if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
2081 seg->status = MLX5_MKEY_STATUS_FREE;
2085 seg->flags = convert_access(umrwr->access_flags);
2086 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
2087 seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
2088 seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
2090 seg->len = cpu_to_be64(umrwr->length);
2091 seg->log2_page_size = umrwr->page_shift;
2092 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
2093 mlx5_mkey_variant(umrwr->mkey));
2096 static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
2097 struct mlx5_ib_mr *mr,
2098 struct mlx5_ib_pd *pd)
2100 int bcount = mr->desc_size * mr->ndescs;
2102 dseg->addr = cpu_to_be64(mr->desc_map);
2103 dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
2104 dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
2107 static __be32 send_ieth(struct ib_send_wr *wr)
2109 switch (wr->opcode) {
2110 case IB_WR_SEND_WITH_IMM:
2111 case IB_WR_RDMA_WRITE_WITH_IMM:
2112 return wr->ex.imm_data;
2114 case IB_WR_SEND_WITH_INV:
2115 return cpu_to_be32(wr->ex.invalidate_rkey);
2122 static u8 calc_sig(void *wqe, int size)
2128 for (i = 0; i < size; i++)
2134 static u8 wq_sig(void *wqe)
2136 return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
2139 static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
2142 struct mlx5_wqe_inline_seg *seg;
2143 void *qend = qp->sq.qend;
2151 wqe += sizeof(*seg);
2152 for (i = 0; i < wr->num_sge; i++) {
2153 addr = (void *)(unsigned long)(wr->sg_list[i].addr);
2154 len = wr->sg_list[i].length;
2157 if (unlikely(inl > qp->max_inline_data))
2160 if (unlikely(wqe + len > qend)) {
2162 memcpy(wqe, addr, copy);
2165 wqe = mlx5_get_send_wqe(qp, 0);
2167 memcpy(wqe, addr, len);
2171 seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
2173 *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
2178 static u16 prot_field_size(enum ib_signature_type type)
2181 case IB_SIG_TYPE_T10_DIF:
2182 return MLX5_DIF_SIZE;
2188 static u8 bs_selector(int block_size)
2190 switch (block_size) {
2191 case 512: return 0x1;
2192 case 520: return 0x2;
2193 case 4096: return 0x3;
2194 case 4160: return 0x4;
2195 case 1073741824: return 0x5;
2200 static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
2201 struct mlx5_bsf_inl *inl)
2203 /* Valid inline section and allow BSF refresh */
2204 inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
2205 MLX5_BSF_REFRESH_DIF);
2206 inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
2207 inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
2208 /* repeating block */
2209 inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
2210 inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
2211 MLX5_DIF_CRC : MLX5_DIF_IPCS;
2213 if (domain->sig.dif.ref_remap)
2214 inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
2216 if (domain->sig.dif.app_escape) {
2217 if (domain->sig.dif.ref_escape)
2218 inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
2220 inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
2223 inl->dif_app_bitmask_check =
2224 cpu_to_be16(domain->sig.dif.apptag_check_mask);
2227 static int mlx5_set_bsf(struct ib_mr *sig_mr,
2228 struct ib_sig_attrs *sig_attrs,
2229 struct mlx5_bsf *bsf, u32 data_size)
2231 struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
2232 struct mlx5_bsf_basic *basic = &bsf->basic;
2233 struct ib_sig_domain *mem = &sig_attrs->mem;
2234 struct ib_sig_domain *wire = &sig_attrs->wire;
2236 memset(bsf, 0, sizeof(*bsf));
2238 /* Basic + Extended + Inline */
2239 basic->bsf_size_sbs = 1 << 7;
2240 /* Input domain check byte mask */
2241 basic->check_byte_mask = sig_attrs->check_mask;
2242 basic->raw_data_size = cpu_to_be32(data_size);
2245 switch (sig_attrs->mem.sig_type) {
2246 case IB_SIG_TYPE_NONE:
2248 case IB_SIG_TYPE_T10_DIF:
2249 basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
2250 basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
2251 mlx5_fill_inl_bsf(mem, &bsf->m_inl);
2258 switch (sig_attrs->wire.sig_type) {
2259 case IB_SIG_TYPE_NONE:
2261 case IB_SIG_TYPE_T10_DIF:
2262 if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
2263 mem->sig_type == wire->sig_type) {
2264 /* Same block structure */
2265 basic->bsf_size_sbs |= 1 << 4;
2266 if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
2267 basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
2268 if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
2269 basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
2270 if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
2271 basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
2273 basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
2275 basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
2276 mlx5_fill_inl_bsf(wire, &bsf->w_inl);
2285 static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
2286 struct mlx5_ib_qp *qp, void **seg, int *size)
2288 struct ib_sig_attrs *sig_attrs = wr->sig_attrs;
2289 struct ib_mr *sig_mr = wr->sig_mr;
2290 struct mlx5_bsf *bsf;
2291 u32 data_len = wr->wr.sg_list->length;
2292 u32 data_key = wr->wr.sg_list->lkey;
2293 u64 data_va = wr->wr.sg_list->addr;
2298 (data_key == wr->prot->lkey &&
2299 data_va == wr->prot->addr &&
2300 data_len == wr->prot->length)) {
		/**
		 * Source domain doesn't contain signature information
		 * or data and protection are interleaved in memory.
		 * So we construct:
		 *                  ------------------
		 *                 |     data_klm     |
		 *                  ------------------
		 *                 |       BSF        |
		 *                  ------------------
		 **/
2311 struct mlx5_klm *data_klm = *seg;
2313 data_klm->bcount = cpu_to_be32(data_len);
2314 data_klm->key = cpu_to_be32(data_key);
2315 data_klm->va = cpu_to_be64(data_va);
2316 wqe_size = ALIGN(sizeof(*data_klm), 64);
		/**
		 * Source domain contains signature information,
		 * so we construct a strided block format:
		 *               ---------------------------
		 *              |     stride_block_ctrl     |
		 *               ---------------------------
		 *              |         data_klm          |
		 *               ---------------------------
		 *              |         prot_klm          |
		 *               ---------------------------
		 *              |            BSF            |
		 *               ---------------------------
		 **/
2331 struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
2332 struct mlx5_stride_block_entry *data_sentry;
2333 struct mlx5_stride_block_entry *prot_sentry;
2334 u32 prot_key = wr->prot->lkey;
2335 u64 prot_va = wr->prot->addr;
2336 u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
2340 data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
2341 prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
2343 prot_size = prot_field_size(sig_attrs->mem.sig_type);
2345 pr_err("Bad block size given: %u\n", block_size);
2348 sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
2350 sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
2351 sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
2352 sblock_ctrl->num_entries = cpu_to_be16(2);
2354 data_sentry->bcount = cpu_to_be16(block_size);
2355 data_sentry->key = cpu_to_be32(data_key);
2356 data_sentry->va = cpu_to_be64(data_va);
2357 data_sentry->stride = cpu_to_be16(block_size);
2359 prot_sentry->bcount = cpu_to_be16(prot_size);
2360 prot_sentry->key = cpu_to_be32(prot_key);
2361 prot_sentry->va = cpu_to_be64(prot_va);
2362 prot_sentry->stride = cpu_to_be16(prot_size);
2364 wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
2365 sizeof(*prot_sentry), 64);
2369 *size += wqe_size / 16;
2370 if (unlikely((*seg == qp->sq.qend)))
2371 *seg = mlx5_get_send_wqe(qp, 0);
2374 ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
2378 *seg += sizeof(*bsf);
2379 *size += sizeof(*bsf) / 16;
2380 if (unlikely((*seg == qp->sq.qend)))
2381 *seg = mlx5_get_send_wqe(qp, 0);
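/*
 * Fill the memory key context segment for the signature MR: KLM access
 * mode, BSF enabled, remote invalidation allowed, with the low bit of the
 * MR's signature error counter folded into the key flags.
 */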
static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
				 struct ib_sig_handover_wr *wr, u32 nelements,
				 u32 length, u32 pdn)
{
	struct ib_mr *sig_mr = wr->sig_mr;
	u32 sig_key = sig_mr->rkey;
	u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;

	memset(seg, 0, sizeof(*seg));

	seg->flags = get_umr_flags(wr->access_flags) |
		     MLX5_ACCESS_MODE_KLM;
	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
				    MLX5_MKEY_BSF_EN | pdn);
	seg->len = cpu_to_be64(length);
	seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
	seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
}

static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				u32 nelements)
{
	memset(umr, 0, sizeof(*umr));

	umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
	umr->klm_octowords = get_klm_octo(nelements);
	umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
	umr->mkey_mask = sig_mkey_mask();
}

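/*
 * Emit the UMR control, mkey and data segments for an IB_WR_REG_SIG_MR
 * work request. Validates that the request carries exactly one data SGE,
 * no remote-atomic access, that signature offload is enabled on the QP,
 * and that the previous signature status on this MR has been checked.
 */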
static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
			  void **seg, int *size)
{
	struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
	struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
	u32 pdn = get_pd(qp)->pdn;
	u32 klm_oct_size;
	int region_len, ret;

	if (unlikely(wr->wr.num_sge != 1) ||
	    unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) ||
	    unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
	    unlikely(!sig_mr->sig->sig_status_checked))
		return -EINVAL;

	/* length of the protected region, data + protection */
	region_len = wr->wr.sg_list->length;
	if (wr->prot &&
	    (wr->prot->lkey != wr->wr.sg_list->lkey ||
	     wr->prot->addr != wr->wr.sg_list->addr ||
	     wr->prot->length != wr->wr.sg_list->length))
		region_len += wr->prot->length;

	/**
	 * KLM octoword size - if protection was provided
	 * then we use strided block format (3 octowords),
	 * else we use single KLM (1 octoword)
	 **/
	klm_oct_size = wr->prot ? 3 : 1;

	set_sig_umr_segment(*seg, klm_oct_size);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	ret = set_sig_data_segment(wr, qp, seg, size);
	if (ret)
		return ret;

	sig_mr->sig->sig_status_checked = false;
	return 0;
}

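/*
 * Build a SET_PSV segment that programs the given PSV (protection
 * signature value) index with the domain's initial T10-DIF values (block
 * guard, application tag, reference tag). For IB_SIG_TYPE_NONE only the
 * PSV number is set.
 */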
static int set_psv_wr(struct ib_sig_domain *domain,
		      u32 psv_idx, void **seg, int *size)
{
	struct mlx5_seg_set_psv *psv_seg = *seg;

	memset(psv_seg, 0, sizeof(*psv_seg));
	psv_seg->psv_num = cpu_to_be32(psv_idx);
	switch (domain->sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
						     domain->sig.dif.app_tag);
		psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
		break;
	default:
		pr_err("Bad signature type given.\n");
		return 1;
	}

	*seg += sizeof(*psv_seg);
	*size += sizeof(*psv_seg) / 16;

	return 0;
}

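/*
 * Emit the three segments of a fast-registration (IB_WR_REG_MR) WQE:
 * UMR control, mkey context and a data segment pointing at the MR's
 * translation buffer. Inline sends are rejected for this work request.
 */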
static int set_reg_wr(struct mlx5_ib_qp *qp,
		      struct ib_reg_wr *wr,
		      void **seg, int *size)
{
	struct mlx5_ib_mr *mr = to_mmr(wr->mr);
	struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);

	if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
		mlx5_ib_warn(to_mdev(qp->ibqp.device),
			     "Invalid IB_SEND_INLINE send flag\n");
		return -EINVAL;
	}

	set_reg_umr_seg(*seg, mr);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	set_reg_data_seg(*seg, mr, pd);
	*seg += sizeof(struct mlx5_wqe_data_seg);
	*size += (sizeof(struct mlx5_wqe_data_seg) / 16);

	return 0;
}

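/* Emit the UMR control and mkey segments for a local-invalidate WQE. */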
static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size)
{
	set_linv_umr_seg(*seg);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);
	set_linv_mkey_seg(*seg);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);
}

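/*
 * Debug helper: dump a posted WQE, 16 bytes per line, wrapping around the
 * send queue buffer when the WQE crosses its end.
 */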
static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
{
	__be32 *p = NULL;
	int tidx = idx;
	int i, j;

	pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
	for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
		if ((i & 0xf) == 0) {
			void *buf = mlx5_get_send_wqe(qp, tidx);
			tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
			p = buf;
			j = 0;
		}
		pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
			 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
			 be32_to_cpu(p[j + 3]));
	}
}

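/*
 * Copy a WQE into the BlueFlame register area 64 bytes per iteration,
 * wrapping to the start of the send queue if the source crosses its end.
 */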
static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
			 unsigned bytecnt, struct mlx5_ib_qp *qp)
{
	while (bytecnt > 0) {
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		bytecnt -= 64;
		if (unlikely(src == qp->sq.qend))
			src = mlx5_get_send_wqe(qp, 0);
	}
}

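/*
 * Pick the fence mode for the control segment: local invalidate with
 * IB_SEND_FENCE requests strong ordering; otherwise an inherited fence is
 * either upgraded to small-and-fence or carried through unchanged.
 */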
static u8 get_fence(u8 fence, struct ib_send_wr *wr)
{
	if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
		     wr->send_flags & IB_SEND_FENCE))
		return MLX5_FENCE_MODE_STRONG_ORDERING;

	if (unlikely(fence)) {
		if (wr->send_flags & IB_SEND_FENCE)
			return MLX5_FENCE_MODE_SMALL_AND_FENCE;
		else
			return fence;
	} else {
		return 0;
	}
}

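/*
 * Reserve the next send WQE slot: check for SQ overflow, locate the WQE by
 * producer index, and initialize its control segment (immediate data and
 * completion/solicited flags) before the caller appends further segments.
 */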
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
		     struct mlx5_wqe_ctrl_seg **ctrl,
		     struct ib_send_wr *wr, unsigned *idx,
		     int *size, int nreq)
{
	int err = 0;

	if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
		err = -ENOMEM;
		return err;
	}

	*idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
	*seg = mlx5_get_send_wqe(qp, *idx);
	*ctrl = *seg;
	*(uint32_t *)(*seg + 8) = 0;
	(*ctrl)->imm = send_ieth(wr);
	(*ctrl)->fm_ce_se = qp->sq_signal_bits |
		(wr->send_flags & IB_SEND_SIGNALED ?
		 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
		(wr->send_flags & IB_SEND_SOLICITED ?
		 MLX5_WQE_CTRL_SOLICITED : 0);

	*seg += sizeof(**ctrl);
	*size = sizeof(**ctrl) / 16;

	return err;
}

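/*
 * Finalize a WQE: stamp the opcode and WQE index into the control segment,
 * record bookkeeping for completion processing, and advance the producer
 * index by the number of send WQE basic blocks (MLX5_SEND_WQE_BB) this
 * WQE occupies.
 */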
static void finish_wqe(struct mlx5_ib_qp *qp,
		       struct mlx5_wqe_ctrl_seg *ctrl,
		       u8 size, unsigned idx, u64 wr_id,
		       int nreq, u8 fence, u8 next_fence,
		       u32 mlx5_opcode)
{
	u8 opmod = 0;

	ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
					     mlx5_opcode | ((u32)opmod << 24));
	ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
	ctrl->fm_ce_se |= fence;
	qp->fm_cache = next_fence;
	if (unlikely(qp->wq_sig))
		ctrl->signature = wq_sig(ctrl);

	qp->sq.wrid[idx] = wr_id;
	qp->sq.w_list[idx].opcode = mlx5_opcode;
	qp->sq.wqe_head[idx] = qp->sq.head + nreq;
	qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
	qp->sq.w_list[idx].next = qp->sq.cur_post;
}

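/*
 * Post a chain of send work requests: for each WR, reserve a WQE slot,
 * append the transport- and opcode-specific segments, then either inline
 * the payload or add scatter/gather pointer segments. Once all WQEs are
 * written, update the doorbell record and ring the doorbell under the
 * bf lock.
 */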
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* compiler warning */
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_mr *mr;
	struct mlx5_wqe_data_seg *dpseg;
	struct mlx5_wqe_xrc_seg *xrc;
	struct mlx5_bf *bf = qp->bf;
	int uninitialized_var(size);
	void *qend = qp->sq.qend;
	unsigned long flags;
	unsigned idx;
	int err = 0;
	int inl = 0;
	int num_sge;
	void *seg;
	int nreq;
	int i;
	u8 next_fence = 0;
	u8 fence;

	spin_lock_irqsave(&qp->sq.lock, flags);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
			mlx5_ib_warn(dev, "\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		fence = qp->fm_cache;
		num_sge = wr->num_sge;
		if (unlikely(num_sge > qp->sq.max_gs)) {
			mlx5_ib_warn(dev, "\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
		if (err) {
			mlx5_ib_warn(dev, "\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		switch (ibqp->qp_type) {
		case IB_QPT_XRC_INI:
			xrc = seg;
			seg += sizeof(*xrc);
			size += sizeof(*xrc) / 16;
			/* fall through */
		case IB_QPT_RC:
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
				err = -ENOSYS;
				*bad_wr = wr;
				goto out;

			case IB_WR_LOCAL_INV:
				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
				set_linv_wr(qp, &seg, &size);
				num_sge = 0;
				break;

			case IB_WR_REG_MR:
				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				qp->sq.wr_data[idx] = IB_WR_REG_MR;
				ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
				err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
				if (err) {
					*bad_wr = wr;
					goto out;
				}
				num_sge = 0;
				break;

			case IB_WR_REG_SIG_MR:
				qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
				mr = to_mmr(sig_handover_wr(wr)->sig_mr);

				ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
				err = set_sig_umr_wr(wr, qp, &seg, &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
					   nreq, get_fence(fence, wr),
					   next_fence, MLX5_OPCODE_UMR);
				/*
				 * SET_PSV WQEs are not signaled and solicited
				 * on error
				 */
				wr->send_flags &= ~IB_SEND_SIGNALED;
				wr->send_flags |= IB_SEND_SOLICITED;
				err = begin_wqe(qp, &seg, &ctrl, wr,
						&idx, &size, nreq);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					err = -ENOMEM;
					*bad_wr = wr;
					goto out;
				}

				err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->mem,
						 mr->sig->psv_memory.psv_idx, &seg,
						 &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
					   nreq, get_fence(fence, wr),
					   next_fence, MLX5_OPCODE_SET_PSV);
				err = begin_wqe(qp, &seg, &ctrl, wr,
						&idx, &size, nreq);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					err = -ENOMEM;
					*bad_wr = wr;
					goto out;
				}

				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
						 mr->sig->psv_wire.psv_idx, &seg,
						 &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
					   nreq, get_fence(fence, wr),
					   next_fence, MLX5_OPCODE_SET_PSV);
				num_sge = 0;
				goto skip_psv;

			default:
				break;
			}
			break;

		case IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			default:
				break;
			}
			break;

		case IB_QPT_SMI:
		case IB_QPT_GSI:
		case IB_QPT_UD:
			set_datagram_seg(seg, wr);
			seg += sizeof(struct mlx5_wqe_datagram_seg);
			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;

		case MLX5_IB_QPT_REG_UMR:
			if (wr->opcode != MLX5_IB_WR_UMR) {
				err = -EINVAL;
				mlx5_ib_warn(dev, "bad opcode\n");
				goto out;
			}
			qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
			ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
			set_reg_umr_segment(seg, wr);
			seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
			size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			set_reg_mkey_segment(seg, wr);
			seg += sizeof(struct mlx5_mkey_seg);
			size += sizeof(struct mlx5_mkey_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;

		default:
			break;
		}

		if (wr->send_flags & IB_SEND_INLINE && num_sge) {
			int uninitialized_var(sz);

			err = set_data_inl_seg(qp, wr, seg, &sz);
			if (unlikely(err)) {
				mlx5_ib_warn(dev, "\n");
				*bad_wr = wr;
				goto out;
			}
			inl = 1;
			size += sz;
		} else {
			dpseg = seg;
			for (i = 0; i < num_sge; i++) {
				if (unlikely(dpseg == qend)) {
					seg = mlx5_get_send_wqe(qp, 0);
					dpseg = seg;
				}
				if (likely(wr->sg_list[i].length)) {
					set_data_ptr_seg(dpseg, wr->sg_list + i);
					size += sizeof(struct mlx5_wqe_data_seg) / 16;
					dpseg++;
				}
			}
		}

		finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
			   get_fence(fence, wr), next_fence,
			   mlx5_ib_opcode[wr->opcode]);
skip_psv:
		if (0)
			dump_wqe(qp, idx, size);
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/* Make sure that descriptors are written before
		 * updating doorbell record and ringing the doorbell
		 */
		wmb();

		qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);

		/* Make sure doorbell record is visible to the HCA before
		 * we hit doorbell */
		wmb();

		if (bf->need_lock)
			spin_lock(&bf->lock);
		else
			__acquire(&bf->lock);

		/* TBD enable WC */
		if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
			mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
			/* wc_wmb(); */
		} else {
			mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
				     MLX5_GET_DOORBELL_LOCK(&bf->lock32));
			/* Make sure doorbells don't leak out of SQ spinlock
			 * and reach the HCA out of order.
			 */
			mmiowb();
		}
		bf->offset ^= bf->buf_size;
		if (bf->need_lock)
			spin_unlock(&bf->lock);
		else
			__release(&bf->lock);
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}

static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
{
	sig->signature = calc_sig(sig, size);
}

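/*
 * Post a chain of receive work requests: write one receive WQE per WR,
 * terminate short scatter lists with an invalid-lkey entry, optionally add
 * the receive-WQE signature, and finally update the RQ doorbell record.
 */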
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_rwqe_sig *sig;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);
		if (qp->wq_sig)
			scat++;

		for (i = 0; i < wr->num_sge; i++)
			set_data_ptr_seg(scat + i, wr->sg_list + i);

		if (i < qp->rq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr = 0;
		}

		if (qp->wq_sig) {
			sig = (struct mlx5_rwqe_sig *)scat;
			set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}

static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
	switch (mlx5_state) {
	case MLX5_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX5_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX5_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX5_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX5_QP_STATE_SQ_DRAINING:
	case MLX5_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX5_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX5_QP_STATE_ERR:      return IB_QPS_ERR;
	default:		     return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
{
	switch (mlx5_mig_state) {
	case MLX5_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX5_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX5_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}

static int to_ib_qp_access_flags(int mlx5_flags)
{
	int ib_flags = 0;

	if (mlx5_flags & MLX5_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx5_flags & MLX5_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx5_flags & MLX5_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

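/*
 * Translate a firmware QP path context into an ib_ah_attr, bailing out
 * early when the port number in the path is out of range for this device.
 */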
static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
			  struct mlx5_qp_path *path)
{
	struct mlx5_core_dev *dev = ibdev->mdev;

	memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
	ib_ah_attr->port_num = path->port;

	if (ib_ah_attr->port_num == 0 ||
	    ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports))
		return;

	ib_ah_attr->sl = path->dci_cfi_prio_sl & 0xf;

	ib_ah_attr->dlid = be16_to_cpu(path->rlid);
	ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
	ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
	}
}

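/*
 * Query the QP context from firmware and translate it into ib_qp_attr /
 * ib_qp_init_attr. Send-queue capabilities are only reported for kernel
 * QPs; for user QPs they are returned as zero.
 */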
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_query_qp_mbox_out *outb;
	struct mlx5_qp_context *context;
	int mlx5_state;
	int err = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/*
	 * Wait for any outstanding page faults, in case the user frees memory
	 * based upon this query's result.
	 */
	flush_workqueue(mlx5_ib_page_fault_wq);
#endif

	mutex_lock(&qp->mutex);
	outb = kzalloc(sizeof(*outb), GFP_KERNEL);
	if (!outb) {
		err = -ENOMEM;
		goto out;
	}
	context = &outb->ctx;
	err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
	if (err)
		goto out_free;

	mlx5_state = be32_to_cpu(context->flags) >> 28;

	qp->state = to_ib_qp_state(mlx5_state);
	qp_attr->qp_state = qp->state;
	qp_attr->path_mtu = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey = be32_to_cpu(context->qkey);
	qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
	qp_attr->port_num = context->pri_path.port;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout = context->pri_path.ackto_lt >> 3;
	qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
	qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/* We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
		qp_init_attr->create_flags |= IB_QP_CREATE_CROSS_CHANNEL;
	if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND;
	if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;

	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out_free:
	kfree(outb);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

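/*
 * Allocate an XRC domain through the core driver; only available when the
 * device reports XRC support in its general capabilities.
 */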
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_xrcd *xrcd;
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, xrc))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
	if (err) {
		kfree(xrcd);
		return ERR_PTR(-ENOMEM);
	}

	return &xrcd->ibxrcd;
}

int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
	int err;

	err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
	if (err) {
		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);