/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "mlx5_ib.h"
#include "user.h"
/* not supported currently */
static int srq_signature;
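
/* Return a pointer to the n-th receive WQE in the SRQ buffer; WQEs are
 * laid out with a stride of 1 << srq->msrq.wqe_shift bytes.
 */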
static void *get_wqe(struct mlx5_ib_srq *srq, int n)
{
	return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}
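
/* Async event callback registered with mlx5_core: translate a hardware
 * SRQ event (limit reached, catastrophic error) into the matching
 * ib_event and deliver it to the consumer's handler, if one is set.
 */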
static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
{
	struct ib_event event;
	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

	if (ibsrq->event_handler) {
		event.device      = ibsrq->device;
		event.element.srq = ibsrq;
		switch (type) {
		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n",
				type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}
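
/* User-space path: copy and validate the create command from udata, pin
 * the user-provided WQ buffer, build the physical address list for
 * firmware, and map the user doorbell record.
 */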
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			   struct mlx5_srq_attr *in,
			   struct ib_udata *udata, int buf_size)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_srq ucmd = {};
	size_t ucmdlen;
	int err, npages, page_shift, ncont;
	u32 offset;
	u32 uidx = MLX5_IB_DEFAULT_UIDX;

	ucmdlen = min(udata->inlen, sizeof(ucmd));
	if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
		mlx5_ib_dbg(dev, "failed copy udata\n");
		return -EFAULT;
	}

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EINVAL;

	if (in->type == IB_SRQT_XRC) {
		err = get_srq_user_index(to_mucontext(pd->uobject->context),
					 &ucmd, udata->inlen, &uidx);
		if (err)
			return err;
	}

	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

	srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
				0, 0);
	if (IS_ERR(srq->umem)) {
		mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
		err = PTR_ERR(srq->umem);
		return err;
	}

	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
			   &page_shift, &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
				     &offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	in->pas = mlx5_vzalloc(sizeof(*in->pas) * ncont);
	if (!in->pas) {
		err = -ENOMEM;
		goto err_umem;
	}

	mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);

	err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
				  ucmd.db_addr, &srq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map doorbell failed\n");
		goto err_in;
	}

	in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	in->page_offset = offset;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type == IB_SRQT_XRC)
		in->user_index = uidx;

	return 0;

err_in:
	kvfree(in->pas);
err_umem:
	ib_umem_release(srq->umem);
	return err;
}
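
/* Kernel-consumer path: allocate the WQ buffer and doorbell record from
 * kernel memory and link every WQE into the initial free list.
 */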
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
			     struct mlx5_srq_attr *in, int buf_size)
{
	struct mlx5_wqe_srq_next_seg *next;
	int page_shift, npages;
	int err, i;

	err = mlx5_db_alloc(dev->mdev, &srq->db);
	if (err) {
		mlx5_ib_warn(dev, "alloc dbell rec failed\n");
		return err;
	}

	if (mlx5_buf_alloc(dev->mdev, buf_size, &srq->buf)) {
		mlx5_ib_dbg(dev, "buf alloc failed\n");
		err = -ENOMEM;
		goto err_db;
	}
	page_shift = srq->buf.page_shift;

	srq->head = 0;
	srq->tail = srq->msrq.max - 1;
	srq->wqe_ctr = 0;

	for (i = 0; i < srq->msrq.max; i++) {
		next = get_wqe(srq, i);
		next->next_wqe_index =
			cpu_to_be16((i + 1) & (srq->msrq.max - 1));
	}

	npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
	mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
		    buf_size, page_shift, srq->buf.npages, npages);
	in->pas = mlx5_vzalloc(sizeof(*in->pas) * npages);
	if (!in->pas) {
		err = -ENOMEM;
		goto err_buf;
	}
	mlx5_fill_page_array(&srq->buf, in->pas);

	srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
	if (!srq->wrid) {
		mlx5_ib_dbg(dev, "kmalloc failed %lu\n",
			    (unsigned long)(srq->msrq.max * sizeof(u64)));
		err = -ENOMEM;
		goto err_in;
	}
	srq->wq_sig = !!srq_signature;

	in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type == IB_SRQT_XRC)
		in->user_index = MLX5_IB_DEFAULT_UIDX;

	return 0;

err_in:
	kvfree(in->pas);
err_buf:
	mlx5_buf_free(dev->mdev, &srq->buf);
err_db:
	mlx5_db_free(dev->mdev, &srq->db);
	return err;
}
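
/* Tear-down counterparts of the two create paths: unmap and release user
 * resources, or free the kernel wrid array, WQ buffer and doorbell.
 */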
static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
{
	mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
	ib_umem_release(srq->umem);
}
static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
	kfree(srq->wrid);
	mlx5_buf_free(dev->mdev, &srq->buf);
	mlx5_db_free(dev->mdev, &srq->db);
}
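
/* Create an SRQ. The requested max_wr is bumped by one (so a full queue
 * can be told apart from an empty free list) and rounded up to a power of
 * two; the WQE stride must hold the next-segment header plus max_sge
 * scatter entries, rounded up to a power of two of at least 32 bytes.
 *
 * Worked example, assuming 16-byte next and data segments: max_wr = 100,
 * max_sge = 4 gives msrq.max = roundup_pow_of_two(101) = 128,
 * desc_size = 16 + 4 * 16 = 80 -> 128, wqe_shift = 7, and
 * max_avail_gather = (128 - 16) / 16 = 7; max_wr reported back is 127.
 */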
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_srq *srq;
	int desc_size;
	int buf_size;
	int err;
	struct mlx5_srq_attr in = {0};
	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr >= max_srq_wqes) {
		mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
			    init_attr->attr.max_wr,
			    max_srq_wqes);
		return ERR_PTR(-EINVAL);
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
	desc_size = roundup_pow_of_two(desc_size);
	desc_size = max_t(int, 32, desc_size);
	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	srq->msrq.wqe_shift = ilog2(desc_size);
	buf_size = srq->msrq.max * desc_size;
	mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
		    desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
		    srq->msrq.max_avail_gather);

	if (pd->uobject)
		err = create_srq_user(pd, srq, &in, udata, buf_size);
	else
		err = create_srq_kernel(dev, srq, &in, buf_size);

	if (err) {
		mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
			     pd->uobject ? "user" : "kernel", err);
		goto err_srq;
	}

	in.type = init_attr->srq_type;
	in.log_size = ilog2(srq->msrq.max);
	in.wqe_shift = srq->msrq.wqe_shift - 4;
	if (srq->wq_sig)
		in.flags |= MLX5_SRQ_FLAG_WQ_SIG;
	if (init_attr->srq_type == IB_SRQT_XRC) {
		in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
		in.cqn = to_mcq(init_attr->ext.xrc.cq)->mcq.cqn;
	} else if (init_attr->srq_type == IB_SRQT_BASIC) {
		in.xrcd = to_mxrcd(dev->devr.x0)->xrcdn;
		in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
	}

	in.pd = to_mpd(pd)->pdn;
	in.db_record = srq->db.dma;
	err = mlx5_core_create_srq(dev->mdev, &srq->msrq, &in);
	kvfree(in.pas);
	if (err) {
		mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
		goto err_usr_kern_srq;
	}

	mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);

	srq->msrq.event = mlx5_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

	if (pd->uobject)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
			mlx5_ib_dbg(dev, "copy to user failed\n");
			err = -EFAULT;
			goto err_core;
		}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return &srq->ibsrq;

err_core:
	mlx5_core_destroy_srq(dev->mdev, &srq->msrq);

err_usr_kern_srq:
	if (pd->uobject)
		destroy_srq_user(pd, srq);
	else
		destroy_srq_kernel(dev, srq);

err_srq:
	kfree(srq);

	return ERR_PTR(err);
}
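
/* Only IB_SRQ_LIMIT is handled: arm the SRQ so the firmware raises a
 * limit event once fewer than srq_limit WQEs remain posted. Resizing via
 * IB_SRQ_MAX_WR is not supported.
 */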
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	int ret;

	/* We don't support resizing SRQs yet */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1);
		mutex_unlock(&srq->mutex);
		if (ret)
			return ret;
	}

	return 0;
}
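
/* Query the firmware for the current limit watermark (lwm) and report
 * the software-tracked capacity limits back to the caller.
 */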
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	struct mlx5_srq_attr *out;
	int ret;

	out = kzalloc(sizeof(*out), GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
	if (ret)
		goto out_box;

	srq_attr->srq_limit = out->lwm;
	srq_attr->max_wr = srq->msrq.max - 1;
	srq_attr->max_sge = srq->msrq.max_gs;

out_box:
	kfree(out);
	return ret;
}
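
/* Destroy the firmware SRQ object first, then release whichever set of
 * resources (user or kernel) the create path allocated.
 */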
int mlx5_ib_destroy_srq(struct ib_srq *srq)
{
	struct mlx5_ib_dev *dev = to_mdev(srq->device);
	struct mlx5_ib_srq *msrq = to_msrq(srq);

	mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);

	if (srq->uobject) {
		mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
		ib_umem_release(msrq->umem);
	} else {
		destroy_srq_kernel(dev, msrq);
	}

	kfree(srq);
	return 0;
}
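
/* Called from completion handling when the hardware has consumed a WQE:
 * put wqe_index back on the free list by linking it behind the current
 * tail. Completions may hand WQEs back in any order, which is why the
 * free list is index-linked rather than a simple ring.
 */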
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
{
	struct mlx5_wqe_srq_next_seg *next;

	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}
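
/* Post a chain of receive work requests: for each WR, take the WQE at
 * the free-list head, fill its scatter entries (terminating with a
 * zero-length MLX5_INVALID_LKEY entry when space remains), then ring the
 * doorbell once for the whole chain after a write memory barrier.
 */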
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	struct mlx5_wqe_srq_next_seg *next;
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		goto out;
	}

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat = (struct mlx5_wqe_data_seg *)(next + 1);

		for (i = 0; i < wr->num_sge; i++) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
		}

		if (i < srq->msrq.max_avail_gather) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}
out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}