Commit | Line | Data |
---|---|---|
e126ba97 | 1 | /* |
6cf0a15f | 2 | * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. |
e126ba97 EC |
3 | * |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | ||
33 | #include <linux/module.h> | |
34 | #include <linux/mlx5/qp.h> | |
35 | #include <linux/mlx5/srq.h> | |
36 | #include <linux/slab.h> | |
37 | #include <rdma/ib_umem.h> | |
43bc8893 | 38 | #include <rdma/ib_user_verbs.h> |
e126ba97 EC |
39 | |
40 | #include "mlx5_ib.h" | |
e126ba97 EC |
41 | |
42 | /* not supported currently */ | |
43 | static int srq_signature; | |
44 | ||
45 | static void *get_wqe(struct mlx5_ib_srq *srq, int n) | |
46 | { | |
47 | return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift); | |
48 | } | |
49 | ||
50 | static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type) | |
51 | { | |
52 | struct ib_event event; | |
53 | struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq; | |
54 | ||
55 | if (ibsrq->event_handler) { | |
56 | event.device = ibsrq->device; | |
57 | event.element.srq = ibsrq; | |
58 | switch (type) { | |
59 | case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: | |
60 | event.event = IB_EVENT_SRQ_LIMIT_REACHED; | |
61 | break; | |
62 | case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: | |
63 | event.event = IB_EVENT_SRQ_ERR; | |
64 | break; | |
65 | default: | |
66 | pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n", | |
67 | type, srq->srqn); | |
68 | return; | |
69 | } | |
70 | ||
71 | ibsrq->event_handler(&event, ibsrq->srq_context); | |
72 | } | |
73 | } | |
74 | ||
/*
 * Set up an SRQ whose buffer and doorbell live in user memory.
 *
 * Copies and validates the user's create command, pins the user buffer,
 * builds the physical address (PAS) array in @in, and maps the user
 * doorbell page.  On success the caller owns in->pas (freed with kvfree
 * after the firmware command) as well as srq->umem and the doorbell
 * mapping (released via destroy_srq_user()).
 *
 * Returns 0 on success or a negative errno.
 */
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			   struct mlx5_srq_attr *in,
			   struct ib_udata *udata, int buf_size)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_srq ucmd = {};
	size_t ucmdlen;
	int err;
	int npages;
	int page_shift;
	int ncont;
	u32 offset;
	u32 uidx = MLX5_IB_DEFAULT_UIDX;

	/* Accept shorter (older ABI) commands; extra bytes are checked below. */
	ucmdlen = min(udata->inlen, sizeof(ucmd));

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
		mlx5_ib_dbg(dev, "failed copy udata\n");
		return -EFAULT;
	}

	/* Reserved fields must be zero for forward compatibility. */
	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	/* Any trailing bytes beyond the known command must also be zero. */
	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EINVAL;

	/* Non-basic SRQ types carry a user index used with CQE version 1. */
	if (in->type != IB_SRQT_BASIC) {
		err = get_srq_user_index(to_mucontext(pd->uobject->context),
					 &ucmd, udata->inlen, &uidx);
		if (err)
			return err;
	}

	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

	/* Pin the user-space SRQ buffer. */
	srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
				0, 0);
	if (IS_ERR(srq->umem)) {
		mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
		err = PTR_ERR(srq->umem);
		return err;
	}

	/* Determine the best page size/contiguity for the pinned buffer. */
	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages,
			   &page_shift, &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
				     &offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	in->pas = kvzalloc(sizeof(*in->pas) * ncont, GFP_KERNEL);
	if (!in->pas) {
		err = -ENOMEM;
		goto err_umem;
	}

	mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);

	/* Map the user doorbell record so the HW can report the SRQ state. */
	err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
				  ucmd.db_addr, &srq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map doorbell failed\n");
		goto err_in;
	}

	in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	in->page_offset = offset;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type != IB_SRQT_BASIC)
		in->user_index = uidx;

	return 0;

err_in:
	kvfree(in->pas);

err_umem:
	ib_umem_release(srq->umem);

	return err;
}
161 | ||
/*
 * Set up an SRQ whose buffer and doorbell are allocated in the kernel.
 *
 * Allocates the doorbell record and the WQE buffer, links every WQE into
 * a free list via next_wqe_index, fills the PAS array in @in, and
 * allocates the wrid[] bookkeeping array.  On success the caller owns
 * in->pas (freed with kvfree after the firmware command); the buffer,
 * doorbell and wrid array are released via destroy_srq_kernel().
 *
 * Returns 0 on success or a negative errno.
 */
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
			     struct mlx5_srq_attr *in, int buf_size)
{
	int err;
	int i;
	struct mlx5_wqe_srq_next_seg *next;

	err = mlx5_db_alloc(dev->mdev, &srq->db);
	if (err) {
		mlx5_ib_warn(dev, "alloc dbell rec failed\n");
		return err;
	}

	if (mlx5_buf_alloc(dev->mdev, buf_size, &srq->buf)) {
		mlx5_ib_dbg(dev, "buf alloc failed\n");
		err = -ENOMEM;
		goto err_db;
	}

	srq->head = 0;
	srq->tail = srq->msrq.max - 1;
	srq->wqe_ctr = 0;

	/* Chain all WQEs into a circular free list (max is a power of two). */
	for (i = 0; i < srq->msrq.max; i++) {
		next = get_wqe(srq, i);
		next->next_wqe_index =
			cpu_to_be16((i + 1) & (srq->msrq.max - 1));
	}

	mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
	in->pas = kvzalloc(sizeof(*in->pas) * srq->buf.npages, GFP_KERNEL);
	if (!in->pas) {
		err = -ENOMEM;
		goto err_buf;
	}
	mlx5_fill_page_array(&srq->buf, in->pas);

	/* Per-WQE work request IDs, indexed by WQE slot. */
	srq->wrid = kvmalloc_array(srq->msrq.max, sizeof(u64), GFP_KERNEL);
	if (!srq->wrid) {
		err = -ENOMEM;
		goto err_in;
	}
	srq->wq_sig = !!srq_signature;

	in->log_page_size = srq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type != IB_SRQT_BASIC)
		in->user_index = MLX5_IB_DEFAULT_UIDX;

	return 0;

err_in:
	kvfree(in->pas);

err_buf:
	mlx5_buf_free(dev->mdev, &srq->buf);

err_db:
	mlx5_db_free(dev->mdev, &srq->db);
	return err;
}
223 | ||
/*
 * Undo create_srq_user(): unmap the user doorbell record, then release
 * the pinned user buffer.  (in->pas is freed separately by the caller.)
 */
static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
{
	mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
	ib_umem_release(srq->umem);
}
229 | ||
230 | ||
/*
 * Undo create_srq_kernel(): free the wrid array, the WQE buffer and the
 * doorbell record.  (in->pas is freed separately by the caller.)
 */
static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
	kvfree(srq->wrid);
	mlx5_buf_free(dev->mdev, &srq->buf);
	mlx5_db_free(dev->mdev, &srq->db);
}
237 | ||
238 | struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, | |
239 | struct ib_srq_init_attr *init_attr, | |
240 | struct ib_udata *udata) | |
241 | { | |
242 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | |
243 | struct mlx5_ib_srq *srq; | |
c2b37f76 BP |
244 | size_t desc_size; |
245 | size_t buf_size; | |
e126ba97 | 246 | int err; |
af1ba291 | 247 | struct mlx5_srq_attr in = {0}; |
938fe83c | 248 | __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); |
e126ba97 EC |
249 | |
250 | /* Sanity check SRQ size before proceeding */ | |
938fe83c | 251 | if (init_attr->attr.max_wr >= max_srq_wqes) { |
e126ba97 EC |
252 | mlx5_ib_dbg(dev, "max_wr %d, cap %d\n", |
253 | init_attr->attr.max_wr, | |
938fe83c | 254 | max_srq_wqes); |
e126ba97 EC |
255 | return ERR_PTR(-EINVAL); |
256 | } | |
257 | ||
258 | srq = kmalloc(sizeof(*srq), GFP_KERNEL); | |
259 | if (!srq) | |
260 | return ERR_PTR(-ENOMEM); | |
261 | ||
262 | mutex_init(&srq->mutex); | |
263 | spin_lock_init(&srq->lock); | |
264 | srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1); | |
265 | srq->msrq.max_gs = init_attr->attr.max_sge; | |
266 | ||
267 | desc_size = sizeof(struct mlx5_wqe_srq_next_seg) + | |
268 | srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); | |
c2b37f76 BP |
269 | if (desc_size == 0 || srq->msrq.max_gs > desc_size) |
270 | return ERR_PTR(-EINVAL); | |
e126ba97 | 271 | desc_size = roundup_pow_of_two(desc_size); |
c2b37f76 BP |
272 | desc_size = max_t(size_t, 32, desc_size); |
273 | if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) | |
274 | return ERR_PTR(-EINVAL); | |
e126ba97 EC |
275 | srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / |
276 | sizeof(struct mlx5_wqe_data_seg); | |
277 | srq->msrq.wqe_shift = ilog2(desc_size); | |
278 | buf_size = srq->msrq.max * desc_size; | |
c2b37f76 BP |
279 | if (buf_size < desc_size) |
280 | return ERR_PTR(-EINVAL); | |
c73b7911 | 281 | in.type = init_attr->srq_type; |
e126ba97 EC |
282 | |
283 | if (pd->uobject) | |
af1ba291 | 284 | err = create_srq_user(pd, srq, &in, udata, buf_size); |
e126ba97 | 285 | else |
af1ba291 | 286 | err = create_srq_kernel(dev, srq, &in, buf_size); |
e126ba97 EC |
287 | |
288 | if (err) { | |
289 | mlx5_ib_warn(dev, "create srq %s failed, err %d\n", | |
290 | pd->uobject ? "user" : "kernel", err); | |
291 | goto err_srq; | |
292 | } | |
293 | ||
af1ba291 AK |
294 | in.log_size = ilog2(srq->msrq.max); |
295 | in.wqe_shift = srq->msrq.wqe_shift - 4; | |
296 | if (srq->wq_sig) | |
297 | in.flags |= MLX5_SRQ_FLAG_WQ_SIG; | |
1a56ff6d AK |
298 | |
299 | if (init_attr->srq_type == IB_SRQT_XRC) | |
af1ba291 | 300 | in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn; |
1a56ff6d | 301 | else |
af1ba291 | 302 | in.xrcd = to_mxrcd(dev->devr.x0)->xrcdn; |
1a56ff6d | 303 | |
3fd3307e AK |
304 | if (init_attr->srq_type == IB_SRQT_TM) { |
305 | in.tm_log_list_size = | |
306 | ilog2(init_attr->ext.tag_matching.max_num_tags) + 1; | |
307 | if (in.tm_log_list_size > | |
308 | MLX5_CAP_GEN(dev->mdev, log_tag_matching_list_sz)) { | |
309 | mlx5_ib_dbg(dev, "TM SRQ max_num_tags exceeding limit\n"); | |
310 | err = -EINVAL; | |
311 | goto err_usr_kern_srq; | |
312 | } | |
313 | in.flags |= MLX5_SRQ_FLAG_RNDV; | |
314 | } | |
315 | ||
1a56ff6d AK |
316 | if (ib_srq_has_cq(init_attr->srq_type)) |
317 | in.cqn = to_mcq(init_attr->ext.cq)->mcq.cqn; | |
318 | else | |
af1ba291 | 319 | in.cqn = to_mcq(dev->devr.c0)->mcq.cqn; |
e126ba97 | 320 | |
af1ba291 AK |
321 | in.pd = to_mpd(pd)->pdn; |
322 | in.db_record = srq->db.dma; | |
323 | err = mlx5_core_create_srq(dev->mdev, &srq->msrq, &in); | |
324 | kvfree(in.pas); | |
e126ba97 EC |
325 | if (err) { |
326 | mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err); | |
56e1ab0f | 327 | goto err_usr_kern_srq; |
e126ba97 EC |
328 | } |
329 | ||
330 | mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn); | |
331 | ||
332 | srq->msrq.event = mlx5_ib_srq_event; | |
333 | srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn; | |
334 | ||
335 | if (pd->uobject) | |
336 | if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) { | |
337 | mlx5_ib_dbg(dev, "copy to user failed\n"); | |
338 | err = -EFAULT; | |
339 | goto err_core; | |
340 | } | |
341 | ||
342 | init_attr->attr.max_wr = srq->msrq.max - 1; | |
343 | ||
344 | return &srq->ibsrq; | |
345 | ||
346 | err_core: | |
9603b61d | 347 | mlx5_core_destroy_srq(dev->mdev, &srq->msrq); |
56e1ab0f ML |
348 | |
349 | err_usr_kern_srq: | |
e126ba97 EC |
350 | if (pd->uobject) |
351 | destroy_srq_user(pd, srq); | |
352 | else | |
353 | destroy_srq_kernel(dev, srq); | |
354 | ||
355 | err_srq: | |
356 | kfree(srq); | |
357 | ||
358 | return ERR_PTR(err); | |
359 | } | |
360 | ||
361 | int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | |
362 | enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) | |
363 | { | |
364 | struct mlx5_ib_dev *dev = to_mdev(ibsrq->device); | |
365 | struct mlx5_ib_srq *srq = to_msrq(ibsrq); | |
366 | int ret; | |
367 | ||
368 | /* We don't support resizing SRQs yet */ | |
369 | if (attr_mask & IB_SRQ_MAX_WR) | |
370 | return -EINVAL; | |
371 | ||
372 | if (attr_mask & IB_SRQ_LIMIT) { | |
373 | if (attr->srq_limit >= srq->msrq.max) | |
374 | return -EINVAL; | |
375 | ||
376 | mutex_lock(&srq->mutex); | |
9603b61d | 377 | ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1); |
e126ba97 EC |
378 | mutex_unlock(&srq->mutex); |
379 | ||
380 | if (ret) | |
381 | return ret; | |
382 | } | |
383 | ||
384 | return 0; | |
385 | } | |
386 | ||
387 | int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) | |
388 | { | |
389 | struct mlx5_ib_dev *dev = to_mdev(ibsrq->device); | |
390 | struct mlx5_ib_srq *srq = to_msrq(ibsrq); | |
391 | int ret; | |
af1ba291 | 392 | struct mlx5_srq_attr *out; |
e126ba97 EC |
393 | |
394 | out = kzalloc(sizeof(*out), GFP_KERNEL); | |
395 | if (!out) | |
396 | return -ENOMEM; | |
397 | ||
9603b61d | 398 | ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out); |
e126ba97 EC |
399 | if (ret) |
400 | goto out_box; | |
401 | ||
af1ba291 | 402 | srq_attr->srq_limit = out->lwm; |
e126ba97 EC |
403 | srq_attr->max_wr = srq->msrq.max - 1; |
404 | srq_attr->max_sge = srq->msrq.max_gs; | |
405 | ||
406 | out_box: | |
407 | kfree(out); | |
408 | return ret; | |
409 | } | |
410 | ||
411 | int mlx5_ib_destroy_srq(struct ib_srq *srq) | |
412 | { | |
413 | struct mlx5_ib_dev *dev = to_mdev(srq->device); | |
414 | struct mlx5_ib_srq *msrq = to_msrq(srq); | |
415 | ||
9603b61d | 416 | mlx5_core_destroy_srq(dev->mdev, &msrq->msrq); |
e126ba97 EC |
417 | |
418 | if (srq->uobject) { | |
419 | mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db); | |
420 | ib_umem_release(msrq->umem); | |
421 | } else { | |
1faacf82 | 422 | destroy_srq_kernel(dev, msrq); |
e126ba97 EC |
423 | } |
424 | ||
425 | kfree(srq); | |
426 | return 0; | |
427 | } | |
428 | ||
429 | void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index) | |
430 | { | |
431 | struct mlx5_wqe_srq_next_seg *next; | |
432 | ||
433 | /* always called with interrupts disabled. */ | |
434 | spin_lock(&srq->lock); | |
435 | ||
436 | next = get_wqe(srq, srq->tail); | |
437 | next->next_wqe_index = cpu_to_be16(wqe_index); | |
438 | srq->tail = wqe_index; | |
439 | ||
440 | spin_unlock(&srq->lock); | |
441 | } | |
442 | ||
/*
 * Post a chain of receive work requests to the SRQ.
 *
 * Takes WQEs from the head of the free list, fills in the scatter list,
 * and rings the doorbell once for the whole chain.  On failure *bad_wr
 * points at the first WR that could not be posted; WRs before it were
 * posted successfully.
 */
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	struct mlx5_wqe_srq_next_seg *next;
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	/* Refuse to touch the hardware while it is in internal error. */
	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		goto out;
	}

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		/* head == tail means the free list is exhausted. */
		if (unlikely(srq->head == srq->tail)) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		/* Pop the head WQE off the free list. */
		next = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat = (struct mlx5_wqe_data_seg *)(next + 1);

		for (i = 0; i < wr->num_sge; i++) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
		}

		/* Terminate a short scatter list with an invalid-lkey entry. */
		if (i < srq->msrq.max_avail_gather) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}
out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}