/*
 * Copyright (c) 2016-2017 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/page.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "pvrdma.h"

int pvrdma_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
                         const struct ib_recv_wr **bad_wr)
{
        /* No support for kernel clients. */
        return -EOPNOTSUPP;
}

/**
 * pvrdma_query_srq - query shared receive queue
 * @ibsrq: the shared receive queue to query
 * @srq_attr: attributes to query and return to client
 *
 * @return: 0 for success, otherwise returns an errno.
 */
int pvrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
        struct pvrdma_dev *dev = to_vdev(ibsrq->device);
        struct pvrdma_srq *srq = to_vsrq(ibsrq);
        union pvrdma_cmd_req req;
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_query_srq *cmd = &req.query_srq;
        struct pvrdma_cmd_query_srq_resp *resp = &rsp.query_srq_resp;
        int ret;

        memset(cmd, 0, sizeof(*cmd));
        cmd->hdr.cmd = PVRDMA_CMD_QUERY_SRQ;
        cmd->srq_handle = srq->srq_handle;

        ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_SRQ_RESP);
        if (ret < 0) {
                dev_warn(&dev->pdev->dev,
                         "could not query shared receive queue, error: %d\n",
                         ret);
                return -EINVAL;
        }

        srq_attr->srq_limit = resp->attrs.srq_limit;
        srq_attr->max_wr = resp->attrs.max_wr;
        srq_attr->max_sge = resp->attrs.max_sge;

        return 0;
}

/**
 * pvrdma_create_srq - create shared receive queue
 * @pd: protection domain
 * @init_attr: shared receive queue attributes
 * @udata: user data
 *
 * @return: the ib_srq pointer on success, otherwise returns an errno.
 */
struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
                                 struct ib_srq_init_attr *init_attr,
                                 struct ib_udata *udata)
{
        struct pvrdma_srq *srq = NULL;
        struct pvrdma_dev *dev = to_vdev(pd->device);
        union pvrdma_cmd_req req;
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
        struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
        struct pvrdma_create_srq_resp srq_resp = {0};
        struct pvrdma_create_srq ucmd;
        unsigned long flags;
        int ret;

        if (!(pd->uobject && udata)) {
                /* No support for kernel clients. */
                dev_warn(&dev->pdev->dev,
                         "no shared receive queue support for kernel client\n");
                return ERR_PTR(-EOPNOTSUPP);
        }

        if (init_attr->srq_type != IB_SRQT_BASIC) {
                dev_warn(&dev->pdev->dev,
                         "shared receive queue type %d not supported\n",
                         init_attr->srq_type);
                return ERR_PTR(-EINVAL);
        }

        if (init_attr->attr.max_wr > dev->dsr->caps.max_srq_wr ||
            init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
                dev_warn(&dev->pdev->dev,
                         "shared receive queue size invalid\n");
                return ERR_PTR(-EINVAL);
        }

        if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
                return ERR_PTR(-ENOMEM);

        srq = kmalloc(sizeof(*srq), GFP_KERNEL);
        if (!srq) {
                ret = -ENOMEM;
                goto err_srq;
        }

        spin_lock_init(&srq->lock);
        refcount_set(&srq->refcnt, 1);
        init_completion(&srq->free);

        dev_dbg(&dev->pdev->dev,
                "create shared receive queue from user space\n");

        if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                ret = -EFAULT;
                goto err_srq;
        }

        srq->umem = ib_umem_get(pd->uobject->context,
                                ucmd.buf_addr,
                                ucmd.buf_size, 0, 0);
        if (IS_ERR(srq->umem)) {
                ret = PTR_ERR(srq->umem);
                goto err_srq;
        }

        srq->npages = ib_umem_page_count(srq->umem);

        if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
                dev_warn(&dev->pdev->dev,
                         "overflow pages in shared receive queue\n");
                ret = -EINVAL;
                goto err_umem;
        }

        ret = pvrdma_page_dir_init(dev, &srq->pdir, srq->npages, false);
        if (ret) {
                dev_warn(&dev->pdev->dev,
                         "could not allocate page directory\n");
                goto err_umem;
        }

        pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0);

        memset(cmd, 0, sizeof(*cmd));
        cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
        cmd->srq_type = init_attr->srq_type;
        cmd->nchunks = srq->npages;
        cmd->pd_handle = to_vpd(pd)->pd_handle;
        cmd->attrs.max_wr = init_attr->attr.max_wr;
        cmd->attrs.max_sge = init_attr->attr.max_sge;
        cmd->attrs.srq_limit = init_attr->attr.srq_limit;
        cmd->pdir_dma = srq->pdir.dir_dma;

        ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_SRQ_RESP);
        if (ret < 0) {
                dev_warn(&dev->pdev->dev,
                         "could not create shared receive queue, error: %d\n",
                         ret);
                goto err_page_dir;
        }

        srq->srq_handle = resp->srqn;
        srq_resp.srqn = resp->srqn;
        spin_lock_irqsave(&dev->srq_tbl_lock, flags);
        dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
        spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

        /* Copy udata back. */
        if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
                dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
                pvrdma_destroy_srq(&srq->ibsrq);
                return ERR_PTR(-EINVAL);
        }

        return &srq->ibsrq;

err_page_dir:
        pvrdma_page_dir_cleanup(dev, &srq->pdir);
err_umem:
        ib_umem_release(srq->umem);
err_srq:
        kfree(srq);
        atomic_dec(&dev->num_srqs);

        return ERR_PTR(ret);
}

static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->srq_tbl_lock, flags);
        dev->srq_tbl[srq->srq_handle] = NULL;
        spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

        if (refcount_dec_and_test(&srq->refcnt))
                complete(&srq->free);
        wait_for_completion(&srq->free);

        /* There is no support for kernel clients, so this is safe. */
        ib_umem_release(srq->umem);

        pvrdma_page_dir_cleanup(dev, &srq->pdir);

        kfree(srq);

        atomic_dec(&dev->num_srqs);
}

/**
 * pvrdma_destroy_srq - destroy shared receive queue
 * @srq: the shared receive queue to destroy
 *
 * @return: 0 for success.
 */
int pvrdma_destroy_srq(struct ib_srq *srq)
{
        struct pvrdma_srq *vsrq = to_vsrq(srq);
        union pvrdma_cmd_req req;
        struct pvrdma_cmd_destroy_srq *cmd = &req.destroy_srq;
        struct pvrdma_dev *dev = to_vdev(srq->device);
        int ret;

        memset(cmd, 0, sizeof(*cmd));
        cmd->hdr.cmd = PVRDMA_CMD_DESTROY_SRQ;
        cmd->srq_handle = vsrq->srq_handle;

        ret = pvrdma_cmd_post(dev, &req, NULL, 0);
        if (ret < 0)
                dev_warn(&dev->pdev->dev,
                         "destroy shared receive queue failed, error: %d\n",
                         ret);

        pvrdma_free_srq(dev, vsrq);

        return 0;
}

/**
 * pvrdma_modify_srq - modify shared receive queue attributes
 * @ibsrq: the shared receive queue to modify
 * @attr: the shared receive queue's new attributes
 * @attr_mask: attributes mask
 * @udata: user data
 *
 * @return: 0 on success, otherwise returns an errno.
 */
int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                      enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
        struct pvrdma_srq *vsrq = to_vsrq(ibsrq);
        union pvrdma_cmd_req req;
        struct pvrdma_cmd_modify_srq *cmd = &req.modify_srq;
        struct pvrdma_dev *dev = to_vdev(ibsrq->device);
        int ret;

        /* Only support SRQ limit. */
        if (!(attr_mask & IB_SRQ_LIMIT))
                return -EINVAL;

        memset(cmd, 0, sizeof(*cmd));
        cmd->hdr.cmd = PVRDMA_CMD_MODIFY_SRQ;
        cmd->srq_handle = vsrq->srq_handle;
        cmd->attrs.srq_limit = attr->srq_limit;
        cmd->attr_mask = attr_mask;

        ret = pvrdma_cmd_post(dev, &req, NULL, 0);
        if (ret < 0) {
                dev_warn(&dev->pdev->dev,
                         "could not modify shared receive queue, error: %d\n",
                         ret);

                return -EINVAL;
        }

        return ret;
}
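
/*
 * Illustrative sketch (not part of the driver): since only userspace
 * clients are supported, these verbs are normally reached through
 * libibverbs rather than called directly. Assuming an already-created
 * protection domain "pd", a client might drive the SRQ lifecycle
 * roughly as follows:
 *
 *	struct ibv_srq_init_attr init_attr = {
 *		.attr = { .max_wr = 64, .max_sge = 1, .srq_limit = 0 },
 *	};
 *	struct ibv_srq *srq = ibv_create_srq(pd, &init_attr);
 *
 *	// Arm the SRQ limit event once fewer than 16 WRs remain.
 *	struct ibv_srq_attr attr = { .srq_limit = 16 };
 *	ibv_modify_srq(srq, &attr, IBV_SRQ_LIMIT);
 *
 *	ibv_query_srq(srq, &attr);
 *	ibv_destroy_srq(srq);
 */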