Commit | Line | Data |
---|---|---|
aad9158b | 1 | /* |
fe314195 | 2 | * Copyright(c) 2016 Intel Corporation. |
aad9158b DD |
3 | * |
4 | * This file is provided under a dual BSD/GPLv2 license. When using or | |
5 | * redistributing this file, you may do so under either license. | |
6 | * | |
7 | * GPL LICENSE SUMMARY | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of version 2 of the GNU General Public License as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, but | |
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
16 | * General Public License for more details. | |
17 | * | |
18 | * BSD LICENSE | |
19 | * | |
20 | * Redistribution and use in source and binary forms, with or without | |
21 | * modification, are permitted provided that the following conditions | |
22 | * are met: | |
23 | * | |
24 | * - Redistributions of source code must retain the above copyright | |
25 | * notice, this list of conditions and the following disclaimer. | |
26 | * - Redistributions in binary form must reproduce the above copyright | |
27 | * notice, this list of conditions and the following disclaimer in | |
28 | * the documentation and/or other materials provided with the | |
29 | * distribution. | |
30 | * - Neither the name of Intel Corporation nor the names of its | |
31 | * contributors may be used to endorse or promote products derived | |
32 | * from this software without specific prior written permission. | |
33 | * | |
34 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
35 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
36 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
37 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
38 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
39 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
40 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
41 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
42 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
43 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
44 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
45 | * | |
46 | */ | |
47 | ||
b8f881b9 JJ |
48 | #include <linux/err.h> |
49 | #include <linux/slab.h> | |
50 | #include <linux/vmalloc.h> | |
51 | ||
aad9158b DD |
52 | #include "srq.h" |
53 | ||
b8f881b9 JJ |
54 | /* |
55 | * Do any initialization needed when a driver registers with rdmavt. | |
56 | */ | |
57 | void rvt_driver_srq_init(struct rvt_dev_info *rdi) | |
58 | { | |
59 | spin_lock_init(&rdi->n_srqs_lock); | |
60 | rdi->n_srqs_allocated = 0; | |
61 | } | |
62 | ||
/**
 * rvt_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 *
 * Return: the new &ib_srq on success, or an ERR_PTR():
 * -ENOSYS for SRQ types other than IB_SRQT_BASIC, -EINVAL for
 * attribute values outside the device limits, -ENOMEM when an
 * allocation fails or the per-device SRQ limit is reached.
 */
struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
			      struct ib_srq_init_attr *srq_init_attr,
			      struct ib_udata *udata)
{
	struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
	struct rvt_srq *srq;
	u32 sz;
	struct ib_srq *ret;

	/* rdmavt only implements basic SRQs. */
	if (srq_init_attr->srq_type != IB_SRQT_BASIC)
		return ERR_PTR(-ENOSYS);

	/* Reject zero or over-limit WR/SGE counts up front. */
	if (srq_init_attr->attr.max_sge == 0 ||
	    srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
	    srq_init_attr->attr.max_wr == 0 ||
	    srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
		return ERR_PTR(-EINVAL);

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 * One extra ring slot distinguishes a full queue from an empty one.
	 * Each WQE is a fixed header plus max_sge inline SGEs.
	 */
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct rvt_rwqe);
	srq->rq.wq = vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz);
	if (!srq->rq.wq) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_srq;
	}

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;
		u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;

		srq->ip =
			rvt_create_mmap_info(dev, s, ibpd->uobject->context,
					     srq->rq.wq);
		if (!srq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wq;
		}

		err = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else {
		srq->ip = NULL;
	}

	/*
	 * ib_create_srq() will initialize srq->ibsrq.
	 */
	spin_lock_init(&srq->rq.lock);
	srq->rq.wq->head = 0;
	srq->rq.wq->tail = 0;
	srq->limit = srq_init_attr->attr.srq_limit;

	/* Enforce the per-device SRQ count limit under the lock. */
	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
		spin_unlock(&dev->n_srqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

	/* Publish the mmap info so the user can map the RWQ. */
	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	return &srq->ibsrq;

bail_ip:
	/* Not yet on the pending list, so a plain kfree is sufficient. */
	kfree(srq->ip);
bail_wq:
	vfree(srq->rq.wq);
bail_srq:
	kfree(srq);
	return ret;
}
164 | ||
/**
 * rvt_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Resizing (IB_SRQ_MAX_WR) allocates a new ring, copies the pending
 * WQEs across under the queue lock, publishes the new ring, then
 * frees the old one and refreshes the user mmap info if any.
 * IB_SRQ_LIMIT alone just range-checks and stores the new limit.
 *
 * Return: 0 on success or a negative errno.
 */
int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
	struct rvt_rwq *wq;
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct rvt_rwq *owq;
		struct rvt_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > dev->dparms.props.max_srq_wr) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr)
			return -EINVAL;

		/*
		 * Per-WQE size: header plus the (unchanged) SGE array.
		 * One extra ring slot distinguishes full from empty.
		 */
		sz = sizeof(struct rvt_rwqe) +
		     srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
		wq = vmalloc_user(sizeof(struct rvt_rwq) + size * sz);
		if (!wq)
			return -ENOMEM;

		/* Check that we can write the offset to mmap. */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = 0;

			/*
			 * Userspace passes in the address where the new
			 * mmap offset should be stored; retarget the out
			 * buffer at it and probe it with a zero write
			 * before committing to the resize.
			 */
			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret)
				goto bail_free;
			udata->outbuf = (void __user *)
					(unsigned long)offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret)
				goto bail_free;
		}

		spin_lock_irq(&srq->rq.lock);
		/*
		 * validate head and tail pointer values and compute
		 * the number of remaining WQEs.
		 */
		owq = srq->rq.wq;
		head = owq->head;
		tail = owq->tail;
		if (head >= srq->rq.size || tail >= srq->rq.size) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
		/* The new ring must hold all n pending WQEs. */
		if (size <= n) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		/* Copy the pending WQEs to the start of the new ring. */
		n = 0;
		p = wq->wq;
		while (tail != head) {
			struct rvt_rwqe *wqe;
			int i;

			wqe = rvt_get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			/* WQEs are sz bytes apart; step manually. */
			p = (struct rvt_rwqe *)((char *)p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
		srq->rq.wq = wq;
		srq->rq.size = size;
		wq->head = n;
		wq->tail = 0;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);

		vfree(owq);

		if (srq->ip) {
			struct rvt_mmap_info *ip = srq->ip;
			struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device);
			u32 s = sizeof(struct rvt_rwq) + size * sz;

			rvt_update_mmap_info(dev, ip, s, wq);

			/*
			 * Return the offset to mmap.
			 * See rvt_mmap() for details.
			 */
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					return ret;
			}

			/*
			 * Put user mapping info onto the pending list
			 * unless it already is on the list.
			 */
			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);
	}
	return ret;

bail_unlock:
	spin_unlock_irq(&srq->rq.lock);
bail_free:
	vfree(wq);
	return ret;
}
307 | ||
308 | int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) | |
309 | { | |
b8f881b9 JJ |
310 | struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); |
311 | ||
312 | attr->max_wr = srq->rq.size - 1; | |
313 | attr->max_sge = srq->rq.max_sge; | |
314 | attr->srq_limit = srq->limit; | |
315 | return 0; | |
aad9158b DD |
316 | } |
317 | ||
318 | int rvt_destroy_srq(struct ib_srq *ibsrq) | |
319 | { | |
b8f881b9 JJ |
320 | struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); |
321 | struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device); | |
aad9158b | 322 | |
b8f881b9 JJ |
323 | spin_lock(&dev->n_srqs_lock); |
324 | dev->n_srqs_allocated--; | |
325 | spin_unlock(&dev->n_srqs_lock); | |
326 | if (srq->ip) | |
327 | kref_put(&srq->ip->ref, rvt_release_mmap_info); | |
328 | else | |
329 | vfree(srq->rq.wq); | |
330 | kfree(srq); | |
331 | ||
332 | return 0; | |
333 | } |