/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)                             \
        do {                                                    \
                dev_err(&(_vq)->vq.vdev->dev,                   \
                        "%s:"fmt, (_vq)->vq.name, ##args);      \
                BUG();                                          \
        } while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)                                          \
        do {                                                    \
                if ((_vq)->in_use)                              \
                        panic("%s:in_use = %i\n",               \
                              (_vq)->vq.name, (_vq)->in_use);   \
                (_vq)->in_use = __LINE__;                       \
        } while (0)
#define END_USE(_vq) \
        do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)                             \
        do {                                                    \
                dev_err(&_vq->vq.vdev->dev,                     \
                        "%s:"fmt, (_vq)->vq.name, ##args);      \
                (_vq)->broken = true;                           \
        } while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
        struct virtqueue vq;

        /* Actual memory layout for this queue */
        struct vring vring;

        /* Can we use weak barriers? */
        bool weak_barriers;

        /* Other side has made a mess, don't try any more. */
        bool broken;

        /* Host supports indirect buffers */
        bool indirect;

        /* Host publishes avail event idx */
        bool event;

        /* Head of free buffer list. */
        unsigned int free_head;
        /* Number we've added since last sync. */
        unsigned int num_added;

        /* Last used index we've seen. */
        u16 last_used_idx;

        /* How to notify other side. FIXME: commonalize hcalls! */
        bool (*notify)(struct virtqueue *vq);

#ifdef DEBUG
        /* They're supposed to lock for us. */
        unsigned int in_use;

        /* Figure out if their kicks are too delayed. */
        bool last_add_time_valid;
        ktime_t last_add_time;
#endif

        /* Tokens for callbacks. */
        void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline struct scatterlist *sg_next_chained(struct scatterlist *sg,
                                                  unsigned int *count)
{
        return sg_next(sg);
}

static inline struct scatterlist *sg_next_arr(struct scatterlist *sg,
                                              unsigned int *count)
{
        if (--(*count) == 0)
                return NULL;
        return sg + 1;
}

/* Set up an indirect table of descriptors and add it to the queue. */
static inline int vring_add_indirect(struct vring_virtqueue *vq,
                                     struct scatterlist *sgs[],
                                     struct scatterlist *(*next)
                                       (struct scatterlist *, unsigned int *),
                                     unsigned int total_sg,
                                     unsigned int total_out,
                                     unsigned int total_in,
                                     unsigned int out_sgs,
                                     unsigned int in_sgs,
                                     gfp_t gfp)
{
        struct vring_desc *desc;
        unsigned head;
        struct scatterlist *sg;
        int i, n;

        /*
         * We require lowmem mappings for the descriptors because
         * otherwise virt_to_phys will give us bogus addresses in the
         * virtqueue.
         */
        gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

        desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
        if (!desc)
                return -ENOMEM;

        /* Transfer entries from the sg lists into the indirect page */
        i = 0;
        for (n = 0; n < out_sgs; n++) {
                for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
                        desc[i].flags = VRING_DESC_F_NEXT;
                        desc[i].addr = sg_phys(sg);
                        desc[i].len = sg->length;
                        desc[i].next = i+1;
                        i++;
                }
        }
        for (; n < (out_sgs + in_sgs); n++) {
                for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
                        desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
                        desc[i].addr = sg_phys(sg);
                        desc[i].len = sg->length;
                        desc[i].next = i+1;
                        i++;
                }
        }
        BUG_ON(i != total_sg);

        /* Last one doesn't continue. */
        desc[i-1].flags &= ~VRING_DESC_F_NEXT;
        desc[i-1].next = 0;

        /* We're about to use a buffer */
        vq->vq.num_free--;

        /* Use a single buffer which doesn't continue */
        head = vq->free_head;
        vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
        vq->vring.desc[head].addr = virt_to_phys(desc);
        /* kmemleak gives a false positive, as it's hidden by virt_to_phys */
        kmemleak_ignore(desc);
        vq->vring.desc[head].len = i * sizeof(struct vring_desc);

        /* Update free pointer */
        vq->free_head = vq->vring.desc[head].next;

        return head;
}

static inline int virtqueue_add(struct virtqueue *_vq,
                                struct scatterlist *sgs[],
                                struct scatterlist *(*next)
                                  (struct scatterlist *, unsigned int *),
                                unsigned int total_out,
                                unsigned int total_in,
                                unsigned int out_sgs,
                                unsigned int in_sgs,
                                void *data,
                                gfp_t gfp)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        struct scatterlist *sg;
        unsigned int i, n, avail, uninitialized_var(prev), total_sg;
        int head;

        START_USE(vq);

        BUG_ON(data == NULL);

        if (unlikely(vq->broken)) {
                END_USE(vq);
                return -EIO;
        }

#ifdef DEBUG
        {
                ktime_t now = ktime_get();

                /* No kick or get, with .1 second between?  Warn. */
                if (vq->last_add_time_valid)
                        WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
                                            > 100);
                vq->last_add_time = now;
                vq->last_add_time_valid = true;
        }
#endif

        total_sg = total_in + total_out;

        /* If the host supports indirect descriptor tables, and we have multiple
         * buffers, then go indirect. FIXME: tune this threshold */
        if (vq->indirect && total_sg > 1 && vq->vq.num_free) {
                head = vring_add_indirect(vq, sgs, next, total_sg, total_out,
                                          total_in,
                                          out_sgs, in_sgs, gfp);
                if (likely(head >= 0))
                        goto add_head;
        }

        BUG_ON(total_sg > vq->vring.num);
        BUG_ON(total_sg == 0);

        if (vq->vq.num_free < total_sg) {
                pr_debug("Can't add buf len %i - avail = %i\n",
                         total_sg, vq->vq.num_free);
                /* FIXME: for historical reasons, we force a notify here if
                 * there are outgoing parts to the buffer.  Presumably the
                 * host should service the ring ASAP. */
                if (out_sgs)
                        vq->notify(&vq->vq);
                END_USE(vq);
                return -ENOSPC;
        }

        /* We're about to use some buffers from the free list. */
        vq->vq.num_free -= total_sg;

        head = i = vq->free_head;
        for (n = 0; n < out_sgs; n++) {
                for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
                        vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
                        vq->vring.desc[i].addr = sg_phys(sg);
                        vq->vring.desc[i].len = sg->length;
                        prev = i;
                        i = vq->vring.desc[i].next;
                }
        }
        for (; n < (out_sgs + in_sgs); n++) {
                for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
                        vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
                        vq->vring.desc[i].addr = sg_phys(sg);
                        vq->vring.desc[i].len = sg->length;
                        prev = i;
                        i = vq->vring.desc[i].next;
                }
        }
        /* Last one doesn't continue. */
        vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

        /* Update free pointer */
        vq->free_head = i;

add_head:
        /* Set token. */
        vq->data[head] = data;

        /* Put entry in available array (but don't update avail->idx until they
         * do sync). */
        avail = (vq->vring.avail->idx & (vq->vring.num-1));
        vq->vring.avail->ring[avail] = head;

        /* Descriptors and available array need to be set before we expose the
         * new available array entries. */
        virtio_wmb(vq->weak_barriers);
        vq->vring.avail->idx++;
        vq->num_added++;

        /* This is very unlikely, but theoretically possible.  Kick
         * just in case. */
        if (unlikely(vq->num_added == (1 << 16) - 1))
                virtqueue_kick(_vq);

        pr_debug("Added buffer head %i to %p\n", head, vq);
        END_USE(vq);

        return 0;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
                      struct scatterlist *sgs[],
                      unsigned int out_sgs,
                      unsigned int in_sgs,
                      void *data,
                      gfp_t gfp)
{
        unsigned int i, total_out, total_in;

        /* Count them first. */
        for (i = total_out = total_in = 0; i < out_sgs; i++) {
                struct scatterlist *sg;
                for (sg = sgs[i]; sg; sg = sg_next(sg))
                        total_out++;
        }
        for (; i < out_sgs + in_sgs; i++) {
                struct scatterlist *sg;
                for (sg = sgs[i]; sg; sg = sg_next(sg))
                        total_in++;
        }
        return virtqueue_add(_vq, sgs, sg_next_chained,
                             total_out, total_in, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
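
/*
 * Example (illustrative sketch, not part of this file): how a driver
 * might queue a two-part request with virtqueue_add_sgs().  "vq", "req"
 * and its fields are hypothetical; a real caller would also handle
 * -ENOSPC.
 *
 *      struct scatterlist hdr, status;
 *      struct scatterlist *sgs[] = { &hdr, &status };
 *      int err;
 *
 *      sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *      sg_init_one(&status, &req->status, sizeof(req->status));
 *
 *      // One readable list, then one writable list; "req" is the token
 *      // later returned by virtqueue_get_buf().
 *      err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *      if (!err)
 *              virtqueue_kick(vq);
 */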

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: array of scatterlist entries (need not be terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
                         struct scatterlist sg[], unsigned int num,
                         void *data,
                         gfp_t gfp)
{
        return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: array of scatterlist entries (need not be terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
                        struct scatterlist sg[], unsigned int num,
                        void *data,
                        gfp_t gfp)
{
        return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
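
/*
 * Example (illustrative sketch, not part of this file): posting a
 * receive buffer with virtqueue_add_inbuf(), as a network-style driver
 * might.  "vq", "buf" and "buf_len" are hypothetical.
 *
 *      struct scatterlist sg;
 *
 *      sg_init_one(&sg, buf, buf_len);
 *      if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) == 0)
 *              virtqueue_kick(vq);
 */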

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *      if (virtqueue_kick_prepare(vq))
 *              virtqueue_notify(vq);
 *
 * This is sometimes useful because virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        u16 new, old;
        bool needs_kick;

        START_USE(vq);
        /* We need to expose available array entries before checking avail
         * event. */
        virtio_mb(vq->weak_barriers);

        old = vq->vring.avail->idx - vq->num_added;
        new = vq->vring.avail->idx;
        vq->num_added = 0;

#ifdef DEBUG
        if (vq->last_add_time_valid) {
                WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
                                              vq->last_add_time)) > 100);
        }
        vq->last_add_time_valid = false;
#endif

        if (vq->event) {
                needs_kick = vring_need_event(vring_avail_event(&vq->vring),
                                              new, old);
        } else {
                needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
        }
        END_USE(vq);
        return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
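
/*
 * Example (illustrative sketch, not part of this file): the
 * prepare/notify split lets a driver drop its lock around the possibly
 * expensive exit to the host.  "lock", "sg", "nents" and "buf" are
 * hypothetical.
 *
 *      unsigned long flags;
 *      bool kick;
 *      int err;
 *
 *      spin_lock_irqsave(&lock, flags);
 *      err = virtqueue_add_outbuf(vq, sg, nents, buf, GFP_ATOMIC);
 *      kick = virtqueue_kick_prepare(vq);
 *      spin_unlock_irqrestore(&lock, flags);
 *
 *      if (kick)
 *              virtqueue_notify(vq);
 */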

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (unlikely(vq->broken))
                return false;

        /* Prod other side to tell it about changes. */
        if (!vq->notify(_vq)) {
                vq->broken = true;
                return false;
        }
        return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
        if (virtqueue_kick_prepare(vq))
                return virtqueue_notify(vq);
        return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
        unsigned int i;

        /* Clear data ptr. */
        vq->data[head] = NULL;

        /* Put back on free list: find end */
        i = head;

        /* Free the indirect table */
        if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
                kfree(phys_to_virt(vq->vring.desc[i].addr));

        while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
                i = vq->vring.desc[i].next;
                vq->vq.num_free++;
        }

        vq->vring.desc[i].next = vq->free_head;
        vq->free_head = head;
        /* Plus final descriptor */
        vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
        return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        void *ret;
        unsigned int i;
        u16 last_used;

        START_USE(vq);

        if (unlikely(vq->broken)) {
                END_USE(vq);
                return NULL;
        }

        if (!more_used(vq)) {
                pr_debug("No more buffers in queue\n");
                END_USE(vq);
                return NULL;
        }

        /* Only get used array entries after they have been exposed by host. */
        virtio_rmb(vq->weak_barriers);

        last_used = (vq->last_used_idx & (vq->vring.num - 1));
        i = vq->vring.used->ring[last_used].id;
        *len = vq->vring.used->ring[last_used].len;

        if (unlikely(i >= vq->vring.num)) {
                BAD_RING(vq, "id %u out of range\n", i);
                return NULL;
        }
        if (unlikely(!vq->data[i])) {
                BAD_RING(vq, "id %u is not a head!\n", i);
                return NULL;
        }

        /* detach_buf clears data, so grab it now. */
        ret = vq->data[i];
        detach_buf(vq, i);
        vq->last_used_idx++;
        /* If we expect an interrupt for the next entry, tell host
         * by writing event index and flush out the write before
         * the read in the next get_buf call. */
        if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
                vring_used_event(&vq->vring) = vq->last_used_idx;
                virtio_mb(vq->weak_barriers);
        }

#ifdef DEBUG
        vq->last_add_time_valid = false;
#endif

        END_USE(vq);
        return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        u16 last_used_idx;

        START_USE(vq);

        /* We optimistically turn back on interrupts, then check if there was
         * more to do. */
        /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
         * either clear the flags bit or point the event index at the next
         * entry. Always do both to keep code simple. */
        vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
        vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
        END_USE(vq);
        return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        virtio_mb(vq->weak_barriers);
        return (u16)last_used_idx != vq->vring.used->idx;
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
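
/*
 * Example (illustrative sketch, not part of this file): closing the
 * race between "no more work" and re-enabling callbacks, NAPI-style.
 * "process_used" is a hypothetical driver routine.
 *
 *      unsigned opaque;
 *
 *      virtqueue_disable_cb(vq);
 *      for (;;) {
 *              process_used(vq);
 *              opaque = virtqueue_enable_cb_prepare(vq);
 *              if (!virtqueue_poll(vq, opaque))
 *                      break;
 *              // Raced: new buffers arrived in the window; go again.
 *              virtqueue_disable_cb(vq);
 *      }
 */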

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
        unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
        return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        u16 bufs;

        START_USE(vq);

        /* We optimistically turn back on interrupts, then check if there was
         * more to do. */
        /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
         * either clear the flags bit or point the event index at the next
         * entry. Always do both to keep code simple. */
        vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
        /* TODO: tune this threshold */
        bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
        vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
        virtio_mb(vq->weak_barriers);
        if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
                END_USE(vq);
                return false;
        }

        END_USE(vq);
        return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
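
/*
 * Example (illustrative sketch, not part of this file): a transmit path
 * might use the delayed variant to take one interrupt per batch of
 * completions rather than one per buffer.  "free_old_xmit" is a
 * hypothetical driver routine.
 *
 *      if (!virtqueue_enable_cb_delayed(vq)) {
 *              // Many buffers were already pending; process them now.
 *              virtqueue_disable_cb(vq);
 *              free_old_xmit(vq);
 *      }
 */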

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        unsigned int i;
        void *buf;

        START_USE(vq);

        for (i = 0; i < vq->vring.num; i++) {
                if (!vq->data[i])
                        continue;
                /* detach_buf clears data, so grab it now. */
                buf = vq->data[i];
                detach_buf(vq, i);
                vq->vring.avail->idx--;
                END_USE(vq);
                return buf;
        }
        /* That should have freed everything. */
        BUG_ON(vq->vq.num_free != vq->vring.num);

        END_USE(vq);
        return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

irqreturn_t vring_interrupt(int irq, void *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (!more_used(vq)) {
                pr_debug("virtqueue interrupt with no work for %p\n", vq);
                return IRQ_NONE;
        }

        if (unlikely(vq->broken))
                return IRQ_HANDLED;

        pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
        if (vq->vq.callback)
                vq->vq.callback(&vq->vq);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
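
/*
 * Example (illustrative sketch, not part of this file): a transport
 * wires this handler to its interrupt line, passing the virtqueue as
 * the dev_id cookie.  "irq" and "name" are hypothetical.
 *
 *      err = request_irq(irq, vring_interrupt, IRQF_SHARED, name, vq);
 */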

struct virtqueue *vring_new_virtqueue(unsigned int index,
                                      unsigned int num,
                                      unsigned int vring_align,
                                      struct virtio_device *vdev,
                                      bool weak_barriers,
                                      void *pages,
                                      bool (*notify)(struct virtqueue *),
                                      void (*callback)(struct virtqueue *),
                                      const char *name)
{
        struct vring_virtqueue *vq;
        unsigned int i;

        /* We assume num is a power of 2. */
        if (num & (num - 1)) {
                dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
                return NULL;
        }

        vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
        if (!vq)
                return NULL;

        vring_init(&vq->vring, num, pages, vring_align);
        vq->vq.callback = callback;
        vq->vq.vdev = vdev;
        vq->vq.name = name;
        vq->vq.num_free = num;
        vq->vq.index = index;
        vq->notify = notify;
        vq->weak_barriers = weak_barriers;
        vq->broken = false;
        vq->last_used_idx = 0;
        vq->num_added = 0;
        list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
        vq->in_use = false;
        vq->last_add_time_valid = false;
#endif

        vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
        vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

        /* No callback?  Tell other side not to bother us. */
        if (!callback)
                vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

        /* Put everything in free lists. */
        vq->free_head = 0;
        for (i = 0; i < num-1; i++) {
                vq->vring.desc[i].next = i+1;
                vq->data[i] = NULL;
        }
        vq->data[i] = NULL;

        return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
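
/*
 * Example (illustrative sketch, not part of this file): a transport
 * typically allocates page-aligned, zeroed ring memory and wraps it
 * here.  "MY_ALIGN" and "my_notify" are hypothetical.
 *
 *      void *pages = alloc_pages_exact(vring_size(num, MY_ALIGN),
 *                                      GFP_KERNEL | __GFP_ZERO);
 *
 *      if (pages)
 *              vq = vring_new_virtqueue(index, num, MY_ALIGN, vdev,
 *                                       true, pages, my_notify,
 *                                       callback, name);
 */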

void vring_del_virtqueue(struct virtqueue *vq)
{
        list_del(&vq->list);
        kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
        unsigned int i;

        for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
                switch (i) {
                case VIRTIO_RING_F_INDIRECT_DESC:
                        break;
                case VIRTIO_RING_F_EVENT_IDX:
                        break;
                default:
                        /* We don't understand this bit. */
                        clear_bit(i, vdev->features);
                }
        }
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
        struct virtqueue *_vq;

        list_for_each_entry(_vq, &dev->vqs, list) {
                struct vring_virtqueue *vq = to_vvq(_vq);
                vq->broken = true;
        }
}
EXPORT_SYMBOL_GPL(virtio_break_device);

MODULE_LICENSE("GPL");