/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
/* virtio guest is communicating with a virtual "device" that actually runs on
 * a host processor.  Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio-pci does not use). */
#define virtio_mb(vq) \
	do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0)
#define virtio_rmb(vq) \
	do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0)
#define virtio_wmb(vq) \
	do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0)
#else
/* We must force memory ordering even if guest is UP since host could be
 * running on another CPU, but SMP barriers are defined to barrier() in that
 * configuration. So fall back to mandatory barriers instead. */
#define virtio_mb(vq) mb()
#define virtio_rmb(vq) rmb()
#define virtio_wmb(vq) wmb()
#endif

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in,
			      gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}
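
/*
 * Illustrative sketch (not used by the code): for out = 2, in = 1 the
 * indirect table built above looks like this, with a single main-ring
 * descriptor pointing at it:
 *
 *   desc[head]: flags = INDIRECT, addr = table, len = 3 * sizeof(vring_desc)
 *   table[0]: NEXT            -> readable sg[0]
 *   table[1]: NEXT            -> readable sg[1]
 *   table[2]: WRITE (no NEXT) -> writable sg[2]
 *
 * One ring slot carries the whole chain, which is why a successful
 * indirect add consumes exactly one entry from the free list.
 */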

/**
 * virtqueue_add_buf - expose buffer to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: the description of the buffer(s).
 * @out: the number of sg readable by other side
 * @in: the number of sg which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns remaining capacity of queue or a negative error
 * (ie. ENOSPC).  Note that it only really makes sense to treat all
 * positive return values as "available": indirect buffers mean that
 * we can put an entire sg[] array inside a single queue entry.
 */
int virtqueue_add_buf(struct virtqueue *_vq,
		      struct scatterlist sg[],
		      unsigned int out,
		      unsigned int in,
		      void *data,
		      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, uninitialized_var(prev);
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && (out + in) > 1 && vq->num_free) {
		head = vring_add_indirect(vq, sg, out, in, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = ((vq->vring.avail->idx + vq->num_added++) & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return vq->num_free;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);
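
/*
 * Illustrative sketch, not part of this file: how a driver might queue a
 * request with one readable header and one writable status byte.  The
 * struct and helper names here are hypothetical.
 */
#if 0	/* example only */
static int example_queue_request(struct virtqueue *vq,
				 struct example_req *req)
{
	struct scatterlist sg[2];
	int capacity;

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], &req->hdr, sizeof(req->hdr));	/* readable */
	sg_set_buf(&sg[1], &req->status, sizeof(req->status));	/* writable */

	/* 1 out entry, 1 in entry; req is the token we get back later. */
	capacity = virtqueue_add_buf(vq, sg, 1, 1, req, GFP_ATOMIC);
	if (capacity < 0)
		return capacity;	/* typically -ENOSPC: ring is full */

	virtqueue_kick(vq);
	return 0;
}
#endif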

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq);

	old = vq->vring.avail->idx;
	new = vq->vring.avail->idx = old + vq->num_added;
	vq->num_added = 0;

	/* Need to update avail index before checking if we should notify */
	virtio_mb(vq);

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
void virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Prod other side to tell it about changes. */
	vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_buf calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
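
/*
 * Illustrative sketch, not part of this file: the split kick lets a driver
 * drop its lock before the (potentially expensive) notification, e.g. a
 * vmexit.  The lock and submission step here are hypothetical.
 */
#if 0	/* example only */
static void example_submit_and_kick(struct virtqueue *vq, spinlock_t *lock)
{
	bool kick;

	spin_lock(lock);
	/* ... virtqueue_add_buf() calls go here, under the lock ... */
	kick = virtqueue_kick_prepare(vq);	/* must be serialized */
	spin_unlock(lock);

	if (kick)
		virtqueue_notify(vq);		/* needs no serialization */
}
#endif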

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_buf().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq);
	}

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
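
/*
 * Illustrative sketch, not part of this file: draining completions from a
 * queue's callback.  example_complete() is a hypothetical helper.
 */
#if 0	/* example only */
static void example_drain(struct virtqueue *vq)
{
	struct example_req *req;
	unsigned int len;

	/* Each non-NULL return is a token passed to virtqueue_add_buf(). */
	while ((req = virtqueue_get_buf(vq, &len)) != NULL)
		example_complete(req, len);	/* len = bytes device wrote */
}
#endif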

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = vq->last_used_idx;
	virtio_mb(vq);
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
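
/*
 * Illustrative sketch, not part of this file: the canonical race-free poll
 * loop.  If enable_cb sees work that arrived after the last get_buf, it
 * returns false and we go around again instead of sleeping with callbacks
 * off.  example_complete() is a hypothetical helper.
 */
#if 0	/* example only */
static void example_poll(struct virtqueue *vq)
{
	struct example_req *req;
	unsigned int len;

	virtqueue_disable_cb(vq);	/* hint: no interrupts while we poll */
	do {
		while ((req = virtqueue_get_buf(vq, &len)) != NULL)
			example_complete(req, len);
	} while (!virtqueue_enable_cb(vq));	/* false: recheck for races */
}
#endif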

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
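
/*
 * Illustrative sketch, not part of this file: a transmit path can use the
 * delayed variant to batch completion interrupts.  Since the event index
 * is set ~3/4 of the way through the outstanding buffers, the host
 * interrupts roughly once per batch rather than once per buffer.
 * example_reclaim_tx() is a hypothetical helper.
 */
#if 0	/* example only */
static void example_tx_done(struct virtqueue *vq)
{
	do {
		example_reclaim_tx(vq);	/* free completed tx buffers */
	} while (!virtqueue_enable_cb_delayed(vq));
}
#endif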

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_buf().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
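
/*
 * Illustrative sketch, not part of this file: reclaiming tokens during
 * device teardown, after the queue has been stopped.
 */
#if 0	/* example only */
static void example_free_unused(struct virtqueue *vq)
{
	struct example_req *req;

	while ((req = virtqueue_detach_unused_buf(vq)) != NULL)
		kfree(req);	/* each token was passed to add_buf */
}
#endif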

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
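
/*
 * Illustrative sketch, not part of this file: a transport typically wires
 * its interrupt straight to vring_interrupt(), which dispatches to the
 * virtqueue's callback.  The irq number and name here are hypothetical.
 */
#if 0	/* example only */
static int example_setup_irq(int irq, struct virtqueue *vq)
{
	/* IRQF_SHARED: several virtqueues may share one line. */
	return request_irq(irq, vring_interrupt, IRQF_SHARED,
			   "example-vring", vq);
}
#endif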

struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback? Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->num_free = num;
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
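
/*
 * Illustrative sketch, not part of this file: how a transport might
 * allocate ring pages and create a queue.  The size, alignment and
 * notify hook are hypothetical; real transports negotiate these with
 * the device (e.g. virtio-pci reads the ring size from a register).
 */
#if 0	/* example only */
static struct virtqueue *example_create_vq(struct virtio_device *vdev,
					   void (*notify)(struct virtqueue *),
					   void (*callback)(struct virtqueue *))
{
	const unsigned int num = 256;	/* must be a power of 2 */
	size_t size = PAGE_ALIGN(vring_size(num, PAGE_SIZE));
	void *pages;
	struct virtqueue *vq;

	pages = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (!pages)
		return NULL;

	vq = vring_new_virtqueue(num, PAGE_SIZE, vdev,
				 true /* weak barriers */,
				 pages, notify, callback, "example-vq");
	if (!vq)
		free_pages_exact(pages, size);
	return vq;
}
#endif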

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

MODULE_LICENSE("GPL");