/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>

/* virtio guest is communicating with a virtual "device" that actually runs on
 * a host processor.  Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio-pci does not use). */
#define virtio_mb(vq) \
        do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0)
#define virtio_rmb(vq) \
        do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0)
#define virtio_wmb(vq) \
        do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0)
#else
/* We must force memory ordering even if guest is UP since host could be
 * running on another CPU, but SMP barriers are defined to barrier() in that
 * configuration.  So fall back to mandatory barriers instead. */
#define virtio_mb(vq) mb()
#define virtio_rmb(vq) rmb()
#define virtio_wmb(vq) wmb()
#endif

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)                             \
        do {                                                    \
                dev_err(&(_vq)->vq.vdev->dev,                   \
                        "%s:"fmt, (_vq)->vq.name, ##args);      \
                BUG();                                          \
        } while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)                                          \
        do {                                                    \
                if ((_vq)->in_use)                              \
                        panic("%s:in_use = %i\n",               \
                              (_vq)->vq.name, (_vq)->in_use);   \
                (_vq)->in_use = __LINE__;                       \
        } while (0)
#define END_USE(_vq) \
        do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)                             \
        do {                                                    \
                dev_err(&_vq->vq.vdev->dev,                     \
                        "%s:"fmt, (_vq)->vq.name, ##args);      \
                (_vq)->broken = true;                           \
        } while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
        struct virtqueue vq;

        /* Actual memory layout for this queue */
        struct vring vring;

        /* Can we use weak barriers? */
        bool weak_barriers;

        /* Other side has made a mess, don't try any more. */
        bool broken;

        /* Host supports indirect buffers */
        bool indirect;

        /* Host publishes avail event idx */
        bool event;

        /* Number of free buffers */
        unsigned int num_free;
        /* Head of free buffer list. */
        unsigned int free_head;
        /* Number we've added since last sync. */
        unsigned int num_added;

        /* Last used index we've seen. */
        u16 last_used_idx;

        /* How to notify other side. FIXME: commonalize hcalls! */
        void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
        /* They're supposed to lock for us. */
        unsigned int in_use;

        /* Figure out if their kicks are too delayed. */
        bool last_add_time_valid;
        ktime_t last_add_time;
#endif

        /* Tokens for callbacks. */
        void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
                              struct scatterlist sg[],
                              unsigned int out,
                              unsigned int in,
                              gfp_t gfp)
{
        struct vring_desc *desc;
        unsigned head;
        int i;

        desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
        if (!desc)
                return -ENOMEM;

        /* Transfer entries from the sg list into the indirect page */
        for (i = 0; i < out; i++) {
                desc[i].flags = VRING_DESC_F_NEXT;
                desc[i].addr = sg_phys(sg);
                desc[i].len = sg->length;
                desc[i].next = i+1;
                sg++;
        }
        for (; i < (out + in); i++) {
                desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
                desc[i].addr = sg_phys(sg);
                desc[i].len = sg->length;
                desc[i].next = i+1;
                sg++;
        }

        /* Last one doesn't continue. */
        desc[i-1].flags &= ~VRING_DESC_F_NEXT;
        desc[i-1].next = 0;

        /* We're about to use a buffer */
        vq->num_free--;

        /* Use a single buffer which doesn't continue */
        head = vq->free_head;
        vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
        vq->vring.desc[head].addr = virt_to_phys(desc);
        vq->vring.desc[head].len = i * sizeof(struct vring_desc);

        /* Update free pointer */
        vq->free_head = vq->vring.desc[head].next;

        return head;
}

/**
 * virtqueue_add_buf - expose buffer to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sg: the description of the buffer(s).
 * @out: the number of sg entries readable by the other side
 * @in: the number of sg entries which are writable (after the readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns remaining capacity of queue or a negative error
 * (ie. ENOSPC).  Note that it only really makes sense to treat all
 * positive return values as "available": indirect buffers mean that
 * we can put an entire sg[] array inside a single queue entry.
 */
int virtqueue_add_buf(struct virtqueue *_vq,
                      struct scatterlist sg[],
                      unsigned int out,
                      unsigned int in,
                      void *data,
                      gfp_t gfp)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        unsigned int i, avail, uninitialized_var(prev);
        int head;

        START_USE(vq);

        BUG_ON(data == NULL);

#ifdef DEBUG
        {
                ktime_t now = ktime_get();

                /* No kick or get, with .1 second between?  Warn. */
                if (vq->last_add_time_valid)
                        WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
                                            > 100);
                vq->last_add_time = now;
                vq->last_add_time_valid = true;
        }
#endif

        /* If the host supports indirect descriptor tables, and we have multiple
         * buffers, then go indirect. FIXME: tune this threshold */
        if (vq->indirect && (out + in) > 1 && vq->num_free) {
                head = vring_add_indirect(vq, sg, out, in, gfp);
                if (likely(head >= 0))
                        goto add_head;
        }

        BUG_ON(out + in > vq->vring.num);
        BUG_ON(out + in == 0);

        if (vq->num_free < out + in) {
                pr_debug("Can't add buf len %i - avail = %i\n",
                         out + in, vq->num_free);
                /* FIXME: for historical reasons, we force a notify here if
                 * there are outgoing parts to the buffer.  Presumably the
                 * host should service the ring ASAP. */
                if (out)
                        vq->notify(&vq->vq);
                END_USE(vq);
                return -ENOSPC;
        }

        /* We're about to use some buffers from the free list. */
        vq->num_free -= out + in;

        head = vq->free_head;
        for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
                vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
                vq->vring.desc[i].addr = sg_phys(sg);
                vq->vring.desc[i].len = sg->length;
                prev = i;
                sg++;
        }
        for (; in; i = vq->vring.desc[i].next, in--) {
                vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
                vq->vring.desc[i].addr = sg_phys(sg);
                vq->vring.desc[i].len = sg->length;
                prev = i;
                sg++;
        }
        /* Last one doesn't continue. */
        vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

        /* Update free pointer */
        vq->free_head = i;

add_head:
        /* Set token. */
        vq->data[head] = data;

        /* Put entry in available array (but don't update avail->idx until they
         * do sync). */
        avail = (vq->vring.avail->idx & (vq->vring.num-1));
        vq->vring.avail->ring[avail] = head;

        /* Descriptors and available array need to be set before we expose the
         * new available array entries. */
        virtio_wmb(vq);
        vq->vring.avail->idx++;
        vq->num_added++;

        /* This is very unlikely, but theoretically possible.  Kick
         * just in case. */
        if (unlikely(vq->num_added == (1 << 16) - 1))
                virtqueue_kick(_vq);

        pr_debug("Added buffer head %i to %p\n", head, vq);
        END_USE(vq);

        return vq->num_free;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);

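/*
 * Example (an illustrative sketch, not part of this file): a driver queuing
 * one device-readable header and one device-writable status byte, then
 * kicking the device.  "hdr", "status" and "token" are hypothetical driver
 * state.
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], hdr, sizeof(*hdr));		// device reads this
 *	sg_set_buf(&sg[1], status, sizeof(*status));	// device writes this
 *	if (virtqueue_add_buf(vq, sg, 1, 1, token, GFP_ATOMIC) < 0)
 *		return -ENOSPC;				// ring is full
 *	virtqueue_kick(vq);
 */
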
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        u16 new, old;
        bool needs_kick;

        START_USE(vq);
        /* We need to expose available array entries before checking avail
         * event. */
        virtio_mb(vq);

        old = vq->vring.avail->idx - vq->num_added;
        new = vq->vring.avail->idx;
        vq->num_added = 0;

#ifdef DEBUG
        if (vq->last_add_time_valid) {
                WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
                                              vq->last_add_time)) > 100);
        }
        vq->last_add_time_valid = false;
#endif

        if (vq->event) {
                needs_kick = vring_need_event(vring_avail_event(&vq->vring),
                                              new, old);
        } else {
                needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
        }
        END_USE(vq);
        return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
void virtqueue_notify(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        /* Prod other side to tell it about changes. */
        vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_buf calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
        if (virtqueue_kick_prepare(vq))
                virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

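/*
 * Example (an illustrative sketch): the split kick lets a driver drop its
 * lock before the potentially expensive notify (e.g. a VM exit).  The
 * "dev_lock" spinlock and surrounding driver state are hypothetical.
 *
 *	unsigned long flags;
 *	bool notify;
 *
 *	spin_lock_irqsave(&dev_lock, flags);
 *	... one or more virtqueue_add_buf() calls ...
 *	notify = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&dev_lock, flags);
 *	if (notify)
 *		virtqueue_notify(vq);
 */
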
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
        unsigned int i;

        /* Clear data ptr. */
        vq->data[head] = NULL;

        /* Put back on free list: find end */
        i = head;

        /* Free the indirect table */
        if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
                kfree(phys_to_virt(vq->vring.desc[i].addr));

        while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
                i = vq->vring.desc[i].next;
                vq->num_free++;
        }

        vq->vring.desc[i].next = vq->free_head;
        vq->free_head = head;
        /* Plus final descriptor */
        vq->num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
        return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_buf().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        void *ret;
        unsigned int i;
        u16 last_used;

        START_USE(vq);

        if (unlikely(vq->broken)) {
                END_USE(vq);
                return NULL;
        }

        if (!more_used(vq)) {
                pr_debug("No more buffers in queue\n");
                END_USE(vq);
                return NULL;
        }

        /* Only get used array entries after they have been exposed by host. */
        virtio_rmb(vq);

        last_used = (vq->last_used_idx & (vq->vring.num - 1));
        i = vq->vring.used->ring[last_used].id;
        *len = vq->vring.used->ring[last_used].len;

        if (unlikely(i >= vq->vring.num)) {
                BAD_RING(vq, "id %u out of range\n", i);
                return NULL;
        }
        if (unlikely(!vq->data[i])) {
                BAD_RING(vq, "id %u is not a head!\n", i);
                return NULL;
        }

        /* detach_buf clears data, so grab it now. */
        ret = vq->data[i];
        detach_buf(vq, i);
        vq->last_used_idx++;
        /* If we expect an interrupt for the next entry, tell host
         * by writing event index and flush out the write before
         * the read in the next get_buf call. */
        if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
                vring_used_event(&vq->vring) = vq->last_used_idx;
                virtio_mb(vq);
        }

#ifdef DEBUG
        vq->last_add_time_valid = false;
#endif

        END_USE(vq);
        return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

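/*
 * Example (an illustrative sketch): draining all completed buffers from a
 * virtqueue callback.  "handle_reply" is a hypothetical driver function.
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		handle_reply(buf, len);
 */
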
/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        START_USE(vq);

        /* We optimistically turn back on interrupts, then check if there was
         * more to do. */
        /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
         * either clear the flags bit or point the event index at the next
         * entry.  Always do both to keep code simple. */
        vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
        vring_used_event(&vq->vring) = vq->last_used_idx;
        virtio_mb(vq);
        if (unlikely(more_used(vq))) {
                END_USE(vq);
                return false;
        }

        END_USE(vq);
        return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

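/*
 * Example (an illustrative sketch): the usual race-free pattern is to
 * disable callbacks, drain the queue, then re-enable and re-check.
 * "process" is a hypothetical driver function.
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	virtqueue_disable_cb(vq);
 *	do {
 *		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *			process(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */
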
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        u16 bufs;

        START_USE(vq);

        /* We optimistically turn back on interrupts, then check if there was
         * more to do. */
        /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
         * either clear the flags bit or point the event index at the next
         * entry.  Always do both to keep code simple. */
        vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
        /* TODO: tune this threshold */
        bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
        vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
        virtio_mb(vq);
        if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
                END_USE(vq);
                return false;
        }

        END_USE(vq);
        return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_buf().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        unsigned int i;
        void *buf;

        START_USE(vq);

        for (i = 0; i < vq->vring.num; i++) {
                if (!vq->data[i])
                        continue;
                /* detach_buf clears data, so grab it now. */
                buf = vq->data[i];
                detach_buf(vq, i);
                vq->vring.avail->idx--;
                END_USE(vq);
                return buf;
        }
        /* That should have freed everything. */
        BUG_ON(vq->num_free != vq->vring.num);

        END_USE(vq);
        return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

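/*
 * Example (an illustrative sketch): reclaiming buffers in a driver's remove
 * path, after the device has been reset so the queue is no longer active.
 * How each buffer is freed is up to the driver; kfree() assumes they were
 * kmalloc'ed.
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		kfree(buf);
 */
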
irqreturn_t vring_interrupt(int irq, void *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (!more_used(vq)) {
                pr_debug("virtqueue interrupt with no work for %p\n", vq);
                return IRQ_NONE;
        }

        if (unlikely(vq->broken))
                return IRQ_HANDLED;

        pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
        if (vq->vq.callback)
                vq->vq.callback(&vq->vq);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int num,
                                      unsigned int vring_align,
                                      struct virtio_device *vdev,
                                      bool weak_barriers,
                                      void *pages,
                                      void (*notify)(struct virtqueue *),
                                      void (*callback)(struct virtqueue *),
                                      const char *name)
{
        struct vring_virtqueue *vq;
        unsigned int i;

        /* We assume num is a power of 2. */
        if (num & (num - 1)) {
                dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
                return NULL;
        }

        vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
        if (!vq)
                return NULL;

        vring_init(&vq->vring, num, pages, vring_align);
        vq->vq.callback = callback;
        vq->vq.vdev = vdev;
        vq->vq.name = name;
        vq->notify = notify;
        vq->weak_barriers = weak_barriers;
        vq->broken = false;
        vq->last_used_idx = 0;
        vq->num_added = 0;
        list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
        vq->in_use = false;
        vq->last_add_time_valid = false;
#endif

        vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
        vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

        /* No callback?  Tell other side not to bother us. */
        if (!callback)
                vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

        /* Put everything in free lists. */
        vq->num_free = num;
        vq->free_head = 0;
        for (i = 0; i < num-1; i++) {
                vq->vring.desc[i].next = i+1;
                vq->data[i] = NULL;
        }
        vq->data[i] = NULL;

        return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

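/*
 * Example (an illustrative sketch): how a transport might create a queue.
 * The allocation, "MY_VRING_ALIGN" constant and "my_notify" hook are
 * hypothetical; real transports (e.g. virtio-pci) have their own.
 *
 *	void *pages = alloc_pages_exact(vring_size(num, MY_VRING_ALIGN),
 *					GFP_KERNEL | __GFP_ZERO);
 *	struct virtqueue *vq = vring_new_virtqueue(num, MY_VRING_ALIGN,
 *						   vdev, true, pages,
 *						   my_notify, callback, name);
 */
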
void vring_del_virtqueue(struct virtqueue *vq)
{
        list_del(&vq->list);
        kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
        unsigned int i;

        for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
                switch (i) {
                case VIRTIO_RING_F_INDIRECT_DESC:
                        break;
                case VIRTIO_RING_F_EVENT_IDX:
                        break;
                default:
                        /* We don't understand this bit. */
                        clear_bit(i, vdev->features);
                }
        }
}
EXPORT_SYMBOL_GPL(vring_transport_features);

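/*
 * Example (an illustrative sketch): a transport typically calls this from
 * its finalize_features hook, before acking the feature bits to the device.
 * "my_finalize_features" and the device write are hypothetical.
 *
 *	static void my_finalize_features(struct virtio_device *vdev)
 *	{
 *		vring_transport_features(vdev);
 *		... then write vdev->features[0] back to the device ...
 *	}
 */
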
/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

MODULE_LICENSE("GPL");