/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-emulated
	 * Q35 IOMMU and Xen enabled at the same time. On such a
	 * configuration, virtio has never worked and will not work
	 * without an even larger kludge. Instead, enable the DMA API
	 * if we're a Xen guest, which at least allows all of the
	 * sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess. For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static void vring_unmap_one(const struct vring_virtqueue *vq,
			    struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);

		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;
	else
		vq->desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
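
/*
 * Editorial example (a sketch, not part of the original file): a caller
 * with one device-readable and one device-writable buffer might use
 * virtqueue_add_sgs() like this. "vq", "hdr", "resp" and "token" are
 * hypothetical driver-side names.
 *
 *	struct scatterlist hdr_sg, resp_sg, *sgs[2];
 *
 *	sg_init_one(&hdr_sg, hdr, sizeof(*hdr));     // readable by device
 *	sg_init_one(&resp_sg, resp, sizeof(*resp));  // written by device
 *	sgs[0] = &hdr_sg;
 *	sgs[1] = &resp_sg;
 *	if (!virtqueue_add_sgs(vq, sgs, 1, 1, token, GFP_ATOMIC))
 *		virtqueue_kick(vq);
 */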

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
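
/*
 * Editorial example (a sketch): refilling a receive queue with a single
 * device-writable buffer, roughly as network drivers do; "buf", "buf_len"
 * and "vq" are hypothetical.
 *
 *	struct scatterlist sg;
 *	int err;
 *
 *	sg_init_one(&sg, buf, buf_len);
 *	err = virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 */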

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
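
/*
 * Editorial example (a sketch): the prepare/notify split lets a driver
 * drop its queue lock before the potentially expensive notification;
 * "lock", "flags", "sg", "token" and "vq" are hypothetical driver state.
 *
 *	bool kick;
 *
 *	spin_lock_irqsave(&lock, flags);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, token, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */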

static void detach_buf(struct vring_virtqueue *vq, unsigned int head,
		       void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		vring_unmap_one(vq, &vq->vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one(vq, &vq->vring.desc[i]);
	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);

		BUG_ON(!(vq->vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->desc_state[head].indir_desc;
	}
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf_ctx - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 * @ctx: extra context for the token
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
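
/*
 * Editorial example (a sketch): a typical completion loop in a callback
 * or polling path; "vq" and "complete_request" are hypothetical.
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(token, len);
 */
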
/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
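
/*
 * Editorial example (a sketch): the prepare/poll split allows re-checking
 * for work after callbacks were re-enabled, without serializing the check
 * itself; "vq" is hypothetical.
 *
 *	unsigned last = virtqueue_enable_cb_prepare(vq);
 *
 *	if (virtqueue_poll(vq, last)) {
 *		virtqueue_disable_cb(vq);
 *		// more used buffers arrived in the window; process them
 *	}
 */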

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
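
/*
 * Editorial example (a sketch): the usual race-free drain loop; retrying
 * until virtqueue_enable_cb() succeeds closes the window where a buffer
 * becomes used after the last virtqueue_get_buf() but before callbacks
 * are re-enabled. "process" is a hypothetical driver helper.
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((token = virtqueue_get_buf(vq, &len)))
 *			process(token, len);
 *	} while (!virtqueue_enable_cb(vq));
 */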

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
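
/*
 * Editorial example (a sketch): transmit-completion paths often prefer
 * the delayed form so that one interrupt covers a batch of used buffers.
 *
 *	if (!virtqueue_enable_cb_delayed(vq)) {
 *		// many buffers already pending: reap them now
 *	}
 */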

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf(vq, i, NULL);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
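
/*
 * Editorial example (a sketch): reclaiming unconsumed buffers during
 * device teardown, after the device has been reset; "free_buf" is a
 * hypothetical driver helper.
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		free_buf(buf);
 */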

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vring = vring;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num-1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev)) {
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	} else {
		free_pages_exact(queue, PAGE_ALIGN(size));
	}
}

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->queue_dma_addr = dma_addr;
	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
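
/*
 * Editorial example (a sketch): how a transport might create a ring.
 * The callbacks and the 4096-byte alignment are assumptions, and a real
 * transport must also program the addresses returned by
 * virtqueue_get_desc_addr()/_avail_addr()/_used_addr() into the device.
 *
 *	vq = vring_create_virtqueue(index, num, 4096, vdev,
 *				    true,	// weak_barriers
 *				    true,	// may_reduce_num
 *				    false,	// context
 *				    my_notify, my_callback, "requests");
 *	if (!vq)
 *		return NULL;
 */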

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;
	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
				 vq->vring.desc, vq->queue_dma_addr);
	}
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_IOMMU_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
	       ((char *)vq->vring.avail - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
	       ((char *)vq->vring.used - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");