1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Virtio ring implementation.
4 * Copyright 2007 Rusty Russell IBM Corporation
6 #include <linux/virtio.h>
7 #include <linux/virtio_ring.h>
8 #include <linux/virtio_config.h>
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/module.h>
12 #include <linux/hrtimer.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/kmsan.h>
15 #include <linux/spinlock.h>
19 /* For development, we want to crash whenever the ring is screwed. */
20 #define BAD_RING(_vq, fmt, args...) \
22 dev_err(&(_vq)->vq.vdev->dev, \
23 "%s:"fmt, (_vq)->vq.name, ##args); \
26 /* Caller is supposed to guarantee no reentry. */
27 #define START_USE(_vq) \
30 panic("%s:in_use = %i\n", \
31 (_vq)->vq.name, (_vq)->in_use); \
32 (_vq)->in_use = __LINE__; \
34 #define END_USE(_vq) \
35 do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
36 #define LAST_ADD_TIME_UPDATE(_vq) \
38 ktime_t now = ktime_get(); \
40 /* No kick or get, with 0.1 seconds between?  Warn. */ \
41 if ((_vq)->last_add_time_valid) \
42 WARN_ON(ktime_to_ms(ktime_sub(now, \
43 (_vq)->last_add_time)) > 100); \
44 (_vq)->last_add_time = now; \
45 (_vq)->last_add_time_valid = true; \
47 #define LAST_ADD_TIME_CHECK(_vq) \
49 if ((_vq)->last_add_time_valid) { \
50 WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
51 (_vq)->last_add_time)) > 100); \
54 #define LAST_ADD_TIME_INVALID(_vq) \
55 ((_vq)->last_add_time_valid = false)
57 #define BAD_RING(_vq, fmt, args...) \
59 dev_err(&_vq->vq.vdev->dev, \
60 "%s:"fmt, (_vq)->vq.name, ##args); \
61 (_vq)->broken = true; \
65 #define LAST_ADD_TIME_UPDATE(vq)
66 #define LAST_ADD_TIME_CHECK(vq)
67 #define LAST_ADD_TIME_INVALID(vq)
70 struct vring_desc_state_split {
71 void *data; /* Data for callback. */
72 struct vring_desc *indir_desc; /* Indirect descriptor, if any. */
75 struct vring_desc_state_packed {
76 void *data; /* Data for callback. */
77 struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
78 u16 num; /* Descriptor list length. */
79 u16 last; /* The last desc state in a list. */
82 struct vring_desc_extra {
83 dma_addr_t addr; /* Descriptor DMA addr. */
84 u32 len; /* Descriptor length. */
85 u16 flags; /* Descriptor flags. */
86 u16 next; /* The next desc state in a list. */
89 struct vring_virtqueue_split {
90 /* Actual memory layout for this queue. */
93 /* Last written value to avail->flags */
94 u16 avail_flags_shadow;
97 * Last written value to avail->idx in
100 u16 avail_idx_shadow;
102 /* Per-descriptor state. */
103 struct vring_desc_state_split *desc_state;
104 struct vring_desc_extra *desc_extra;
106 /* DMA address and size information */
107 dma_addr_t queue_dma_addr;
108 size_t queue_size_in_bytes;
111 * The parameters for creating vrings are reserved for creating new
118 struct vring_virtqueue_packed {
119 /* Actual memory layout for this queue. */
122 struct vring_packed_desc *desc;
123 struct vring_packed_desc_event *driver;
124 struct vring_packed_desc_event *device;
127 /* Driver ring wrap counter. */
128 bool avail_wrap_counter;
130 /* Avail used flags. */
131 u16 avail_used_flags;
133 /* Index of the next avail descriptor. */
137 * Last written value to driver->flags in
140 u16 event_flags_shadow;
142 /* Per-descriptor state. */
143 struct vring_desc_state_packed *desc_state;
144 struct vring_desc_extra *desc_extra;
146 /* DMA address and size information */
147 dma_addr_t ring_dma_addr;
148 dma_addr_t driver_event_dma_addr;
149 dma_addr_t device_event_dma_addr;
150 size_t ring_size_in_bytes;
151 size_t event_size_in_bytes;
154 struct vring_virtqueue {
157 /* Is this a packed ring? */
160 /* Is DMA API used? */
163 /* Can we use weak barriers? */
166 /* Other side has made a mess, don't try any more. */
169 /* Host supports indirect buffers */
172 /* Host publishes avail event idx */
175 /* Head of free buffer list. */
176 unsigned int free_head;
177 /* Number we've added since last sync. */
178 unsigned int num_added;
180 /* Last used index we've seen.
181 * for split ring, it just contains the last used index;
183 * for packed ring, bits below VRING_PACKED_EVENT_F_WRAP_CTR hold the last used index
184 * and bit VRING_PACKED_EVENT_F_WRAP_CTR holds the used wrap counter.
188 /* Hint for event idx: already triggered no need to disable. */
189 bool event_triggered;
192 /* Available for split ring */
193 struct vring_virtqueue_split split;
195 /* Available for packed ring */
196 struct vring_virtqueue_packed packed;
199 /* How to notify other side. FIXME: commonalize hcalls! */
200 bool (*notify)(struct virtqueue *vq);
202 /* DMA, allocation, and size information */
206 /* They're supposed to lock for us. */
209 /* Figure out if their kicks are too delayed. */
210 bool last_add_time_valid;
211 ktime_t last_add_time;
215 static struct virtqueue *__vring_new_virtqueue(unsigned int index,
216 struct vring_virtqueue_split *vring_split,
217 struct virtio_device *vdev,
220 bool (*notify)(struct virtqueue *),
221 void (*callback)(struct virtqueue *),
223 static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
224 static void vring_free(struct virtqueue *_vq);
230 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
232 static inline bool virtqueue_use_indirect(struct vring_virtqueue *vq,
233 unsigned int total_sg)
236 * If the host supports indirect descriptor tables, and we have multiple
237 * buffers, then go indirect. FIXME: tune this threshold
239 return (vq->indirect && total_sg > 1 && vq->vq.num_free);
243 * Modern virtio devices have feature bits to specify whether they need a
244 * quirk and bypass the IOMMU. If not there, just use the DMA API.
246 * If there, the interaction between virtio and DMA API is messy.
248 * On most systems with virtio, physical addresses match bus addresses,
249 * and it doesn't particularly matter whether we use the DMA API.
251 * On some systems, including Xen and any system with a physical device
252 * that speaks virtio behind a physical IOMMU, we must use the DMA API
253 * for virtio DMA to work at all.
255 * On other systems, including SPARC and PPC64, virtio-pci devices are
256 * enumerated as though they are behind an IOMMU, but the virtio host
257 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
258 * there or somehow map everything as the identity.
260 * For the time being, we preserve historic behavior and bypass the DMA API.
263 * TODO: install a per-device DMA ops structure that does the right thing
264 * taking into account all the above quirks, and use the DMA API
265 * unconditionally on data path.
268 static bool vring_use_dma_api(struct virtio_device *vdev)
270 if (!virtio_has_dma_quirk(vdev))
273 /* Otherwise, we are left to guess. */
275 * In theory, it's possible to have a buggy QEMU-supplied
276 * emulated Q35 IOMMU and Xen enabled at the same time. On
277 * such a configuration, virtio has never worked and will
278 * not work without an even larger kludge. Instead, enable
279 * the DMA API if we're a Xen guest, which at least allows
280 * all of the sensible Xen configurations to work correctly.
288 size_t virtio_max_dma_size(struct virtio_device *vdev)
290 size_t max_segment_size = SIZE_MAX;
292 if (vring_use_dma_api(vdev))
293 max_segment_size = dma_max_mapping_size(vdev->dev.parent);
295 return max_segment_size;
297 EXPORT_SYMBOL_GPL(virtio_max_dma_size);
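/*
 * Hedged usage sketch (editor's addition, not part of the original file):
 * a driver that builds DMA segments could cap its segment size with the
 * value returned above. "q" and "vdev" stand for the driver's request queue
 * and virtio device and are assumptions of this example.
 *
 *	u32 max_seg = min_t(u64, virtio_max_dma_size(vdev), U32_MAX);
 *
 *	blk_queue_max_segment_size(q, max_seg);
 */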
299 static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
300 dma_addr_t *dma_handle, gfp_t flag)
302 if (vring_use_dma_api(vdev)) {
303 return dma_alloc_coherent(vdev->dev.parent, size,
306 void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
309 phys_addr_t phys_addr = virt_to_phys(queue);
310 *dma_handle = (dma_addr_t)phys_addr;
313 * Sanity check: make sure we didn't truncate
314 * the address. The only arches I can find that
315 * have 64-bit phys_addr_t but 32-bit dma_addr_t
316 * are certain non-highmem MIPS and x86
317 * configurations, but these configurations
318 * should never allocate physical pages above 32
319 * bits, so this is fine. Just in case, throw a
320 * warning and abort if we end up with an
321 * unrepresentable address.
323 if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
324 free_pages_exact(queue, PAGE_ALIGN(size));
332 static void vring_free_queue(struct virtio_device *vdev, size_t size,
333 void *queue, dma_addr_t dma_handle)
335 if (vring_use_dma_api(vdev))
336 dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
338 free_pages_exact(queue, PAGE_ALIGN(size));
342 * The DMA ops on various arches are rather gnarly right now, and
343 * making all of the arch DMA ops work on the vring device itself
344 * is a mess. For now, we use the parent device for DMA ops.
346 static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
348 return vq->vq.vdev->dev.parent;
351 /* Map one sg entry. */
352 static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
353 struct scatterlist *sg,
354 enum dma_data_direction direction)
356 if (!vq->use_dma_api) {
358 * If DMA is not used, KMSAN doesn't know that the scatterlist
359 * is initialized by the hardware. Explicitly check/unpoison it
360 * depending on the direction.
362 kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
363 return (dma_addr_t)sg_phys(sg);
367 * We can't use dma_map_sg, because we don't use scatterlists in
368 * the way it expects (we don't guarantee that the scatterlist
369 * will exist for the lifetime of the mapping).
371 return dma_map_page(vring_dma_dev(vq),
372 sg_page(sg), sg->offset, sg->length,
376 static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
377 void *cpu_addr, size_t size,
378 enum dma_data_direction direction)
380 if (!vq->use_dma_api)
381 return (dma_addr_t)virt_to_phys(cpu_addr);
383 return dma_map_single(vring_dma_dev(vq),
384 cpu_addr, size, direction);
387 static int vring_mapping_error(const struct vring_virtqueue *vq,
390 if (!vq->use_dma_api)
393 return dma_mapping_error(vring_dma_dev(vq), addr);
396 static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
398 vq->vq.num_free = num;
401 vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
403 vq->last_used_idx = 0;
405 vq->event_triggered = false;
410 vq->last_add_time_valid = false;
416 * Split ring specific functions - *_split().
419 static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
420 struct vring_desc *desc)
424 if (!vq->use_dma_api)
427 flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
429 dma_unmap_page(vring_dma_dev(vq),
430 virtio64_to_cpu(vq->vq.vdev, desc->addr),
431 virtio32_to_cpu(vq->vq.vdev, desc->len),
432 (flags & VRING_DESC_F_WRITE) ?
433 DMA_FROM_DEVICE : DMA_TO_DEVICE);
436 static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
439 struct vring_desc_extra *extra = vq->split.desc_extra;
442 if (!vq->use_dma_api)
445 flags = extra[i].flags;
447 if (flags & VRING_DESC_F_INDIRECT) {
448 dma_unmap_single(vring_dma_dev(vq),
451 (flags & VRING_DESC_F_WRITE) ?
452 DMA_FROM_DEVICE : DMA_TO_DEVICE);
454 dma_unmap_page(vring_dma_dev(vq),
457 (flags & VRING_DESC_F_WRITE) ?
458 DMA_FROM_DEVICE : DMA_TO_DEVICE);
462 return extra[i].next;
465 static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
466 unsigned int total_sg,
469 struct vring_desc *desc;
473 * We require lowmem mappings for the descriptors because
474 * otherwise virt_to_phys will give us bogus addresses in the virtqueue.
477 gfp &= ~__GFP_HIGHMEM;
479 desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
483 for (i = 0; i < total_sg; i++)
484 desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
488 static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
489 struct vring_desc *desc,
496 struct vring_virtqueue *vring = to_vvq(vq);
497 struct vring_desc_extra *extra = vring->split.desc_extra;
500 desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
501 desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
502 desc[i].len = cpu_to_virtio32(vq->vdev, len);
505 next = extra[i].next;
506 desc[i].next = cpu_to_virtio16(vq->vdev, next);
508 extra[i].addr = addr;
510 extra[i].flags = flags;
512 next = virtio16_to_cpu(vq->vdev, desc[i].next);
517 static inline int virtqueue_add_split(struct virtqueue *_vq,
518 struct scatterlist *sgs[],
519 unsigned int total_sg,
520 unsigned int out_sgs,
526 struct vring_virtqueue *vq = to_vvq(_vq);
527 struct scatterlist *sg;
528 struct vring_desc *desc;
529 unsigned int i, n, avail, descs_used, prev, err_idx;
535 BUG_ON(data == NULL);
536 BUG_ON(ctx && vq->indirect);
538 if (unlikely(vq->broken)) {
543 LAST_ADD_TIME_UPDATE(vq);
545 BUG_ON(total_sg == 0);
547 head = vq->free_head;
549 if (virtqueue_use_indirect(vq, total_sg))
550 desc = alloc_indirect_split(_vq, total_sg, gfp);
553 WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
557 /* Use a single buffer which doesn't continue */
559 /* Set up rest to use this indirect table. */
564 desc = vq->split.vring.desc;
566 descs_used = total_sg;
569 if (unlikely(vq->vq.num_free < descs_used)) {
570 pr_debug("Can't add buf len %i - avail = %i\n",
571 descs_used, vq->vq.num_free);
572 /* FIXME: for historical reasons, we force a notify here if
573 * there are outgoing parts to the buffer. Presumably the
574 * host should service the ring ASAP. */
583 for (n = 0; n < out_sgs; n++) {
584 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
585 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
586 if (vring_mapping_error(vq, addr))
590 /* Note that we trust indirect descriptor
591 * table since it uses streaming DMA mapping.
593 i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
598 for (; n < (out_sgs + in_sgs); n++) {
599 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
600 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
601 if (vring_mapping_error(vq, addr))
605 /* Note that we trust indirect descriptor
606 * table since it uses streaming DMA mapping.
608 i = virtqueue_add_desc_split(_vq, desc, i, addr,
615 /* Last one doesn't continue. */
616 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
617 if (!indirect && vq->use_dma_api)
618 vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
622 /* Now that the indirect table is filled in, map it. */
623 dma_addr_t addr = vring_map_single(
624 vq, desc, total_sg * sizeof(struct vring_desc),
626 if (vring_mapping_error(vq, addr))
629 virtqueue_add_desc_split(_vq, vq->split.vring.desc,
631 total_sg * sizeof(struct vring_desc),
632 VRING_DESC_F_INDIRECT,
636 /* We're using some buffers from the free list. */
637 vq->vq.num_free -= descs_used;
639 /* Update free pointer */
641 vq->free_head = vq->split.desc_extra[head].next;
645 /* Store token and indirect buffer state. */
646 vq->split.desc_state[head].data = data;
648 vq->split.desc_state[head].indir_desc = desc;
650 vq->split.desc_state[head].indir_desc = ctx;
652 /* Put entry in available array (but don't update avail->idx until they
654 avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
655 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
657 /* Descriptors and available array need to be set before we expose the
658 * new available array entries. */
659 virtio_wmb(vq->weak_barriers);
660 vq->split.avail_idx_shadow++;
661 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
662 vq->split.avail_idx_shadow);
665 pr_debug("Added buffer head %i to %p\n", head, vq);
668 /* This is very unlikely, but theoretically possible.  Kick just in case. */
670 if (unlikely(vq->num_added == (1 << 16) - 1))
683 for (n = 0; n < total_sg; n++) {
687 vring_unmap_one_split_indirect(vq, &desc[i]);
688 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
690 i = vring_unmap_one_split(vq, i);
700 static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
702 struct vring_virtqueue *vq = to_vvq(_vq);
707 /* We need to expose available array entries before checking avail
709 virtio_mb(vq->weak_barriers);
711 old = vq->split.avail_idx_shadow - vq->num_added;
712 new = vq->split.avail_idx_shadow;
715 LAST_ADD_TIME_CHECK(vq);
716 LAST_ADD_TIME_INVALID(vq);
719 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
720 vring_avail_event(&vq->split.vring)),
723 needs_kick = !(vq->split.vring.used->flags &
724 cpu_to_virtio16(_vq->vdev,
725 VRING_USED_F_NO_NOTIFY));
731 static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
735 __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
737 /* Clear data ptr. */
738 vq->split.desc_state[head].data = NULL;
740 /* Put back on free list: unmap first-level descriptors and find end */
743 while (vq->split.vring.desc[i].flags & nextflag) {
744 vring_unmap_one_split(vq, i);
745 i = vq->split.desc_extra[i].next;
749 vring_unmap_one_split(vq, i);
750 vq->split.desc_extra[i].next = vq->free_head;
751 vq->free_head = head;
753 /* Plus final descriptor */
757 struct vring_desc *indir_desc =
758 vq->split.desc_state[head].indir_desc;
761 /* Free the indirect table, if any, now that it's unmapped. */
765 len = vq->split.desc_extra[head].len;
767 BUG_ON(!(vq->split.desc_extra[head].flags &
768 VRING_DESC_F_INDIRECT));
769 BUG_ON(len == 0 || len % sizeof(struct vring_desc));
771 for (j = 0; j < len / sizeof(struct vring_desc); j++)
772 vring_unmap_one_split_indirect(vq, &indir_desc[j]);
775 vq->split.desc_state[head].indir_desc = NULL;
777 *ctx = vq->split.desc_state[head].indir_desc;
781 static inline bool more_used_split(const struct vring_virtqueue *vq)
783 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
784 vq->split.vring.used->idx);
787 static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
791 struct vring_virtqueue *vq = to_vvq(_vq);
798 if (unlikely(vq->broken)) {
803 if (!more_used_split(vq)) {
804 pr_debug("No more buffers in queue\n");
809 /* Only get used array entries after they have been exposed by host. */
810 virtio_rmb(vq->weak_barriers);
812 last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
813 i = virtio32_to_cpu(_vq->vdev,
814 vq->split.vring.used->ring[last_used].id);
815 *len = virtio32_to_cpu(_vq->vdev,
816 vq->split.vring.used->ring[last_used].len);
818 if (unlikely(i >= vq->split.vring.num)) {
819 BAD_RING(vq, "id %u out of range\n", i);
822 if (unlikely(!vq->split.desc_state[i].data)) {
823 BAD_RING(vq, "id %u is not a head!\n", i);
827 /* detach_buf_split clears data, so grab it now. */
828 ret = vq->split.desc_state[i].data;
829 detach_buf_split(vq, i, ctx);
831 /* If we expect an interrupt for the next entry, tell host
832 * by writing event index and flush out the write before
833 * the read in the next get_buf call. */
834 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
835 virtio_store_mb(vq->weak_barriers,
836 &vring_used_event(&vq->split.vring),
837 cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
839 LAST_ADD_TIME_INVALID(vq);
845 static void virtqueue_disable_cb_split(struct virtqueue *_vq)
847 struct vring_virtqueue *vq = to_vvq(_vq);
849 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
850 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
852 /* TODO: this is a hack. Figure out a cleaner value to write. */
853 vring_used_event(&vq->split.vring) = 0x0;
855 vq->split.vring.avail->flags =
856 cpu_to_virtio16(_vq->vdev,
857 vq->split.avail_flags_shadow);
861 static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
863 struct vring_virtqueue *vq = to_vvq(_vq);
868 /* We optimistically turn back on interrupts, then check if there was more to do. */
870 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
871 * either clear the flags bit or point the event index at the next
872 * entry. Always do both to keep code simple. */
873 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
874 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
876 vq->split.vring.avail->flags =
877 cpu_to_virtio16(_vq->vdev,
878 vq->split.avail_flags_shadow);
880 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
881 last_used_idx = vq->last_used_idx);
883 return last_used_idx;
886 static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned int last_used_idx)
888 struct vring_virtqueue *vq = to_vvq(_vq);
890 return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
891 vq->split.vring.used->idx);
894 static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
896 struct vring_virtqueue *vq = to_vvq(_vq);
901 /* We optimistically turn back on interrupts, then check if there was more to do. */
903 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
904 * either clear the flags bit or point the event index at the next
905 * entry. Always update the event index to keep code simple. */
906 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
907 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
909 vq->split.vring.avail->flags =
910 cpu_to_virtio16(_vq->vdev,
911 vq->split.avail_flags_shadow);
913 /* TODO: tune this threshold */
914 bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;
916 virtio_store_mb(vq->weak_barriers,
917 &vring_used_event(&vq->split.vring),
918 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
920 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
921 - vq->last_used_idx) > bufs)) {
930 static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
932 struct vring_virtqueue *vq = to_vvq(_vq);
938 for (i = 0; i < vq->split.vring.num; i++) {
939 if (!vq->split.desc_state[i].data)
941 /* detach_buf_split clears data, so grab it now. */
942 buf = vq->split.desc_state[i].data;
943 detach_buf_split(vq, i, NULL);
944 vq->split.avail_idx_shadow--;
945 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
946 vq->split.avail_idx_shadow);
950 /* That should have freed everything. */
951 BUG_ON(vq->vq.num_free != vq->split.vring.num);
957 static void virtqueue_vring_init_split(struct vring_virtqueue_split *vring_split,
958 struct vring_virtqueue *vq)
960 struct virtio_device *vdev;
964 vring_split->avail_flags_shadow = 0;
965 vring_split->avail_idx_shadow = 0;
967 /* No callback? Tell other side not to bother us. */
968 if (!vq->vq.callback) {
969 vring_split->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
971 vring_split->vring.avail->flags = cpu_to_virtio16(vdev,
972 vring_split->avail_flags_shadow);
976 static void virtqueue_reinit_split(struct vring_virtqueue *vq)
980 num = vq->split.vring.num;
982 vq->split.vring.avail->flags = 0;
983 vq->split.vring.avail->idx = 0;
985 /* reset avail event */
986 vq->split.vring.avail->ring[num] = 0;
988 vq->split.vring.used->flags = 0;
989 vq->split.vring.used->idx = 0;
991 /* reset used event */
992 *(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0;
994 virtqueue_init(vq, num);
996 virtqueue_vring_init_split(&vq->split, vq);
999 static void virtqueue_vring_attach_split(struct vring_virtqueue *vq,
1000 struct vring_virtqueue_split *vring_split)
1002 vq->split = *vring_split;
1004 /* Put everything in free lists. */
1008 static int vring_alloc_state_extra_split(struct vring_virtqueue_split *vring_split)
1010 struct vring_desc_state_split *state;
1011 struct vring_desc_extra *extra;
1012 u32 num = vring_split->vring.num;
1014 state = kmalloc_array(num, sizeof(struct vring_desc_state_split), GFP_KERNEL);
1018 extra = vring_alloc_desc_extra(num);
1022 memset(state, 0, num * sizeof(struct vring_desc_state_split));
1024 vring_split->desc_state = state;
1025 vring_split->desc_extra = extra;
1034 static void vring_free_split(struct vring_virtqueue_split *vring_split,
1035 struct virtio_device *vdev)
1037 vring_free_queue(vdev, vring_split->queue_size_in_bytes,
1038 vring_split->vring.desc,
1039 vring_split->queue_dma_addr);
1041 kfree(vring_split->desc_state);
1042 kfree(vring_split->desc_extra);
1045 static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
1046 struct virtio_device *vdev,
1048 unsigned int vring_align,
1049 bool may_reduce_num)
1052 dma_addr_t dma_addr;
1054 /* We assume num is a power of 2. */
1055 if (!is_power_of_2(num)) {
1056 dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
1060 /* TODO: allocate each queue chunk individually */
1061 for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
1062 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
1064 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
1067 if (!may_reduce_num)
1075 /* Try to get a single page. You are my only hope! */
1076 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
1077 &dma_addr, GFP_KERNEL | __GFP_ZERO);
1082 vring_init(&vring_split->vring, num, queue, vring_align);
1084 vring_split->queue_dma_addr = dma_addr;
1085 vring_split->queue_size_in_bytes = vring_size(num, vring_align);
1087 vring_split->vring_align = vring_align;
1088 vring_split->may_reduce_num = may_reduce_num;
1093 static struct virtqueue *vring_create_virtqueue_split(
1096 unsigned int vring_align,
1097 struct virtio_device *vdev,
1099 bool may_reduce_num,
1101 bool (*notify)(struct virtqueue *),
1102 void (*callback)(struct virtqueue *),
1105 struct vring_virtqueue_split vring_split = {};
1106 struct virtqueue *vq;
1109 err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
1114 vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
1115 context, notify, callback, name);
1117 vring_free_split(&vring_split, vdev);
1121 to_vvq(vq)->we_own_ring = true;
1126 static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
1128 struct vring_virtqueue_split vring_split = {};
1129 struct vring_virtqueue *vq = to_vvq(_vq);
1130 struct virtio_device *vdev = _vq->vdev;
1133 err = vring_alloc_queue_split(&vring_split, vdev, num,
1134 vq->split.vring_align,
1135 vq->split.may_reduce_num);
1139 err = vring_alloc_state_extra_split(&vring_split);
1141 goto err_state_extra;
1143 vring_free(&vq->vq);
1145 virtqueue_vring_init_split(&vring_split, vq);
1147 virtqueue_init(vq, vring_split.vring.num);
1148 virtqueue_vring_attach_split(vq, &vring_split);
1153 vring_free_split(&vring_split, vdev);
1155 virtqueue_reinit_split(vq);
1161 * Packed ring specific functions - *_packed().
1163 static inline bool packed_used_wrap_counter(u16 last_used_idx)
1165 return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
1168 static inline u16 packed_last_used(u16 last_used_idx)
1170 return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR));
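/*
 * Illustration (editor's addition): with VRING_PACKED_EVENT_F_WRAP_CTR == 15,
 * a last_used_idx value of 0x8005 decodes to packed_last_used() == 5 and
 * packed_used_wrap_counter() == true, i.e. descriptor index 5 with the used
 * wrap counter set.
 */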
1173 static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
1174 struct vring_desc_extra *extra)
1178 if (!vq->use_dma_api)
1181 flags = extra->flags;
1183 if (flags & VRING_DESC_F_INDIRECT) {
1184 dma_unmap_single(vring_dma_dev(vq),
1185 extra->addr, extra->len,
1186 (flags & VRING_DESC_F_WRITE) ?
1187 DMA_FROM_DEVICE : DMA_TO_DEVICE);
1189 dma_unmap_page(vring_dma_dev(vq),
1190 extra->addr, extra->len,
1191 (flags & VRING_DESC_F_WRITE) ?
1192 DMA_FROM_DEVICE : DMA_TO_DEVICE);
1196 static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
1197 struct vring_packed_desc *desc)
1201 if (!vq->use_dma_api)
1204 flags = le16_to_cpu(desc->flags);
1206 dma_unmap_page(vring_dma_dev(vq),
1207 le64_to_cpu(desc->addr),
1208 le32_to_cpu(desc->len),
1209 (flags & VRING_DESC_F_WRITE) ?
1210 DMA_FROM_DEVICE : DMA_TO_DEVICE);
1213 static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
1216 struct vring_packed_desc *desc;
1219 * We require lowmem mappings for the descriptors because
1220 * otherwise virt_to_phys will give us bogus addresses in the virtqueue.
1223 gfp &= ~__GFP_HIGHMEM;
1225 desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
1230 static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
1231 struct scatterlist *sgs[],
1232 unsigned int total_sg,
1233 unsigned int out_sgs,
1234 unsigned int in_sgs,
1238 struct vring_packed_desc *desc;
1239 struct scatterlist *sg;
1240 unsigned int i, n, err_idx;
1244 head = vq->packed.next_avail_idx;
1245 desc = alloc_indirect_packed(total_sg, gfp);
1249 if (unlikely(vq->vq.num_free < 1)) {
1250 pr_debug("Can't add buf len 1 - avail = 0\n");
1258 BUG_ON(id == vq->packed.vring.num);
1260 for (n = 0; n < out_sgs + in_sgs; n++) {
1261 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
1262 addr = vring_map_one_sg(vq, sg, n < out_sgs ?
1263 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1264 if (vring_mapping_error(vq, addr))
1267 desc[i].flags = cpu_to_le16(n < out_sgs ?
1268 0 : VRING_DESC_F_WRITE);
1269 desc[i].addr = cpu_to_le64(addr);
1270 desc[i].len = cpu_to_le32(sg->length);
1275 /* Now that the indirect table is filled in, map it. */
1276 addr = vring_map_single(vq, desc,
1277 total_sg * sizeof(struct vring_packed_desc),
1279 if (vring_mapping_error(vq, addr))
1282 vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
1283 vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
1284 sizeof(struct vring_packed_desc));
1285 vq->packed.vring.desc[head].id = cpu_to_le16(id);
1287 if (vq->use_dma_api) {
1288 vq->packed.desc_extra[id].addr = addr;
1289 vq->packed.desc_extra[id].len = total_sg *
1290 sizeof(struct vring_packed_desc);
1291 vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
1292 vq->packed.avail_used_flags;
1296 * A driver MUST NOT make the first descriptor in the list
1297 * available before all subsequent descriptors comprising
1298 * the list are made available.
1300 virtio_wmb(vq->weak_barriers);
1301 vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
1302 vq->packed.avail_used_flags);
1304 /* We're using some buffers from the free list. */
1305 vq->vq.num_free -= 1;
1307 /* Update free pointer */
1309 if (n >= vq->packed.vring.num) {
1311 vq->packed.avail_wrap_counter ^= 1;
1312 vq->packed.avail_used_flags ^=
1313 1 << VRING_PACKED_DESC_F_AVAIL |
1314 1 << VRING_PACKED_DESC_F_USED;
1316 vq->packed.next_avail_idx = n;
1317 vq->free_head = vq->packed.desc_extra[id].next;
1319 /* Store token and indirect buffer state. */
1320 vq->packed.desc_state[id].num = 1;
1321 vq->packed.desc_state[id].data = data;
1322 vq->packed.desc_state[id].indir_desc = desc;
1323 vq->packed.desc_state[id].last = id;
1327 pr_debug("Added buffer head %i to %p\n", head, vq);
1335 for (i = 0; i < err_idx; i++)
1336 vring_unmap_desc_packed(vq, &desc[i]);
1344 static inline int virtqueue_add_packed(struct virtqueue *_vq,
1345 struct scatterlist *sgs[],
1346 unsigned int total_sg,
1347 unsigned int out_sgs,
1348 unsigned int in_sgs,
1353 struct vring_virtqueue *vq = to_vvq(_vq);
1354 struct vring_packed_desc *desc;
1355 struct scatterlist *sg;
1356 unsigned int i, n, c, descs_used, err_idx;
1357 __le16 head_flags, flags;
1358 u16 head, id, prev, curr, avail_used_flags;
1363 BUG_ON(data == NULL);
1364 BUG_ON(ctx && vq->indirect);
1366 if (unlikely(vq->broken)) {
1371 LAST_ADD_TIME_UPDATE(vq);
1373 BUG_ON(total_sg == 0);
1375 if (virtqueue_use_indirect(vq, total_sg)) {
1376 err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
1378 if (err != -ENOMEM) {
1383 /* fall back on direct */
1386 head = vq->packed.next_avail_idx;
1387 avail_used_flags = vq->packed.avail_used_flags;
1389 WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
1391 desc = vq->packed.vring.desc;
1393 descs_used = total_sg;
1395 if (unlikely(vq->vq.num_free < descs_used)) {
1396 pr_debug("Can't add buf len %i - avail = %i\n",
1397 descs_used, vq->vq.num_free);
1403 BUG_ON(id == vq->packed.vring.num);
1407 for (n = 0; n < out_sgs + in_sgs; n++) {
1408 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
1409 dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
1410 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1411 if (vring_mapping_error(vq, addr))
1414 flags = cpu_to_le16(vq->packed.avail_used_flags |
1415 (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
1416 (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
1420 desc[i].flags = flags;
1422 desc[i].addr = cpu_to_le64(addr);
1423 desc[i].len = cpu_to_le32(sg->length);
1424 desc[i].id = cpu_to_le16(id);
1426 if (unlikely(vq->use_dma_api)) {
1427 vq->packed.desc_extra[curr].addr = addr;
1428 vq->packed.desc_extra[curr].len = sg->length;
1429 vq->packed.desc_extra[curr].flags =
1433 curr = vq->packed.desc_extra[curr].next;
1435 if ((unlikely(++i >= vq->packed.vring.num))) {
1437 vq->packed.avail_used_flags ^=
1438 1 << VRING_PACKED_DESC_F_AVAIL |
1439 1 << VRING_PACKED_DESC_F_USED;
1445 vq->packed.avail_wrap_counter ^= 1;
1447 /* We're using some buffers from the free list. */
1448 vq->vq.num_free -= descs_used;
1450 /* Update free pointer */
1451 vq->packed.next_avail_idx = i;
1452 vq->free_head = curr;
1455 vq->packed.desc_state[id].num = descs_used;
1456 vq->packed.desc_state[id].data = data;
1457 vq->packed.desc_state[id].indir_desc = ctx;
1458 vq->packed.desc_state[id].last = prev;
1461 * A driver MUST NOT make the first descriptor in the list
1462 * available before all subsequent descriptors comprising
1463 * the list are made available.
1465 virtio_wmb(vq->weak_barriers);
1466 vq->packed.vring.desc[head].flags = head_flags;
1467 vq->num_added += descs_used;
1469 pr_debug("Added buffer head %i to %p\n", head, vq);
1477 curr = vq->free_head;
1479 vq->packed.avail_used_flags = avail_used_flags;
1481 for (n = 0; n < total_sg; n++) {
1484 vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]);
1485 curr = vq->packed.desc_extra[curr].next;
1487 if (i >= vq->packed.vring.num)
1495 static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
1497 struct vring_virtqueue *vq = to_vvq(_vq);
1498 u16 new, old, off_wrap, flags, wrap_counter, event_idx;
1511 * We need to expose the new flags value before checking notification
1514 virtio_mb(vq->weak_barriers);
1516 old = vq->packed.next_avail_idx - vq->num_added;
1517 new = vq->packed.next_avail_idx;
1520 snapshot.u32 = *(u32 *)vq->packed.vring.device;
1521 flags = le16_to_cpu(snapshot.flags);
1523 LAST_ADD_TIME_CHECK(vq);
1524 LAST_ADD_TIME_INVALID(vq);
1526 if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
1527 needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
1531 off_wrap = le16_to_cpu(snapshot.off_wrap);
1533 wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1534 event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1535 if (wrap_counter != vq->packed.avail_wrap_counter)
1536 event_idx -= vq->packed.vring.num;
1538 needs_kick = vring_need_event(event_idx, new, old);
1544 static void detach_buf_packed(struct vring_virtqueue *vq,
1545 unsigned int id, void **ctx)
1547 struct vring_desc_state_packed *state = NULL;
1548 struct vring_packed_desc *desc;
1549 unsigned int i, curr;
1551 state = &vq->packed.desc_state[id];
1553 /* Clear data ptr. */
1556 vq->packed.desc_extra[state->last].next = vq->free_head;
1558 vq->vq.num_free += state->num;
1560 if (unlikely(vq->use_dma_api)) {
1562 for (i = 0; i < state->num; i++) {
1563 vring_unmap_extra_packed(vq,
1564 &vq->packed.desc_extra[curr]);
1565 curr = vq->packed.desc_extra[curr].next;
1572 /* Free the indirect table, if any, now that it's unmapped. */
1573 desc = state->indir_desc;
1577 if (vq->use_dma_api) {
1578 len = vq->packed.desc_extra[id].len;
1579 for (i = 0; i < len / sizeof(struct vring_packed_desc);
1581 vring_unmap_desc_packed(vq, &desc[i]);
1584 state->indir_desc = NULL;
1586 *ctx = state->indir_desc;
1590 static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
1591 u16 idx, bool used_wrap_counter)
1596 flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
1597 avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
1598 used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
1600 return avail == used && used == used_wrap_counter;
1603 static inline bool more_used_packed(const struct vring_virtqueue *vq)
1607 bool used_wrap_counter;
1609 last_used_idx = READ_ONCE(vq->last_used_idx);
1610 last_used = packed_last_used(last_used_idx);
1611 used_wrap_counter = packed_used_wrap_counter(last_used_idx);
1612 return is_used_desc_packed(vq, last_used, used_wrap_counter);
1615 static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
1619 struct vring_virtqueue *vq = to_vvq(_vq);
1620 u16 last_used, id, last_used_idx;
1621 bool used_wrap_counter;
1626 if (unlikely(vq->broken)) {
1631 if (!more_used_packed(vq)) {
1632 pr_debug("No more buffers in queue\n");
1637 /* Only get used elements after they have been exposed by host. */
1638 virtio_rmb(vq->weak_barriers);
1640 last_used_idx = READ_ONCE(vq->last_used_idx);
1641 used_wrap_counter = packed_used_wrap_counter(last_used_idx);
1642 last_used = packed_last_used(last_used_idx);
1643 id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
1644 *len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
1646 if (unlikely(id >= vq->packed.vring.num)) {
1647 BAD_RING(vq, "id %u out of range\n", id);
1650 if (unlikely(!vq->packed.desc_state[id].data)) {
1651 BAD_RING(vq, "id %u is not a head!\n", id);
1655 /* detach_buf_packed clears data, so grab it now. */
1656 ret = vq->packed.desc_state[id].data;
1657 detach_buf_packed(vq, id, ctx);
1659 last_used += vq->packed.desc_state[id].num;
1660 if (unlikely(last_used >= vq->packed.vring.num)) {
1661 last_used -= vq->packed.vring.num;
1662 used_wrap_counter ^= 1;
1665 last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1666 WRITE_ONCE(vq->last_used_idx, last_used);
1669 * If we expect an interrupt for the next entry, tell host
1670 * by writing event index and flush out the write before
1671 * the read in the next get_buf call.
1673 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
1674 virtio_store_mb(vq->weak_barriers,
1675 &vq->packed.vring.driver->off_wrap,
1676 cpu_to_le16(vq->last_used_idx));
1678 LAST_ADD_TIME_INVALID(vq);
1684 static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
1686 struct vring_virtqueue *vq = to_vvq(_vq);
1688 if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
1689 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
1690 vq->packed.vring.driver->flags =
1691 cpu_to_le16(vq->packed.event_flags_shadow);
1695 static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
1697 struct vring_virtqueue *vq = to_vvq(_vq);
1702 * We optimistically turn back on interrupts, then check if there was more to do.
1707 vq->packed.vring.driver->off_wrap =
1708 cpu_to_le16(vq->last_used_idx);
1710 * We need to update event offset and event wrap
1711 * counter first before updating event flags.
1713 virtio_wmb(vq->weak_barriers);
1716 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1717 vq->packed.event_flags_shadow = vq->event ?
1718 VRING_PACKED_EVENT_FLAG_DESC :
1719 VRING_PACKED_EVENT_FLAG_ENABLE;
1720 vq->packed.vring.driver->flags =
1721 cpu_to_le16(vq->packed.event_flags_shadow);
1725 return vq->last_used_idx;
1728 static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
1730 struct vring_virtqueue *vq = to_vvq(_vq);
1734 wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1735 used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1737 return is_used_desc_packed(vq, used_idx, wrap_counter);
1740 static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
1742 struct vring_virtqueue *vq = to_vvq(_vq);
1743 u16 used_idx, wrap_counter, last_used_idx;
1749 * We optimistically turn back on interrupts, then check if there was more to do.
1754 /* TODO: tune this threshold */
1755 bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
1756 last_used_idx = READ_ONCE(vq->last_used_idx);
1757 wrap_counter = packed_used_wrap_counter(last_used_idx);
1759 used_idx = packed_last_used(last_used_idx) + bufs;
1760 if (used_idx >= vq->packed.vring.num) {
1761 used_idx -= vq->packed.vring.num;
1765 vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
1766 (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1769 * We need to update event offset and event wrap
1770 * counter first before updating event flags.
1772 virtio_wmb(vq->weak_barriers);
1775 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1776 vq->packed.event_flags_shadow = vq->event ?
1777 VRING_PACKED_EVENT_FLAG_DESC :
1778 VRING_PACKED_EVENT_FLAG_ENABLE;
1779 vq->packed.vring.driver->flags =
1780 cpu_to_le16(vq->packed.event_flags_shadow);
1784 * We need to update event suppression structure first
1785 * before re-checking for more used buffers.
1787 virtio_mb(vq->weak_barriers);
1789 last_used_idx = READ_ONCE(vq->last_used_idx);
1790 wrap_counter = packed_used_wrap_counter(last_used_idx);
1791 used_idx = packed_last_used(last_used_idx);
1792 if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
1801 static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
1803 struct vring_virtqueue *vq = to_vvq(_vq);
1809 for (i = 0; i < vq->packed.vring.num; i++) {
1810 if (!vq->packed.desc_state[i].data)
1812 /* detach_buf clears data, so grab it now. */
1813 buf = vq->packed.desc_state[i].data;
1814 detach_buf_packed(vq, i, NULL);
1818 /* That should have freed everything. */
1819 BUG_ON(vq->vq.num_free != vq->packed.vring.num);
1825 static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
1827 struct vring_desc_extra *desc_extra;
1830 desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra),
1835 memset(desc_extra, 0, num * sizeof(struct vring_desc_extra));
1837 for (i = 0; i < num - 1; i++)
1838 desc_extra[i].next = i + 1;
1843 static void vring_free_packed(struct vring_virtqueue_packed *vring_packed,
1844 struct virtio_device *vdev)
1846 if (vring_packed->vring.desc)
1847 vring_free_queue(vdev, vring_packed->ring_size_in_bytes,
1848 vring_packed->vring.desc,
1849 vring_packed->ring_dma_addr);
1851 if (vring_packed->vring.driver)
1852 vring_free_queue(vdev, vring_packed->event_size_in_bytes,
1853 vring_packed->vring.driver,
1854 vring_packed->driver_event_dma_addr);
1856 if (vring_packed->vring.device)
1857 vring_free_queue(vdev, vring_packed->event_size_in_bytes,
1858 vring_packed->vring.device,
1859 vring_packed->device_event_dma_addr);
1861 kfree(vring_packed->desc_state);
1862 kfree(vring_packed->desc_extra);
1865 static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
1866 struct virtio_device *vdev,
1869 struct vring_packed_desc *ring;
1870 struct vring_packed_desc_event *driver, *device;
1871 dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
1872 size_t ring_size_in_bytes, event_size_in_bytes;
1874 ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
1876 ring = vring_alloc_queue(vdev, ring_size_in_bytes,
1878 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
1882 vring_packed->vring.desc = ring;
1883 vring_packed->ring_dma_addr = ring_dma_addr;
1884 vring_packed->ring_size_in_bytes = ring_size_in_bytes;
1886 event_size_in_bytes = sizeof(struct vring_packed_desc_event);
1888 driver = vring_alloc_queue(vdev, event_size_in_bytes,
1889 &driver_event_dma_addr,
1890 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
1894 vring_packed->vring.driver = driver;
1895 vring_packed->event_size_in_bytes = event_size_in_bytes;
1896 vring_packed->driver_event_dma_addr = driver_event_dma_addr;
1898 device = vring_alloc_queue(vdev, event_size_in_bytes,
1899 &device_event_dma_addr,
1900 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
1904 vring_packed->vring.device = device;
1905 vring_packed->device_event_dma_addr = device_event_dma_addr;
1907 vring_packed->vring.num = num;
1912 vring_free_packed(vring_packed, vdev);
1916 static int vring_alloc_state_extra_packed(struct vring_virtqueue_packed *vring_packed)
1918 struct vring_desc_state_packed *state;
1919 struct vring_desc_extra *extra;
1920 u32 num = vring_packed->vring.num;
1922 state = kmalloc_array(num, sizeof(struct vring_desc_state_packed), GFP_KERNEL);
1924 goto err_desc_state;
1926 memset(state, 0, num * sizeof(struct vring_desc_state_packed));
1928 extra = vring_alloc_desc_extra(num);
1930 goto err_desc_extra;
1932 vring_packed->desc_state = state;
1933 vring_packed->desc_extra = extra;
1943 static void virtqueue_vring_init_packed(struct vring_virtqueue_packed *vring_packed,
1946 vring_packed->next_avail_idx = 0;
1947 vring_packed->avail_wrap_counter = 1;
1948 vring_packed->event_flags_shadow = 0;
1949 vring_packed->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
1951 /* No callback? Tell other side not to bother us. */
1953 vring_packed->event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
1954 vring_packed->vring.driver->flags =
1955 cpu_to_le16(vring_packed->event_flags_shadow);
1959 static void virtqueue_vring_attach_packed(struct vring_virtqueue *vq,
1960 struct vring_virtqueue_packed *vring_packed)
1962 vq->packed = *vring_packed;
1964 /* Put everything in free lists. */
1968 static void virtqueue_reinit_packed(struct vring_virtqueue *vq)
1970 memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes);
1971 memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes);
1973 /* we need to reset the desc.flags. For more, see is_used_desc_packed() */
1974 memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes);
1976 virtqueue_init(vq, vq->packed.vring.num);
1977 virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback);
1980 static struct virtqueue *vring_create_virtqueue_packed(
1983 unsigned int vring_align,
1984 struct virtio_device *vdev,
1986 bool may_reduce_num,
1988 bool (*notify)(struct virtqueue *),
1989 void (*callback)(struct virtqueue *),
1992 struct vring_virtqueue_packed vring_packed = {};
1993 struct vring_virtqueue *vq;
1996 if (vring_alloc_queue_packed(&vring_packed, vdev, num))
1999 vq = kmalloc(sizeof(*vq), GFP_KERNEL);
2003 vq->vq.callback = callback;
2006 vq->vq.index = index;
2007 vq->vq.reset = false;
2008 vq->we_own_ring = true;
2009 vq->notify = notify;
2010 vq->weak_barriers = weak_barriers;
2011 #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
2016 vq->packed_ring = true;
2017 vq->use_dma_api = vring_use_dma_api(vdev);
2019 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
2021 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
2023 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
2024 vq->weak_barriers = false;
2026 err = vring_alloc_state_extra_packed(&vring_packed);
2028 goto err_state_extra;
2030 virtqueue_vring_init_packed(&vring_packed, !!callback);
2032 virtqueue_init(vq, num);
2033 virtqueue_vring_attach_packed(vq, &vring_packed);
2035 spin_lock(&vdev->vqs_list_lock);
2036 list_add_tail(&vq->vq.list, &vdev->vqs);
2037 spin_unlock(&vdev->vqs_list_lock);
2043 vring_free_packed(&vring_packed, vdev);
2048 static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
2050 struct vring_virtqueue_packed vring_packed = {};
2051 struct vring_virtqueue *vq = to_vvq(_vq);
2052 struct virtio_device *vdev = _vq->vdev;
2055 if (vring_alloc_queue_packed(&vring_packed, vdev, num))
2058 err = vring_alloc_state_extra_packed(&vring_packed);
2060 goto err_state_extra;
2062 vring_free(&vq->vq);
2064 virtqueue_vring_init_packed(&vring_packed, !!vq->vq.callback);
2066 virtqueue_init(vq, vring_packed.vring.num);
2067 virtqueue_vring_attach_packed(vq, &vring_packed);
2072 vring_free_packed(&vring_packed, vdev);
2074 virtqueue_reinit_packed(vq);
2080 * Generic functions and exported symbols.
2083 static inline int virtqueue_add(struct virtqueue *_vq,
2084 struct scatterlist *sgs[],
2085 unsigned int total_sg,
2086 unsigned int out_sgs,
2087 unsigned int in_sgs,
2092 struct vring_virtqueue *vq = to_vvq(_vq);
2094 return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
2095 out_sgs, in_sgs, data, ctx, gfp) :
2096 virtqueue_add_split(_vq, sgs, total_sg,
2097 out_sgs, in_sgs, data, ctx, gfp);
2101 * virtqueue_add_sgs - expose buffers to other end
2102 * @_vq: the struct virtqueue we're talking about.
2103 * @sgs: array of terminated scatterlists.
2104 * @out_sgs: the number of scatterlists readable by other side
2105 * @in_sgs: the number of scatterlists which are writable (after readable ones)
2106 * @data: the token identifying the buffer.
2107 * @gfp: how to do memory allocations (if necessary).
2109 * Caller must ensure we don't call this with other virtqueue operations
2110 * at the same time (except where noted).
2112 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
2114 int virtqueue_add_sgs(struct virtqueue *_vq,
2115 struct scatterlist *sgs[],
2116 unsigned int out_sgs,
2117 unsigned int in_sgs,
2121 unsigned int i, total_sg = 0;
2123 /* Count them first. */
2124 for (i = 0; i < out_sgs + in_sgs; i++) {
2125 struct scatterlist *sg;
2127 for (sg = sgs[i]; sg; sg = sg_next(sg))
2130 return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
2133 EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
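/*
 * Hedged usage sketch (editor's addition, not part of the original file):
 * queueing one device-readable and one device-writable scatterlist, then
 * kicking. "hdr_sg", "status_sg" and "req" are hypothetical driver objects.
 *
 *	struct scatterlist *sgs[] = { &hdr_sg, &status_sg };
 *	int err;
 *
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 */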
2136 * virtqueue_add_outbuf - expose output buffers to other end
2137 * @vq: the struct virtqueue we're talking about.
2138 * @sg: scatterlist (must be well-formed and terminated!)
2139 * @num: the number of entries in @sg readable by other side
2140 * @data: the token identifying the buffer.
2141 * @gfp: how to do memory allocations (if necessary).
2143 * Caller must ensure we don't call this with other virtqueue operations
2144 * at the same time (except where noted).
2146 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
2148 int virtqueue_add_outbuf(struct virtqueue *vq,
2149 struct scatterlist *sg, unsigned int num,
2153 return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
2155 EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
2158 * virtqueue_add_inbuf - expose input buffers to other end
2159 * @vq: the struct virtqueue we're talking about.
2160 * @sg: scatterlist (must be well-formed and terminated!)
2161 * @num: the number of entries in @sg writable by other side
2162 * @data: the token identifying the buffer.
2163 * @gfp: how to do memory allocations (if necessary).
2165 * Caller must ensure we don't call this with other virtqueue operations
2166 * at the same time (except where noted).
2168 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
2170 int virtqueue_add_inbuf(struct virtqueue *vq,
2171 struct scatterlist *sg, unsigned int num,
2175 return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
2177 EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
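/*
 * Hedged usage sketch (editor's addition): a receive path typically keeps
 * the queue topped up with input buffers until no more fit. "alloc_rx_buf"
 * and "RX_BUF_SIZE" are assumptions of this example.
 *
 *	struct scatterlist sg;
 *	void *buf;
 *
 *	while ((buf = alloc_rx_buf(GFP_KERNEL)) != NULL) {
 *		sg_init_one(&sg, buf, RX_BUF_SIZE);
 *		if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL)) {
 *			kfree(buf);
 *			break;
 *		}
 *	}
 *	virtqueue_kick(vq);
 */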
2180 * virtqueue_add_inbuf_ctx - expose input buffers to other end
2181 * @vq: the struct virtqueue we're talking about.
2182 * @sg: scatterlist (must be well-formed and terminated!)
2183 * @num: the number of entries in @sg writable by other side
2184 * @data: the token identifying the buffer.
2185 * @ctx: extra context for the token
2186 * @gfp: how to do memory allocations (if necessary).
2188 * Caller must ensure we don't call this with other virtqueue operations
2189 * at the same time (except where noted).
2191 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
2193 int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
2194 struct scatterlist *sg, unsigned int num,
2199 return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
2201 EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
2204 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
2205 * @_vq: the struct virtqueue
2207 * Instead of virtqueue_kick(), you can do:
2208 * if (virtqueue_kick_prepare(vq))
2209 * virtqueue_notify(vq);
2211 * This is sometimes useful because the virtqueue_kick_prepare() needs
2212 * to be serialized, but the actual virtqueue_notify() call does not.
2214 bool virtqueue_kick_prepare(struct virtqueue *_vq)
2216 struct vring_virtqueue *vq = to_vvq(_vq);
2218 return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
2219 virtqueue_kick_prepare_split(_vq);
2221 EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
2224 * virtqueue_notify - second half of split virtqueue_kick call.
2225 * @_vq: the struct virtqueue
2227 * This does not need to be serialized.
2229 * Returns false if host notify failed or queue is broken, otherwise true.
2231 bool virtqueue_notify(struct virtqueue *_vq)
2233 struct vring_virtqueue *vq = to_vvq(_vq);
2235 if (unlikely(vq->broken))
2238 /* Prod other side to tell it about changes. */
2239 if (!vq->notify(_vq)) {
2245 EXPORT_SYMBOL_GPL(virtqueue_notify);
2248 * virtqueue_kick - update after add_buf
2249 * @vq: the struct virtqueue
2251 * After one or more virtqueue_add_* calls, invoke this to kick
2254 * Caller must ensure we don't call this with other virtqueue
2255 * operations at the same time (except where noted).
2257 * Returns false if kick failed, otherwise true.
2259 bool virtqueue_kick(struct virtqueue *vq)
2261 if (virtqueue_kick_prepare(vq))
2262 return virtqueue_notify(vq);
2265 EXPORT_SYMBOL_GPL(virtqueue_kick);
2268 * virtqueue_get_buf_ctx - get the next used buffer
2269 * @_vq: the struct virtqueue we're talking about.
2270 * @len: the length written into the buffer
2271 * @ctx: extra context for the token
2273 * If the device wrote data into the buffer, @len will be set to the
2274 * amount written. This means you don't need to clear the buffer
2275 * beforehand to ensure there's no data leakage in the case of short
2278 * Caller must ensure we don't call this with other virtqueue
2279 * operations at the same time (except where noted).
2281 * Returns NULL if there are no used buffers, or the "data" token
2282 * handed to virtqueue_add_*().
2284 void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
2287 struct vring_virtqueue *vq = to_vvq(_vq);
2289 return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
2290 virtqueue_get_buf_ctx_split(_vq, len, ctx);
2292 EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
2294 void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
2296 return virtqueue_get_buf_ctx(_vq, len, NULL);
2298 EXPORT_SYMBOL_GPL(virtqueue_get_buf);
2300 * virtqueue_disable_cb - disable callbacks
2301 * @_vq: the struct virtqueue we're talking about.
2303 * Note that this is not necessarily synchronous, hence unreliable and only
2304 * useful as an optimization.
2306 * Unlike other operations, this need not be serialized.
2308 void virtqueue_disable_cb(struct virtqueue *_vq)
2310 struct vring_virtqueue *vq = to_vvq(_vq);
2312 /* If device triggered an event already it won't trigger one again:
2313 * no need to disable.
2315 if (vq->event_triggered)
2318 if (vq->packed_ring)
2319 virtqueue_disable_cb_packed(_vq);
2321 virtqueue_disable_cb_split(_vq);
2323 EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
2326 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
2327 * @_vq: the struct virtqueue we're talking about.
2329 * This re-enables callbacks; it returns current queue state
2330 * in an opaque unsigned value. This value should be later tested by
2331 * virtqueue_poll, to detect a possible race between the driver checking for
2332 * more work, and enabling callbacks.
2334 * Caller must ensure we don't call this with other virtqueue
2335 * operations at the same time (except where noted).
2337 unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
2339 struct vring_virtqueue *vq = to_vvq(_vq);
2341 if (vq->event_triggered)
2342 vq->event_triggered = false;
2344 return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
2345 virtqueue_enable_cb_prepare_split(_vq);
2347 EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
2350 * virtqueue_poll - query pending used buffers
2351 * @_vq: the struct virtqueue we're talking about.
2352 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
2354 * Returns "true" if there are pending used buffers in the queue.
2356 * This does not need to be serialized.
2358 bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
2360 struct vring_virtqueue *vq = to_vvq(_vq);
2362 if (unlikely(vq->broken))
2365 virtio_mb(vq->weak_barriers);
2366 return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
2367 virtqueue_poll_split(_vq, last_used_idx);
2369 EXPORT_SYMBOL_GPL(virtqueue_poll);
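/*
 * Hedged usage sketch (editor's addition): the prepare/poll pair lets a
 * polling loop re-enable callbacks without losing a race against newly used
 * buffers. "process_used_buffers" is a hypothetical driver helper.
 *
 *	unsigned int opaque;
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		process_used_buffers(vq);
 *		opaque = virtqueue_enable_cb_prepare(vq);
 *	} while (virtqueue_poll(vq, opaque));
 */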
2372 * virtqueue_enable_cb - restart callbacks after disable_cb.
2373 * @_vq: the struct virtqueue we're talking about.
2375 * This re-enables callbacks; it returns "false" if there are pending
2376 * buffers in the queue, to detect a possible race between the driver
2377 * checking for more work, and enabling callbacks.
2379 * Caller must ensure we don't call this with other virtqueue
2380 * operations at the same time (except where noted).
2382 bool virtqueue_enable_cb(struct virtqueue *_vq)
2384 unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq);
2386 return !virtqueue_poll(_vq, last_used_idx);
2388 EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->event_triggered)
		vq->event_triggered = false;

	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
				 virtqueue_enable_cb_delayed_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
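
/*
 * Example (illustrative sketch, hypothetical driver names): a transmit path
 * that only needs a completion once the ring is nearly full can ask for a
 * delayed interrupt instead of one per buffer, re-checking for the race;
 * LOW_WATERMARK and the mydev_*_queue() helpers are hypothetical:
 *
 *	if (d->vq->num_free < LOW_WATERMARK) {
 *		mydev_stop_queue(d);
 *		if (!virtqueue_enable_cb_delayed(d->vq)) {
 *			virtqueue_disable_cb(d->vq);
 *			mydev_start_queue(d);
 *		}
 *	}
 */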

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful for device
 * shutdown or the reset queue.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
				 virtqueue_detach_unused_buf_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
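
/*
 * Example (illustrative sketch, hypothetical names): on device removal, once
 * the device has been reset, buffers that were added but never used can be
 * reclaimed; this assumes the "data" tokens were kmalloc'ed buffers:
 *
 *	static void mydev_free_unused(struct mydev *d)
 *	{
 *		void *buf;
 *
 *		while ((buf = virtqueue_detach_unused_buf(d->vq)))
 *			kfree(buf);
 *	}
 */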

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
}

/**
 * vring_interrupt - notify a virtqueue on an interrupt
 * @irq: the IRQ number (ignored)
 * @_vq: the struct virtqueue to notify
 *
 * Calls the callback function of @_vq to process the virtqueue
 * notification.
 */
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken)) {
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
		dev_warn_once(&vq->vq.vdev->dev,
			      "virtio vring IRQ raised before DRIVER_OK");
		return IRQ_NONE;
#else
		return IRQ_HANDLED;
#endif
	}

	/* Just a hint for performance: so it's ok that this can be racy! */
	if (vq->event)
		vq->event_triggered = true;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
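
/*
 * Example (illustrative sketch): transports wire this up as the interrupt
 * handler for a queue and pass the virtqueue pointer as the handler argument.
 * "vp_dev", "vector" and the IRQ name below are hypothetical:
 *
 *	err = request_irq(pci_irq_vector(vp_dev->pci_dev, vector),
 *			  vring_interrupt, 0, "myvirtio-vq", vq);
 */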

/* Only available for split ring */
static struct virtqueue *__vring_new_virtqueue(unsigned int index,
					       struct vring_virtqueue_split *vring_split,
					       struct virtio_device *vdev,
					       bool weak_barriers,
					       bool context,
					       bool (*notify)(struct virtqueue *),
					       void (*callback)(struct virtqueue *),
					       const char *name)
{
	struct vring_virtqueue *vq;
	int err;

	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return NULL;

	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->packed_ring = false;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.index = index;
	vq->vq.reset = false;
	vq->we_own_ring = false;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	vq->broken = true;
#else
	vq->broken = false;
#endif
	vq->use_dma_api = vring_use_dma_api(vdev);

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
		vq->weak_barriers = false;

	err = vring_alloc_state_extra_split(vring_split);
	if (err) {
		kfree(vq);
		return NULL;
	}

	virtqueue_vring_init_split(vring_split, vq);
	virtqueue_init(vq, vring_split->vring.num);
	virtqueue_vring_attach_split(vq, vring_split);

	spin_lock(&vdev->vqs_list_lock);
	list_add_tail(&vq->vq.list, &vdev->vqs);
	spin_unlock(&vdev->vqs_list_lock);
	return &vq->vq;
}

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return vring_create_virtqueue_packed(index, num, vring_align,
				vdev, weak_barriers, may_reduce_num,
				context, notify, callback, name);

	return vring_create_virtqueue_split(index, num, vring_align,
			vdev, weak_barriers, may_reduce_num,
			context, notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
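
/*
 * Example (illustrative sketch): a transport typically lets the core
 * allocate the ring and only supplies the notify hook plus the negotiated
 * parameters.  "my_notify", "ctx" and the surrounding variables are
 * hypothetical, and SMP_CACHE_BYTES is just one possible alignment choice:
 *
 *	vq = vring_create_virtqueue(index, num, SMP_CACHE_BYTES, vdev,
 *				    true, true, ctx, my_notify, callback, name);
 *	if (!vq)
 *		return ERR_PTR(-ENOMEM);
 */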

/**
 * virtqueue_resize - resize the vring of vq
 * @_vq: the struct virtqueue we're talking about.
 * @num: new ring num
 * @recycle: callback to recycle buffers that are no longer used
 *
 * If a new vring really has to be created, the current vq is first put into
 * the reset state.  The passed callback is then invoked for every buffer that
 * is no longer used so the driver can recycle it.  The old vring is released
 * only after the new vring has been created successfully.
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error.
 * 0: success.
 * -ENOMEM: Failed to allocate a new ring, fall back to the original ring size.
 *  vq can still work normally
 * -EBUSY: Failed to sync with device, vq may not work properly
 * -ENOENT: Transport or device not supported
 * -E2BIG/-EINVAL: num error
 * -EPERM: Operation not permitted
 */
int virtqueue_resize(struct virtqueue *_vq, u32 num,
		     void (*recycle)(struct virtqueue *vq, void *buf))
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct virtio_device *vdev = vq->vq.vdev;
	void *buf;
	int err;

	if (!vq->we_own_ring)
		return -EPERM;

	if (num > vq->vq.num_max)
		return -E2BIG;

	if (!num)
		return -EINVAL;

	if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
		return 0;

	if (!vdev->config->disable_vq_and_reset)
		return -ENOENT;
	if (!vdev->config->enable_vq_after_reset)
		return -ENOENT;

	err = vdev->config->disable_vq_and_reset(_vq);
	if (err)
		return err;

	while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
		recycle(_vq, buf);

	if (vq->packed_ring)
		err = virtqueue_resize_packed(_vq, num);
	else
		err = virtqueue_resize_split(_vq, num);

	if (vdev->config->enable_vq_after_reset(_vq))
		return -EBUSY;

	return err;
}
EXPORT_SYMBOL_GPL(virtqueue_resize);
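
/*
 * Example (illustrative sketch, hypothetical driver names): the recycle
 * callback receives every buffer still queued on the old ring so the driver
 * can free or repost it once the new ring is live:
 *
 *	static void mydev_recycle(struct virtqueue *vq, void *buf)
 *	{
 *		struct mydev *d = vq->vdev->priv;
 *
 *		mydev_free_buf(d, buf);
 *	}
 *
 *	err = virtqueue_resize(d->vq, new_num, mydev_recycle);
 *	if (err == -ENOMEM)
 *		dev_warn(&d->vdev->dev, "keeping the old ring size\n");
 */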

/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring_virtqueue_split vring_split = {};

	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return NULL;

	vring_init(&vring_split.vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
				     context, notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

static void vring_free(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		if (vq->packed_ring) {
			vring_free_queue(vq->vq.vdev,
					 vq->packed.ring_size_in_bytes,
					 vq->packed.vring.desc,
					 vq->packed.ring_dma_addr);

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.driver,
					 vq->packed.driver_event_dma_addr);

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.device,
					 vq->packed.device_event_dma_addr);

			kfree(vq->packed.desc_state);
			kfree(vq->packed.desc_extra);
		} else {
			vring_free_queue(vq->vq.vdev,
					 vq->split.queue_size_in_bytes,
					 vq->split.vring.desc,
					 vq->split.queue_dma_addr);
		}
	}
	if (!vq->packed_ring) {
		kfree(vq->split.desc_state);
		kfree(vq->split.desc_extra);
	}
}

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	spin_lock(&vq->vq.vdev->vqs_list_lock);
	list_del(&_vq->list);
	spin_unlock(&vq->vq.vdev->vqs_list_lock);

	vring_free(_vq);

	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_ACCESS_PLATFORM:
			break;
		case VIRTIO_F_RING_PACKED:
			break;
		case VIRTIO_F_ORDER_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
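
/*
 * Example (illustrative sketch): a transport calls this from its
 * finalize_features hook so that transport feature bits the ring code does
 * not understand are cleared before the driver acks its feature set.
 * "my_finalize_features" is a hypothetical hook name:
 *
 *	static int my_finalize_features(struct virtio_device *vdev)
 *	{
 *		vring_transport_features(vdev);
 *		return 0;
 *	}
 */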

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

/*
 * This function should only be called by the core, not directly by the driver.
 */
void __virtqueue_break(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
	WRITE_ONCE(vq->broken, true);
}
EXPORT_SYMBOL_GPL(__virtqueue_break);

/*
 * This function should only be called by the core, not directly by the driver.
 */
void __virtqueue_unbreak(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
	WRITE_ONCE(vq->broken, false);
}
EXPORT_SYMBOL_GPL(__virtqueue_unbreak);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return READ_ONCE(vq->broken);
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	spin_lock(&dev->vqs_list_lock);
	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);

		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
		WRITE_ONCE(vq->broken, true);
	}
	spin_unlock(&dev->vqs_list_lock);
}
EXPORT_SYMBOL_GPL(virtio_break_device);
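
/*
 * Example (illustrative sketch): a driver or transport that detects a fatal
 * device error can mark every queue broken so subsequent operations fail
 * fast instead of waiting on a dead device; "fatal_error" is hypothetical:
 *
 *	if (fatal_error)
 *		virtio_break_device(vdev);
 */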

/*
 * This should allow the device to be used by the driver. You may
 * need to grab appropriate locks to flush the write to
 * vq->broken.  This should only be used in specific cases, e.g.
 * probing and restoring.  This function should only be called by the
 * core, not directly by the driver.
 */
void __virtio_unbreak_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	spin_lock(&dev->vqs_list_lock);
	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);

		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
		WRITE_ONCE(vq->broken, false);
	}
	spin_unlock(&dev->vqs_list_lock);
}
EXPORT_SYMBOL_GPL(__virtio_unbreak_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.ring_dma_addr;

	return vq->split.queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.driver_event_dma_addr;

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.device_event_dma_addr;

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
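
/*
 * Example (illustrative sketch): a transport that let the core allocate the
 * ring via vring_create_virtqueue() can read these addresses back to program
 * the device, here in a split-ring style layout; the iowrite_q_* helpers are
 * hypothetical stand-ins for the transport's register accessors:
 *
 *	iowrite_q_desc(dev,  virtqueue_get_desc_addr(vq));
 *	iowrite_q_avail(dev, virtqueue_get_avail_addr(vq));
 *	iowrite_q_used(dev,  virtqueue_get_used_addr(vq));
 */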

/* Only available for split ring */
const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->split.vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");