1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Virtio ring implementation.
4 * Copyright 2007 Rusty Russell IBM Corporation
5 */
6 #include <linux/virtio.h>
7 #include <linux/virtio_ring.h>
8 #include <linux/virtio_config.h>
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/module.h>
12 #include <linux/hrtimer.h>
13 #include <linux/dma-mapping.h>
17 /* For development, we want to crash whenever the ring is screwed. */
18 #define BAD_RING(_vq, fmt, args...) \
20 dev_err(&(_vq)->vq.vdev->dev, \
21 "%s:"fmt, (_vq)->vq.name, ##args); \
24 /* Caller is supposed to guarantee no reentry. */
25 #define START_USE(_vq) \
28 panic("%s:in_use = %i\n", \
29 (_vq)->vq.name, (_vq)->in_use); \
30 (_vq)->in_use = __LINE__; \
32 #define END_USE(_vq) \
33 do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
34 #define LAST_ADD_TIME_UPDATE(_vq) \
36 ktime_t now = ktime_get(); \
38 /* No kick or get, with 0.1 seconds between? Warn. */ \
39 if ((_vq)->last_add_time_valid) \
40 WARN_ON(ktime_to_ms(ktime_sub(now, \
41 (_vq)->last_add_time)) > 100); \
42 (_vq)->last_add_time = now; \
43 (_vq)->last_add_time_valid = true; \
45 #define LAST_ADD_TIME_CHECK(_vq) \
47 if ((_vq)->last_add_time_valid) { \
48 WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
49 (_vq)->last_add_time)) > 100); \
52 #define LAST_ADD_TIME_INVALID(_vq) \
53 ((_vq)->last_add_time_valid = false)
55 #define BAD_RING(_vq, fmt, args...) \
57 dev_err(&_vq->vq.vdev->dev, \
58 "%s:"fmt, (_vq)->vq.name, ##args); \
59 (_vq)->broken = true; \
63 #define LAST_ADD_TIME_UPDATE(vq)
64 #define LAST_ADD_TIME_CHECK(vq)
65 #define LAST_ADD_TIME_INVALID(vq)
68 struct vring_desc_state_split {
69 void *data; /* Data for callback. */
70 struct vring_desc *indir_desc; /* Indirect descriptor, if any. */
73 struct vring_desc_state_packed {
74 void *data; /* Data for callback. */
75 struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
76 u16 num; /* Descriptor list length. */
77 u16 last; /* The last desc state in a list. */
80 struct vring_desc_extra {
81 dma_addr_t addr; /* Buffer DMA addr. */
82 u32 len; /* Buffer length. */
83 u16 flags; /* Descriptor flags. */
84 u16 next; /* The next desc state in a list. */
87 struct vring_virtqueue {
90 /* Is this a packed ring? */
93 /* Is DMA API used? */
96 /* Can we use weak barriers? */
99 /* Other side has made a mess, don't try any more. */
102 /* Host supports indirect buffers */
105 /* Host publishes avail event idx */
108 /* Head of free buffer list. */
109 unsigned int free_head;
110 /* Number we've added since last sync. */
111 unsigned int num_added;
113 /* Last used index we've seen. */
116 /* Hint for event idx: already triggered, no need to disable. */
117 bool event_triggered;
120 /* Available for split ring */
122 /* Actual memory layout for this queue. */
125 /* Last written value to avail->flags */
126 u16 avail_flags_shadow;
129 * Last written value to avail->idx in
130 * guest byte order.
132 u16 avail_idx_shadow;
134 /* Per-descriptor state. */
135 struct vring_desc_state_split *desc_state;
137 /* DMA address and size information */
138 dma_addr_t queue_dma_addr;
139 size_t queue_size_in_bytes;
142 /* Available for packed ring */
144 /* Actual memory layout for this queue. */
147 struct vring_packed_desc *desc;
148 struct vring_packed_desc_event *driver;
149 struct vring_packed_desc_event *device;
152 /* Driver ring wrap counter. */
153 bool avail_wrap_counter;
155 /* Device ring wrap counter. */
156 bool used_wrap_counter;
158 /* Avail used flags. */
159 u16 avail_used_flags;
161 /* Index of the next avail descriptor. */
165 * Last written value to driver->flags in
166 * guest byte order.
168 u16 event_flags_shadow;
170 /* Per-descriptor state. */
171 struct vring_desc_state_packed *desc_state;
172 struct vring_desc_extra *desc_extra;
174 /* DMA address and size information */
175 dma_addr_t ring_dma_addr;
176 dma_addr_t driver_event_dma_addr;
177 dma_addr_t device_event_dma_addr;
178 size_t ring_size_in_bytes;
179 size_t event_size_in_bytes;
183 /* How to notify other side. FIXME: commonalize hcalls! */
184 bool (*notify)(struct virtqueue *vq);
186 /* DMA, allocation, and size information */
190 /* They're supposed to lock for us. */
193 /* Figure out if their kicks are too delayed. */
194 bool last_add_time_valid;
195 ktime_t last_add_time;
204 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
206 static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
207 unsigned int total_sg)
209 struct vring_virtqueue *vq = to_vvq(_vq);
212 * If the host supports indirect descriptor tables, and we have multiple
213 * buffers, then go indirect. FIXME: tune this threshold
215 return (vq->indirect && total_sg > 1 && vq->vq.num_free);
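/*
 * Note: an indirect chain occupies exactly one descriptor in the main ring,
 * which is why a single free slot (num_free non-zero) is enough to take the
 * indirect path here.
 */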
219 * Modern virtio devices have feature bits to specify whether they need a
220 * quirk and bypass the IOMMU. If not there, just use the DMA API.
222 * If there, the interaction between virtio and DMA API is messy.
224 * On most systems with virtio, physical addresses match bus addresses,
225 * and it doesn't particularly matter whether we use the DMA API.
227 * On some systems, including Xen and any system with a physical device
228 * that speaks virtio behind a physical IOMMU, we must use the DMA API
229 * for virtio DMA to work at all.
231 * On other systems, including SPARC and PPC64, virtio-pci devices are
232 * enumerated as though they are behind an IOMMU, but the virtio host
233 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
234 * there or somehow map everything as the identity.
236 * For the time being, we preserve historic behavior and bypass the DMA
237 * API.
239 * TODO: install a per-device DMA ops structure that does the right thing
240 * taking into account all the above quirks, and use the DMA API
241 * unconditionally on data path.
244 static bool vring_use_dma_api(struct virtio_device *vdev)
246 if (!virtio_has_dma_quirk(vdev))
249 /* Otherwise, we are left to guess. */
251 * In theory, it's possible to have a buggy QEMU-supplied
252 * emulated Q35 IOMMU and Xen enabled at the same time. On
253 * such a configuration, virtio has never worked and will
254 * not work without an even larger kludge. Instead, enable
255 * the DMA API if we're a Xen guest, which at least allows
256 * all of the sensible Xen configurations to work correctly.
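/*
 * In practice: without the quirk the DMA API is always used; with the quirk
 * present, only Xen guests use it, so the sensible Xen configurations keep
 * working while everyone else keeps the historic bypass.
 */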
264 size_t virtio_max_dma_size(struct virtio_device *vdev)
266 size_t max_segment_size = SIZE_MAX;
268 if (vring_use_dma_api(vdev))
269 max_segment_size = dma_max_mapping_size(&vdev->dev);
271 return max_segment_size;
273 EXPORT_SYMBOL_GPL(virtio_max_dma_size);
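
/*
 * Usage sketch (illustrative only, not part of this file): a block driver
 * could cap its segment size with the value reported here, e.g.
 *
 *	blk_queue_max_segment_size(q, virtio_max_dma_size(vdev));
 *
 * where "q" is the driver's request queue.
 */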
275 static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
276 dma_addr_t *dma_handle, gfp_t flag)
278 if (vring_use_dma_api(vdev)) {
279 return dma_alloc_coherent(vdev->dev.parent, size,
282 void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
285 phys_addr_t phys_addr = virt_to_phys(queue);
286 *dma_handle = (dma_addr_t)phys_addr;
289 * Sanity check: make sure we didn't truncate
290 * the address. The only arches I can find that
291 * have 64-bit phys_addr_t but 32-bit dma_addr_t
292 * are certain non-highmem MIPS and x86
293 * configurations, but these configurations
294 * should never allocate physical pages above 32
295 * bits, so this is fine. Just in case, throw a
296 * warning and abort if we end up with an
297 * unrepresentable address.
299 if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
300 free_pages_exact(queue, PAGE_ALIGN(size));
308 static void vring_free_queue(struct virtio_device *vdev, size_t size,
309 void *queue, dma_addr_t dma_handle)
311 if (vring_use_dma_api(vdev))
312 dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
314 free_pages_exact(queue, PAGE_ALIGN(size));
318 * The DMA ops on various arches are rather gnarly right now, and
319 * making all of the arch DMA ops work on the vring device itself
320 * is a mess. For now, we use the parent device for DMA ops.
322 static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
324 return vq->vq.vdev->dev.parent;
327 /* Map one sg entry. */
328 static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
329 struct scatterlist *sg,
330 enum dma_data_direction direction)
332 if (!vq->use_dma_api)
333 return (dma_addr_t)sg_phys(sg);
336 * We can't use dma_map_sg, because we don't use scatterlists in
337 * the way it expects (we don't guarantee that the scatterlist
338 * will exist for the lifetime of the mapping).
340 return dma_map_page(vring_dma_dev(vq),
341 sg_page(sg), sg->offset, sg->length,
345 static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
346 void *cpu_addr, size_t size,
347 enum dma_data_direction direction)
349 if (!vq->use_dma_api)
350 return (dma_addr_t)virt_to_phys(cpu_addr);
352 return dma_map_single(vring_dma_dev(vq),
353 cpu_addr, size, direction);
356 static int vring_mapping_error(const struct vring_virtqueue *vq,
359 if (!vq->use_dma_api)
362 return dma_mapping_error(vring_dma_dev(vq), addr);
367 * Split ring specific functions - *_split().
370 static void vring_unmap_one_split(const struct vring_virtqueue *vq,
371 struct vring_desc *desc)
375 if (!vq->use_dma_api)
378 flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
380 if (flags & VRING_DESC_F_INDIRECT) {
381 dma_unmap_single(vring_dma_dev(vq),
382 virtio64_to_cpu(vq->vq.vdev, desc->addr),
383 virtio32_to_cpu(vq->vq.vdev, desc->len),
384 (flags & VRING_DESC_F_WRITE) ?
385 DMA_FROM_DEVICE : DMA_TO_DEVICE);
387 dma_unmap_page(vring_dma_dev(vq),
388 virtio64_to_cpu(vq->vq.vdev, desc->addr),
389 virtio32_to_cpu(vq->vq.vdev, desc->len),
390 (flags & VRING_DESC_F_WRITE) ?
391 DMA_FROM_DEVICE : DMA_TO_DEVICE);
395 static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
396 unsigned int total_sg,
399 struct vring_desc *desc;
403 * We require lowmem mappings for the descriptors because
404 * otherwise virt_to_phys will give us bogus addresses in the
407 gfp &= ~__GFP_HIGHMEM;
409 desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
413 for (i = 0; i < total_sg; i++)
414 desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
418 static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
419 struct vring_desc *desc,
425 desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
426 desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
427 desc[i].len = cpu_to_virtio32(vq->vdev, len);
429 return virtio16_to_cpu(vq->vdev, desc[i].next);
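/*
 * The helper above fills in one descriptor (addr/len/flags) and returns the
 * index of the next descriptor in the chain, so callers can keep walking the
 * pre-linked free list while filling a buffer.
 */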
432 static inline int virtqueue_add_split(struct virtqueue *_vq,
433 struct scatterlist *sgs[],
434 unsigned int total_sg,
435 unsigned int out_sgs,
441 struct vring_virtqueue *vq = to_vvq(_vq);
442 struct scatterlist *sg;
443 struct vring_desc *desc;
444 unsigned int i, n, avail, descs_used, prev, err_idx;
450 BUG_ON(data == NULL);
451 BUG_ON(ctx && vq->indirect);
453 if (unlikely(vq->broken)) {
458 LAST_ADD_TIME_UPDATE(vq);
460 BUG_ON(total_sg == 0);
462 head = vq->free_head;
464 if (virtqueue_use_indirect(_vq, total_sg))
465 desc = alloc_indirect_split(_vq, total_sg, gfp);
468 WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
472 /* Use a single buffer which doesn't continue */
474 /* Set up rest to use this indirect table. */
479 desc = vq->split.vring.desc;
481 descs_used = total_sg;
484 if (vq->vq.num_free < descs_used) {
485 pr_debug("Can't add buf len %i - avail = %i\n",
486 descs_used, vq->vq.num_free);
487 /* FIXME: for historical reasons, we force a notify here if
488 * there are outgoing parts to the buffer. Presumably the
489 * host should service the ring ASAP. */
498 for (n = 0; n < out_sgs; n++) {
499 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
500 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
501 if (vring_mapping_error(vq, addr))
505 i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
509 for (; n < (out_sgs + in_sgs); n++) {
510 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
511 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
512 if (vring_mapping_error(vq, addr))
516 i = virtqueue_add_desc_split(_vq, desc, i, addr,
522 /* Last one doesn't continue. */
523 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
526 /* Now that the indirect table is filled in, map it. */
527 dma_addr_t addr = vring_map_single(
528 vq, desc, total_sg * sizeof(struct vring_desc),
530 if (vring_mapping_error(vq, addr))
533 virtqueue_add_desc_split(_vq, vq->split.vring.desc,
535 total_sg * sizeof(struct vring_desc),
536 VRING_DESC_F_INDIRECT);
539 /* We're using some buffers from the free list. */
540 vq->vq.num_free -= descs_used;
542 /* Update free pointer */
544 vq->free_head = virtio16_to_cpu(_vq->vdev,
545 vq->split.vring.desc[head].next);
549 /* Store token and indirect buffer state. */
550 vq->split.desc_state[head].data = data;
552 vq->split.desc_state[head].indir_desc = desc;
554 vq->split.desc_state[head].indir_desc = ctx;
556 /* Put entry in available array (but don't update avail->idx until they
557 * do sync). */
558 avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
559 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
561 /* Descriptors and available array need to be set before we expose the
562 * new available array entries. */
563 virtio_wmb(vq->weak_barriers);
564 vq->split.avail_idx_shadow++;
565 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
566 vq->split.avail_idx_shadow);
569 pr_debug("Added buffer head %i to %p\n", head, vq);
572 /* This is very unlikely, but theoretically possible. Kick
573 * just in case. */
574 if (unlikely(vq->num_added == (1 << 16) - 1))
587 for (n = 0; n < total_sg; n++) {
590 vring_unmap_one_split(vq, &desc[i]);
591 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
601 static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
603 struct vring_virtqueue *vq = to_vvq(_vq);
608 /* We need to expose available array entries before checking avail
609 * event. */
610 virtio_mb(vq->weak_barriers);
612 old = vq->split.avail_idx_shadow - vq->num_added;
613 new = vq->split.avail_idx_shadow;
616 LAST_ADD_TIME_CHECK(vq);
617 LAST_ADD_TIME_INVALID(vq);
620 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
621 vring_avail_event(&vq->split.vring)),
624 needs_kick = !(vq->split.vring.used->flags &
625 cpu_to_virtio16(_vq->vdev,
626 VRING_USED_F_NO_NOTIFY));
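/*
 * With EVENT_IDX negotiated, vring_need_event() requests a kick only if the
 * avail index just moved past the avail event index the device published;
 * otherwise the device has asked to skip this notification.
 */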
632 static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
636 __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
638 /* Clear data ptr. */
639 vq->split.desc_state[head].data = NULL;
641 /* Put back on free list: unmap first-level descriptors and find end */
644 while (vq->split.vring.desc[i].flags & nextflag) {
645 vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
646 i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
650 vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
651 vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
653 vq->free_head = head;
655 /* Plus final descriptor */
659 struct vring_desc *indir_desc =
660 vq->split.desc_state[head].indir_desc;
663 /* Free the indirect table, if any, now that it's unmapped. */
667 len = virtio32_to_cpu(vq->vq.vdev,
668 vq->split.vring.desc[head].len);
670 BUG_ON(!(vq->split.vring.desc[head].flags &
671 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
672 BUG_ON(len == 0 || len % sizeof(struct vring_desc));
674 for (j = 0; j < len / sizeof(struct vring_desc); j++)
675 vring_unmap_one_split(vq, &indir_desc[j]);
678 vq->split.desc_state[head].indir_desc = NULL;
680 *ctx = vq->split.desc_state[head].indir_desc;
684 static inline bool more_used_split(const struct vring_virtqueue *vq)
686 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
687 vq->split.vring.used->idx);
690 static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
694 struct vring_virtqueue *vq = to_vvq(_vq);
701 if (unlikely(vq->broken)) {
706 if (!more_used_split(vq)) {
707 pr_debug("No more buffers in queue\n");
712 /* Only get used array entries after they have been exposed by host. */
713 virtio_rmb(vq->weak_barriers);
715 last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
716 i = virtio32_to_cpu(_vq->vdev,
717 vq->split.vring.used->ring[last_used].id);
718 *len = virtio32_to_cpu(_vq->vdev,
719 vq->split.vring.used->ring[last_used].len);
721 if (unlikely(i >= vq->split.vring.num)) {
722 BAD_RING(vq, "id %u out of range\n", i);
725 if (unlikely(!vq->split.desc_state[i].data)) {
726 BAD_RING(vq, "id %u is not a head!\n", i);
730 /* detach_buf_split clears data, so grab it now. */
731 ret = vq->split.desc_state[i].data;
732 detach_buf_split(vq, i, ctx);
734 /* If we expect an interrupt for the next entry, tell host
735 * by writing event index and flush out the write before
736 * the read in the next get_buf call. */
737 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
738 virtio_store_mb(vq->weak_barriers,
739 &vring_used_event(&vq->split.vring),
740 cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
742 LAST_ADD_TIME_INVALID(vq);
748 static void virtqueue_disable_cb_split(struct virtqueue *_vq)
750 struct vring_virtqueue *vq = to_vvq(_vq);
752 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
753 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
755 /* TODO: this is a hack. Figure out a cleaner value to write. */
756 vring_used_event(&vq->split.vring) = 0x0;
758 vq->split.vring.avail->flags =
759 cpu_to_virtio16(_vq->vdev,
760 vq->split.avail_flags_shadow);
764 static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
766 struct vring_virtqueue *vq = to_vvq(_vq);
771 /* We optimistically turn back on interrupts, then check if there was
772 * more to do. */
773 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
774 * either clear the flags bit or point the event index at the next
775 * entry. Always do both to keep code simple. */
776 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
777 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
779 vq->split.vring.avail->flags =
780 cpu_to_virtio16(_vq->vdev,
781 vq->split.avail_flags_shadow);
783 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
784 last_used_idx = vq->last_used_idx);
786 return last_used_idx;
789 static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
791 struct vring_virtqueue *vq = to_vvq(_vq);
793 return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
794 vq->split.vring.used->idx);
797 static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
799 struct vring_virtqueue *vq = to_vvq(_vq);
804 /* We optimistically turn back on interrupts, then check if there was
805 * more to do. */
806 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
807 * either clear the flags bit or point the event index at the next
808 * entry. Always update the event index to keep code simple. */
809 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
810 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
812 vq->split.vring.avail->flags =
813 cpu_to_virtio16(_vq->vdev,
814 vq->split.avail_flags_shadow);
816 /* TODO: tune this threshold */
817 bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;
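/*
 * The heuristic: ask the device to hold the interrupt until roughly three
 * quarters of the buffers currently in flight have been consumed.
 */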
819 virtio_store_mb(vq->weak_barriers,
820 &vring_used_event(&vq->split.vring),
821 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
823 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
824 - vq->last_used_idx) > bufs)) {
833 static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
835 struct vring_virtqueue *vq = to_vvq(_vq);
841 for (i = 0; i < vq->split.vring.num; i++) {
842 if (!vq->split.desc_state[i].data)
844 /* detach_buf_split clears data, so grab it now. */
845 buf = vq->split.desc_state[i].data;
846 detach_buf_split(vq, i, NULL);
847 vq->split.avail_idx_shadow--;
848 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
849 vq->split.avail_idx_shadow);
853 /* That should have freed everything. */
854 BUG_ON(vq->vq.num_free != vq->split.vring.num);
860 static struct virtqueue *vring_create_virtqueue_split(
863 unsigned int vring_align,
864 struct virtio_device *vdev,
868 bool (*notify)(struct virtqueue *),
869 void (*callback)(struct virtqueue *),
872 struct virtqueue *vq;
875 size_t queue_size_in_bytes;
878 /* We assume num is a power of 2. */
879 if (num & (num - 1)) {
880 dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
884 /* TODO: allocate each queue chunk individually */
885 for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
886 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
888 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
899 /* Try to get a single page. You are my only hope! */
900 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
901 &dma_addr, GFP_KERNEL|__GFP_ZERO);
906 queue_size_in_bytes = vring_size(num, vring_align);
907 vring_init(&vring, num, queue, vring_align);
909 vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
910 notify, callback, name);
912 vring_free_queue(vdev, queue_size_in_bytes, queue,
917 to_vvq(vq)->split.queue_dma_addr = dma_addr;
918 to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
919 to_vvq(vq)->we_own_ring = true;
926 * Packed ring specific functions - *_packed().
929 static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
930 struct vring_desc_extra *state)
934 if (!vq->use_dma_api)
937 flags = state->flags;
939 if (flags & VRING_DESC_F_INDIRECT) {
940 dma_unmap_single(vring_dma_dev(vq),
941 state->addr, state->len,
942 (flags & VRING_DESC_F_WRITE) ?
943 DMA_FROM_DEVICE : DMA_TO_DEVICE);
945 dma_unmap_page(vring_dma_dev(vq),
946 state->addr, state->len,
947 (flags & VRING_DESC_F_WRITE) ?
948 DMA_FROM_DEVICE : DMA_TO_DEVICE);
952 static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
953 struct vring_packed_desc *desc)
957 if (!vq->use_dma_api)
960 flags = le16_to_cpu(desc->flags);
962 if (flags & VRING_DESC_F_INDIRECT) {
963 dma_unmap_single(vring_dma_dev(vq),
964 le64_to_cpu(desc->addr),
965 le32_to_cpu(desc->len),
966 (flags & VRING_DESC_F_WRITE) ?
967 DMA_FROM_DEVICE : DMA_TO_DEVICE);
969 dma_unmap_page(vring_dma_dev(vq),
970 le64_to_cpu(desc->addr),
971 le32_to_cpu(desc->len),
972 (flags & VRING_DESC_F_WRITE) ?
973 DMA_FROM_DEVICE : DMA_TO_DEVICE);
977 static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
980 struct vring_packed_desc *desc;
983 * We require lowmem mappings for the descriptors because
984 * otherwise virt_to_phys will give us bogus addresses in the
987 gfp &= ~__GFP_HIGHMEM;
989 desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
994 static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
995 struct scatterlist *sgs[],
996 unsigned int total_sg,
997 unsigned int out_sgs,
1002 struct vring_packed_desc *desc;
1003 struct scatterlist *sg;
1004 unsigned int i, n, err_idx;
1008 head = vq->packed.next_avail_idx;
1009 desc = alloc_indirect_packed(total_sg, gfp);
1011 if (unlikely(vq->vq.num_free < 1)) {
1012 pr_debug("Can't add buf len 1 - avail = 0\n");
1020 BUG_ON(id == vq->packed.vring.num);
1022 for (n = 0; n < out_sgs + in_sgs; n++) {
1023 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
1024 addr = vring_map_one_sg(vq, sg, n < out_sgs ?
1025 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1026 if (vring_mapping_error(vq, addr))
1029 desc[i].flags = cpu_to_le16(n < out_sgs ?
1030 0 : VRING_DESC_F_WRITE);
1031 desc[i].addr = cpu_to_le64(addr);
1032 desc[i].len = cpu_to_le32(sg->length);
1037 /* Now that the indirect table is filled in, map it. */
1038 addr = vring_map_single(vq, desc,
1039 total_sg * sizeof(struct vring_packed_desc),
1041 if (vring_mapping_error(vq, addr))
1044 vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
1045 vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
1046 sizeof(struct vring_packed_desc));
1047 vq->packed.vring.desc[head].id = cpu_to_le16(id);
1049 if (vq->use_dma_api) {
1050 vq->packed.desc_extra[id].addr = addr;
1051 vq->packed.desc_extra[id].len = total_sg *
1052 sizeof(struct vring_packed_desc);
1053 vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
1054 vq->packed.avail_used_flags;
1058 * A driver MUST NOT make the first descriptor in the list
1059 * available before all subsequent descriptors comprising
1060 * the list are made available.
1062 virtio_wmb(vq->weak_barriers);
1063 vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
1064 vq->packed.avail_used_flags);
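/*
 * Writing the head descriptor's flags last, with AVAIL/USED reflecting the
 * driver's current wrap counter, is what actually hands the chain to the
 * device; hence the write barrier above.
 */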
1066 /* We're using some buffers from the free list. */
1067 vq->vq.num_free -= 1;
1069 /* Update free pointer */
1071 if (n >= vq->packed.vring.num) {
1073 vq->packed.avail_wrap_counter ^= 1;
1074 vq->packed.avail_used_flags ^=
1075 1 << VRING_PACKED_DESC_F_AVAIL |
1076 1 << VRING_PACKED_DESC_F_USED;
1078 vq->packed.next_avail_idx = n;
1079 vq->free_head = vq->packed.desc_extra[id].next;
1081 /* Store token and indirect buffer state. */
1082 vq->packed.desc_state[id].num = 1;
1083 vq->packed.desc_state[id].data = data;
1084 vq->packed.desc_state[id].indir_desc = desc;
1085 vq->packed.desc_state[id].last = id;
1089 pr_debug("Added buffer head %i to %p\n", head, vq);
1097 for (i = 0; i < err_idx; i++)
1098 vring_unmap_desc_packed(vq, &desc[i]);
1106 static inline int virtqueue_add_packed(struct virtqueue *_vq,
1107 struct scatterlist *sgs[],
1108 unsigned int total_sg,
1109 unsigned int out_sgs,
1110 unsigned int in_sgs,
1115 struct vring_virtqueue *vq = to_vvq(_vq);
1116 struct vring_packed_desc *desc;
1117 struct scatterlist *sg;
1118 unsigned int i, n, c, descs_used, err_idx;
1119 __le16 head_flags, flags;
1120 u16 head, id, prev, curr, avail_used_flags;
1124 BUG_ON(data == NULL);
1125 BUG_ON(ctx && vq->indirect);
1127 if (unlikely(vq->broken)) {
1132 LAST_ADD_TIME_UPDATE(vq);
1134 BUG_ON(total_sg == 0);
1136 if (virtqueue_use_indirect(_vq, total_sg))
1137 return virtqueue_add_indirect_packed(vq, sgs, total_sg,
1138 out_sgs, in_sgs, data, gfp);
1140 head = vq->packed.next_avail_idx;
1141 avail_used_flags = vq->packed.avail_used_flags;
1143 WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
1145 desc = vq->packed.vring.desc;
1147 descs_used = total_sg;
1149 if (unlikely(vq->vq.num_free < descs_used)) {
1150 pr_debug("Can't add buf len %i - avail = %i\n",
1151 descs_used, vq->vq.num_free);
1157 BUG_ON(id == vq->packed.vring.num);
1161 for (n = 0; n < out_sgs + in_sgs; n++) {
1162 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
1163 dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
1164 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1165 if (vring_mapping_error(vq, addr))
1168 flags = cpu_to_le16(vq->packed.avail_used_flags |
1169 (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
1170 (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
1174 desc[i].flags = flags;
1176 desc[i].addr = cpu_to_le64(addr);
1177 desc[i].len = cpu_to_le32(sg->length);
1178 desc[i].id = cpu_to_le16(id);
1180 if (unlikely(vq->use_dma_api)) {
1181 vq->packed.desc_extra[curr].addr = addr;
1182 vq->packed.desc_extra[curr].len = sg->length;
1183 vq->packed.desc_extra[curr].flags =
1187 curr = vq->packed.desc_extra[curr].next;
1189 if ((unlikely(++i >= vq->packed.vring.num))) {
1191 vq->packed.avail_used_flags ^=
1192 1 << VRING_PACKED_DESC_F_AVAIL |
1193 1 << VRING_PACKED_DESC_F_USED;
1199 vq->packed.avail_wrap_counter ^= 1;
1201 /* We're using some buffers from the free list. */
1202 vq->vq.num_free -= descs_used;
1204 /* Update free pointer */
1205 vq->packed.next_avail_idx = i;
1206 vq->free_head = curr;
1209 vq->packed.desc_state[id].num = descs_used;
1210 vq->packed.desc_state[id].data = data;
1211 vq->packed.desc_state[id].indir_desc = ctx;
1212 vq->packed.desc_state[id].last = prev;
1215 * A driver MUST NOT make the first descriptor in the list
1216 * available before all subsequent descriptors comprising
1217 * the list are made available.
1219 virtio_wmb(vq->weak_barriers);
1220 vq->packed.vring.desc[head].flags = head_flags;
1221 vq->num_added += descs_used;
1223 pr_debug("Added buffer head %i to %p\n", head, vq);
1231 curr = vq->free_head;
1233 vq->packed.avail_used_flags = avail_used_flags;
1235 for (n = 0; n < total_sg; n++) {
1238 vring_unmap_state_packed(vq,
1239 &vq->packed.desc_extra[curr]);
1240 curr = vq->packed.desc_extra[curr].next;
1242 if (i >= vq->packed.vring.num)
1250 static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
1252 struct vring_virtqueue *vq = to_vvq(_vq);
1253 u16 new, old, off_wrap, flags, wrap_counter, event_idx;
1266 * We need to expose the new flags value before checking notification
1267 * requirements.
1269 virtio_mb(vq->weak_barriers);
1271 old = vq->packed.next_avail_idx - vq->num_added;
1272 new = vq->packed.next_avail_idx;
1275 snapshot.u32 = *(u32 *)vq->packed.vring.device;
1276 flags = le16_to_cpu(snapshot.flags);
1278 LAST_ADD_TIME_CHECK(vq);
1279 LAST_ADD_TIME_INVALID(vq);
1281 if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
1282 needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
1286 off_wrap = le16_to_cpu(snapshot.off_wrap);
1288 wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1289 event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1290 if (wrap_counter != vq->packed.avail_wrap_counter)
1291 event_idx -= vq->packed.vring.num;
1293 needs_kick = vring_need_event(event_idx, new, old);
1299 static void detach_buf_packed(struct vring_virtqueue *vq,
1300 unsigned int id, void **ctx)
1302 struct vring_desc_state_packed *state = NULL;
1303 struct vring_packed_desc *desc;
1304 unsigned int i, curr;
1306 state = &vq->packed.desc_state[id];
1308 /* Clear data ptr. */
1311 vq->packed.desc_extra[state->last].next = vq->free_head;
1313 vq->vq.num_free += state->num;
1315 if (unlikely(vq->use_dma_api)) {
1317 for (i = 0; i < state->num; i++) {
1318 vring_unmap_state_packed(vq,
1319 &vq->packed.desc_extra[curr]);
1320 curr = vq->packed.desc_extra[curr].next;
1327 /* Free the indirect table, if any, now that it's unmapped. */
1328 desc = state->indir_desc;
1332 if (vq->use_dma_api) {
1333 len = vq->packed.desc_extra[id].len;
1334 for (i = 0; i < len / sizeof(struct vring_packed_desc);
1336 vring_unmap_desc_packed(vq, &desc[i]);
1339 state->indir_desc = NULL;
1341 *ctx = state->indir_desc;
1345 static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
1346 u16 idx, bool used_wrap_counter)
1351 flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
1352 avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
1353 used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
1355 return avail == used && used == used_wrap_counter;
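/*
 * The device marks a packed descriptor as used by writing flags whose AVAIL
 * and USED bits are equal; matching them against the wrap counter for the
 * lap we are consuming distinguishes fresh completions from stale entries.
 */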
1358 static inline bool more_used_packed(const struct vring_virtqueue *vq)
1360 return is_used_desc_packed(vq, vq->last_used_idx,
1361 vq->packed.used_wrap_counter);
1364 static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
1368 struct vring_virtqueue *vq = to_vvq(_vq);
1374 if (unlikely(vq->broken)) {
1379 if (!more_used_packed(vq)) {
1380 pr_debug("No more buffers in queue\n");
1385 /* Only get used elements after they have been exposed by host. */
1386 virtio_rmb(vq->weak_barriers);
1388 last_used = vq->last_used_idx;
1389 id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
1390 *len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
1392 if (unlikely(id >= vq->packed.vring.num)) {
1393 BAD_RING(vq, "id %u out of range\n", id);
1396 if (unlikely(!vq->packed.desc_state[id].data)) {
1397 BAD_RING(vq, "id %u is not a head!\n", id);
1401 /* detach_buf_packed clears data, so grab it now. */
1402 ret = vq->packed.desc_state[id].data;
1403 detach_buf_packed(vq, id, ctx);
1405 vq->last_used_idx += vq->packed.desc_state[id].num;
1406 if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
1407 vq->last_used_idx -= vq->packed.vring.num;
1408 vq->packed.used_wrap_counter ^= 1;
1412 * If we expect an interrupt for the next entry, tell host
1413 * by writing event index and flush out the write before
1414 * the read in the next get_buf call.
1416 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
1417 virtio_store_mb(vq->weak_barriers,
1418 &vq->packed.vring.driver->off_wrap,
1419 cpu_to_le16(vq->last_used_idx |
1420 (vq->packed.used_wrap_counter <<
1421 VRING_PACKED_EVENT_F_WRAP_CTR)));
1423 LAST_ADD_TIME_INVALID(vq);
1429 static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
1431 struct vring_virtqueue *vq = to_vvq(_vq);
1433 if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
1434 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
1435 vq->packed.vring.driver->flags =
1436 cpu_to_le16(vq->packed.event_flags_shadow);
1440 static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
1442 struct vring_virtqueue *vq = to_vvq(_vq);
1447 * We optimistically turn back on interrupts, then check if there was
1448 * more to do.
1452 vq->packed.vring.driver->off_wrap =
1453 cpu_to_le16(vq->last_used_idx |
1454 (vq->packed.used_wrap_counter <<
1455 VRING_PACKED_EVENT_F_WRAP_CTR));
1457 * We need to update event offset and event wrap
1458 * counter first before updating event flags.
1460 virtio_wmb(vq->weak_barriers);
1463 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1464 vq->packed.event_flags_shadow = vq->event ?
1465 VRING_PACKED_EVENT_FLAG_DESC :
1466 VRING_PACKED_EVENT_FLAG_ENABLE;
1467 vq->packed.vring.driver->flags =
1468 cpu_to_le16(vq->packed.event_flags_shadow);
1472 return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
1473 VRING_PACKED_EVENT_F_WRAP_CTR);
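/*
 * The opaque value returned above packs the used index together with the
 * used wrap counter, so virtqueue_poll() can later tell whether new buffers
 * were used after callbacks were re-enabled.
 */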
1476 static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
1478 struct vring_virtqueue *vq = to_vvq(_vq);
1482 wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1483 used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1485 return is_used_desc_packed(vq, used_idx, wrap_counter);
1488 static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
1490 struct vring_virtqueue *vq = to_vvq(_vq);
1491 u16 used_idx, wrap_counter;
1497 * We optimistically turn back on interrupts, then check if there was
1498 * more to do.
1502 /* TODO: tune this threshold */
1503 bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
1504 wrap_counter = vq->packed.used_wrap_counter;
1506 used_idx = vq->last_used_idx + bufs;
1507 if (used_idx >= vq->packed.vring.num) {
1508 used_idx -= vq->packed.vring.num;
1512 vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
1513 (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1516 * We need to update event offset and event wrap
1517 * counter first before updating event flags.
1519 virtio_wmb(vq->weak_barriers);
1522 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1523 vq->packed.event_flags_shadow = vq->event ?
1524 VRING_PACKED_EVENT_FLAG_DESC :
1525 VRING_PACKED_EVENT_FLAG_ENABLE;
1526 vq->packed.vring.driver->flags =
1527 cpu_to_le16(vq->packed.event_flags_shadow);
1531 * We need to update event suppression structure first
1532 * before re-checking for more used buffers.
1534 virtio_mb(vq->weak_barriers);
1536 if (is_used_desc_packed(vq,
1538 vq->packed.used_wrap_counter)) {
1547 static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
1549 struct vring_virtqueue *vq = to_vvq(_vq);
1555 for (i = 0; i < vq->packed.vring.num; i++) {
1556 if (!vq->packed.desc_state[i].data)
1558 /* detach_buf_packed clears data, so grab it now. */
1559 buf = vq->packed.desc_state[i].data;
1560 detach_buf_packed(vq, i, NULL);
1564 /* That should have freed everything. */
1565 BUG_ON(vq->vq.num_free != vq->packed.vring.num);
1571 static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq,
1574 struct vring_desc_extra *desc_extra;
1577 desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra),
1582 memset(desc_extra, 0, num * sizeof(struct vring_desc_extra));
1584 for (i = 0; i < num - 1; i++)
1585 desc_extra[i].next = i + 1;
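/*
 * The next fields above chain the extra-state entries into a singly linked
 * free list; the packed ring hands out buffer ids by popping this list via
 * vq->free_head.
 */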
1590 static struct virtqueue *vring_create_virtqueue_packed(
1593 unsigned int vring_align,
1594 struct virtio_device *vdev,
1596 bool may_reduce_num,
1598 bool (*notify)(struct virtqueue *),
1599 void (*callback)(struct virtqueue *),
1602 struct vring_virtqueue *vq;
1603 struct vring_packed_desc *ring;
1604 struct vring_packed_desc_event *driver, *device;
1605 dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
1606 size_t ring_size_in_bytes, event_size_in_bytes;
1608 ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
1610 ring = vring_alloc_queue(vdev, ring_size_in_bytes,
1612 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1616 event_size_in_bytes = sizeof(struct vring_packed_desc_event);
1618 driver = vring_alloc_queue(vdev, event_size_in_bytes,
1619 &driver_event_dma_addr,
1620 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1624 device = vring_alloc_queue(vdev, event_size_in_bytes,
1625 &device_event_dma_addr,
1626 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1630 vq = kmalloc(sizeof(*vq), GFP_KERNEL);
1634 vq->vq.callback = callback;
1637 vq->vq.num_free = num;
1638 vq->vq.index = index;
1639 vq->we_own_ring = true;
1640 vq->notify = notify;
1641 vq->weak_barriers = weak_barriers;
1643 vq->last_used_idx = 0;
1644 vq->event_triggered = false;
1646 vq->packed_ring = true;
1647 vq->use_dma_api = vring_use_dma_api(vdev);
1650 vq->last_add_time_valid = false;
1653 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
1655 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1657 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
1658 vq->weak_barriers = false;
1660 vq->packed.ring_dma_addr = ring_dma_addr;
1661 vq->packed.driver_event_dma_addr = driver_event_dma_addr;
1662 vq->packed.device_event_dma_addr = device_event_dma_addr;
1664 vq->packed.ring_size_in_bytes = ring_size_in_bytes;
1665 vq->packed.event_size_in_bytes = event_size_in_bytes;
1667 vq->packed.vring.num = num;
1668 vq->packed.vring.desc = ring;
1669 vq->packed.vring.driver = driver;
1670 vq->packed.vring.device = device;
1672 vq->packed.next_avail_idx = 0;
1673 vq->packed.avail_wrap_counter = 1;
1674 vq->packed.used_wrap_counter = 1;
1675 vq->packed.event_flags_shadow = 0;
1676 vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
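/*
 * Both wrap counters start at 1, so descriptors made available on the first
 * lap are written with AVAIL=1/USED=0, which is what a freshly reset device
 * expects to see.
 */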
1678 vq->packed.desc_state = kmalloc_array(num,
1679 sizeof(struct vring_desc_state_packed),
1681 if (!vq->packed.desc_state)
1682 goto err_desc_state;
1684 memset(vq->packed.desc_state, 0,
1685 num * sizeof(struct vring_desc_state_packed));
1687 /* Put everything in free lists. */
1690 vq->packed.desc_extra = vring_alloc_desc_extra(vq, num);
1691 if (!vq->packed.desc_extra)
1692 goto err_desc_extra;
1694 /* No callback? Tell other side not to bother us. */
1696 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
1697 vq->packed.vring.driver->flags =
1698 cpu_to_le16(vq->packed.event_flags_shadow);
1701 list_add_tail(&vq->vq.list, &vdev->vqs);
1705 kfree(vq->packed.desc_state);
1709 vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
1711 vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
1713 vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
1720 * Generic functions and exported symbols.
1723 static inline int virtqueue_add(struct virtqueue *_vq,
1724 struct scatterlist *sgs[],
1725 unsigned int total_sg,
1726 unsigned int out_sgs,
1727 unsigned int in_sgs,
1732 struct vring_virtqueue *vq = to_vvq(_vq);
1734 return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
1735 out_sgs, in_sgs, data, ctx, gfp) :
1736 virtqueue_add_split(_vq, sgs, total_sg,
1737 out_sgs, in_sgs, data, ctx, gfp);
1741 * virtqueue_add_sgs - expose buffers to other end
1742 * @_vq: the struct virtqueue we're talking about.
1743 * @sgs: array of terminated scatterlists.
1744 * @out_sgs: the number of scatterlists readable by other side
1745 * @in_sgs: the number of scatterlists which are writable (after readable ones)
1746 * @data: the token identifying the buffer.
1747 * @gfp: how to do memory allocations (if necessary).
1749 * Caller must ensure we don't call this with other virtqueue operations
1750 * at the same time (except where noted).
1752 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
1754 int virtqueue_add_sgs(struct virtqueue *_vq,
1755 struct scatterlist *sgs[],
1756 unsigned int out_sgs,
1757 unsigned int in_sgs,
1761 unsigned int i, total_sg = 0;
1763 /* Count them first. */
1764 for (i = 0; i < out_sgs + in_sgs; i++) {
1765 struct scatterlist *sg;
1767 for (sg = sgs[i]; sg; sg = sg_next(sg))
1770 return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
1773 EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
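
/*
 * Usage sketch (illustrative only; "req", "hdr" and "status" are made-up
 * driver state): queueing a device-readable header followed by a
 * device-writable status byte might look like
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[0] = &hdr;
 *	sgs[1] = &status;
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 */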
1776 * virtqueue_add_outbuf - expose output buffers to other end
1777 * @vq: the struct virtqueue we're talking about.
1778 * @sg: scatterlist (must be well-formed and terminated!)
1779 * @num: the number of entries in @sg readable by other side
1780 * @data: the token identifying the buffer.
1781 * @gfp: how to do memory allocations (if necessary).
1783 * Caller must ensure we don't call this with other virtqueue operations
1784 * at the same time (except where noted).
1786 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
1788 int virtqueue_add_outbuf(struct virtqueue *vq,
1789 struct scatterlist *sg, unsigned int num,
1793 return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
1795 EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
1798 * virtqueue_add_inbuf - expose input buffers to other end
1799 * @vq: the struct virtqueue we're talking about.
1800 * @sg: scatterlist (must be well-formed and terminated!)
1801 * @num: the number of entries in @sg writable by other side
1802 * @data: the token identifying the buffer.
1803 * @gfp: how to do memory allocations (if necessary).
1805 * Caller must ensure we don't call this with other virtqueue operations
1806 * at the same time (except where noted).
1808 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
1810 int virtqueue_add_inbuf(struct virtqueue *vq,
1811 struct scatterlist *sg, unsigned int num,
1815 return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
1817 EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
1820 * virtqueue_add_inbuf_ctx - expose input buffers to other end
1821 * @vq: the struct virtqueue we're talking about.
1822 * @sg: scatterlist (must be well-formed and terminated!)
1823 * @num: the number of entries in @sg writable by other side
1824 * @data: the token identifying the buffer.
1825 * @ctx: extra context for the token
1826 * @gfp: how to do memory allocations (if necessary).
1828 * Caller must ensure we don't call this with other virtqueue operations
1829 * at the same time (except where noted).
1831 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
1833 int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
1834 struct scatterlist *sg, unsigned int num,
1839 return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
1841 EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
1844 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
1845 * @_vq: the struct virtqueue
1847 * Instead of virtqueue_kick(), you can do:
1848 * if (virtqueue_kick_prepare(vq))
1849 * virtqueue_notify(vq);
1851 * This is sometimes useful because virtqueue_kick_prepare() needs
1852 * to be serialized, but the actual virtqueue_notify() call does not.
1854 bool virtqueue_kick_prepare(struct virtqueue *_vq)
1856 struct vring_virtqueue *vq = to_vvq(_vq);
1858 return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
1859 virtqueue_kick_prepare_split(_vq);
1861 EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
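
/*
 * Sketch of the split kick pattern (illustrative only): drivers that add
 * buffers under a lock usually drop the lock before notifying, e.g.
 *
 *	spin_lock_irqsave(&lock, flags);
 *	err = virtqueue_add_sgs(vq, sgs, out, in, data, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */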
1864 * virtqueue_notify - second half of split virtqueue_kick call.
1865 * @_vq: the struct virtqueue
1867 * This does not need to be serialized.
1869 * Returns false if host notify failed or queue is broken, otherwise true.
1871 bool virtqueue_notify(struct virtqueue *_vq)
1873 struct vring_virtqueue *vq = to_vvq(_vq);
1875 if (unlikely(vq->broken))
1878 /* Prod other side to tell it about changes. */
1879 if (!vq->notify(_vq)) {
1885 EXPORT_SYMBOL_GPL(virtqueue_notify);
1888 * virtqueue_kick - update after add_buf
1889 * @vq: the struct virtqueue
1891 * After one or more virtqueue_add_* calls, invoke this to kick
1892 * the other side.
1894 * Caller must ensure we don't call this with other virtqueue
1895 * operations at the same time (except where noted).
1897 * Returns false if kick failed, otherwise true.
1899 bool virtqueue_kick(struct virtqueue *vq)
1901 if (virtqueue_kick_prepare(vq))
1902 return virtqueue_notify(vq);
1905 EXPORT_SYMBOL_GPL(virtqueue_kick);
1908 * virtqueue_get_buf_ctx - get the next used buffer
1909 * @_vq: the struct virtqueue we're talking about.
1910 * @len: the length written into the buffer
1911 * @ctx: extra context for the token
1913 * If the device wrote data into the buffer, @len will be set to the
1914 * amount written. This means you don't need to clear the buffer
1915 * beforehand to ensure there's no data leakage in the case of short
1916 * writes.
1918 * Caller must ensure we don't call this with other virtqueue
1919 * operations at the same time (except where noted).
1921 * Returns NULL if there are no used buffers, or the "data" token
1922 * handed to virtqueue_add_*().
1924 void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
1927 struct vring_virtqueue *vq = to_vvq(_vq);
1929 return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
1930 virtqueue_get_buf_ctx_split(_vq, len, ctx);
1932 EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
1934 void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
1936 return virtqueue_get_buf_ctx(_vq, len, NULL);
1938 EXPORT_SYMBOL_GPL(virtqueue_get_buf);
1940 * virtqueue_disable_cb - disable callbacks
1941 * @_vq: the struct virtqueue we're talking about.
1943 * Note that this is not necessarily synchronous, hence unreliable and only
1944 * useful as an optimization.
1946 * Unlike other operations, this need not be serialized.
1948 void virtqueue_disable_cb(struct virtqueue *_vq)
1950 struct vring_virtqueue *vq = to_vvq(_vq);
1952 /* If device triggered an event already it won't trigger one again:
1953 * no need to disable.
1955 if (vq->event_triggered)
1958 if (vq->packed_ring)
1959 virtqueue_disable_cb_packed(_vq);
1961 virtqueue_disable_cb_split(_vq);
1963 EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
1966 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
1967 * @_vq: the struct virtqueue we're talking about.
1969 * This re-enables callbacks; it returns current queue state
1970 * in an opaque unsigned value. This value should be later tested by
1971 * virtqueue_poll, to detect a possible race between the driver checking for
1972 * more work, and enabling callbacks.
1974 * Caller must ensure we don't call this with other virtqueue
1975 * operations at the same time (except where noted).
1977 unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
1979 struct vring_virtqueue *vq = to_vvq(_vq);
1981 if (vq->event_triggered)
1982 vq->event_triggered = false;
1984 return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
1985 virtqueue_enable_cb_prepare_split(_vq);
1987 EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
1990 * virtqueue_poll - query pending used buffers
1991 * @_vq: the struct virtqueue we're talking about.
1992 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
1994 * Returns "true" if there are pending used buffers in the queue.
1996 * This does not need to be serialized.
1998 bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
2000 struct vring_virtqueue *vq = to_vvq(_vq);
2002 if (unlikely(vq->broken))
2005 virtio_mb(vq->weak_barriers);
2006 return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
2007 virtqueue_poll_split(_vq, last_used_idx);
2009 EXPORT_SYMBOL_GPL(virtqueue_poll);
2012 * virtqueue_enable_cb - restart callbacks after disable_cb.
2013 * @_vq: the struct virtqueue we're talking about.
2015 * This re-enables callbacks; it returns "false" if there are pending
2016 * buffers in the queue, to detect a possible race between the driver
2017 * checking for more work, and enabling callbacks.
2019 * Caller must ensure we don't call this with other virtqueue
2020 * operations at the same time (except where noted).
2022 bool virtqueue_enable_cb(struct virtqueue *_vq)
2024 unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
2026 return !virtqueue_poll(_vq, last_used_idx);
2028 EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
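
/*
 * Sketch of the usual consume loop in a driver callback (illustrative only;
 * process() stands in for driver-specific handling):
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *			process(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */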
2031 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
2032 * @_vq: the struct virtqueue we're talking about.
2034 * This re-enables callbacks but hints to the other side to delay
2035 * interrupts until most of the available buffers have been processed;
2036 * it returns "false" if there are many pending buffers in the queue,
2037 * to detect a possible race between the driver checking for more work,
2038 * and enabling callbacks.
2040 * Caller must ensure we don't call this with other virtqueue
2041 * operations at the same time (except where noted).
2043 bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
2045 struct vring_virtqueue *vq = to_vvq(_vq);
2047 if (vq->event_triggered)
2048 vq->event_triggered = false;
2050 return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
2051 virtqueue_enable_cb_delayed_split(_vq);
2053 EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
2056 * virtqueue_detach_unused_buf - detach first unused buffer
2057 * @_vq: the struct virtqueue we're talking about.
2059 * Returns NULL or the "data" token handed to virtqueue_add_*().
2060 * This is not valid on an active queue; it is useful only for device
2061 * shutdown.
2063 void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
2065 struct vring_virtqueue *vq = to_vvq(_vq);
2067 return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
2068 virtqueue_detach_unused_buf_split(_vq);
2070 EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
2072 static inline bool more_used(const struct vring_virtqueue *vq)
2074 return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
2077 irqreturn_t vring_interrupt(int irq, void *_vq)
2079 struct vring_virtqueue *vq = to_vvq(_vq);
2081 if (!more_used(vq)) {
2082 pr_debug("virtqueue interrupt with no work for %p\n", vq);
2086 if (unlikely(vq->broken))
2089 /* Just a hint for performance: so it's ok that this can be racy! */
2091 vq->event_triggered = true;
2093 pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
2094 if (vq->vq.callback)
2095 vq->vq.callback(&vq->vq);
2099 EXPORT_SYMBOL_GPL(vring_interrupt);
2101 /* Only available for split ring */
2102 struct virtqueue *__vring_new_virtqueue(unsigned int index,
2104 struct virtio_device *vdev,
2107 bool (*notify)(struct virtqueue *),
2108 void (*callback)(struct virtqueue *),
2112 struct vring_virtqueue *vq;
2114 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2117 vq = kmalloc(sizeof(*vq), GFP_KERNEL);
2121 vq->packed_ring = false;
2122 vq->vq.callback = callback;
2125 vq->vq.num_free = vring.num;
2126 vq->vq.index = index;
2127 vq->we_own_ring = false;
2128 vq->notify = notify;
2129 vq->weak_barriers = weak_barriers;
2131 vq->last_used_idx = 0;
2132 vq->event_triggered = false;
2134 vq->use_dma_api = vring_use_dma_api(vdev);
2137 vq->last_add_time_valid = false;
2140 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
2142 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
2144 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
2145 vq->weak_barriers = false;
2147 vq->split.queue_dma_addr = 0;
2148 vq->split.queue_size_in_bytes = 0;
2150 vq->split.vring = vring;
2151 vq->split.avail_flags_shadow = 0;
2152 vq->split.avail_idx_shadow = 0;
2154 /* No callback? Tell other side not to bother us. */
2156 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
2158 vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
2159 vq->split.avail_flags_shadow);
2162 vq->split.desc_state = kmalloc_array(vring.num,
2163 sizeof(struct vring_desc_state_split), GFP_KERNEL);
2164 if (!vq->split.desc_state)
2167 /* Put everything in free lists. */
2169 for (i = 0; i < vring.num-1; i++)
2170 vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
2171 memset(vq->split.desc_state, 0, vring.num *
2172 sizeof(struct vring_desc_state_split));
2174 list_add_tail(&vq->vq.list, &vdev->vqs);
2181 EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
2183 struct virtqueue *vring_create_virtqueue(
2186 unsigned int vring_align,
2187 struct virtio_device *vdev,
2189 bool may_reduce_num,
2191 bool (*notify)(struct virtqueue *),
2192 void (*callback)(struct virtqueue *),
2196 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2197 return vring_create_virtqueue_packed(index, num, vring_align,
2198 vdev, weak_barriers, may_reduce_num,
2199 context, notify, callback, name);
2201 return vring_create_virtqueue_split(index, num, vring_align,
2202 vdev, weak_barriers, may_reduce_num,
2203 context, notify, callback, name);
2205 EXPORT_SYMBOL_GPL(vring_create_virtqueue);
2207 /* Only available for split ring */
2208 struct virtqueue *vring_new_virtqueue(unsigned int index,
2210 unsigned int vring_align,
2211 struct virtio_device *vdev,
2215 bool (*notify)(struct virtqueue *vq),
2216 void (*callback)(struct virtqueue *vq),
2221 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2224 vring_init(&vring, num, pages, vring_align);
2225 return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
2226 notify, callback, name);
2228 EXPORT_SYMBOL_GPL(vring_new_virtqueue);
2230 void vring_del_virtqueue(struct virtqueue *_vq)
2232 struct vring_virtqueue *vq = to_vvq(_vq);
2234 if (vq->we_own_ring) {
2235 if (vq->packed_ring) {
2236 vring_free_queue(vq->vq.vdev,
2237 vq->packed.ring_size_in_bytes,
2238 vq->packed.vring.desc,
2239 vq->packed.ring_dma_addr);
2241 vring_free_queue(vq->vq.vdev,
2242 vq->packed.event_size_in_bytes,
2243 vq->packed.vring.driver,
2244 vq->packed.driver_event_dma_addr);
2246 vring_free_queue(vq->vq.vdev,
2247 vq->packed.event_size_in_bytes,
2248 vq->packed.vring.device,
2249 vq->packed.device_event_dma_addr);
2251 kfree(vq->packed.desc_state);
2252 kfree(vq->packed.desc_extra);
2254 vring_free_queue(vq->vq.vdev,
2255 vq->split.queue_size_in_bytes,
2256 vq->split.vring.desc,
2257 vq->split.queue_dma_addr);
2260 if (!vq->packed_ring)
2261 kfree(vq->split.desc_state);
2262 list_del(&_vq->list);
2265 EXPORT_SYMBOL_GPL(vring_del_virtqueue);
2267 /* Manipulates transport-specific feature bits. */
2268 void vring_transport_features(struct virtio_device *vdev)
2272 for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
2274 case VIRTIO_RING_F_INDIRECT_DESC:
2276 case VIRTIO_RING_F_EVENT_IDX:
2278 case VIRTIO_F_VERSION_1:
2280 case VIRTIO_F_ACCESS_PLATFORM:
2282 case VIRTIO_F_RING_PACKED:
2284 case VIRTIO_F_ORDER_PLATFORM:
2287 /* We don't understand this bit. */
2288 __virtio_clear_bit(vdev, i);
2292 EXPORT_SYMBOL_GPL(vring_transport_features);
2295 * virtqueue_get_vring_size - return the size of the virtqueue's vring
2296 * @_vq: the struct virtqueue containing the vring of interest.
2298 * Returns the size of the vring. This is mainly used for boasting to
2299 * userspace. Unlike other operations, this need not be serialized.
2301 unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
2304 struct vring_virtqueue *vq = to_vvq(_vq);
2306 return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
2308 EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
2310 bool virtqueue_is_broken(struct virtqueue *_vq)
2312 struct vring_virtqueue *vq = to_vvq(_vq);
2316 EXPORT_SYMBOL_GPL(virtqueue_is_broken);
2319 * This should prevent the device from being used, allowing drivers to
2320 * recover. You may need to grab appropriate locks to flush.
2322 void virtio_break_device(struct virtio_device *dev)
2324 struct virtqueue *_vq;
2326 list_for_each_entry(_vq, &dev->vqs, list) {
2327 struct vring_virtqueue *vq = to_vvq(_vq);
2331 EXPORT_SYMBOL_GPL(virtio_break_device);
2333 dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
2335 struct vring_virtqueue *vq = to_vvq(_vq);
2337 BUG_ON(!vq->we_own_ring);
2339 if (vq->packed_ring)
2340 return vq->packed.ring_dma_addr;
2342 return vq->split.queue_dma_addr;
2344 EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
2346 dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
2348 struct vring_virtqueue *vq = to_vvq(_vq);
2350 BUG_ON(!vq->we_own_ring);
2352 if (vq->packed_ring)
2353 return vq->packed.driver_event_dma_addr;
2355 return vq->split.queue_dma_addr +
2356 ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
2358 EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
2360 dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
2362 struct vring_virtqueue *vq = to_vvq(_vq);
2364 BUG_ON(!vq->we_own_ring);
2366 if (vq->packed_ring)
2367 return vq->packed.device_event_dma_addr;
2369 return vq->split.queue_dma_addr +
2370 ((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
2372 EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
2374 /* Only available for split ring */
2375 const struct vring *virtqueue_get_vring(struct virtqueue *vq)
2377 return &to_vvq(vq)->split.vring;
2379 EXPORT_SYMBOL_GPL(virtqueue_get_vring);
2381 MODULE_LICENSE("GPL");