Merge branch 'for-next-merge' of git://git.kernel.org/pub/scm/linux/kernel/git/nab...

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index e12e385f7ac3507e60a3c77b0ff1b25a3c168b26..5c802d47892c5091c971eaef256310f99b4d175d 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -24,6 +24,8 @@
 #include <linux/module.h>
 #include <linux/hrtimer.h>
 #include <linux/kmemleak.h>
+#include <linux/dma-mapping.h>
+#include <xen/xen.h>
 
 #ifdef DEBUG
 /* For development, we want to crash whenever the ring is screwed. */
 #define END_USE(vq)
 #endif
 
+struct vring_desc_state {
+       void *data;                     /* Data for callback. */
+       struct vring_desc *indir_desc;  /* Indirect descriptor, if any. */
+};
+
 struct vring_virtqueue {
        struct virtqueue vq;
 
@@ -89,6 +96,11 @@ struct vring_virtqueue {
        /* How to notify other side. FIXME: commonalize hcalls! */
        bool (*notify)(struct virtqueue *vq);
 
+       /* DMA, allocation, and size information */
+       bool we_own_ring;
+       size_t queue_size_in_bytes;
+       dma_addr_t queue_dma_addr;
+
 #ifdef DEBUG
        /* They're supposed to lock for us. */
        unsigned int in_use;
@@ -98,12 +110,120 @@ struct vring_virtqueue {
        ktime_t last_add_time;
 #endif
 
-       /* Tokens for callbacks. */
-       void *data[];
+       /* Per-descriptor state. */
+       struct vring_desc_state desc_state[];
 };
 
 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
 
+/*
+ * The interaction between virtio and a possible IOMMU is a mess.
+ *
+ * On most systems with virtio, physical addresses match bus addresses,
+ * and it doesn't particularly matter whether we use the DMA API.
+ *
+ * On some systems, including Xen and any system with a physical device
+ * that speaks virtio behind a physical IOMMU, we must use the DMA API
+ * for virtio DMA to work at all.
+ *
+ * On other systems, including SPARC and PPC64, virtio-pci devices are
+ * enumerated as though they are behind an IOMMU, but the virtio host
+ * ignores the IOMMU, so we must either pretend that the IOMMU isn't
+ * there or somehow map everything as the identity.
+ *
+ * For the time being, we preserve historic behavior and bypass the DMA
+ * API.
+ */
+
+static bool vring_use_dma_api(struct virtio_device *vdev)
+{
+       /*
+        * In theory, it's possible to have a buggy QEMU-supplied
+        * emulated Q35 IOMMU and Xen enabled at the same time.  On
+        * such a configuration, virtio has never worked and will
+        * not work without an even larger kludge.  Instead, enable
+        * the DMA API if we're a Xen guest, which at least allows
+        * all of the sensible Xen configurations to work correctly.
+        */
+       if (xen_domain())
+               return true;
+
+       return false;
+}
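
If the bypass policy ever needs per-device granularity, one way it could
grow is a feature bit negotiated with the host. A sketch only; the
VIRTIO_F_IOMMU_PLATFORM bit below is a hypothetical opt-in, not something
this patch defines:

	static bool vring_use_dma_api(struct virtio_device *vdev)
	{
		if (xen_domain())
			return true;

		/* Hypothetical opt-in: honor a platform IOMMU if the
		 * device negotiated a feature bit saying it obeys one. */
		if (virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM))
			return true;

		return false;
	}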
+
+/*
+ * The DMA ops on various arches are rather gnarly right now, and
+ * making all of the arch DMA ops work on the vring device itself
+ * is a mess.  For now, we use the parent device for DMA ops.
+ */
+static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+{
+       return vq->vq.vdev->dev.parent;
+}
+
+/* Map one sg entry. */
+static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
+                                  struct scatterlist *sg,
+                                  enum dma_data_direction direction)
+{
+       if (!vring_use_dma_api(vq->vq.vdev))
+               return (dma_addr_t)sg_phys(sg);
+
+       /*
+        * We can't use dma_map_sg, because we don't use scatterlists in
+        * the way it expects (we don't guarantee that the scatterlist
+        * will exist for the lifetime of the mapping).
+        */
+       return dma_map_page(vring_dma_dev(vq),
+                           sg_page(sg), sg->offset, sg->length,
+                           direction);
+}
+
+static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
+                                  void *cpu_addr, size_t size,
+                                  enum dma_data_direction direction)
+{
+       if (!vring_use_dma_api(vq->vq.vdev))
+               return (dma_addr_t)virt_to_phys(cpu_addr);
+
+       return dma_map_single(vring_dma_dev(vq),
+                             cpu_addr, size, direction);
+}
+
+static void vring_unmap_one(const struct vring_virtqueue *vq,
+                           struct vring_desc *desc)
+{
+       u16 flags;
+
+       if (!vring_use_dma_api(vq->vq.vdev))
+               return;
+
+       flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
+
+       if (flags & VRING_DESC_F_INDIRECT) {
+               dma_unmap_single(vring_dma_dev(vq),
+                                virtio64_to_cpu(vq->vq.vdev, desc->addr),
+                                virtio32_to_cpu(vq->vq.vdev, desc->len),
+                                (flags & VRING_DESC_F_WRITE) ?
+                                DMA_FROM_DEVICE : DMA_TO_DEVICE);
+       } else {
+               dma_unmap_page(vring_dma_dev(vq),
+                              virtio64_to_cpu(vq->vq.vdev, desc->addr),
+                              virtio32_to_cpu(vq->vq.vdev, desc->len),
+                              (flags & VRING_DESC_F_WRITE) ?
+                              DMA_FROM_DEVICE : DMA_TO_DEVICE);
+       }
+}
+
+static int vring_mapping_error(const struct vring_virtqueue *vq,
+                              dma_addr_t addr)
+{
+       if (!vring_use_dma_api(vq->vq.vdev))
+               return 0;
+
+       return dma_mapping_error(vring_dma_dev(vq), addr);
+}
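
Together, these three helpers give virtqueue_add() a uniform pattern: map
each scatterlist entry individually, check it, and unwind on failure. A
minimal sketch of that pattern (the real user follows below; unmap_release
is its error path):

	for (sg = sgs[n]; sg; sg = sg_next(sg)) {
		dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);

		if (vring_mapping_error(vq, addr))
			goto unmap_release;	/* vring_unmap_one() everything mapped so far */
		desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
	}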
+
 static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
                                         unsigned int total_sg, gfp_t gfp)
 {
@@ -137,7 +257,7 @@ static inline int virtqueue_add(struct virtqueue *_vq,
        struct vring_virtqueue *vq = to_vvq(_vq);
        struct scatterlist *sg;
        struct vring_desc *desc;
-       unsigned int i, n, avail, descs_used, uninitialized_var(prev);
+       unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
        int head;
        bool indirect;
 
@@ -177,21 +297,15 @@ static inline int virtqueue_add(struct virtqueue *_vq,
 
        if (desc) {
                /* Use a single buffer which doesn't continue */
-               vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
-               vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc));
-               /* avoid kmemleak false positive (hidden by virt_to_phys) */
-               kmemleak_ignore(desc);
-               vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
-
+               indirect = true;
                /* Set up rest to use this indirect table. */
                i = 0;
                descs_used = 1;
-               indirect = true;
        } else {
+               indirect = false;
                desc = vq->vring.desc;
                i = head;
                descs_used = total_sg;
-               indirect = false;
        }
 
        if (vq->vq.num_free < descs_used) {
@@ -206,13 +320,14 @@ static inline int virtqueue_add(struct virtqueue *_vq,
                return -ENOSPC;
        }
 
-       /* We're about to use some buffers from the free list. */
-       vq->vq.num_free -= descs_used;
-
        for (n = 0; n < out_sgs; n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+                       dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
+                       if (vring_mapping_error(vq, addr))
+                               goto unmap_release;
+
                        desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
-                       desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
+                       desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
                        desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
                        prev = i;
                        i = virtio16_to_cpu(_vq->vdev, desc[i].next);
@@ -220,8 +335,12 @@ static inline int virtqueue_add(struct virtqueue *_vq,
        }
        for (; n < (out_sgs + in_sgs); n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+                       dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
+                       if (vring_mapping_error(vq, addr))
+                               goto unmap_release;
+
                        desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
-                       desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
+                       desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
                        desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
                        prev = i;
                        i = virtio16_to_cpu(_vq->vdev, desc[i].next);
@@ -230,14 +349,33 @@ static inline int virtqueue_add(struct virtqueue *_vq,
        /* Last one doesn't continue. */
        desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
 
+       if (indirect) {
+               /* Now that the indirect table is filled in, map it. */
+               dma_addr_t addr = vring_map_single(
+                       vq, desc, total_sg * sizeof(struct vring_desc),
+                       DMA_TO_DEVICE);
+               if (vring_mapping_error(vq, addr))
+                       goto unmap_release;
+
+               vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
+               vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);
+
+               vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
+       }
+
+       /* We're using some buffers from the free list. */
+       vq->vq.num_free -= descs_used;
+
        /* Update free pointer */
        if (indirect)
                vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
        else
                vq->free_head = i;
 
-       /* Set token. */
-       vq->data[head] = data;
+       /* Store token and indirect buffer state. */
+       vq->desc_state[head].data = data;
+       if (indirect)
+               vq->desc_state[head].indir_desc = desc;
 
        /* Put entry in available array (but don't update avail->idx until they
         * do sync). */
@@ -260,6 +398,24 @@ static inline int virtqueue_add(struct virtqueue *_vq,
                virtqueue_kick(_vq);
 
        return 0;
+
+unmap_release:
+       err_idx = i;
+       i = head;
+
+       for (n = 0; n < total_sg; n++) {
+               if (i == err_idx)
+                       break;
+               vring_unmap_one(vq, &desc[i]);
+               /* desc[] is the indirect table when indirect, else the ring */
+               i = virtio16_to_cpu(_vq->vdev, desc[i].next);
+       }
+
+       vq->vq.num_free += total_sg;
+
+       if (indirect)
+               kfree(desc);
+
+       return -EIO;
 }
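
Driver-facing behavior is unchanged: buffers are still queued through the
existing wrappers, and the mapping happens transparently inside
virtqueue_add(). A typical caller, shown for illustration only:

	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, len);
	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
	if (err)
		return err;	/* -ENOSPC if the ring is full; now also -EIO on mapping failure */
	virtqueue_kick(vq);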
 
 /**
@@ -430,27 +586,43 @@ EXPORT_SYMBOL_GPL(virtqueue_kick);
 
 static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
 {
-       unsigned int i;
+       unsigned int i, j;
+       u16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
 
        /* Clear data ptr. */
-       vq->data[head] = NULL;
+       vq->desc_state[head].data = NULL;
 
-       /* Put back on free list: find end */
+       /* Put back on free list: unmap first-level descriptors and find end */
        i = head;
 
-       /* Free the indirect table */
-       if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))
-               kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr)));
-
-       while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) {
+       while (vq->vring.desc[i].flags & nextflag) {
+               vring_unmap_one(vq, &vq->vring.desc[i]);
                i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
                vq->vq.num_free++;
        }
 
+       vring_unmap_one(vq, &vq->vring.desc[i]);
        vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
        vq->free_head = head;
+
        /* Plus final descriptor */
        vq->vq.num_free++;
+
+       /* Free the indirect table, if any, now that it's unmapped. */
+       if (vq->desc_state[head].indir_desc) {
+               struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
+               u32 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);
+
+               BUG_ON(!(vq->vring.desc[head].flags &
+                        cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
+               BUG_ON(len == 0 || len % sizeof(struct vring_desc));
+
+               for (j = 0; j < len / sizeof(struct vring_desc); j++)
+                       vring_unmap_one(vq, &indir_desc[j]);
+
+               kfree(vq->desc_state[head].indir_desc);
+               vq->desc_state[head].indir_desc = NULL;
+       }
 }
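
Worked example: an indirect buffer built from total_sg == 3 entries was
mapped with len = 3 * sizeof(struct vring_desc) = 48 bytes, so the loop
above unmaps exactly j = 0, 1, 2 before the kfree().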
 
 static inline bool more_used(const struct vring_virtqueue *vq)
@@ -505,13 +677,13 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
                BAD_RING(vq, "id %u out of range\n", i);
                return NULL;
        }
-       if (unlikely(!vq->data[i])) {
+       if (unlikely(!vq->desc_state[i].data)) {
                BAD_RING(vq, "id %u is not a head!\n", i);
                return NULL;
        }
 
        /* detach_buf clears data, so grab it now. */
-       ret = vq->data[i];
+       ret = vq->desc_state[i].data;
        detach_buf(vq, i);
        vq->last_used_idx++;
        /* If we expect an interrupt for the next entry, tell host
@@ -685,10 +857,10 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
        START_USE(vq);
 
        for (i = 0; i < vq->vring.num; i++) {
-               if (!vq->data[i])
+               if (!vq->desc_state[i].data)
                        continue;
                /* detach_buf clears data, so grab it now. */
-               buf = vq->data[i];
+               buf = vq->desc_state[i].data;
                detach_buf(vq, i);
                vq->avail_idx_shadow--;
                vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
@@ -723,35 +895,31 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
 }
 EXPORT_SYMBOL_GPL(vring_interrupt);
 
-struct virtqueue *vring_new_virtqueue(unsigned int index,
-                                     unsigned int num,
-                                     unsigned int vring_align,
-                                     struct virtio_device *vdev,
-                                     bool weak_barriers,
-                                     void *pages,
-                                     bool (*notify)(struct virtqueue *),
-                                     void (*callback)(struct virtqueue *),
-                                     const char *name)
+struct virtqueue *__vring_new_virtqueue(unsigned int index,
+                                       struct vring vring,
+                                       struct virtio_device *vdev,
+                                       bool weak_barriers,
+                                       bool (*notify)(struct virtqueue *),
+                                       void (*callback)(struct virtqueue *),
+                                       const char *name)
 {
-       struct vring_virtqueue *vq;
        unsigned int i;
+       struct vring_virtqueue *vq;
 
-       /* We assume num is a power of 2. */
-       if (num & (num - 1)) {
-               dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
-               return NULL;
-       }
-
-       vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
+       vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
+                    GFP_KERNEL);
        if (!vq)
                return NULL;
 
-       vring_init(&vq->vring, num, pages, vring_align);
+       vq->vring = vring;
        vq->vq.callback = callback;
        vq->vq.vdev = vdev;
        vq->vq.name = name;
-       vq->vq.num_free = num;
+       vq->vq.num_free = vring.num;
        vq->vq.index = index;
+       vq->we_own_ring = false;
+       vq->queue_dma_addr = 0;
+       vq->queue_size_in_bytes = 0;
        vq->notify = notify;
        vq->weak_barriers = weak_barriers;
        vq->broken = false;
@@ -776,20 +944,145 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
 
        /* Put everything in free lists. */
        vq->free_head = 0;
-       for (i = 0; i < num-1; i++) {
+       for (i = 0; i < vring.num-1; i++)
                vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
-               vq->data[i] = NULL;
-       }
-       vq->data[i] = NULL;
+       memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));
 
        return &vq->vq;
 }
+EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
+
+static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
+                             dma_addr_t *dma_handle, gfp_t flag)
+{
+       if (vring_use_dma_api(vdev)) {
+               return dma_alloc_coherent(vdev->dev.parent, size,
+                                         dma_handle, flag);
+       } else {
+               void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
+               if (queue) {
+                       phys_addr_t phys_addr = virt_to_phys(queue);
+                       *dma_handle = (dma_addr_t)phys_addr;
+
+                       /*
+                        * Sanity check: make sure we didn't truncate
+                        * the address.  The only arches I can find that
+                        * have 64-bit phys_addr_t but 32-bit dma_addr_t
+                        * are certain non-highmem MIPS and x86
+                        * configurations, but these configurations
+                        * should never allocate physical pages above 32
+                        * bits, so this is fine.  Just in case, throw a
+                        * warning and abort if we end up with an
+                        * unrepresentable address.
+                        */
+                       if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
+                               free_pages_exact(queue, PAGE_ALIGN(size));
+                               return NULL;
+                       }
+               }
+               return queue;
+       }
+}
+
+static void vring_free_queue(struct virtio_device *vdev, size_t size,
+                            void *queue, dma_addr_t dma_handle)
+{
+       if (vring_use_dma_api(vdev)) {
+               dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
+       } else {
+               free_pages_exact(queue, PAGE_ALIGN(size));
+       }
+}
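
The two helpers are strict inverses: whichever branch allocated the ring
also frees it, and the caller passes back the same size and DMA handle.
A sketch of the pairing:

	dma_addr_t dma_addr;
	size_t size = vring_size(num, vring_align);

	void *queue = vring_alloc_queue(vdev, size, &dma_addr,
					GFP_KERNEL | __GFP_ZERO);
	if (!queue)
		return NULL;
	/* ... use the ring ... */
	vring_free_queue(vdev, size, queue, dma_addr);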
+
+struct virtqueue *vring_create_virtqueue(
+       unsigned int index,
+       unsigned int num,
+       unsigned int vring_align,
+       struct virtio_device *vdev,
+       bool weak_barriers,
+       bool may_reduce_num,
+       bool (*notify)(struct virtqueue *),
+       void (*callback)(struct virtqueue *),
+       const char *name)
+{
+       struct virtqueue *vq;
+       void *queue;
+       dma_addr_t dma_addr;
+       size_t queue_size_in_bytes;
+       struct vring vring;
+
+       /* We assume num is a power of 2. */
+       if (num & (num - 1)) {
+               dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
+               return NULL;
+       }
+
+       /* TODO: allocate each queue chunk individually */
+       for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
+               queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
+                                         &dma_addr,
+                                         GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+               if (queue)
+                       break;
+       }
+
+       if (!num)
+               return NULL;
+
+       if (!queue) {
+               /* Try to get a single page. You are my only hope! */
+               queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
+                                         &dma_addr, GFP_KERNEL|__GFP_ZERO);
+       }
+       if (!queue)
+               return NULL;
+
+       queue_size_in_bytes = vring_size(num, vring_align);
+       vring_init(&vring, num, queue, vring_align);
+
+       vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers,
+                                  notify, callback, name);
+       if (!vq) {
+               vring_free_queue(vdev, queue_size_in_bytes, queue,
+                                dma_addr);
+               return NULL;
+       }
+
+       to_vvq(vq)->queue_dma_addr = dma_addr;
+       to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
+       to_vvq(vq)->we_own_ring = true;
+
+       return vq;
+}
+EXPORT_SYMBOL_GPL(vring_create_virtqueue);
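
A transport would use the new constructor roughly as follows. The register
names and writeq() layout are hypothetical; the point is that the device
must be programmed with the returned bus addresses, never virt_to_phys()
of the ring:

	vq = vring_create_virtqueue(index, num, SMP_CACHE_BYTES, vdev,
				    true, true, vp_notify, callback, name);
	if (!vq)
		return NULL;

	/* VQ_DESC/VQ_AVAIL/VQ_USED are hypothetical device registers. */
	writeq(virtqueue_get_desc_addr(vq),  base + VQ_DESC);
	writeq(virtqueue_get_avail_addr(vq), base + VQ_AVAIL);
	writeq(virtqueue_get_used_addr(vq),  base + VQ_USED);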
+
+struct virtqueue *vring_new_virtqueue(unsigned int index,
+                                     unsigned int num,
+                                     unsigned int vring_align,
+                                     struct virtio_device *vdev,
+                                     bool weak_barriers,
+                                     void *pages,
+                                     bool (*notify)(struct virtqueue *vq),
+                                     void (*callback)(struct virtqueue *vq),
+                                     const char *name)
+{
+       struct vring vring;
+       vring_init(&vring, num, pages, vring_align);
+       return __vring_new_virtqueue(index, vring, vdev, weak_barriers,
+                                    notify, callback, name);
+}
 EXPORT_SYMBOL_GPL(vring_new_virtqueue);
 
-void vring_del_virtqueue(struct virtqueue *vq)
+void vring_del_virtqueue(struct virtqueue *_vq)
 {
-       list_del(&vq->list);
-       kfree(to_vvq(vq));
+       struct vring_virtqueue *vq = to_vvq(_vq);
+
+       if (vq->we_own_ring) {
+               vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
+                                vq->vring.desc, vq->queue_dma_addr);
+       }
+       list_del(&_vq->list);
+       kfree(vq);
 }
 EXPORT_SYMBOL_GPL(vring_del_virtqueue);
 
@@ -853,20 +1146,42 @@ void virtio_break_device(struct virtio_device *dev)
 }
 EXPORT_SYMBOL_GPL(virtio_break_device);
 
-void *virtqueue_get_avail(struct virtqueue *_vq)
+dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
 
-       return vq->vring.avail;
+       BUG_ON(!vq->we_own_ring);
+
+       return vq->queue_dma_addr;
 }
-EXPORT_SYMBOL_GPL(virtqueue_get_avail);
+EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
 
-void *virtqueue_get_used(struct virtqueue *_vq)
+dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
 
-       return vq->vring.used;
+       BUG_ON(!vq->we_own_ring);
+
+       return vq->queue_dma_addr +
+               ((char *)vq->vring.avail - (char *)vq->vring.desc);
+}
+EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
+
+dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
+{
+       struct vring_virtqueue *vq = to_vvq(_vq);
+
+       BUG_ON(!vq->we_own_ring);
+
+       return vq->queue_dma_addr +
+               ((char *)vq->vring.used - (char *)vq->vring.desc);
+}
+EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
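
Because desc, avail, and used share a single allocation laid out by
vring_init(), these accessors are pure offset arithmetic. For example,
with num == 256 and vring_align == 4096:

	desc:  offset 0     (256 * sizeof(struct vring_desc) = 4096 bytes)
	avail: offset 4096  (6 + 2 * 256 = 518 bytes, then padding)
	used:  offset 8192  (rounded up to the next vring_align boundary)

so virtqueue_get_avail_addr() returns queue_dma_addr + 4096 and
virtqueue_get_used_addr() returns queue_dma_addr + 8192.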
+
+const struct vring *virtqueue_get_vring(struct virtqueue *vq)
+{
+       return &to_vvq(vq)->vring;
 }
-EXPORT_SYMBOL_GPL(virtqueue_get_used);
+EXPORT_SYMBOL_GPL(virtqueue_get_vring);
 
 MODULE_LICENSE("GPL");