drm/vmwgfx: Replace iowrite/ioread with volatile memory accesses
author: Thomas Hellstrom <thellstrom@vmware.com>
Wed, 28 Oct 2015 09:44:04 +0000 (10:44 +0100)
committer: Thomas Hellstrom <thellstrom@vmware.com>
Mon, 2 Nov 2015 08:16:05 +0000 (00:16 -0800)
Now that we use memremap instead of ioremap, use WRITE_ONCE / READ_ONCE
instead of iowrite / ioread.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c

index bee0a45082e2d3c11dce6a6dfbd36ebd7e0955e1..d1c34aba7abdec4fae7d80fb472ad8b44556dd34 100644 (file)
@@ -752,14 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        dev_priv->active_master = &dev_priv->fbdev_master;
 
-       /*
-        * Force __iomem for this mapping until the implied compiler
-        * barriers and {READ|WRITE}_ONCE semantics from the
-        * io{read|write}32() accessors can be replaced with explicit
-        * barriers.
-        */
-       dev_priv->mmio_virt = (void __iomem *) memremap(dev_priv->mmio_start,
-                                           dev_priv->mmio_size, MEMREMAP_WB);
+       dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
+                                      dev_priv->mmio_size, MEMREMAP_WB);
 
        if (unlikely(dev_priv->mmio_virt == NULL)) {
                ret = -ENOMEM;
@@ -913,7 +907,7 @@ out_no_irq:
 out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
 out_err4:
-       memunmap((void __force *) dev_priv->mmio_virt);
+       memunmap(dev_priv->mmio_virt);
 out_err3:
        vmw_ttm_global_release(dev_priv);
 out_err0:
@@ -964,7 +958,7 @@ static int vmw_driver_unload(struct drm_device *dev)
                pci_release_regions(dev->pdev);
 
        ttm_object_device_release(&dev_priv->tdev);
-       memunmap((void __force *) dev_priv->mmio_virt);
+       memunmap(dev_priv->mmio_virt);
        if (dev_priv->ctx.staged_bindings)
                vmw_binding_state_free(dev_priv->ctx.staged_bindings);
        vmw_ttm_global_release(dev_priv);
index a613bd4851ba84351d3e6181502511c34c9dec9d..198c8b1a81e286e9c681ae022d63d909bf64d8a3 100644 (file)
@@ -375,7 +375,7 @@ struct vmw_private {
        uint32_t stdu_max_height;
        uint32_t initial_width;
        uint32_t initial_height;
-       u32 __iomem *mmio_virt;
+       u32 *mmio_virt;
        uint32_t capabilities;
        uint32_t max_gmr_ids;
        uint32_t max_gmr_pages;
@@ -1206,4 +1206,30 @@ static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
 {
        atomic_dec(&dev_priv->num_fifo_resources);
 }
+
+/**
+ * vmw_mmio_read - Perform a MMIO read from volatile memory
+ *
+ * @addr: The address to read from
+ *
+ * This function is intended to be equivalent to ioread32() on
+ * memremap'd memory, but without byteswapping.
+ */
+static inline u32 vmw_mmio_read(u32 *addr)
+{
+       return READ_ONCE(*addr);
+}
+
+/**
+ * vmw_mmio_write - Perform a MMIO write to volatile memory
+ *
+ * @addr: The address to write to
+ *
+ * This function is intended to be equivalent to iowrite32 on
+ * memremap'd memory, but without byteswapping.
+ */
+static inline void vmw_mmio_write(u32 value, u32 *addr)
+{
+       WRITE_ONCE(*addr, value);
+}
 #endif
index 567ddede51d10236c0ed13f39e4b8ddc4eb84320..8e689b439890061ffc066b4c265590566c69e2d2 100644 (file)
@@ -142,8 +142,8 @@ static bool vmw_fence_enable_signaling(struct fence *f)
        struct vmw_fence_manager *fman = fman_from_fence(fence);
        struct vmw_private *dev_priv = fman->dev_priv;
 
-       u32 __iomem *fifo_mem = dev_priv->mmio_virt;
-       u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+       u32 *fifo_mem = dev_priv->mmio_virt;
+       u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
        if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
                return false;
 
@@ -386,14 +386,14 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
                                      u32 passed_seqno)
 {
        u32 goal_seqno;
-       u32 __iomem *fifo_mem;
+       u32 *fifo_mem;
        struct vmw_fence_obj *fence;
 
        if (likely(!fman->seqno_valid))
                return false;
 
        fifo_mem = fman->dev_priv->mmio_virt;
-       goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+       goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
        if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
                return false;
 
@@ -401,8 +401,8 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
        list_for_each_entry(fence, &fman->fence_list, head) {
                if (!list_empty(&fence->seq_passed_actions)) {
                        fman->seqno_valid = true;
-                       iowrite32(fence->base.seqno,
-                                 fifo_mem + SVGA_FIFO_FENCE_GOAL);
+                       vmw_mmio_write(fence->base.seqno,
+                                      fifo_mem + SVGA_FIFO_FENCE_GOAL);
                        break;
                }
        }
@@ -430,18 +430,18 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
 {
        struct vmw_fence_manager *fman = fman_from_fence(fence);
        u32 goal_seqno;
-       u32 __iomem *fifo_mem;
+       u32 *fifo_mem;
 
        if (fence_is_signaled_locked(&fence->base))
                return false;
 
        fifo_mem = fman->dev_priv->mmio_virt;
-       goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+       goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
        if (likely(fman->seqno_valid &&
                   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
                return false;
 
-       iowrite32(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
+       vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
        fman->seqno_valid = true;
 
        return true;
@@ -453,9 +453,9 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
        struct list_head action_list;
        bool needs_rerun;
        uint32_t seqno, new_seqno;
-       u32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
+       u32 *fifo_mem = fman->dev_priv->mmio_virt;
 
-       seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+       seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
 rerun:
        list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
                if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
@@ -477,7 +477,7 @@ rerun:
 
        needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
        if (unlikely(needs_rerun)) {
-               new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+               new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
                if (new_seqno != seqno) {
                        seqno = new_seqno;
                        goto rerun;
index 80c40c31d4f84867d571b44d6d75e7859f68a5c1..0cbaf88329689c224fc6a50aff224d536cc3ef6e 100644 (file)
@@ -36,7 +36,7 @@ struct vmw_temp_set_context {
 
 bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 {
-       u32 __iomem *fifo_mem = dev_priv->mmio_virt;
+       u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t fifo_min, hwversion;
        const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
@@ -60,15 +60,15 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
        if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
                return false;
 
-       fifo_min = ioread32(fifo_mem  + SVGA_FIFO_MIN);
+       fifo_min = vmw_mmio_read(fifo_mem  + SVGA_FIFO_MIN);
        if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
                return false;
 
-       hwversion = ioread32(fifo_mem +
-                            ((fifo->capabilities &
-                              SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
-                             SVGA_FIFO_3D_HWVERSION_REVISED :
-                             SVGA_FIFO_3D_HWVERSION));
+       hwversion = vmw_mmio_read(fifo_mem +
+                                 ((fifo->capabilities &
+                                   SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+                                  SVGA_FIFO_3D_HWVERSION_REVISED :
+                                  SVGA_FIFO_3D_HWVERSION));
 
        if (hwversion == 0)
                return false;
@@ -85,13 +85,13 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 
 bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
 {
-       u32 __iomem *fifo_mem = dev_priv->mmio_virt;
+       u32  *fifo_mem = dev_priv->mmio_virt;
        uint32_t caps;
 
        if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
                return false;
 
-       caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
+       caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
        if (caps & SVGA_FIFO_CAP_PITCHLOCK)
                return true;
 
@@ -100,7 +100,7 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
 
 int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-       u32 __iomem *fifo_mem = dev_priv->mmio_virt;
+       u32  *fifo_mem = dev_priv->mmio_virt;
        uint32_t max;
        uint32_t min;
 
@@ -137,19 +137,19 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
        if (min < PAGE_SIZE)
                min = PAGE_SIZE;
 
-       iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
-       iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
+       vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
+       vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
        wmb();
-       iowrite32(min,  fifo_mem + SVGA_FIFO_NEXT_CMD);
-       iowrite32(min,  fifo_mem + SVGA_FIFO_STOP);
-       iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
+       vmw_mmio_write(min,  fifo_mem + SVGA_FIFO_NEXT_CMD);
+       vmw_mmio_write(min,  fifo_mem + SVGA_FIFO_STOP);
+       vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
        mb();
 
        vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
 
-       max = ioread32(fifo_mem + SVGA_FIFO_MAX);
-       min = ioread32(fifo_mem  + SVGA_FIFO_MIN);
-       fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
+       max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
+       min = vmw_mmio_read(fifo_mem  + SVGA_FIFO_MIN);
+       fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
 
        DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
                 (unsigned int) max,
@@ -157,7 +157,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
                 (unsigned int) fifo->capabilities);
 
        atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
-       iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
+       vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
        vmw_marker_queue_init(&fifo->marker_queue);
 
        return 0;
@@ -165,31 +165,23 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 
 void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 {
-       u32 __iomem *fifo_mem = dev_priv->mmio_virt;
-       static DEFINE_SPINLOCK(ping_lock);
-       unsigned long irq_flags;
+       u32 *fifo_mem = dev_priv->mmio_virt;
 
-       /*
-        * The ping_lock is needed because we don't have an atomic
-        * test-and-set of the SVGA_FIFO_BUSY register.
-        */
-       spin_lock_irqsave(&ping_lock, irq_flags);
-       if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
-               iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
+       preempt_disable();
+       if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
                vmw_write(dev_priv, SVGA_REG_SYNC, reason);
-       }
-       spin_unlock_irqrestore(&ping_lock, irq_flags);
+       preempt_enable();
 }
 
 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-       u32 __iomem *fifo_mem = dev_priv->mmio_virt;
+       u32  *fifo_mem = dev_priv->mmio_virt;
 
        vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
        while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
                ;
 
-       dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+       dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
 
        vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
                  dev_priv->config_done_state);
@@ -213,11 +205,11 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 
 static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
 {
-       u32 __iomem *fifo_mem = dev_priv->mmio_virt;
-       uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
-       uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
-       uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
-       uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
+       u32  *fifo_mem = dev_priv->mmio_virt;
+       uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
+       uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
+       uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
+       uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
 
        return ((max - next_cmd) + (stop - min) <= bytes);
 }
@@ -321,7 +313,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
                                    uint32_t bytes)
 {
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-       u32 __iomem *fifo_mem = dev_priv->mmio_virt;
+       u32  *fifo_mem = dev_priv->mmio_virt;
        uint32_t max;
        uint32_t min;
        uint32_t next_cmd;
@@ -329,9 +321,9 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
        int ret;
 
        mutex_lock(&fifo_state->fifo_mutex);
-       max = ioread32(fifo_mem + SVGA_FIFO_MAX);
-       min = ioread32(fifo_mem + SVGA_FIFO_MIN);
-       next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
+       max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
+       min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
+       next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
 
        if (unlikely(bytes >= (max - min)))
                goto out_err;
@@ -342,7 +334,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
        fifo_state->reserved_size = bytes;
 
        while (1) {
-               uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
+               uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
                bool need_bounce = false;
                bool reserve_in_place = false;
 
@@ -376,8 +368,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
                                fifo_state->using_bounce_buffer = false;
 
                                if (reserveable)
-                                       iowrite32(bytes, fifo_mem +
-                                                 SVGA_FIFO_RESERVED);
+                                       vmw_mmio_write(bytes, fifo_mem +
+                                                      SVGA_FIFO_RESERVED);
                                return (void __force *) (fifo_mem +
                                                         (next_cmd >> 2));
                        } else {
@@ -427,7 +419,7 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
 }
 
 static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
-                             u32 __iomem *fifo_mem,
+                             u32  *fifo_mem,
                              uint32_t next_cmd,
                              uint32_t max, uint32_t min, uint32_t bytes)
 {
@@ -439,17 +431,16 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
        if (bytes < chunk_size)
                chunk_size = bytes;
 
-       iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
+       vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
        mb();
-       memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
+       memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
        rest = bytes - chunk_size;
        if (rest)
-               memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
-                           rest);
+               memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
 }
 
 static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
-                              u32 __iomem *fifo_mem,
+                              u32  *fifo_mem,
                               uint32_t next_cmd,
                               uint32_t max, uint32_t min, uint32_t bytes)
 {
@@ -457,12 +448,12 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
            fifo_state->dynamic_buffer : fifo_state->static_buffer;
 
        while (bytes > 0) {
-               iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
+               vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
                next_cmd += sizeof(uint32_t);
                if (unlikely(next_cmd == max))
                        next_cmd = min;
                mb();
-               iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+               vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
                mb();
                bytes -= sizeof(uint32_t);
        }
@@ -471,10 +462,10 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
 static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 {
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-       u32 __iomem *fifo_mem = dev_priv->mmio_virt;
-       uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
-       uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
-       uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+       u32  *fifo_mem = dev_priv->mmio_virt;
+       uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
+       uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
+       uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
        bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
 
        if (fifo_state->dx)
@@ -507,11 +498,11 @@ static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
                if (next_cmd >= max)
                        next_cmd -= max - min;
                mb();
-               iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+               vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
        }
 
        if (reserveable)
-               iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
+               vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
        mb();
        up_write(&fifo_state->rwsem);
        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
index a3e3c83c407b907abac614433131fa8d10d9b9ae..b8c6a03c8c54df15def2d359ee5253f213f1816e 100644 (file)
@@ -64,7 +64,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                break;
        case DRM_VMW_PARAM_FIFO_HW_VERSION:
        {
-               u32 __iomem *fifo_mem = dev_priv->mmio_virt;
+               u32 *fifo_mem = dev_priv->mmio_virt;
                const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
                if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
@@ -73,11 +73,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                }
 
                param->value =
-                       ioread32(fifo_mem +
-                                ((fifo->capabilities &
-                                  SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
-                                 SVGA_FIFO_3D_HWVERSION_REVISED :
-                                 SVGA_FIFO_3D_HWVERSION));
+                       vmw_mmio_read(fifo_mem +
+                                     ((fifo->capabilities &
+                                       SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+                                      SVGA_FIFO_3D_HWVERSION_REVISED :
+                                      SVGA_FIFO_3D_HWVERSION));
                break;
        }
        case DRM_VMW_PARAM_MAX_SURF_MEMORY:
@@ -179,7 +179,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
                (struct drm_vmw_get_3d_cap_arg *) data;
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t size;
-       u32 __iomem *fifo_mem;
+       u32 *fifo_mem;
        void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
        void *bounce;
        int ret;
@@ -229,7 +229,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
                        goto out_err;
        } else {
                fifo_mem = dev_priv->mmio_virt;
-               memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
+               memcpy(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
        }
 
        ret = copy_to_user(buffer, bounce, size);
index 9498a5e33c12b1a3955353a7201446de9634accb..ac3eccd9223f3eaefb1187b03018f501af7a16a7 100644 (file)
@@ -72,8 +72,8 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
 void vmw_update_seqno(struct vmw_private *dev_priv,
                         struct vmw_fifo_state *fifo_state)
 {
-       u32 __iomem *fifo_mem = dev_priv->mmio_virt;
-       uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+       u32 *fifo_mem = dev_priv->mmio_virt;
+       uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
 
        if (dev_priv->last_read_seqno != seqno) {
                dev_priv->last_read_seqno = seqno;
@@ -178,8 +178,9 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
        }
        finish_wait(&dev_priv->fence_queue, &__wait);
        if (ret == 0 && fifo_idle) {
-               u32 __iomem *fifo_mem = dev_priv->mmio_virt;
-               iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
+               u32 *fifo_mem = dev_priv->mmio_virt;
+
+               vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
        }
        wake_up_all(&dev_priv->fence_queue);
 out_err:
index 03ffab2a6a9cb7d235565e9130909f25d83d4dbf..a94b24d041f16e7fdcfcc01945b5285f9396345c 100644 (file)
@@ -123,14 +123,14 @@ err_unreserve:
 void vmw_cursor_update_position(struct vmw_private *dev_priv,
                                bool show, int x, int y)
 {
-       u32 __iomem *fifo_mem = dev_priv->mmio_virt;
+       u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t count;
 
-       iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
-       iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
-       iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
-       count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
-       iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
+       vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
+       vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
+       vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
+       count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
+       vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
 }
 
 int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
@@ -1155,7 +1155,8 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
        if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
                vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
        else if (vmw_fifo_have_pitchlock(vmw_priv))
-               iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
+               vmw_mmio_write(pitch, vmw_priv->mmio_virt +
+                              SVGA_FIFO_PITCHLOCK);
        vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
        vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
        vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
@@ -1181,8 +1182,8 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
                vmw_priv->vga_pitchlock =
                  vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
        else if (vmw_fifo_have_pitchlock(vmw_priv))
-               vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
-                                                  SVGA_FIFO_PITCHLOCK);
+               vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
+                                                       SVGA_FIFO_PITCHLOCK);
 
        if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
                return 0;
@@ -1230,8 +1231,8 @@ int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
                vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
                          vmw_priv->vga_pitchlock);
        else if (vmw_fifo_have_pitchlock(vmw_priv))
-               iowrite32(vmw_priv->vga_pitchlock,
-                         vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
+               vmw_mmio_write(vmw_priv->vga_pitchlock,
+                              vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
 
        if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
                return 0;