drm/vmwgfx: Replace iowrite/ioread with volatile memory accesses
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c (linux-2.6-block.git)
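The vmw_mmio_read()/vmw_mmio_write() helpers called in this file are defined in vmwgfx_drv.h, not here. A minimal sketch of the volatile-access form the commit title refers to, assuming the FIFO is mapped as ordinary cacheable memory on this virtual device (the header's exact definitions may differ):

static inline u32 vmw_mmio_read(u32 *addr)
{
        return READ_ONCE(*addr);        /* compiler-level volatile read */
}

static inline void vmw_mmio_write(u32 value, u32 *addr)
{
        WRITE_ONCE(*addr, value);       /* compiler-level volatile write */
}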
/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 24)

irqreturn_t vmw_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *)arg;
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status, masked_status;

        spin_lock(&dev_priv->irq_lock);
        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        masked_status = status & dev_priv->irq_mask;
        spin_unlock(&dev_priv->irq_lock);

        if (likely(status))
                outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

        if (!masked_status)
                return IRQ_NONE;

        if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
                             SVGA_IRQFLAG_FENCE_GOAL)) {
                vmw_fences_update(dev_priv->fman);
                wake_up_all(&dev_priv->fence_queue);
        }

        if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
                wake_up_all(&dev_priv->fifo_queue);

        if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
                             SVGA_IRQFLAG_ERROR))
                vmw_cmdbuf_tasklet_schedule(dev_priv->cman);

        return IRQ_HANDLED;
}

static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
        return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}

void vmw_update_seqno(struct vmw_private *dev_priv,
                         struct vmw_fifo_state *fifo_state)
{
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

        if (dev_priv->last_read_seqno != seqno) {
                dev_priv->last_read_seqno = seqno;
                vmw_marker_pull(&fifo_state->marker_queue, seqno);
                vmw_fences_update(dev_priv->fman);
        }
}

bool vmw_seqno_passed(struct vmw_private *dev_priv,
                         uint32_t seqno)
{
        struct vmw_fifo_state *fifo_state;
        bool ret;

        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return true;

        fifo_state = &dev_priv->fifo;
        vmw_update_seqno(dev_priv, fifo_state);
        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return true;

        if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
            vmw_fifo_idle(dev_priv, seqno))
                return true;

        /**
         * Then check if the seqno is higher than what we've actually
         * emitted. If so, the fence is stale and signaled.
         */

        ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
               > VMW_FENCE_WRAP);

        return ret;
}

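/*
 * Illustrative note (not part of the original file): the comparisons above
 * rely on unsigned 32-bit wrap-around arithmetic. "last_read_seqno - seqno <
 * VMW_FENCE_WRAP" is true whenever last_read_seqno is at or ahead of seqno
 * by fewer than 2^24 steps, even across a counter wrap. For example, with
 * last_read_seqno == 0x00000002 (just wrapped) and seqno == 0xfffffffe, the
 * subtraction yields 4 (mod 2^32), which is below VMW_FENCE_WRAP, so the
 * seqno counts as passed.
 */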
int vmw_fallback_wait(struct vmw_private *dev_priv,
                      bool lazy,
                      bool fifo_idle,
                      uint32_t seqno,
                      bool interruptible,
                      unsigned long timeout)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

        uint32_t count = 0;
        uint32_t signal_seq;
        int ret;
        unsigned long end_jiffies = jiffies + timeout;
        bool (*wait_condition)(struct vmw_private *, uint32_t);
        DEFINE_WAIT(__wait);

        wait_condition = (fifo_idle) ? &vmw_fifo_idle :
                &vmw_seqno_passed;

        /**
         * Block command submission while waiting for idle.
         */

        if (fifo_idle) {
                down_read(&fifo_state->rwsem);
                if (dev_priv->cman) {
                        ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
                                              10*HZ);
                        if (ret)
                                goto out_err;
                }
        }

        signal_seq = atomic_read(&dev_priv->marker_seq);
        ret = 0;

        for (;;) {
                prepare_to_wait(&dev_priv->fence_queue, &__wait,
                                (interruptible) ?
                                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
                if (wait_condition(dev_priv, seqno))
                        break;
                if (time_after_eq(jiffies, end_jiffies)) {
                        DRM_ERROR("SVGA device lockup.\n");
                        break;
                }
                if (lazy)
                        schedule_timeout(1);
                else if ((++count & 0x0F) == 0) {
                        /**
                         * FIXME: Use schedule_hr_timeout here for
                         * newer kernels and lower CPU utilization.
                         */

                        __set_current_state(TASK_RUNNING);
                        schedule();
                        __set_current_state((interruptible) ?
                                            TASK_INTERRUPTIBLE :
                                            TASK_UNINTERRUPTIBLE);
                }
                if (interruptible && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }
        finish_wait(&dev_priv->fence_queue, &__wait);
        if (ret == 0 && fifo_idle) {
                u32 *fifo_mem = dev_priv->mmio_virt;

                vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
        }
        wake_up_all(&dev_priv->fence_queue);
out_err:
        if (fifo_idle)
                up_read(&fifo_state->rwsem);

        return ret;
}

void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
        spin_lock(&dev_priv->waiter_lock);
        if (dev_priv->fence_queue_waiters++ == 0) {
                unsigned long irq_flags;

                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                outl(SVGA_IRQFLAG_ANY_FENCE,
                     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
                dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        spin_unlock(&dev_priv->waiter_lock);
}

void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
        spin_lock(&dev_priv->waiter_lock);
        if (--dev_priv->fence_queue_waiters == 0) {
                unsigned long irq_flags;

                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        spin_unlock(&dev_priv->waiter_lock);
}

void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
        spin_lock(&dev_priv->waiter_lock);
        if (dev_priv->goal_queue_waiters++ == 0) {
                unsigned long irq_flags;

                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                outl(SVGA_IRQFLAG_FENCE_GOAL,
                     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
                dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        spin_unlock(&dev_priv->waiter_lock);
}

void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
        spin_lock(&dev_priv->waiter_lock);
        if (--dev_priv->goal_queue_waiters == 0) {
                unsigned long irq_flags;

                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        spin_unlock(&dev_priv->waiter_lock);
}

int vmw_wait_seqno(struct vmw_private *dev_priv,
                      bool lazy, uint32_t seqno,
                      bool interruptible, unsigned long timeout)
{
        long ret;
        struct vmw_fifo_state *fifo = &dev_priv->fifo;

        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return 0;

        if (likely(vmw_seqno_passed(dev_priv, seqno)))
                return 0;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

        if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
                return vmw_fallback_wait(dev_priv, lazy, true, seqno,
                                         interruptible, timeout);

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return vmw_fallback_wait(dev_priv, lazy, false, seqno,
                                         interruptible, timeout);

        vmw_seqno_waiter_add(dev_priv);

        if (interruptible)
                ret = wait_event_interruptible_timeout
                    (dev_priv->fence_queue,
                     vmw_seqno_passed(dev_priv, seqno),
                     timeout);
        else
                ret = wait_event_timeout
                    (dev_priv->fence_queue,
                     vmw_seqno_passed(dev_priv, seqno),
                     timeout);

        vmw_seqno_waiter_remove(dev_priv);

        if (unlikely(ret == 0))
                ret = -EBUSY;
        else if (likely(ret > 0))
                ret = 0;

        return ret;
}

void vmw_irq_preinstall(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status;

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return;

        spin_lock_init(&dev_priv->irq_lock);
        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

int vmw_irq_postinstall(struct drm_device *dev)
{
        return 0;
}

void vmw_irq_uninstall(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status;

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return;

        vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);

        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

void vmw_generic_waiter_add(struct vmw_private *dev_priv,
                            u32 flag, int *waiter_count)
{
        unsigned long irq_flags;

        spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
        if ((*waiter_count)++ == 0) {
                outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
                dev_priv->irq_mask |= flag;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}

void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
                               u32 flag, int *waiter_count)
{
        unsigned long irq_flags;

        spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
        if (--(*waiter_count) == 0) {
                dev_priv->irq_mask &= ~flag;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
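
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * bracket an interrupt-driven wait with the generic waiter helpers above.
 * It mirrors the pattern vmw_wait_seqno() uses with the dedicated seqno
 * waiters; the function name below is hypothetical.
 */
static int vmw_example_generic_wait(struct vmw_private *dev_priv,
                                    uint32_t seqno, unsigned long timeout)
{
        long ret;

        /* Ack and unmask the fence interrupt while at least one waiter exists. */
        vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
                               &dev_priv->fence_queue_waiters);

        /* Sleep until the irq handler wakes fence_queue or the timeout expires. */
        ret = wait_event_timeout(dev_priv->fence_queue,
                                 vmw_seqno_passed(dev_priv, seqno),
                                 timeout);

        /* Mask the interrupt again once the last waiter is gone. */
        vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
                                  &dev_priv->fence_queue_waiters);

        return (ret > 0) ? 0 : -EBUSY;
}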