Commit | Line | Data |
---|---|---|
fb1d9738 JB |
1 | /************************************************************************** |
2 | * | |
54fbde8a | 3 | * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA |
fb1d9738 JB |
4 | * All Rights Reserved. |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | |
7 | * copy of this software and associated documentation files (the | |
8 | * "Software"), to deal in the Software without restriction, including | |
9 | * without limitation the rights to use, copy, modify, merge, publish, | |
10 | * distribute, sub license, and/or sell copies of the Software, and to | |
11 | * permit persons to whom the Software is furnished to do so, subject to | |
12 | * the following conditions: | |
13 | * | |
14 | * The above copyright notice and this permission notice (including the | |
15 | * next paragraph) shall be included in all copies or substantial portions | |
16 | * of the Software. | |
17 | * | |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | |
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | |
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | |
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | |
25 | * | |
26 | **************************************************************************/ | |
27 | ||
760285e7 | 28 | #include <drm/drmP.h> |
fb1d9738 JB |
29 | #include "vmwgfx_drv.h" |
30 | ||
31 | #define VMW_FENCE_WRAP (1 << 24) | |
32 | ||
/*
 * vmw_irq_handler - Top-half interrupt handler for the vmwgfx device.
 *
 * Snapshots the device interrupt status port together with the driver's
 * current irq mask, acks all pending status bits, and wakes up the
 * waiters corresponding to sources that are both pending and enabled.
 *
 * Returns IRQ_HANDLED when at least one enabled source was pending,
 * IRQ_NONE otherwise (the interrupt was not ours, or not enabled).
 */
irqreturn_t vmw_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status, masked_status;

	/* Read status and the enabled mask atomically w.r.t. mask updates. */
	spin_lock(&dev_priv->irq_lock);
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	masked_status = status & dev_priv->irq_mask;
	spin_unlock(&dev_priv->irq_lock);

	/* Ack every pending bit, including ones we don't act on below. */
	if (likely(status))
		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

	if (!masked_status)
		return IRQ_NONE;

	/* Fence-related irqs: let the fence manager signal completions. */
	if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
			     SVGA_IRQFLAG_FENCE_GOAL)) {
		vmw_fences_update(dev_priv->fman);
		wake_up_all(&dev_priv->fence_queue);
	}

	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
		wake_up_all(&dev_priv->fifo_queue);

	/* Command-buffer completion or error: defer to the cmdbuf tasklet. */
	if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
			     SVGA_IRQFLAG_ERROR))
		vmw_cmdbuf_tasklet_schedule(dev_priv->cman);

	return IRQ_HANDLED;
}
65 | ||
6bcd8d3c | 66 | static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) |
fb1d9738 | 67 | { |
fb1d9738 | 68 | |
496eb6fd | 69 | return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0); |
fb1d9738 JB |
70 | } |
71 | ||
6bcd8d3c | 72 | void vmw_update_seqno(struct vmw_private *dev_priv, |
1925d456 TH |
73 | struct vmw_fifo_state *fifo_state) |
74 | { | |
b76ff5ea TH |
75 | u32 *fifo_mem = dev_priv->mmio_virt; |
76 | uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE); | |
1925d456 | 77 | |
6bcd8d3c TH |
78 | if (dev_priv->last_read_seqno != seqno) { |
79 | dev_priv->last_read_seqno = seqno; | |
80 | vmw_marker_pull(&fifo_state->marker_queue, seqno); | |
57c5ee79 | 81 | vmw_fences_update(dev_priv->fman); |
1925d456 TH |
82 | } |
83 | } | |
fb1d9738 | 84 | |
/*
 * vmw_seqno_passed - Check whether a fence seqno has signaled.
 *
 * All comparisons are unsigned differences against VMW_FENCE_WRAP so
 * that seqno wrap-around is handled correctly.
 */
bool vmw_seqno_passed(struct vmw_private *dev_priv,
		      uint32_t seqno)
{
	struct vmw_fifo_state *fifo_state;
	bool ret;

	/* Fast path: the cached seqno has already passed the requested one. */
	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	/* Re-read the seqno from the device and check again. */
	fifo_state = &dev_priv->fifo;
	vmw_update_seqno(dev_priv, fifo_state);
	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	/* Without fence support, an idle device implies everything passed. */
	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
	    vmw_fifo_idle(dev_priv, seqno))
		return true;

	/*
	 * Then check if the seqno is higher than what we've actually
	 * emitted. Then the fence is stale and signaled.
	 */

	ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
	       > VMW_FENCE_WRAP);

	return ret;
}
113 | ||
/*
 * vmw_fallback_wait - Wait for a seqno by polling, for devices or
 * configurations where fence irqs cannot be used.
 *
 * @dev_priv: Pointer to the device private structure.
 * @lazy: Sleep one tick between polls instead of busy-polling.
 * @fifo_idle: Wait for full device idle rather than a specific seqno.
 * @seqno: The seqno to wait for (only relevant when !@fifo_idle).
 * @interruptible: Whether pending signals abort the wait.
 * @timeout: Timeout in jiffies.
 *
 * Returns 0 on success (including device-lockup timeout, which only
 * logs an error), -ERESTARTSYS if interrupted by a signal, or the
 * error returned by vmw_cmdbuf_idle().
 */
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t seqno,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_seqno_passed;

	/*
	 * Block command submission while waiting for idle.
	 */

	if (fifo_idle) {
		down_read(&fifo_state->rwsem);
		if (dev_priv->cman) {
			ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
					      10*HZ);
			if (ret)
				goto out_err;
		}
	}

	/* Highest seqno emitted so far; used to stamp the FIFO when idle. */
	signal_seq = atomic_read(&dev_priv->marker_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, seqno))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/*
			 * FIXME: Use schedule_hr_timeout here for
			 * newer kernels and lower CPU utilization.
			 */

			/* Yield the CPU occasionally while busy-polling. */
			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle) {
		u32 *fifo_mem = dev_priv->mmio_virt;

		/* Record the highest emitted seqno as signaled in the FIFO. */
		vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
	}
	wake_up_all(&dev_priv->fence_queue);
out_err:
	if (fifo_idle)
		up_read(&fifo_state->rwsem);

	return ret;
}
192 | ||
ae2a1040 | 193 | void vmw_seqno_waiter_add(struct vmw_private *dev_priv) |
4f73a96b | 194 | { |
496eb6fd | 195 | spin_lock(&dev_priv->waiter_lock); |
4f73a96b TH |
196 | if (dev_priv->fence_queue_waiters++ == 0) { |
197 | unsigned long irq_flags; | |
198 | ||
199 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | |
200 | outl(SVGA_IRQFLAG_ANY_FENCE, | |
201 | dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | |
57c5ee79 TH |
202 | dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE; |
203 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | |
4f73a96b TH |
204 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
205 | } | |
496eb6fd | 206 | spin_unlock(&dev_priv->waiter_lock); |
4f73a96b TH |
207 | } |
208 | ||
ae2a1040 | 209 | void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) |
4f73a96b | 210 | { |
496eb6fd | 211 | spin_lock(&dev_priv->waiter_lock); |
4f73a96b TH |
212 | if (--dev_priv->fence_queue_waiters == 0) { |
213 | unsigned long irq_flags; | |
214 | ||
215 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | |
57c5ee79 TH |
216 | dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE; |
217 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | |
218 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | |
219 | } | |
496eb6fd | 220 | spin_unlock(&dev_priv->waiter_lock); |
57c5ee79 TH |
221 | } |
222 | ||
223 | ||
224 | void vmw_goal_waiter_add(struct vmw_private *dev_priv) | |
225 | { | |
496eb6fd | 226 | spin_lock(&dev_priv->waiter_lock); |
57c5ee79 TH |
227 | if (dev_priv->goal_queue_waiters++ == 0) { |
228 | unsigned long irq_flags; | |
229 | ||
230 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | |
231 | outl(SVGA_IRQFLAG_FENCE_GOAL, | |
232 | dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | |
233 | dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL; | |
234 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | |
235 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | |
236 | } | |
496eb6fd | 237 | spin_unlock(&dev_priv->waiter_lock); |
57c5ee79 TH |
238 | } |
239 | ||
240 | void vmw_goal_waiter_remove(struct vmw_private *dev_priv) | |
241 | { | |
496eb6fd | 242 | spin_lock(&dev_priv->waiter_lock); |
57c5ee79 TH |
243 | if (--dev_priv->goal_queue_waiters == 0) { |
244 | unsigned long irq_flags; | |
245 | ||
246 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | |
247 | dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL; | |
248 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | |
4f73a96b TH |
249 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
250 | } | |
496eb6fd | 251 | spin_unlock(&dev_priv->waiter_lock); |
4f73a96b TH |
252 | } |
253 | ||
6bcd8d3c TH |
254 | int vmw_wait_seqno(struct vmw_private *dev_priv, |
255 | bool lazy, uint32_t seqno, | |
256 | bool interruptible, unsigned long timeout) | |
fb1d9738 JB |
257 | { |
258 | long ret; | |
fb1d9738 JB |
259 | struct vmw_fifo_state *fifo = &dev_priv->fifo; |
260 | ||
6bcd8d3c | 261 | if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP)) |
fb1d9738 JB |
262 | return 0; |
263 | ||
6bcd8d3c | 264 | if (likely(vmw_seqno_passed(dev_priv, seqno))) |
fb1d9738 JB |
265 | return 0; |
266 | ||
267 | vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); | |
268 | ||
269 | if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE)) | |
6bcd8d3c | 270 | return vmw_fallback_wait(dev_priv, lazy, true, seqno, |
fb1d9738 JB |
271 | interruptible, timeout); |
272 | ||
273 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) | |
6bcd8d3c | 274 | return vmw_fallback_wait(dev_priv, lazy, false, seqno, |
fb1d9738 JB |
275 | interruptible, timeout); |
276 | ||
4f73a96b | 277 | vmw_seqno_waiter_add(dev_priv); |
fb1d9738 JB |
278 | |
279 | if (interruptible) | |
280 | ret = wait_event_interruptible_timeout | |
281 | (dev_priv->fence_queue, | |
6bcd8d3c | 282 | vmw_seqno_passed(dev_priv, seqno), |
fb1d9738 JB |
283 | timeout); |
284 | else | |
285 | ret = wait_event_timeout | |
286 | (dev_priv->fence_queue, | |
6bcd8d3c | 287 | vmw_seqno_passed(dev_priv, seqno), |
fb1d9738 JB |
288 | timeout); |
289 | ||
4f73a96b TH |
290 | vmw_seqno_waiter_remove(dev_priv); |
291 | ||
3d3a5b32 | 292 | if (unlikely(ret == 0)) |
fb1d9738 JB |
293 | ret = -EBUSY; |
294 | else if (likely(ret > 0)) | |
295 | ret = 0; | |
296 | ||
fb1d9738 JB |
297 | return ret; |
298 | } | |
299 | ||
300 | void vmw_irq_preinstall(struct drm_device *dev) | |
301 | { | |
302 | struct vmw_private *dev_priv = vmw_priv(dev); | |
303 | uint32_t status; | |
304 | ||
305 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) | |
306 | return; | |
307 | ||
308 | spin_lock_init(&dev_priv->irq_lock); | |
309 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | |
310 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | |
311 | } | |
312 | ||
/*
 * vmw_irq_postinstall - DRM post-install hook; nothing to do for vmwgfx.
 *
 * Irq sources are enabled lazily by the waiter_add helpers instead.
 */
int vmw_irq_postinstall(struct drm_device *dev)
{
	return 0;
}
317 | ||
318 | void vmw_irq_uninstall(struct drm_device *dev) | |
319 | { | |
320 | struct vmw_private *dev_priv = vmw_priv(dev); | |
321 | uint32_t status; | |
322 | ||
323 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) | |
324 | return; | |
325 | ||
fb1d9738 | 326 | vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); |
fb1d9738 JB |
327 | |
328 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | |
329 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | |
330 | } | |
3eab3d9e TH |
331 | |
332 | void vmw_generic_waiter_add(struct vmw_private *dev_priv, | |
333 | u32 flag, int *waiter_count) | |
334 | { | |
335 | unsigned long irq_flags; | |
336 | ||
337 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | |
338 | if ((*waiter_count)++ == 0) { | |
339 | outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | |
340 | dev_priv->irq_mask |= flag; | |
341 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | |
342 | } | |
343 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | |
344 | } | |
345 | ||
346 | void vmw_generic_waiter_remove(struct vmw_private *dev_priv, | |
347 | u32 flag, int *waiter_count) | |
348 | { | |
349 | unsigned long irq_flags; | |
350 | ||
351 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | |
352 | if (--(*waiter_count) == 0) { | |
353 | dev_priv->irq_mask &= ~flag; | |
354 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | |
355 | } | |
356 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | |
357 | } |