drm/vmwgfx: Fix an fb unlocking bug
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>

bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t fifo_min, hwversion;
	const struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (!(dev_priv->capabilities & SVGA_CAP_3D))
		return false;

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint32_t result;

		if (!dev_priv->has_mob)
			return false;

		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);

		return (result != 0);
	}

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;

	hwversion = ioread32(fifo_mem +
			     ((fifo->capabilities &
			       SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
			      SVGA_FIFO_3D_HWVERSION_REVISED :
			      SVGA_FIFO_3D_HWVERSION));

	if (hwversion == 0)
		return false;

	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
		return false;

	/* Non-Screen Object path does not support surfaces */
	if (!dev_priv->sou_priv)
		return false;

	return true;
}

bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t caps;

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
		return true;

	return false;
}

int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t dummy;

	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	/*
	 * Allow mapping the first page read-only to user-space.
	 */

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
	vmw_marker_queue_init(&fifo->marker_queue);
	return vmw_fifo_send_fence(dev_priv, &dummy);
}

void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	static DEFINE_SPINLOCK(ping_lock);
	unsigned long irq_flags;

	/*
	 * The ping_lock is needed because we don't have an atomic
	 * test-and-set of the SVGA_FIFO_BUSY register.
	 */
	spin_lock_irqsave(&ping_lock, irq_flags);
	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	}
	spin_unlock_irqrestore(&ping_lock, irq_flags);
}
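
/*
 * Note on the doorbell protocol above (summarized from the SVGA device
 * interface; treat the details as informational): SVGA_FIFO_BUSY == 0
 * means the host is not currently draining the fifo, so the guest sets
 * it to 1 and writes SVGA_REG_SYNC to wake the host up. While
 * SVGA_FIFO_BUSY remains 1, further pings can be skipped, which is why
 * the read-test-write sequence must be serialized by ping_lock.
 */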

void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		;

	dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);
	vmw_write(dev_priv, SVGA_REG_TRACES,
		  dev_priv->traces_state);

	vmw_marker_queue_takedown(&fifo->marker_queue);

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}
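
/*
 * Worked example for the free-space test below (illustrative numbers,
 * not from real hardware): with min = 4096, max = 65536,
 * next_cmd = 61440 and stop = 8192, the free space is
 * (max - next_cmd) + (stop - min) = 4096 + 4096 = 8192 bytes, so any
 * request of 8192 bytes or more reports the fifo as full. Note the <=:
 * a request that would exactly exhaust the ring also counts as full.
 */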
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}

static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}

static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;
	unsigned long irq_flags;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	spin_lock(&dev_priv->waiter_lock);
	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	spin_unlock(&dev_priv->waiter_lock);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	spin_lock(&dev_priv->waiter_lock);
	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	spin_unlock(&dev_priv->waiter_lock);

	return ret;
}

/**
 * Reserve @bytes number of bytes in the fifo.
 *
 * This function will return NULL (error) in two cases:
 * if it times out waiting for fifo space, or if @bytes is larger than the
 * available fifo space.
 *
 * Returns:
 * Pointer to the fifo, or NULL on error (possible hardware hang).
 */
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					iowrite32(bytes, fifo_mem +
						  SVGA_FIFO_RESERVED);
				return fifo_mem + (next_cmd >> 2);
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);
	return NULL;
}
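
/*
 * Minimal usage sketch for the vmw_fifo_reserve()/vmw_fifo_commit()
 * pairing, modeled on callers elsewhere in the driver. Illustrative
 * only: it assumes the SVGA_CMD_UPDATE/SVGAFifoCmdUpdate definitions
 * from the device headers and is not the driver's canonical emit path.
 */
static inline int vmw_fifo_update_rect_sketch(struct vmw_private *dev_priv,
					      uint32_t x, uint32_t y,
					      uint32_t width, uint32_t height)
{
	struct {
		uint32_t cmd;
		SVGAFifoCmdUpdate body;
	} *cmd;

	/* May block internally (up to 3 seconds) while the fifo drains. */
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	/* Fill in the reserved space; it may be MMIO or a bounce buffer. */
	cmd->cmd = SVGA_CMD_UPDATE;
	cmd->body.x = x;
	cmd->body.y = y;
	cmd->body.width = width;
	cmd->body.height = height;

	/* Publish exactly what was reserved; this also drops fifo_mutex. */
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	return 0;
}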

static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      __le32 __iomem *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
			    rest);
}
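
/*
 * Worked example for the copy above (illustrative numbers): with
 * min = 4096, max = 65536, next_cmd = 65024 and bytes = 1024, the
 * first memcpy_toio() writes chunk_size = 65536 - 65024 = 512 bytes
 * at the ring tail, and the remaining rest = 512 bytes wrap around
 * to the ring start at min.
 */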

static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       __le32 __iomem *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}

void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	down_write(&fifo_state->rwsem);
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}

int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	void *fm;
	int ret = 0;
	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);

	fm = vmw_fifo_reserve(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3*HZ);
		goto out_err;
	}

	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
	cmd_fence = (struct svga_fifo_cmd_fence *)
	    ((unsigned long)fm + sizeof(__le32));

	iowrite32(*seqno, &cmd_fence->fence);
	vmw_fifo_commit(dev_priv, bytes);
	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
	vmw_update_seqno(dev_priv, fifo_state);

out_err:
	return ret;
}
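
/*
 * Usage sketch for vmw_fifo_send_fence() (illustrative; the wait-side
 * helper vmw_wait_seqno() lives in vmwgfx_irq.c and its exact
 * signature should be checked there):
 *
 *	uint32_t seqno;
 *	int ret = vmw_fifo_send_fence(dev_priv, &seqno);
 *
 *	if (ret == 0)
 *		ret = vmw_wait_seqno(dev_priv, false, seqno, true, 3 * HZ);
 *
 * Even on -ENOMEM a valid seqno is stored and a fallback wait has
 * already been performed, so callers always get a usable marker.
 */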

/**
 * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
					    uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

	if (bo->mem.mem_type == TTM_PL_VRAM) {
		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
		cmd->body.guestResult.offset = bo->offset;
	} else {
		cmd->body.guestResult.gmrId = bo->mem.start;
		cmd->body.guestResult.offset = 0;
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
					uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
	cmd->body.mobid = bo->mem.start;
	cmd->body.offset = 0;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
 * the appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A query result structure should have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer object
 * must be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
			      uint32_t cid)
{
	if (dev_priv->has_mob)
		return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);

	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}
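
/*
 * Usage note (a sketch, not the canonical call site): the execbuf code
 * uses this as a query barrier when the pinned query buffer is about
 * to be released or moved, roughly:
 *
 *	ret = vmw_fifo_emit_dummy_query(dev_priv, query_cid);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 * Once the emitted query signals, all earlier queries for that context
 * are known to have finished as well.
 */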