/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>
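/*
 * The device command FIFO lives in the MMIO region mapped at
 * dev_priv->mmio_virt. It is a ring of 32-bit words delimited by the
 * SVGA_FIFO_MIN/MAX offsets, with SVGA_FIFO_NEXT_CMD and SVGA_FIFO_STOP
 * tracking the producer and consumer positions. The helpers below carve
 * command space out of that ring, falling back to a kernel bounce buffer
 * when a request wraps the ring or can't be reserved in place.
 */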
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t fifo_min, hwversion;
	const struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (!(dev_priv->capabilities & SVGA_CAP_3D))
		return false;

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint32_t result;

		if (!dev_priv->has_mob)
			return false;

		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
		return (result != 0);
	}

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;

	hwversion = ioread32(fifo_mem +
			     ((fifo->capabilities &
			       SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
			      SVGA_FIFO_3D_HWVERSION_REVISED :
			      SVGA_FIFO_3D_HWVERSION));

	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
		return false;

	/* Legacy Display Unit does not support surfaces */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return false;

	return true;
}

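/**
 * vmw_fifo_have_pitchlock - Check whether the device exposes the FIFO
 * pitchlock capability.
 *
 * @dev_priv: Pointer to device private structure.
 */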
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t caps;

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
		return true;

	return false;
}

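/**
 * vmw_fifo_init - Allocate the bounce buffer, save device state and bring
 * up the command FIFO.
 *
 * @dev_priv: Pointer to device private structure.
 * @fifo: Pointer to the fifo state to initialize.
 *
 * Returns 0 on success, -ENOMEM if the static bounce buffer can't be
 * allocated.
 */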
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;

	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE_HIDE);
	vmw_write(dev_priv, SVGA_REG_TRACES, 0);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
	vmw_marker_queue_init(&fifo->marker_queue);

	return 0;
}

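/**
 * vmw_fifo_ping_host - Wake up the device so it starts (or keeps)
 * processing FIFO commands.
 *
 * @dev_priv: Pointer to device private structure.
 * @reason: The SVGA_SYNC_* reason code written to the sync register.
 */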
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	static DEFINE_SPINLOCK(ping_lock);
	unsigned long irq_flags;

	/*
	 * The ping_lock is needed because we don't have an atomic
	 * test-and-set of the SVGA_FIFO_BUSY register.
	 */
	spin_lock_irqsave(&ping_lock, irq_flags);
	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	}
	spin_unlock_irqrestore(&ping_lock, irq_flags);
}

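/**
 * vmw_fifo_release - Drain the FIFO, restore the saved device state and
 * free the bounce buffers.
 *
 * @dev_priv: Pointer to device private structure.
 * @fifo: Pointer to the fifo state to tear down.
 */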
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;

	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		;

	dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);
	vmw_write(dev_priv, SVGA_REG_TRACES,
		  dev_priv->traces_state);

	vmw_marker_queue_takedown(&fifo->marker_queue);

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}

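/*
 * vmw_fifo_is_full - Return true if the ring lacks room for @bytes more
 * command data. Free space is the gap from NEXT_CMD up to MAX plus the
 * gap from MIN up to STOP, since the ring wraps from MAX back to MIN.
 */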
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}

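/*
 * vmw_fifo_wait_noirq - Poll for FIFO space when the device can't raise
 * a FIFO-progress interrupt. Sleeps in one-jiffy steps until space shows
 * up, the timeout expires or a signal arrives.
 */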
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}

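/*
 * vmw_fifo_wait - Wait for FIFO space, using the FIFO-progress interrupt
 * when the device supports IRQ masking and falling back to polling
 * otherwise. Waiters are counted so the interrupt is only unmasked while
 * somebody is actually waiting.
 */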
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;
	unsigned long irq_flags;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	spin_lock(&dev_priv->waiter_lock);
	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	spin_unlock(&dev_priv->waiter_lock);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	spin_lock(&dev_priv->waiter_lock);
	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	spin_unlock(&dev_priv->waiter_lock);

	return ret;
}

/**
 * Reserve @bytes number of bytes in the fifo.
 *
 * This function will return NULL (error) on two conditions:
 *  If it times out waiting for fifo space, or if @bytes is larger than the
 *  available fifo space.
 *
 * Returns:
 *   Pointer to the fifo, or null on error (possible hardware hang).
 */
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
				    uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					iowrite32(bytes, fifo_mem +
						  SVGA_FIFO_RESERVED);
				return (void __force *) (fifo_mem +
							 (next_cmd >> 2));
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;

			fifo_state->dynamic_buffer = vmalloc(bytes);
			/* Treat allocation failure like any other error. */
			if (unlikely(fifo_state->dynamic_buffer == NULL))
				goto out_err;
			return fifo_state->dynamic_buffer;
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);
	return NULL;
}

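/**
 * vmw_fifo_reserve - Reserve @bytes of command space, routing to the
 * command buffer manager when one exists and to the local fifo otherwise.
 *
 * Illustrative caller pattern (not from this file): every successful
 * reserve must be paired with a commit of at most the reserved size:
 *
 *	cmd = vmw_fifo_reserve(dev_priv, size);
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *	... fill in the command at *cmd ...
 *	vmw_fifo_commit(dev_priv, size);
 */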
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
	void *ret;

	if (dev_priv->cman)
		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
					 SVGA3D_INVALID_ID, false, NULL);
	else
		ret = vmw_local_fifo_reserve(dev_priv, bytes);
	if (IS_ERR_OR_NULL(ret)) {
		DRM_ERROR("Fifo reserve failure of %u bytes.\n",
			  (unsigned) bytes);
		dump_stack();
		return NULL;
	}

	return ret;
}

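/*
 * vmw_fifo_res_copy - Flush a bounce buffer into the ring when the device
 * supports the SVGA_FIFO_RESERVED register. The copy wraps at MAX back to
 * MIN in at most two memcpy_toio() calls.
 */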
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      u32 __iomem *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
			    rest);
}

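/*
 * vmw_fifo_slow_copy - Flush a bounce buffer one 32-bit word at a time,
 * updating SVGA_FIFO_NEXT_CMD after every word. Used when the device
 * lacks the reserve capability.
 */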
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       u32 __iomem *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}

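/*
 * vmw_local_fifo_commit - Publish @bytes of previously reserved command
 * space: flush any bounce buffer, advance NEXT_CMD, clear the reservation
 * and ping the device.
 */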
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}
	}

	down_write(&fifo_state->rwsem);
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}

void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 */
static void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_fifo_flush - Flush any buffered commands and make sure command
 * processing starts.
 *
 * @dev_priv: Pointer to device private structure.
 * @interruptible: Whether to wait interruptible if function needs to sleep.
 */
int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
{
	if (dev_priv->cman)
		return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);

	return 0;
}

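/**
 * vmw_fifo_send_fence - Emit a fence command and return its sequence number.
 *
 * @dev_priv: Pointer to device private structure.
 * @seqno: Outputs the sequence number assigned to the fence.
 *
 * Returns 0 on success, -ENOMEM if fifo space could not be reserved.
 */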
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	u32 *fm;
	int ret = 0;
	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

	fm = vmw_fifo_reserve(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3*HZ);
		goto out_err;
	}

	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*fm++ = SVGA_CMD_FENCE;
	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
	cmd_fence->fence = *seqno;
	vmw_fifo_commit_flush(dev_priv, bytes);
	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
	vmw_update_seqno(dev_priv, fifo_state);

out_err:
	return ret;
}

/**
 * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
					    uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */
	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

	if (bo->mem.mem_type == TTM_PL_VRAM) {
		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
		cmd->body.guestResult.offset = bo->offset;
	} else {
		cmd->body.guestResult.gmrId = bo->mem.start;
		cmd->body.guestResult.offset = 0;
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
					uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */
	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
	cmd->body.mobid = bo->mem.start;
	cmd->body.offset = 0;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
 * the appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A query result structure must have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer object
 * must be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
			      uint32_t cid)
{
	if (dev_priv->has_mob)
		return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);

	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}