/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

static struct ttm_place vram_placement_flags = {
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

static struct ttm_place vram_ne_placement_flags = {
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static struct ttm_place sys_placement_flags = {
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};

static struct ttm_place sys_ne_placement_flags = {
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static struct ttm_place gmr_placement_flags = {
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static struct ttm_place gmr_ne_placement_flags = {
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static struct ttm_place mob_placement_flags = {
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static struct ttm_place vram_gmr_placement_flags[] = {
	{ .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED },
	{ .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED }
};

static struct ttm_place gmr_vram_placement_flags[] = {
	{ .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED },
	{ .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED }
};

struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

static struct ttm_place vram_gmr_ne_placement_flags[] = {
	{ .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
		   TTM_PL_FLAG_NO_EVICT },
	{ .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
		   TTM_PL_FLAG_NO_EVICT }
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
	.num_placement = 1,
	.placement = &sys_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_ne_placement_flags
};

static struct ttm_place evictable_placement_flags[] = {
	{ .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED },
	{ .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED },
	{ .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED },
	{ .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED }
};

struct ttm_placement vmw_evictable_placement = {
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};
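
/*
 * Note on the tables above (a reading aid, not new behaviour): TTM first
 * tries the entries in .placement without evicting anything; the
 * .busy_placement list is the fallback used under memory pressure. So e.g.
 * vmw_vram_gmr_placement prefers VRAM or a GMR, but settles for a GMR alone
 * when space has to be freed by eviction.
 */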

/**
 * struct vmw_ttm_tt - The vmwgfx-private TTM struct, extending ttm_dma_tt
 * with driver bookkeeping (GMR id, MOB and DMA mapping state).
 */
struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	return __sg_page_iter_next(&viter->iter);
}

/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}

static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
{
	return sg_page_iter_page(&viter->iter);
}

/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}

/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Page offset into @vsgt at which to start iterating
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->pages = vsgt->pages;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->addrs = vsgt->addrs;
		viter->pages = vsgt->pages;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		viter->page = &__vmw_piter_sg_page;
		__sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
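
/*
 * A minimal usage sketch (mirroring the region-counting loop in
 * vmw_ttm_map_dma() below): visit the DMA address of every page of a
 * mapped buffer, whatever the current mapping mode is.
 *
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *		... use addr ...
 *	}
 */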

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_backend
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
		     DMA_BIDIRECTIONAL);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_backend
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;
	int ret;

	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
			 DMA_BIDIRECTIONAL);
	if (unlikely(ret == 0))
		return -ENOMEM;

	vmw_tt->sgt.nents = ret;

	return 0;
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function for the current mapping mode, and
 * make sure the TTM pages are visible to the device. Allocate storage for
 * the device mappings. If a mapping has already been performed, indicated
 * by the storage pointer being non-NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
					   true);
		if (unlikely(ret != 0))
			return ret;

		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
						vsgt->num_pages, 0,
						(unsigned long)
						vsgt->num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

		if (vsgt->num_pages > vmw_tt->sgt.nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}
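
	/*
	 * Count the runs of device-contiguous pages: each run of pages whose
	 * DMA addresses are consecutive becomes one region in
	 * vmw_tt->vsgt.num_regions, which later binding code can use to size
	 * per-region descriptors.
	 */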
	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}

/**
 * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_backend as argument.
 * Note that the buffer object must be either pinned or reserved before
 * calling this function.
 */
int vmw_bo_map_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return vmw_ttm_map_dma(vmw_tt);
}
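
/*
 * Sketch of the expected calling pattern (hypothetical caller, not code in
 * this file): the bo must be reserved (or pinned) around the mapping, e.g.:
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, NULL);
 *	if (likely(ret == 0)) {
 *		ret = vmw_bo_map_dma(bo);
 *		ttm_bo_unreserve(bo);
 *	}
 */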

/**
 * vmw_bo_unmap_dma - Tear down a buffer object's device mappings
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_backend as argument.
 */
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_tt);
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}

static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				    ttm->num_pages, vmw_be->gmr_id);
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				    &vmw_be->vsgt, ttm->num_pages,
				    vmw_be->gmr_id);
	default:
		BUG();
	}
	return 0;
}

static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	/* In bind-time mapping mode, the DMA mapping only lives while bound. */
	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);

	return 0;
}

static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}

static int vmw_ttm_populate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, false, true);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm);

	return ret;
}

static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}
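
/*
 * Note on the coherent path above: the ttm_mem_global charge of
 * num_pages * sizeof(dma_addr_t) (rounded by ttm_round_pot()) accounts for
 * the per-page DMA-address bookkeeping used in vmw_dma_alloc_coherent mode;
 * populate charges it and unpopulate releases the same amount.
 */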

static struct ttm_backend_func vmw_ttm_func = {
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
				      dummy_read_page);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
				  dummy_read_page);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;

out_no_init:
	kfree(vmw_be);
	return NULL;
}
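
/*
 * None of the functions above are called directly: TTM reaches
 * vmw_ttm_tt_create() and the populate hooks through the vmw_bo_driver
 * table at the end of this file, and the bind/unbind/destroy hooks through
 * the vmw_ttm_func backend installed in each ttm_tt.
 */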

static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		/*
		 * "Guest Memory Regions" is an aperture-like feature with
		 * one slot per bo. There is an upper limit on both the
		 * number of slots and the bo size.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_dmabuf_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	mem->bus.addr = NULL;
	mem->bus.is_iomem = false;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
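
/*
 * Only VRAM is a real I/O aperture here: CPU mappings of a VRAM bo go
 * through dev_priv->vram_start plus the bo's page offset, while system,
 * GMR and MOB memory is backed by ordinary pages and needs no bus window.
 */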

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}

/**
 * FIXME: We're using the old vmware polling method to sync.
 * Do this with fences instead.
 */
static void *vmw_sync_obj_ref(void *sync_obj)
{
	return (void *)
		vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
}

static void vmw_sync_obj_unref(void **sync_obj)
{
	vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
}

static int vmw_sync_obj_flush(void *sync_obj)
{
	vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
	return 0;
}

static bool vmw_sync_obj_signaled(void *sync_obj)
{
	return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
				      DRM_VMW_FENCE_FLAG_EXEC);
}

static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
	return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
				  DRM_VMW_FENCE_FLAG_EXEC,
				  lazy, interruptible,
				  VMW_FENCE_WAIT_TIMEOUT);
}

/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *mem)
{
	vmw_resource_move_notify(bo, mem);
}

/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	/* Wait for the bo to become idle before its pages are swapped out. */
	spin_lock(&bdev->fence_lock);
	ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bdev->fence_lock);
}

struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.sync_obj_signaled = vmw_sync_obj_signaled,
	.sync_obj_wait = vmw_sync_obj_wait,
	.sync_obj_flush = vmw_sync_obj_flush,
	.sync_obj_unref = vmw_sync_obj_unref,
	.sync_obj_ref = vmw_sync_obj_ref,
	.move_notify = vmw_move_notify,
	.swap_notify = vmw_swap_notify,
	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
	.io_mem_free = &vmw_ttm_io_mem_free,
};