// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
        struct vmw_buffer_object *backup = res->backup;
        struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;

        dma_resv_assert_held(res->backup->base.base.resv);
        res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
                res->func->prio;

        while (*new) {
                struct vmw_resource *this =
                        container_of(*new, struct vmw_resource, mob_node);

                parent = *new;
                new = (res->backup_offset < this->backup_offset) ?
                        &((*new)->rb_left) : &((*new)->rb_right);
        }

        rb_link_node(&res->mob_node, parent, new);
        rb_insert_color(&res->mob_node, &backup->res_tree);

        vmw_bo_prio_add(backup, res->used_prio);
}
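
/*
 * Editor's illustration (sketch, not driver code): because the tree is
 * keyed on backup_offset, an in-order walk visits resources in offset
 * order, which is what vmw_resources_clean() below relies on:
 *
 *      struct rb_node *node;
 *
 *      for (node = rb_first(&backup->res_tree); node; node = rb_next(node)) {
 *              struct vmw_resource *r =
 *                      container_of(node, struct vmw_resource, mob_node);
 *              // r->backup_offset is non-decreasing across iterations
 *      }
 */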

/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
        struct vmw_buffer_object *backup = res->backup;

        dma_resv_assert_held(backup->base.base.resv);
        if (vmw_resource_mob_attached(res)) {
                rb_erase(&res->mob_node, &backup->res_tree);
                RB_CLEAR_NODE(&res->mob_node);
                vmw_bo_prio_del(backup, res->used_prio);
        }
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
        return kref_get_unless_zero(&res->kref) ? res : NULL;
}
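
/*
 * Usage sketch (editor's illustration; do_something() is hypothetical):
 * take a temporary reference that fails gracefully when the last
 * reference is already being dropped:
 *
 *      struct vmw_resource *tmp = vmw_resource_reference_unless_doomed(res);
 *
 *      if (tmp) {
 *              do_something(tmp);
 *              vmw_resource_unreference(&tmp);
 *      }
 */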

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        spin_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
        spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        spin_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;

                ret = ttm_bo_reserve(bo, false, false, NULL);
                BUG_ON(ret);
                if (vmw_resource_mob_attached(res) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;

                        val_buf.bo = bo;
                        val_buf.num_shared = 0;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
                vmw_resource_mob_detach(res);
                if (res->dirty)
                        res->func->dirty_free(res);
                if (res->coherent)
                        vmw_bo_dirty_release(res->backup);
                ttm_bo_unreserve(bo);
                vmw_bo_unreference(&res->backup);
        }

        if (likely(res->hw_destroy != NULL)) {
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_res_list_kill(&res->binding_head);
                mutex_unlock(&dev_priv->binding_mutex);
                res->hw_destroy(res);
        }

        id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        spin_lock(&dev_priv->resource_lock);
        if (id != -1)
                idr_remove(idr, id);
        spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;

        *p_res = NULL;
        kref_put(&res->kref, vmw_resource_release);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        BUG_ON(res->id != -1);

        idr_preload(GFP_KERNEL);
        spin_lock(&dev_priv->resource_lock);

        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;

        spin_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
}
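
/*
 * Editor's note: the function above uses the standard idr_preload() /
 * idr_alloc() pattern -- preallocate with GFP_KERNEL outside the spinlock,
 * then allocate atomically with GFP_NOWAIT under it. Generic sketch
 * (hypothetical lock/idr names):
 *
 *      idr_preload(GFP_KERNEL);
 *      spin_lock(&lock);
 *      id = idr_alloc(&idr, ptr, 1, 0, GFP_NOWAIT);
 *      spin_unlock(&lock);
 *      idr_preload_end();
 */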

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func)
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->dev_priv = dev_priv;
        res->func = func;
        RB_CLEAR_NODE(&res->mob_node);
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->binding_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
        res->coherent = false;
        res->used_prio = 3;
        res->dirty = NULL;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(res);
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:       Pointer to a device private struct
 * @tfile:          Pointer to a struct ttm_object_file identifying the caller
 * @handle:         The TTM user-space handle
 * @converter:      Pointer to an object describing the resource type
 * @p_res:          On successful return the location pointed to will contain
 *                  a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle,
                                    const struct vmw_user_resource_conv
                                    *converter,
                                    struct vmw_resource **p_res)
{
        struct ttm_base_object *base;
        struct vmw_resource *res;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(ttm_base_object_type(base) != converter->object_type))
                goto out_bad_resource;

        res = converter->base_obj_to_res(base);
        kref_get(&res->kref);

        *p_res = res;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}

/**
 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:       Pointer to a device private struct
 * @tfile:          Pointer to a struct ttm_object_file identifying the caller
 * @handle:         The TTM user-space handle
 * @converter:      Pointer to an object describing the resource type
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, an error pointer will be returned (-ESRCH if the handle can't be
 * found, -EINVAL for an incorrect resource type).
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
                                      struct ttm_object_file *tfile,
                                      uint32_t handle,
                                      const struct vmw_user_resource_conv
                                      *converter)
{
        struct ttm_base_object *base;

        base = ttm_base_object_noref_lookup(tfile, handle);
        if (!base)
                return ERR_PTR(-ESRCH);

        if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
                ttm_base_object_noref_release();
                return ERR_PTR(-EINVAL);
        }

        return converter->base_obj_to_res(base);
}

/*
 * Helper function that looks up either a surface or a bo.
 *
 * The pointers pointed at by out_surf and out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct drm_file *filp,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_buffer_object **out_buf)
{
        struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
        struct vmw_resource *res;
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter,
                                              &res);
        if (!ret) {
                *out_surf = vmw_res_to_srf(res);
                return 0;
        }

        *out_surf = NULL;
        ret = vmw_user_bo_lookup(filp, handle, out_buf);
        return ret;
}
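
/*
 * Usage sketch (editor's illustration; error handling elided): the handle
 * resolves to exactly one of the two out pointers:
 *
 *      struct vmw_surface *surf = NULL;
 *      struct vmw_buffer_object *buf = NULL;
 *
 *      ret = vmw_user_lookup_handle(dev_priv, file_priv, handle,
 *                                   &surf, &buf);
 *      if (ret == 0 && surf)
 *              ...                     // handle named a surface
 *      else if (ret == 0)
 *              ...                     // handle named a buffer object
 */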

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
{
        unsigned long size = PFN_ALIGN(res->backup_size);
        struct vmw_buffer_object *backup;
        int ret;

        if (likely(res->backup)) {
                BUG_ON(res->backup->base.base.size < size);
                return 0;
        }

        ret = vmw_bo_create(res->dev_priv, res->backup_size,
                            res->func->backup_placement,
                            interruptible, false,
                            &vmw_bo_bo_free, &backup);
        if (unlikely(ret != 0))
                goto out_no_bo;

        res->backup = backup;

out_no_bo:
        return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 * @dirtying:       Transfer dirty regions.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
                                    struct ttm_validate_buffer *val_buf,
                                    bool dirtying)
{
        int ret = 0;
        const struct vmw_res_func *func = res->func;

        if (unlikely(res->id == -1)) {
                ret = func->create(res);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (func->bind &&
            ((func->needs_backup && !vmw_resource_mob_attached(res) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
                        vmw_resource_mob_attach(res);
        }

        /*
         * Handle the case where the backup mob is marked coherent but
         * the resource isn't.
         */
        if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
            !res->coherent) {
                if (res->backup->dirty && !res->dirty) {
                        ret = func->dirty_alloc(res);
                        if (ret)
                                return ret;
                } else if (!res->backup->dirty && res->dirty) {
                        func->dirty_free(res);
                }
        }

        /*
         * Transfer the dirty regions to the resource and update
         * the resource.
         */
        if (res->dirty) {
                if (dirtying && !res->res_dirty) {
                        pgoff_t start = res->backup_offset >> PAGE_SHIFT;
                        pgoff_t end = __KERNEL_DIV_ROUND_UP
                                (res->backup_offset + res->backup_size,
                                 PAGE_SIZE);

                        vmw_bo_dirty_unmap(res->backup, start, end);
                }

                vmw_bo_dirty_transfer_to_res(res);
                return func->dirty_sync(res);
        }

        return 0;

out_bind_failed:
        func->destroy(res);

        return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:         Change dirty status of the resource.
 * @dirty:             When changing dirty status indicates the new status.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
                            bool dirty_set,
                            bool dirty,
                            bool switch_backup,
                            struct vmw_buffer_object *new_backup,
                            unsigned long new_backup_offset)
{
        struct vmw_private *dev_priv = res->dev_priv;

        if (!list_empty(&res->lru_head))
                return;

        if (switch_backup && new_backup != res->backup) {
                if (res->backup) {
                        vmw_resource_mob_detach(res);
                        if (res->coherent)
                                vmw_bo_dirty_release(res->backup);
                        vmw_bo_unreference(&res->backup);
                }

                if (new_backup) {
                        res->backup = vmw_bo_reference(new_backup);

                        /*
                         * The validation code should already have added a
                         * dirty tracker here.
                         */
                        WARN_ON(res->coherent && !new_backup->dirty);

                        vmw_resource_mob_attach(res);
                } else {
                        res->backup = NULL;
                }
        } else if (switch_backup && res->coherent) {
                vmw_bo_dirty_release(res->backup);
        }

        if (switch_backup)
                res->backup_offset = new_backup_offset;

        if (dirty_set)
                res->res_dirty = dirty;

        if (!res->func->may_evict || res->id == -1 || res->pin_count)
                return;

        spin_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
        spin_unlock(&dev_priv->resource_lock);
}
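
/*
 * Lifecycle sketch (editor's illustration, simplified; error handling
 * elided): reserve, validate, submit, then unreserve to put the resource
 * back on the eviction LRU:
 *
 *      ret = vmw_resource_reserve(res, intr, false);
 *      ret = vmw_resource_validate(res, intr, dirtying);
 *      ...                             // submit commands referencing res
 *      vmw_resource_unreserve(res, false, false, false, NULL, 0);
 */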

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
                          struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;

        if (unlikely(res->backup == NULL)) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }

        INIT_LIST_HEAD(&val_list);
        ttm_bo_get(&res->backup->base);
        val_buf->bo = &res->backup->base;
        val_buf->num_shared = 0;
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
        if (unlikely(ret != 0))
                goto out_no_reserve;

        if (res->func->needs_backup && !vmw_resource_mob_attached(res))
                return 0;

        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
                              &ctx);

        if (unlikely(ret != 0))
                goto out_no_validate;

        return 0;

out_no_validate:
        ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
        ttm_bo_put(val_buf->bo);
        val_buf->bo = NULL;
        if (backup_dirty)
                vmw_bo_unreference(&res->backup);

        return ret;
}

/*
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
                         bool no_backup)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        spin_unlock(&dev_priv->resource_lock);

        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to allocate a backup buffer "
                                  "of size %lu bytes\n",
                                  (unsigned long) res->backup_size);
                        return ret;
                }
        }

        return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
                                 struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;

        if (likely(val_buf->bo == NULL))
                return;

        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
        ttm_eu_backoff_reservation(ticket, &val_list);
        ttm_bo_put(val_buf->bo);
        val_buf->bo = NULL;
}
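
/*
 * Editor's note: vmw_resource_check_buffer() and the function above are a
 * matched pair around the ttm_eu single-buffer list idiom:
 *
 *      INIT_LIST_HEAD(&val_list);
 *      list_add_tail(&val_buf->head, &val_list);
 *      ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
 *      ...
 *      ttm_eu_backoff_reservation(ticket, &val_list);
 */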

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
                                 struct vmw_resource *res, bool interruptible)
{
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
        int ret;

        BUG_ON(!func->may_evict);

        val_buf.bo = NULL;
        val_buf.num_shared = 0;
        ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;

        if (unlikely(func->unbind != NULL &&
                     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        goto out_no_unbind;
                vmw_resource_mob_detach(res);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
        res->res_dirty = false;
out_no_unbind:
        vmw_resource_backoff_reservation(ticket, &val_buf);

        return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 * @dirtying: Pending GPU operation will dirty the resource
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr,
                          bool dirtying)
{
        int ret;
        struct vmw_resource *evict_res;
        struct vmw_private *dev_priv = res->dev_priv;
        struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
        struct ttm_validate_buffer val_buf;
        unsigned err_count = 0;

        if (!res->func->create)
                return 0;

        val_buf.bo = NULL;
        val_buf.num_shared = 0;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
                ret = vmw_resource_do_validate(res, &val_buf, dirtying);
                if (likely(ret != -EBUSY))
                        break;

                spin_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
                        spin_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_res = vmw_resource_reference
                        (list_first_entry(lru_list, struct vmw_resource,
                                          lru_head));
                list_del_init(&evict_res->lru_head);

                spin_unlock(&dev_priv->resource_lock);

                /* Trylock backup buffers with a NULL ticket. */
                ret = vmw_resource_do_evict(NULL, evict_res, intr);
                if (unlikely(ret != 0)) {
                        spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        spin_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                goto out_no_validate;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                WARN_ON_ONCE(vmw_resource_mob_attached(res));
                vmw_bo_unreference(&res->backup);
        }

        return 0;

out_no_validate:
        return ret;
}

/**
 * vmw_resource_unbind_list
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
        struct ttm_validate_buffer val_buf = {
                .bo = &vbo->base,
                .num_shared = 0
        };

        dma_resv_assert_held(vbo->base.base.resv);
        while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
                struct rb_node *node = vbo->res_tree.rb_node;
                struct vmw_resource *res =
                        container_of(node, struct vmw_resource, mob_node);

                if (!WARN_ON_ONCE(!res->func->unbind))
                        (void) res->func->unbind(res, res->res_dirty, &val_buf);

                res->backup_dirty = true;
                res->res_dirty = false;
                vmw_resource_mob_detach(res);
        }

        (void) ttm_bo_wait(&vbo->base, false, false);
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist. This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
        struct vmw_resource *dx_query_ctx;
        struct vmw_private *dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackAllQuery body;
        } *cmd;

        /* No query bound, so do nothing */
        if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
                return 0;

        dx_query_ctx = dx_query_mob->dx_query_ctx;
        dev_priv     = dx_query_ctx->dev_priv;

        cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid    = dx_query_ctx->id;

        vmw_cmd_commit(dev_priv, sizeof(*cmd));

        /* Triggers a rebind the next time affected context is bound */
        dx_query_mob->dx_query_ctx = NULL;

        return 0;
}
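
/*
 * Editor's note: the function above follows the driver's usual command
 * submission pattern -- reserve space in the command stream, fill in a
 * header and body, then commit. Generic sketch (hypothetical command type):
 *
 *      cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), cid);
 *      if (unlikely(cmd == NULL))
 *              return -ENOMEM;
 *      cmd->header.id = SVGA_3D_CMD_...;
 *      cmd->header.size = sizeof(cmd->body);
 *      ...
 *      vmw_cmd_commit(dev_priv, sizeof(*cmd));
 */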

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The memory region @bo is moving from.
 * @new_mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_resource *old_mem,
                           struct ttm_resource *new_mem)
{
        struct vmw_buffer_object *dx_query_mob;
        struct ttm_device *bdev = bo->bdev;
        struct vmw_private *dev_priv;

        dev_priv = container_of(bdev, struct vmw_private, bdev);

        mutex_lock(&dev_priv->binding_mutex);

        /* If BO is being moved from MOB to system memory */
        if (new_mem->mem_type == TTM_PL_SYSTEM &&
            old_mem->mem_type == VMW_PL_MOB) {
                struct vmw_fence_obj *fence;

                dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
                if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
                        mutex_unlock(&dev_priv->binding_mutex);
                        return;
                }

                (void) vmw_query_readback_all(dx_query_mob);
                mutex_unlock(&dev_priv->binding_mutex);

                /* Create a fence and attach the BO to it */
                (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                vmw_bo_fence_single(bo, fence);

                if (fence != NULL)
                        vmw_fence_obj_unreference(&fence);

                (void) ttm_bo_wait(bo, false, false);
        } else
                mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
        return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                    enum vmw_res_type type)
{
        struct list_head *lru_list = &dev_priv->res_lru[type];
        struct vmw_resource *evict_res;
        unsigned err_count = 0;
        int ret;
        struct ww_acquire_ctx ticket;

        do {
                spin_lock(&dev_priv->resource_lock);

                if (list_empty(lru_list))
                        goto out_unlock;

                evict_res = vmw_resource_reference(
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
                spin_unlock(&dev_priv->resource_lock);

                /* Wait lock backup buffers with a ticket. */
                ret = vmw_resource_do_evict(&ticket, evict_res, false);
                if (unlikely(ret != 0)) {
                        spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        spin_unlock(&dev_priv->resource_lock);
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

out_unlock:
        spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
        enum vmw_res_type type;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        for (type = 0; type < vmw_res_max; ++type)
                vmw_resource_evict_type(dev_priv, type);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        mutex_lock(&dev_priv->cmdbuf_mutex);
        ret = vmw_resource_reserve(res, interruptible, false);
        if (ret)
                goto out_no_reserve;

        if (res->pin_count == 0) {
                struct vmw_buffer_object *vbo = NULL;

                if (res->backup) {
                        vbo = res->backup;

                        ret = ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
                        if (ret)
                                goto out_no_validate;
                        if (!vbo->base.pin_count) {
                                ret = ttm_bo_validate
                                        (&vbo->base,
                                         res->func->backup_placement,
                                         &ctx);
                                if (ret) {
                                        ttm_bo_unreserve(&vbo->base);
                                        goto out_no_validate;
                                }
                        }

                        /* Do we really need to pin the MOB as well? */
                        vmw_bo_pin_reserved(vbo, true);
                }
                ret = vmw_resource_validate(res, interruptible, true);
                if (vbo)
                        ttm_bo_unreserve(&vbo->base);
                if (ret)
                        goto out_no_validate;
        }
        res->pin_count++;

out_no_validate:
        vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
        mutex_unlock(&dev_priv->cmdbuf_mutex);

        return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        ret = vmw_resource_reserve(res, false, true);
        WARN_ON(ret);

        WARN_ON(res->pin_count == 0);
        if (--res->pin_count == 0 && res->backup) {
                struct vmw_buffer_object *vbo = res->backup;

                (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
                vmw_bo_pin_reserved(vbo, false);
                ttm_bo_unreserve(&vbo->base);
        }

        vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}
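
/*
 * Usage sketch (editor's illustration): pin across an interval where the
 * resource id must stay stable, then drop the pin:
 *
 *      ret = vmw_resource_pin(res, true);
 *      if (ret == 0) {
 *              ...             // res cannot be evicted; res->id is stable
 *              vmw_resource_unpin(res);
 *      }
 */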

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
        return res->func->res_type;
}

/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 * @res: The resource.
 * @start: The first page touched.
 * @end: The last page touched + 1.
 */
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
                               pgoff_t end)
{
        if (res->dirty)
                res->func->dirty_range_add(res, start << PAGE_SHIFT,
                                           end << PAGE_SHIFT);
}
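
/*
 * Editor's note: @start and @end are page offsets; the shifts above convert
 * them to a byte range. E.g. with 4 KiB pages, pages [2, 5) map to bytes
 * [0x2000, 0x5000).
 */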

/**
 * vmw_resources_clean - Clean resources intersecting a mob range
 * @vbo: The mob buffer object
 * @start: The mob page offset starting the range
 * @end: The mob page offset ending the range
 * @num_prefault: Returns how many pages including the first have been
 * cleaned and are ok to prefault
 */
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
                        pgoff_t end, pgoff_t *num_prefault)
{
        struct rb_node *cur = vbo->res_tree.rb_node;
        struct vmw_resource *found = NULL;
        unsigned long res_start = start << PAGE_SHIFT;
        unsigned long res_end = end << PAGE_SHIFT;
        unsigned long last_cleaned = 0;

        /*
         * Find the resource with lowest backup_offset that intersects the
         * range.
         */
        while (cur) {
                struct vmw_resource *cur_res =
                        container_of(cur, struct vmw_resource, mob_node);

                if (cur_res->backup_offset >= res_end) {
                        cur = cur->rb_left;
                } else if (cur_res->backup_offset + cur_res->backup_size <=
                           res_start) {
                        cur = cur->rb_right;
                } else {
                        found = cur_res;
                        cur = cur->rb_left;
                        /* Continue to look for resources with lower offsets */
                }
        }

        /*
         * In order of increasing backup_offset, clean dirty resources
         * intersecting the range.
         */
        while (found) {
                if (found->res_dirty) {
                        int ret;

                        if (!found->func->clean)
                                return -EINVAL;

                        ret = found->func->clean(found);
                        if (ret)
                                return ret;

                        found->res_dirty = false;
                }
                last_cleaned = found->backup_offset + found->backup_size;
                cur = rb_next(&found->mob_node);
                if (!cur)
                        break;

                found = container_of(cur, struct vmw_resource, mob_node);
                if (found->backup_offset >= res_end)
                        break;
        }

        /*
         * Set number of pages allowed prefaulting and fence the buffer object
         */
        *num_prefault = 1;
        if (last_cleaned > res_start) {
                struct ttm_buffer_object *bo = &vbo->base;

                *num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
                                                      PAGE_SIZE);
                vmw_bo_fence_single(bo, NULL);
        }

        return 0;
}
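
/*
 * Usage sketch (editor's illustration, simplified): a page-fault path may
 * clean the range around the faulting page before prefaulting:
 *
 *      pgoff_t num_prefault = 1;
 *
 *      ret = vmw_resources_clean(vbo, page_offset, page_offset + 1,
 *                                &num_prefault);
 *      // on success, num_prefault pages starting at page_offset are
 *      // clean and safe to map
 */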