/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/dma-resv.h>

#include "ttm_module.h"

/* default destructor */
static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	struct drm_printer p = drm_debug_printer(TTM_PFX);
	struct ttm_resource_manager *man;
	int i, mem_type;

	drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
		   bo, bo->resource->num_pages, bo->base.size >> 10,
		   bo->base.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		mem_type = placement->placement[i].mem_type;
		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
			   i, placement->placement[i].flags, mem_type);
		man = ttm_manager_type(bo->bdev, mem_type);
		ttm_resource_manager_debug(man, &p);
	}
}

/**
 * ttm_bo_move_to_lru_tail
 *
 * @bo: The buffer object.
 *
 * Move this BO to the tail of all lru lists used to look up and reserve an
 * object. This function must be called with struct ttm_device::lru_lock
 * held, and is used to make a BO less likely to be considered for eviction.
 */
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);

	if (bo->resource)
		ttm_resource_move_to_lru_tail(bo->resource);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

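/*
 * Example (hypothetical driver code, not part of TTM): a driver that just
 * used a BO for a submission could bump it on the LRU like this, taking the
 * lock the function documents as required. The reservation lock
 * (bo->base.resv) must already be held by the caller:
 *
 *	spin_lock(&bo->bdev->lru_lock);
 *	ttm_bo_move_to_lru_tail(bo);
 *	spin_unlock(&bo->bdev->lru_lock);
 */
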
/**
 * ttm_bo_set_bulk_move - update the BO's bulk move object
 *
 * @bo: The buffer object.
 * @bulk: The new bulk move to use, or NULL to clear it.
 *
 * Update the BO's bulk move object, making sure that resources are
 * added/removed as well. A bulk move allows moving many resources on the LRU
 * at once, resulting in much less overhead of maintaining the LRU.
 * The only requirement is that the resources stay together on the LRU and are
 * never separated. This is enforced by setting the bulk_move structure on a
 * BO. ttm_lru_bulk_move_tail() should be used to move all resources to the
 * tail of the LRU again.
 */
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
			  struct ttm_lru_bulk_move *bulk)
{
	dma_resv_assert_held(bo->base.resv);

	if (bo->bulk_move == bulk)
		return;

	spin_lock(&bo->bdev->lru_lock);
	if (bo->bulk_move && bo->resource)
		ttm_lru_bulk_move_del(bo->bulk_move, bo->resource);
	bo->bulk_move = bulk;
	if (bo->bulk_move && bo->resource)
		ttm_lru_bulk_move_add(bo->bulk_move, bo->resource);
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_set_bulk_move);

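/*
 * Example (hypothetical driver code, not part of TTM): a driver keeping all
 * BOs of a GPU VM on one bulk move, so the whole set can be bumped on the
 * LRU in O(1). struct my_vm and its helpers are made up for illustration;
 * ttm_lru_bulk_move_init() and ttm_lru_bulk_move_tail() are the TTM helpers
 * referenced above.
 *
 *	struct my_vm {
 *		struct ttm_device *bdev;
 *		struct ttm_lru_bulk_move lru_bulk_move;
 *	};
 *
 *	static void my_vm_init(struct my_vm *vm, struct ttm_device *bdev)
 *	{
 *		vm->bdev = bdev;
 *		ttm_lru_bulk_move_init(&vm->lru_bulk_move);
 *	}
 *
 *	static void my_vm_bo_add(struct my_vm *vm, struct ttm_buffer_object *bo)
 *	{
 *		dma_resv_assert_held(bo->base.resv);
 *		ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
 *	}
 *
 *	static void my_vm_bump_lru(struct my_vm *vm)
 *	{
 *		spin_lock(&vm->bdev->lru_lock);
 *		ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
 *		spin_unlock(&vm->bdev->lru_lock);
 *	}
 */
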
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_resource *mem, bool evict,
				  struct ttm_operation_ctx *ctx,
				  struct ttm_place *hop)
{
	struct ttm_resource_manager *old_man, *new_man;
	struct ttm_device *bdev = bo->bdev;
	int ret;

	old_man = ttm_manager_type(bdev, bo->resource->mem_type);
	new_man = ttm_manager_type(bdev, mem->mem_type);

	ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (new_man->use_tt) {
		/* Zero init the new TTM structure if the old location should
		 * have used one as well.
		 */
		ret = ttm_tt_create(bo, old_man->use_tt);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
			if (ret)
				goto out_err;
		}
	}

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (ret)
		goto out_err;

	ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
	if (ret) {
		if (ret == -EMULTIHOP)
			return ret;
		goto out_err;
	}

	ctx->bytes_moved += bo->base.size;
	return 0;

out_err:
	new_man = ttm_manager_type(bdev, bo->resource->mem_type);
	if (!new_man->use_tt)
		ttm_bo_tt_destroy(bo);

	return ret;
}

/*
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->funcs->delete_mem_notify)
		bo->bdev->funcs->delete_mem_notify(bo);

	ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
}

static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	if (bo->base.resv == &bo->base._resv)
		return 0;

	BUG_ON(!dma_resv_trylock(&bo->base._resv));

	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
	dma_resv_unlock(&bo->base._resv);
	if (r)
		return r;

	if (bo->type != ttm_bo_type_sg) {
		/* This works because the BO is about to be destroyed and nobody
		 * references it any more. The only tricky case is the trylock on
		 * the resv object while holding the lru_lock.
		 */
		spin_lock(&bo->bdev->lru_lock);
		bo->base.resv = &bo->base._resv;
		spin_unlock(&bo->bdev->lru_lock);
	}

	return r;
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct dma_resv *resv = &bo->base._resv;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
	dma_resv_iter_end(&cursor);
}

/**
 * ttm_bo_cleanup_refs
 * If bo idle, remove from lru lists, and unref.
 * If not idle, block if possible.
 *
 * Must be called with lru_lock and reservation held; this function
 * will drop the lru lock and optionally the reservation lock before returning.
 *
 * @bo: The buffer object to clean-up
 * @interruptible: Any sleeps should occur interruptibly.
 * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
 * @unlock_resv: Unlock the reservation lock as well.
 */
static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
			       bool interruptible, bool no_wait_gpu,
			       bool unlock_resv)
{
	struct dma_resv *resv = &bo->base._resv;
	int ret;

	if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
		ret = 0;
	else
		ret = -EBUSY;

	if (ret && !no_wait_gpu) {
		long lret;

		if (unlock_resv)
			dma_resv_unlock(bo->base.resv);
		spin_unlock(&bo->bdev->lru_lock);

		lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
					     interruptible, 30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&bo->bdev->lru_lock);
		if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
			/*
			 * We raced, and lost, someone else holds the reservation now,
			 * and is probably busy in ttm_bo_cleanup_memtype_use.
			 *
			 * Even if it's not the case, because we finished waiting any
			 * delayed destruction would succeed, so just return success
			 * here.
			 */
			spin_unlock(&bo->bdev->lru_lock);
			return 0;
		}
		ret = 0;
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		if (unlock_resv)
			dma_resv_unlock(bo->base.resv);
		spin_unlock(&bo->bdev->lru_lock);
		return ret;
	}

	list_del_init(&bo->ddestroy);
	spin_unlock(&bo->bdev->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	if (unlock_resv)
		dma_resv_unlock(bo->base.resv);

	ttm_bo_put(bo);

	return 0;
}

/*
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */
bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all)
{
	struct list_head removed;
	bool empty;

	INIT_LIST_HEAD(&removed);

	spin_lock(&bdev->lru_lock);
	while (!list_empty(&bdev->ddestroy)) {
		struct ttm_buffer_object *bo;

		bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
				      ddestroy);
		list_move_tail(&bo->ddestroy, &removed);
		if (!ttm_bo_get_unless_zero(bo))
			continue;

		if (remove_all || bo->base.resv != &bo->base._resv) {
			spin_unlock(&bdev->lru_lock);
			dma_resv_lock(bo->base.resv, NULL);

			spin_lock(&bdev->lru_lock);
			ttm_bo_cleanup_refs(bo, false, !remove_all, true);

		} else if (dma_resv_trylock(bo->base.resv)) {
			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
		} else {
			spin_unlock(&bdev->lru_lock);
		}

		ttm_bo_put(bo);
		spin_lock(&bdev->lru_lock);
	}
	list_splice_tail(&removed, &bdev->ddestroy);
	empty = list_empty(&bdev->ddestroy);
	spin_unlock(&bdev->lru_lock);

	return empty;
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_device *bdev = bo->bdev;
	int ret;

	WARN_ON_ONCE(bo->pin_count);
	WARN_ON_ONCE(bo->bulk_move);

	if (!bo->deleted) {
		ret = ttm_bo_individualize_resv(bo);
		if (ret) {
			/* Last resort, if we fail to allocate memory for the
			 * fences block for the BO to become idle
			 */
			dma_resv_wait_timeout(bo->base.resv,
					      DMA_RESV_USAGE_BOOKKEEP, false,
					      30 * HZ);
		}

		if (bo->bdev->funcs->release_notify)
			bo->bdev->funcs->release_notify(bo);

		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
		ttm_mem_io_free(bdev, bo->resource);
	}

	if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) ||
	    !dma_resv_trylock(bo->base.resv)) {
		/* The BO is not idle, resurrect it for delayed destroy */
		ttm_bo_flush_all_fences(bo);
		bo->deleted = true;

		spin_lock(&bo->bdev->lru_lock);

		/*
		 * Make pinned bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 *
		 * FIXME: QXL is triggering this. Can be removed when the
		 * driver is fixed.
		 */
		if (bo->pin_count) {
			bo->pin_count = 0;
			ttm_resource_move_to_lru_tail(bo->resource);
		}

		kref_init(&bo->kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&bo->bdev->lru_lock);

		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		return;
	}

	spin_lock(&bo->bdev->lru_lock);
	list_del(&bo->ddestroy);
	spin_unlock(&bo->bdev->lru_lock);

	ttm_bo_cleanup_memtype_use(bo);
	dma_resv_unlock(bo->base.resv);

	atomic_dec(&ttm_glob.bo_count);
	bo->destroy(bo);
}

void ttm_bo_put(struct ttm_buffer_object *bo)
{
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);

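/*
 * Example (hypothetical driver code, not part of TTM): the usual reference
 * pattern around LRU walks. A BO found on a list is only safe to use after
 * ttm_bo_get_unless_zero() succeeded, and every successful get must be
 * balanced by a ttm_bo_put():
 *
 *	if (ttm_bo_get_unless_zero(bo)) {
 *		my_use_bo(bo);
 *		ttm_bo_put(bo);
 *	}
 */
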
int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

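/*
 * Example (hypothetical driver code, not part of TTM): drivers typically
 * bracket device suspend with these two calls so the delayed-destroy worker
 * doesn't touch the hardware while it is down. mdev is a made-up driver
 * device embedding a struct ttm_device:
 *
 *	int resched;
 *
 *	resched = ttm_bo_lock_delayed_workqueue(&mdev->bdev);
 *	my_device_power_down(mdev);
 *	...
 *	my_device_power_up(mdev);
 *	ttm_bo_unlock_delayed_workqueue(&mdev->bdev, resched);
 */
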
static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
				     struct ttm_resource **mem,
				     struct ttm_operation_ctx *ctx,
				     struct ttm_place *hop)
{
	struct ttm_placement hop_placement;
	struct ttm_resource *hop_mem;
	int ret;

	hop_placement.num_placement = hop_placement.num_busy_placement = 1;
	hop_placement.placement = hop_placement.busy_placement = hop;

	/* find space in the bounce domain */
	ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
	if (ret)
		return ret;
	/* move to the bounce domain */
	ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
	if (ret) {
		ttm_resource_free(bo, &hop_mem);
		return ret;
	}
	return 0;
}

static int ttm_bo_evict(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource *evict_mem;
	struct ttm_placement placement;
	struct ttm_place hop;
	int ret = 0;

	memset(&hop, 0, sizeof(hop));

	dma_resv_assert_held(bo->base.resv);

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->funcs->evict_flags(bo, &placement);

	if (!placement.num_placement && !placement.num_busy_placement) {
		ret = ttm_bo_wait(bo, true, false);
		if (ret)
			return ret;

		/*
		 * Since we've already synced, this frees backing store
		 * immediately.
		 */
		return ttm_bo_pipeline_gutting(bo);
	}

	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

bounce:
	ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
	if (ret == -EMULTIHOP) {
		ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
		if (ret) {
			pr_err("Buffer eviction failed\n");
			ttm_resource_free(bo, &evict_mem);
			goto out;
		}
		/* try and move to final place now. */
		goto bounce;
	}
out:
	return ret;
}

bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place)
{
	dma_resv_assert_held(bo->base.resv);
	if (bo->resource->mem_type == TTM_PL_SYSTEM)
		return true;

	/* Don't evict this BO if it's outside of the
	 * requested placement range
	 */
	if (place->fpfn >= (bo->resource->start + bo->resource->num_pages) ||
	    (place->lpfn && place->lpfn <= bo->resource->start))
		return false;

	return true;
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);

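/*
 * Example (hypothetical driver code, not part of TTM): a driver can refine
 * this default in its ttm_device_funcs::eviction_valuable hook, e.g. to
 * protect BOs that are still in flight, and fall back to the default for
 * everything else. my_bo_in_use() is a made-up helper:
 *
 *	static bool my_eviction_valuable(struct ttm_buffer_object *bo,
 *					 const struct ttm_place *place)
 *	{
 *		if (my_bo_in_use(bo))
 *			return false;
 *
 *		return ttm_bo_eviction_valuable(bo, place);
 *	}
 */
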
/*
 * Check whether the target bo is allowed to be evicted or swapped out,
 * including these cases:
 *
 * a. if the bo shares its reservation object with ctx->resv, we assume the
 * reservation object is already locked, so we don't lock it again and
 * return true directly when either the operation allows reserved eviction
 * (ctx->allow_res_evict) or the target bo is already on the delayed free
 * list;
 *
 * b. otherwise, trylock it.
 */
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
					   struct ttm_operation_ctx *ctx,
					   const struct ttm_place *place,
					   bool *locked, bool *busy)
{
	bool ret = false;

	if (bo->base.resv == ctx->resv) {
		dma_resv_assert_held(bo->base.resv);
		if (ctx->allow_res_evict)
			ret = true;
		*locked = false;
		if (busy)
			*busy = false;
	} else {
		ret = dma_resv_trylock(bo->base.resv);
		*locked = ret;
		if (busy)
			*busy = !ret;
	}

	if (ret && place && (bo->resource->mem_type != place->mem_type ||
		!bo->bdev->funcs->eviction_valuable(bo, place))) {
		ret = false;
		if (*locked) {
			dma_resv_unlock(bo->base.resv);
			*locked = false;
		}
	}

	return ret;
}

/**
 * ttm_mem_evict_wait_busy - wait for a busy BO to become available
 *
 * @busy_bo: BO which couldn't be locked with trylock
 * @ctx: operation context
 * @ticket: acquire ticket
 *
 * Try to lock a busy buffer object to avoid failing eviction.
 */
static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
				   struct ttm_operation_ctx *ctx,
				   struct ww_acquire_ctx *ticket)
{
	int r;

	if (!busy_bo || !ticket)
		return -EBUSY;

	if (ctx->interruptible)
		r = dma_resv_lock_interruptible(busy_bo->base.resv,
						ticket);
	else
		r = dma_resv_lock(busy_bo->base.resv, ticket);

	/*
	 * TODO: It would be better to keep the BO locked until allocation is at
	 * least tried one more time, but that would mean a much larger rework
	 * of TTM.
	 */
	if (!r)
		dma_resv_unlock(busy_bo->base.resv);

	return r == -EDEADLK ? -EBUSY : r;
}

int ttm_mem_evict_first(struct ttm_device *bdev,
			struct ttm_resource_manager *man,
			const struct ttm_place *place,
			struct ttm_operation_ctx *ctx,
			struct ww_acquire_ctx *ticket)
{
	struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;
	bool locked = false;
	int ret;

	spin_lock(&bdev->lru_lock);
	ttm_resource_manager_for_each_res(man, &cursor, res) {
		bool busy;

		if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
						    &locked, &busy)) {
			if (busy && !busy_bo && ticket !=
			    dma_resv_locking_ctx(res->bo->base.resv))
				busy_bo = res->bo;
			continue;
		}

		if (ttm_bo_get_unless_zero(res->bo)) {
			bo = res->bo;
			break;
		}
		if (locked)
			dma_resv_unlock(res->bo->base.resv);
	}

	if (!bo) {
		if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
			busy_bo = NULL;
		spin_unlock(&bdev->lru_lock);
		ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
		if (busy_bo)
			ttm_bo_put(busy_bo);
		return ret;
	}

	if (bo->deleted) {
		ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
					  ctx->no_wait_gpu, locked);
		ttm_bo_put(bo);
		return ret;
	}

	spin_unlock(&bdev->lru_lock);

	ret = ttm_bo_evict(bo, ctx);
	if (locked)
		ttm_bo_unreserve(bo);
	else
		ttm_bo_move_to_lru_tail_unlocked(bo);

	ttm_bo_put(bo);
	return ret;
}

/**
 * ttm_bo_pin - Pin the buffer object.
 * @bo: The buffer object to pin
 *
 * Make sure the buffer is not evicted any more during memory pressure.
 * @bo must be unpinned again by calling ttm_bo_unpin().
 */
void ttm_bo_pin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	if (!(bo->pin_count++) && bo->bulk_move && bo->resource)
		ttm_lru_bulk_move_del(bo->bulk_move, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_pin);

/**
 * ttm_bo_unpin - Unpin the buffer object.
 * @bo: The buffer object to unpin
 *
 * Allows the buffer object to be evicted again during memory pressure.
 */
void ttm_bo_unpin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	if (WARN_ON_ONCE(!bo->pin_count))
		return;

	if (!(--bo->pin_count) && bo->bulk_move && bo->resource)
		ttm_lru_bulk_move_add(bo->bulk_move, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_unpin);

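/*
 * Example (hypothetical driver code, not part of TTM): pinning a BO while it
 * is programmed as a scanout buffer. Both calls require the reservation lock,
 * and every pin must be balanced by an unpin:
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_pin(bo);
 *	dma_resv_unlock(bo->base.resv);
 *
 *	my_program_scanout(bo);
 *	...
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_unpin(bo);
 *	dma_resv_unlock(bo->base.resv);
 */
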
/*
 * Add the last move fence to the BO as kernel dependency and reserve a new
 * fence slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_resource_manager *man,
				 struct ttm_resource *mem,
				 bool no_wait_gpu)
{
	struct dma_fence *fence;
	int ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (!fence)
		return 0;

	if (no_wait_gpu) {
		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
		dma_fence_put(fence);
		return ret;
	}

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	dma_fence_put(fence);
	return ret;
}

/*
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  const struct ttm_place *place,
				  struct ttm_resource **mem,
				  struct ttm_operation_ctx *ctx)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *man;
	struct ww_acquire_ctx *ticket;
	int ret;

	man = ttm_manager_type(bdev, place->mem_type);
	ticket = dma_resv_locking_ctx(bo->base.resv);
	do {
		ret = ttm_resource_alloc(bo, place, mem);
		if (likely(!ret))
			break;
		if (unlikely(ret != -ENOSPC))
			return ret;
		ret = ttm_mem_evict_first(bdev, man, place, ctx,
					  ticket);
		if (unlikely(ret != 0))
			return ret;
	} while (1);

	return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
}

/*
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_resource **mem,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_device *bdev = bo->bdev;
	bool type_found = false;
	int i, ret;

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_resource_manager *man;

		man = ttm_manager_type(bdev, place->mem_type);
		if (!man || !ttm_resource_manager_used(man))
			continue;

		type_found = true;
		ret = ttm_resource_alloc(bo, place, mem);
		if (ret == -ENOSPC)
			continue;
		if (unlikely(ret))
			goto error;

		ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
		if (unlikely(ret)) {
			ttm_resource_free(bo, mem);
			if (ret == -EBUSY)
				continue;

			goto error;
		}
		return 0;
	}

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];
		struct ttm_resource_manager *man;

		man = ttm_manager_type(bdev, place->mem_type);
		if (!man || !ttm_resource_manager_used(man))
			continue;

		type_found = true;
		ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
		if (likely(!ret))
			return 0;

		if (ret && ret != -EBUSY)
			goto error;
	}

	ret = -ENOSPC;
	if (!type_found) {
		pr_err(TTM_PFX "No compatible memory type found\n");
		ret = -EINVAL;
	}

error:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

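/*
 * Example (hypothetical driver code, not part of TTM): callers normally go
 * through ttm_bo_validate() rather than calling this directly, but the
 * placement argument looks the same either way. A VRAM-preferred placement
 * that falls back to GTT under memory pressure could be set up like this:
 *
 *	struct ttm_place places[] = {
 *		{ .mem_type = TTM_PL_VRAM },
 *		{ .mem_type = TTM_PL_TT },
 *	};
 *	struct ttm_placement placement = {
 *		.num_placement = ARRAY_SIZE(places),
 *		.placement = places,
 *		.num_busy_placement = 1,
 *		.busy_placement = &places[1],
 *	};
 */
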
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      struct ttm_operation_ctx *ctx)
{
	struct ttm_resource *mem;
	struct ttm_place hop;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	/*
	 * Determine where to move the buffer.
	 *
	 * If the driver determines that the move needs an extra step, it
	 * returns -EMULTIHOP and the buffer is first moved to the temporary
	 * stop; the driver is then called again to make the second hop.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
	if (ret)
		return ret;
bounce:
	ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
	if (ret == -EMULTIHOP) {
		ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
		if (ret)
			goto out;
		/* try and move to final place now. */
		goto bounce;
	}
out:
	if (ret)
		ttm_resource_free(bo, &mem);
	return ret;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx)
{
	int ret;

	dma_resv_assert_held(bo->base.resv);

	/*
	 * Remove the backing store if no placement is given.
	 */
	if (!placement->num_placement && !placement->num_busy_placement)
		return ttm_bo_pipeline_gutting(bo);

	/*
	 * Check whether we need to move the buffer.
	 */
	if (!ttm_resource_compat(bo->resource, placement)) {
		ret = ttm_bo_move_buffer(bo, placement, ctx);
		if (ret)
			return ret;
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->resource->mem_type == TTM_PL_SYSTEM) {
		ret = ttm_tt_create(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

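/*
 * Example (hypothetical driver code, not part of TTM): moving an already
 * created BO into a placement such as the one shown after ttm_bo_mem_space()
 * above. The reservation lock must be held across the call:
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	int ret;
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ret = ttm_bo_validate(bo, &placement, &ctx);
 *	dma_resv_unlock(bo->base.resv);
 */
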
int ttm_bo_init_reserved(struct ttm_device *bdev,
			 struct ttm_buffer_object *bo,
			 size_t size,
			 enum ttm_bo_type type,
			 struct ttm_placement *placement,
			 uint32_t page_alignment,
			 struct ttm_operation_ctx *ctx,
			 struct sg_table *sg,
			 struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
	bool locked;
	int ret;

	bo->destroy = destroy ? destroy : ttm_bo_default_destroy;

	kref_init(&bo->kref);
	INIT_LIST_HEAD(&bo->ddestroy);
	bo->bdev = bdev;
	bo->type = type;
	bo->page_alignment = page_alignment;
	bo->pin_count = 0;
	bo->sg = sg;
	bo->bulk_move = NULL;
	if (resv) {
		bo->base.resv = resv;
		dma_resv_assert_held(bo->base.resv);
	} else {
		bo->base.resv = &bo->base._resv;
	}
	atomic_inc(&ttm_glob.bo_count);

	ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
	if (unlikely(ret)) {
		ttm_bo_put(bo);
		return ret;
	}

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
					 bo->resource->num_pages);

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv) {
		locked = dma_resv_trylock(bo->base.resv);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, ctx);

	if (unlikely(ret)) {
		if (!resv)
			ttm_bo_unreserve(bo);

		ttm_bo_put(bo);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);

int ttm_bo_init(struct ttm_device *bdev,
		struct ttm_buffer_object *bo,
		size_t size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct sg_table *sg,
		struct dma_resv *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	int ret;

	ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
				   page_alignment, &ctx, sg, resv, destroy);
	if (ret)
		return ret;

	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init);

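/*
 * Example (hypothetical driver code, not part of TTM): creating a kernel BO
 * in system memory with a page_alignment of one page. struct my_bo, mdev,
 * sys_placement and my_bo_destroy() are made up for illustration; a real
 * driver embeds struct ttm_buffer_object in its own BO type and usually
 * initializes bo->base as a GEM object first.
 *
 *	struct my_bo *ubo = kzalloc(sizeof(*ubo), GFP_KERNEL);
 *	int ret;
 *
 *	if (!ubo)
 *		return -ENOMEM;
 *
 *	ret = ttm_bo_init(&mdev->bdev, &ubo->tbo, size, ttm_bo_type_kernel,
 *			  &sys_placement, 1, false, NULL, NULL,
 *			  my_bo_destroy);
 *	if (ret)
 *		return ret;
 */
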
/*
 * buffer object vm functions.
 */

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
	ttm_mem_io_free(bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool interruptible, bool no_wait)
{
	long timeout = 15 * HZ;

	if (no_wait) {
		if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP))
			return 0;
		else
			return -EBUSY;
	}

	timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
					interruptible, timeout);
	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

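/*
 * Example (hypothetical driver code, not part of TTM): blocking until all
 * fences on a BO have signaled before tearing down a driver-private mapping.
 * The reservation lock must be held:
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ret = ttm_bo_wait(bo, true, false);
 *	dma_resv_unlock(bo->base.resv);
 *	if (ret)
 *		return ret;
 */
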
int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
		   gfp_t gfp_flags)
{
	struct ttm_place place;
	bool locked;
	long ret;

	/*
	 * While the bo may already reside in SYSTEM placement, set
	 * SYSTEM as new placement to cover also the move further below.
	 * The driver may use the fact that we're moving from SYSTEM
	 * as an indication that we're about to swap out.
	 */
	memset(&place, 0, sizeof(place));
	place.mem_type = bo->resource->mem_type;
	if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
		return -EBUSY;

	if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
	    bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
	    bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
	    !ttm_bo_get_unless_zero(bo)) {
		if (locked)
			dma_resv_unlock(bo->base.resv);
		return -EBUSY;
	}

	if (bo->deleted) {
		ret = ttm_bo_cleanup_refs(bo, false, false, locked);
		ttm_bo_put(bo);
		return ret == -EBUSY ? -ENOSPC : ret;
	}

	/* TODO: Cleanup the locking */
	spin_unlock(&bo->bdev->lru_lock);

	/*
	 * Move to system cached
	 */
	if (bo->resource->mem_type != TTM_PL_SYSTEM) {
		struct ttm_operation_ctx ctx = { false, false };
		struct ttm_resource *evict_mem;
		struct ttm_place hop;

		memset(&hop, 0, sizeof(hop));
		place.mem_type = TTM_PL_SYSTEM;
		ret = ttm_resource_alloc(bo, &place, &evict_mem);
		if (unlikely(ret))
			goto out;

		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
		if (unlikely(ret != 0)) {
			WARN(ret == -EMULTIHOP,
			     "Unexpected multihop in swapout - likely driver bug.\n");
			goto out;
		}
	}

	/*
	 * Make sure BO is idle.
	 */
	ret = ttm_bo_wait(bo, false, false);
	if (unlikely(ret != 0))
		goto out;

	ttm_bo_unmap_virtual(bo);

	/*
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */
	if (bo->bdev->funcs->swap_notify)
		bo->bdev->funcs->swap_notify(bo);

	if (ttm_tt_is_populated(bo->ttm))
		ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
out:

	/*
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */
	if (locked)
		dma_resv_unlock(bo->base.resv);
	ttm_bo_put(bo);
	return ret == -EBUSY ? -ENOSPC : ret;
}

void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
	if (bo->ttm == NULL)
		return;

	ttm_tt_unpopulate(bo->bdev, bo->ttm);
	ttm_tt_destroy(bo->bdev, bo->ttm);
	bo->ttm = NULL;
}