/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/dma-resv.h>

#include "ttm_module.h"
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
                                   struct ttm_placement *placement)
{
        struct drm_printer p = drm_debug_printer(TTM_PFX);
        struct ttm_resource_manager *man;
        int i, mem_type;

        drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
                   bo, bo->resource->num_pages, bo->base.size >> 10,
                   bo->base.size >> 20);
        for (i = 0; i < placement->num_placement; i++) {
                mem_type = placement->placement[i].mem_type;
                drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
                           i, placement->placement[i].flags, mem_type);
                man = ttm_manager_type(bo->bdev, mem_type);
                ttm_resource_manager_debug(man, &p);
        }
}
/**
 * ttm_bo_move_to_lru_tail
 *
 * @bo: The buffer object.
 *
 * Move this BO to the tail of all lru lists used to lookup and reserve an
 * object. This function must be called with struct ttm_global::lru_lock
 * held, and is used to make a BO less likely to be considered for eviction.
 */
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
        dma_resv_assert_held(bo->base.resv);

        if (bo->resource)
                ttm_resource_move_to_lru_tail(bo->resource);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
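
/*
 * Example (illustrative sketch, not part of the upstream file): a driver
 * that wants to protect a just-used BO from near-term eviction can bump it
 * on the LRU while holding both the reservation and the LRU lock:
 *
 *      dma_resv_lock(bo->base.resv, NULL);
 *      spin_lock(&bo->bdev->lru_lock);
 *      ttm_bo_move_to_lru_tail(bo);
 *      spin_unlock(&bo->bdev->lru_lock);
 *      dma_resv_unlock(bo->base.resv);
 */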
/**
 * ttm_bo_set_bulk_move - update BOs bulk move object
 *
 * @bo: The buffer object.
 * @bulk: bulk move structure
 *
 * Update the BOs bulk move object, making sure that resources are added and
 * removed as well. A bulk move allows moving many resources on the LRU at
 * once, resulting in much less overhead of maintaining the LRU.
 * The only requirement is that the resources stay together on the LRU and are
 * never separated. This is enforced by setting the bulk_move structure on a
 * BO. ttm_lru_bulk_move_tail() should be used to move all resources to the
 * tail of their LRU list.
 */
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
                          struct ttm_lru_bulk_move *bulk)
{
        dma_resv_assert_held(bo->base.resv);

        if (bo->bulk_move == bulk)
                return;

        spin_lock(&bo->bdev->lru_lock);
        if (bo->resource)
                ttm_resource_del_bulk_move(bo->resource, bo);
        bo->bulk_move = bulk;
        if (bo->resource)
                ttm_resource_add_bulk_move(bo->resource, bo);
        spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_set_bulk_move);
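
/*
 * Example (illustrative sketch): grouping all BOs of one virtual machine
 * context in a single bulk move so the whole set can be bumped on the LRU
 * in one step. "my_vm" and its "lru_bulk_move" member are hypothetical
 * driver-side names:
 *
 *      struct my_vm *vm = ...;
 *
 *      ttm_lru_bulk_move_init(&vm->lru_bulk_move);
 *
 *      dma_resv_lock(bo->base.resv, NULL);
 *      ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
 *      dma_resv_unlock(bo->base.resv);
 *
 * and after command submission, the whole group moves at once:
 *
 *      spin_lock(&bo->bdev->lru_lock);
 *      ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
 *      spin_unlock(&bo->bdev->lru_lock);
 */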
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_resource *mem, bool evict,
                                  struct ttm_operation_ctx *ctx,
                                  struct ttm_place *hop)
{
        struct ttm_device *bdev = bo->bdev;
        bool old_use_tt, new_use_tt;
        int ret;

        old_use_tt = bo->resource &&
                ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
        new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;

        ttm_bo_unmap_virtual(bo);

        /*
         * Create and bind a ttm if required.
         */
        if (new_use_tt) {
                /* Zero init the new TTM structure if the old location should
                 * have used one as well.
                 */
                ret = ttm_tt_create(bo, old_use_tt);
                if (ret)
                        goto out_err;

                if (mem->mem_type != TTM_PL_SYSTEM) {
                        ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
                        if (ret)
                                goto out_err;
                }
        }

        ret = dma_resv_reserve_fences(bo->base.resv, 1);
        if (ret)
                goto out_err;

        ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
        if (ret) {
                if (ret == -EMULTIHOP)
                        return ret;
                goto out_err;
        }

        ctx->bytes_moved += bo->base.size;
        return 0;

out_err:
        if (!old_use_tt)
                ttm_bo_tt_destroy(bo);

        return ret;
}
/*
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
        if (bo->bdev->funcs->delete_mem_notify)
                bo->bdev->funcs->delete_mem_notify(bo);

        ttm_bo_tt_destroy(bo);
        ttm_resource_free(bo, &bo->resource);
}
static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
        int r;

        if (bo->base.resv == &bo->base._resv)
                return 0;

        BUG_ON(!dma_resv_trylock(&bo->base._resv));

        r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
        dma_resv_unlock(&bo->base._resv);
        if (r)
                return r;

        if (bo->type != ttm_bo_type_sg) {
                /* This works because the BO is about to be destroyed and nobody
                 * references it any more. The only tricky case is the trylock on
                 * the resv object while holding the lru_lock.
                 */
                spin_lock(&bo->bdev->lru_lock);
                bo->base.resv = &bo->base._resv;
                spin_unlock(&bo->bdev->lru_lock);
        }

        return r;
}
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
        struct dma_resv *resv = &bo->base._resv;
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
                if (!fence->ops->signaled)
                        dma_fence_enable_sw_signaling(fence);
        }
        dma_resv_iter_end(&cursor);
}
/**
 * ttm_bo_cleanup_refs
 * If bo idle, remove from lru lists, and unref.
 * If not idle, block if possible.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop the lru lock and optionally the reservation lock before returning.
 *
 * @bo: The buffer object to clean-up
 * @interruptible: Any sleeps should occur interruptibly.
 * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
 * @unlock_resv: Unlock the reservation lock as well.
 */
static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                               bool interruptible, bool no_wait_gpu,
                               bool unlock_resv)
{
        struct dma_resv *resv = &bo->base._resv;
        int ret;

        if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
                ret = 0;
        else
                ret = -EBUSY;

        if (ret && !no_wait_gpu) {
                long lret;

                if (unlock_resv)
                        dma_resv_unlock(bo->base.resv);
                spin_unlock(&bo->bdev->lru_lock);

                lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
                                             interruptible, 30 * HZ);

                if (lret < 0)
                        return lret;
                else if (lret == 0)
                        return -EBUSY;

                spin_lock(&bo->bdev->lru_lock);
                if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
                        /*
                         * We raced, and lost, someone else holds the reservation now,
                         * and is probably busy in ttm_bo_cleanup_memtype_use.
                         *
                         * Even if it's not the case, because we finished waiting any
                         * delayed destruction would succeed, so just return success
                         * here.
                         */
                        spin_unlock(&bo->bdev->lru_lock);
                        return 0;
                }
                ret = 0;
        }

        if (ret || unlikely(list_empty(&bo->ddestroy))) {
                if (unlock_resv)
                        dma_resv_unlock(bo->base.resv);
                spin_unlock(&bo->bdev->lru_lock);
                return ret;
        }

        list_del_init(&bo->ddestroy);
        spin_unlock(&bo->bdev->lru_lock);
        ttm_bo_cleanup_memtype_use(bo);

        if (unlock_resv)
                dma_resv_unlock(bo->base.resv);

        return 0;
}
/*
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */
bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all)
{
        struct list_head removed;
        bool empty;

        INIT_LIST_HEAD(&removed);

        spin_lock(&bdev->lru_lock);
        while (!list_empty(&bdev->ddestroy)) {
                struct ttm_buffer_object *bo;

                bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
                                      ddestroy);
                list_move_tail(&bo->ddestroy, &removed);
                if (!ttm_bo_get_unless_zero(bo))
                        continue;

                if (remove_all || bo->base.resv != &bo->base._resv) {
                        spin_unlock(&bdev->lru_lock);
                        dma_resv_lock(bo->base.resv, NULL);

                        spin_lock(&bdev->lru_lock);
                        ttm_bo_cleanup_refs(bo, false, !remove_all, true);

                } else if (dma_resv_trylock(bo->base.resv)) {
                        ttm_bo_cleanup_refs(bo, false, !remove_all, true);
                } else {
                        spin_unlock(&bdev->lru_lock);
                }

                ttm_bo_put(bo);
                spin_lock(&bdev->lru_lock);
        }
        list_splice_tail(&removed, &bdev->ddestroy);
        empty = list_empty(&bdev->ddestroy);
        spin_unlock(&bdev->lru_lock);

        return empty;
}
static void ttm_bo_release(struct kref *kref)
{
        struct ttm_buffer_object *bo =
            container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_device *bdev = bo->bdev;
        int ret;

        WARN_ON_ONCE(bo->pin_count);
        WARN_ON_ONCE(bo->bulk_move);

        if (!bo->deleted) {
                ret = ttm_bo_individualize_resv(bo);
                if (ret) {
                        /* Last resort, if we fail to allocate memory for the
                         * fences block for the BO to become idle
                         */
                        dma_resv_wait_timeout(bo->base.resv,
                                              DMA_RESV_USAGE_BOOKKEEP, false,
                                              30 * HZ);
                }

                if (bo->bdev->funcs->release_notify)
                        bo->bdev->funcs->release_notify(bo);

                drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
                ttm_mem_io_free(bdev, bo->resource);
        }

        if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) ||
            !dma_resv_trylock(bo->base.resv)) {
                /* The BO is not idle, resurrect it for delayed destroy */
                ttm_bo_flush_all_fences(bo);
                bo->deleted = true;

                spin_lock(&bo->bdev->lru_lock);

                /*
                 * Make pinned bos immediately available to
                 * shrinkers, now that they are queued for
                 * destruction.
                 *
                 * FIXME: QXL is triggering this. Can be removed when the
                 * driver is fixed.
                 */
                if (bo->pin_count) {
                        bo->pin_count = 0;
                        ttm_resource_move_to_lru_tail(bo->resource);
                }

                kref_init(&bo->kref);
                list_add_tail(&bo->ddestroy, &bdev->ddestroy);
                spin_unlock(&bo->bdev->lru_lock);

                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
                return;
        }

        spin_lock(&bo->bdev->lru_lock);
        list_del(&bo->ddestroy);
        spin_unlock(&bo->bdev->lru_lock);

        ttm_bo_cleanup_memtype_use(bo);
        dma_resv_unlock(bo->base.resv);

        atomic_dec(&ttm_glob.bo_count);
        bo->destroy(bo);
}
void ttm_bo_put(struct ttm_buffer_object *bo)
{
        kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);
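
/*
 * Example (illustrative sketch): BO lifetimes follow the usual kref
 * pattern. A pointer found under the LRU lock must be pinned with
 * ttm_bo_get_unless_zero() before it is used outside that lock;
 * my_use_bo() is a hypothetical helper:
 *
 *      if (ttm_bo_get_unless_zero(bo)) {
 *              spin_unlock(&bdev->lru_lock);
 *              my_use_bo(bo);
 *              ttm_bo_put(bo);
 *      }
 */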
int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev)
{
        return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched)
{
        if (resched)
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
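
/*
 * Example (illustrative sketch): pausing delayed destruction around a
 * section that must not race with it, e.g. device suspend in a
 * hypothetical driver (my_device_suspend() is made up):
 *
 *      int resched = ttm_bo_lock_delayed_workqueue(bdev);
 *
 *      my_device_suspend(dev);
 *
 *      ttm_bo_unlock_delayed_workqueue(bdev, resched);
 */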
static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
                                     struct ttm_resource **mem,
                                     struct ttm_operation_ctx *ctx,
                                     struct ttm_place *hop)
{
        struct ttm_placement hop_placement;
        struct ttm_resource *hop_mem;
        int ret;

        hop_placement.num_placement = hop_placement.num_busy_placement = 1;
        hop_placement.placement = hop_placement.busy_placement = hop;

        /* find space in the bounce domain */
        ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
        if (ret)
                return ret;
        /* move to the bounce domain */
        ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
        if (ret) {
                ttm_resource_free(bo, &hop_mem);
                return ret;
        }
        return 0;
}
static int ttm_bo_evict(struct ttm_buffer_object *bo,
                        struct ttm_operation_ctx *ctx)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource *evict_mem;
        struct ttm_placement placement;
        struct ttm_place hop;
        int ret = 0;

        memset(&hop, 0, sizeof(hop));

        dma_resv_assert_held(bo->base.resv);

        placement.num_placement = 0;
        placement.num_busy_placement = 0;
        bdev->funcs->evict_flags(bo, &placement);

        if (!placement.num_placement && !placement.num_busy_placement) {
                ret = ttm_bo_wait(bo, true, false);
                if (ret)
                        return ret;

                /*
                 * Since we've already synced, this frees backing store
                 * immediately.
                 */
                return ttm_bo_pipeline_gutting(bo);
        }

        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
        if (ret) {
                if (ret != -ERESTARTSYS) {
                        pr_err("Failed to find memory space for buffer 0x%p eviction\n",
                               bo);
                        ttm_bo_mem_space_debug(bo, &placement);
                }
                goto out;
        }

bounce:
        ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
        if (ret == -EMULTIHOP) {
                ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
                if (ret) {
                        pr_err("Buffer eviction failed\n");
                        ttm_resource_free(bo, &evict_mem);
                        goto out;
                }
                /* try and move to final place now. */
                goto bounce;
        }
out:
        return ret;
}
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
                              const struct ttm_place *place)
{
        struct ttm_resource *res = bo->resource;
        struct ttm_device *bdev = bo->bdev;

        dma_resv_assert_held(bo->base.resv);
        if (bo->resource->mem_type == TTM_PL_SYSTEM)
                return true;

        /* Don't evict this BO if it's outside of the
         * requested placement range
         */
        return ttm_resource_intersects(bdev, res, place, bo->base.size);
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
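
/*
 * Example (illustrative sketch): a driver can point its
 * ttm_device_funcs::eviction_valuable hook at a wrapper that vetoes
 * certain BOs and otherwise falls back to this default.
 * my_bo_is_scanout() is a hypothetical helper:
 *
 *      static bool my_eviction_valuable(struct ttm_buffer_object *bo,
 *                                       const struct ttm_place *place)
 *      {
 *              if (my_bo_is_scanout(bo))
 *                      return false;
 *
 *              return ttm_bo_eviction_valuable(bo, place);
 *      }
 */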
/*
 * Check whether the target bo is allowed to be evicted or swapped out,
 * including the following cases:
 *
 * a. if the bo shares its reservation object with ctx->resv, we assume
 * that reservation objects are already locked, so we do not lock again and
 * return true directly when either the operation allows reserved eviction
 * or the target bo is already on the delayed free list;
 *
 * b. otherwise, trylock it.
 */
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
                                           struct ttm_operation_ctx *ctx,
                                           const struct ttm_place *place,
                                           bool *locked, bool *busy)
{
        bool ret = false;

        if (bo->base.resv == ctx->resv) {
                dma_resv_assert_held(bo->base.resv);
                if (ctx->allow_res_evict)
                        ret = true;
                *locked = false;
                if (busy)
                        *busy = false;
        } else {
                ret = dma_resv_trylock(bo->base.resv);
                *locked = ret;
                if (busy)
                        *busy = !ret;
        }

        if (ret && place && (bo->resource->mem_type != place->mem_type ||
                !bo->bdev->funcs->eviction_valuable(bo, place))) {
                ret = false;
                if (*locked) {
                        dma_resv_unlock(bo->base.resv);
                        *locked = false;
                }
        }

        return ret;
}
/**
 * ttm_mem_evict_wait_busy - wait for a busy BO to become available
 *
 * @busy_bo: BO which couldn't be locked with trylock
 * @ctx: operation context
 * @ticket: acquire ticket
 *
 * Try to lock a busy buffer object to avoid failing eviction.
 */
static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
                                   struct ttm_operation_ctx *ctx,
                                   struct ww_acquire_ctx *ticket)
{
        int r;

        if (!busy_bo || !ticket)
                return -EBUSY;

        if (ctx->interruptible)
                r = dma_resv_lock_interruptible(busy_bo->base.resv,
                                                ticket);
        else
                r = dma_resv_lock(busy_bo->base.resv, ticket);

        /*
         * TODO: It would be better to keep the BO locked until allocation is at
         * least tried one more time, but that would mean a much larger rework
         * of TTM.
         */
        if (!r)
                dma_resv_unlock(busy_bo->base.resv);

        return r == -EDEADLK ? -EBUSY : r;
}
int ttm_mem_evict_first(struct ttm_device *bdev,
                        struct ttm_resource_manager *man,
                        const struct ttm_place *place,
                        struct ttm_operation_ctx *ctx,
                        struct ww_acquire_ctx *ticket)
{
        struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
        struct ttm_resource_cursor cursor;
        struct ttm_resource *res;
        bool locked = false;
        int ret;

        spin_lock(&bdev->lru_lock);
        ttm_resource_manager_for_each_res(man, &cursor, res) {
                bool busy;

                if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
                                                    &locked, &busy)) {
                        if (busy && !busy_bo && ticket !=
                            dma_resv_locking_ctx(res->bo->base.resv))
                                busy_bo = res->bo;
                        continue;
                }

                if (ttm_bo_get_unless_zero(res->bo)) {
                        bo = res->bo;
                        break;
                }
                if (locked)
                        dma_resv_unlock(res->bo->base.resv);
        }

        if (!bo) {
                if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
                        busy_bo = NULL;
                spin_unlock(&bdev->lru_lock);
                ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
                if (busy_bo)
                        ttm_bo_put(busy_bo);
                return ret;
        }

        if (bo->deleted) {
                ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
                                          ctx->no_wait_gpu, locked);
                ttm_bo_put(bo);
                return ret;
        }

        spin_unlock(&bdev->lru_lock);

        ret = ttm_bo_evict(bo, ctx);
        if (locked)
                ttm_bo_unreserve(bo);
        else
                ttm_bo_move_to_lru_tail_unlocked(bo);

        ttm_bo_put(bo);
        return ret;
}
/**
 * ttm_bo_pin - Pin the buffer object.
 * @bo: The buffer object to pin
 *
 * Make sure the buffer is not evicted any more during memory pressure.
 * @bo must be unpinned again by calling ttm_bo_unpin().
 */
void ttm_bo_pin(struct ttm_buffer_object *bo)
{
        dma_resv_assert_held(bo->base.resv);
        WARN_ON_ONCE(!kref_read(&bo->kref));
        spin_lock(&bo->bdev->lru_lock);
        if (bo->resource)
                ttm_resource_del_bulk_move(bo->resource, bo);
        ++bo->pin_count;
        spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_pin);
/**
 * ttm_bo_unpin - Unpin the buffer object.
 * @bo: The buffer object to unpin
 *
 * Allows the buffer object to be evicted again during memory pressure.
 */
void ttm_bo_unpin(struct ttm_buffer_object *bo)
{
        dma_resv_assert_held(bo->base.resv);
        WARN_ON_ONCE(!kref_read(&bo->kref));
        if (WARN_ON_ONCE(!bo->pin_count))
                return;

        spin_lock(&bo->bdev->lru_lock);
        --bo->pin_count;
        if (bo->resource)
                ttm_resource_add_bulk_move(bo->resource, bo);
        spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unpin);
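
/*
 * Example (illustrative sketch): pinning a BO, e.g. for display scanout,
 * under the reservation lock. "vram_placement" and "ctx" are assumed to
 * be set up by the driver:
 *
 *      dma_resv_lock(bo->base.resv, NULL);
 *      ret = ttm_bo_validate(bo, &vram_placement, &ctx);
 *      if (!ret)
 *              ttm_bo_pin(bo);
 *      dma_resv_unlock(bo->base.resv);
 *
 * and the matching unpin once the buffer is no longer scanned out:
 *
 *      dma_resv_lock(bo->base.resv, NULL);
 *      ttm_bo_unpin(bo);
 *      dma_resv_unlock(bo->base.resv);
 */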
/*
 * Add the last move fence to the BO as kernel dependency and reserve a new
 * fence slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
                                 struct ttm_resource_manager *man,
                                 struct ttm_resource *mem,
                                 bool no_wait_gpu)
{
        struct dma_fence *fence;
        int ret;

        spin_lock(&man->move_lock);
        fence = dma_fence_get(man->move);
        spin_unlock(&man->move_lock);

        if (!fence)
                return 0;

        if (no_wait_gpu) {
                ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
                dma_fence_put(fence);
                return ret;
        }

        dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);

        ret = dma_resv_reserve_fences(bo->base.resv, 1);
        dma_fence_put(fence);
        return ret;
}
/*
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                  const struct ttm_place *place,
                                  struct ttm_resource **mem,
                                  struct ttm_operation_ctx *ctx)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource_manager *man;
        struct ww_acquire_ctx *ticket;
        int ret;

        man = ttm_manager_type(bdev, place->mem_type);
        ticket = dma_resv_locking_ctx(bo->base.resv);
        do {
                ret = ttm_resource_alloc(bo, place, mem);
                if (likely(!ret))
                        break;
                if (unlikely(ret != -ENOSPC))
                        return ret;
                ret = ttm_mem_evict_first(bdev, man, place, ctx,
                                          ticket);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);

        return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
}
/*
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                     struct ttm_placement *placement,
                     struct ttm_resource **mem,
                     struct ttm_operation_ctx *ctx)
{
        struct ttm_device *bdev = bo->bdev;
        bool type_found = false;
        int i, ret;

        ret = dma_resv_reserve_fences(bo->base.resv, 1);
        if (ret)
                return ret;

        for (i = 0; i < placement->num_placement; ++i) {
                const struct ttm_place *place = &placement->placement[i];
                struct ttm_resource_manager *man;

                man = ttm_manager_type(bdev, place->mem_type);
                if (!man || !ttm_resource_manager_used(man))
                        continue;

                type_found = true;
                ret = ttm_resource_alloc(bo, place, mem);
                if (ret == -ENOSPC)
                        continue;
                if (unlikely(ret))
                        goto error;

                ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
                if (unlikely(ret)) {
                        ttm_resource_free(bo, mem);
                        if (ret == -EBUSY)
                                continue;

                        goto error;
                }
                return 0;
        }

        for (i = 0; i < placement->num_busy_placement; ++i) {
                const struct ttm_place *place = &placement->busy_placement[i];
                struct ttm_resource_manager *man;

                man = ttm_manager_type(bdev, place->mem_type);
                if (!man || !ttm_resource_manager_used(man))
                        continue;

                type_found = true;
                ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
                if (likely(!ret))
                        return 0;

                if (ret && ret != -EBUSY)
                        goto error;
        }

        ret = -ENOSPC;
        if (!type_found) {
                pr_err(TTM_PFX "No compatible memory type found\n");
                ret = -EINVAL;
        }

error:
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
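
/*
 * Example (illustrative sketch): a two-level placement as a driver might
 * pass to ttm_bo_mem_space() or ttm_bo_validate(): prefer VRAM, but accept
 * GTT under memory pressure:
 *
 *      static const struct ttm_place vram = { .mem_type = TTM_PL_VRAM };
 *      static const struct ttm_place gtt = { .mem_type = TTM_PL_TT };
 *      struct ttm_placement placement = {
 *              .num_placement = 1,
 *              .placement = &vram,
 *              .num_busy_placement = 1,
 *              .busy_placement = &gtt,
 *      };
 */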
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                              struct ttm_placement *placement,
                              struct ttm_operation_ctx *ctx)
{
        struct ttm_resource *mem;
        struct ttm_place hop;
        int ret;

        dma_resv_assert_held(bo->base.resv);

        /*
         * Determine where to move the buffer.
         *
         * If the driver determines that the move needs an extra step, it
         * returns -EMULTIHOP and the buffer is first moved to the temporary
         * stop given in @hop; the driver is then called again to make the
         * next hop.
         */
        ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
        if (ret)
                return ret;
bounce:
        ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
        if (ret == -EMULTIHOP) {
                ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
                if (ret)
                        goto out;
                /* try and move to final place now. */
                goto bounce;
        }
out:
        if (ret)
                ttm_resource_free(bo, &mem);
        return ret;
}
int ttm_bo_validate(struct ttm_buffer_object *bo,
                    struct ttm_placement *placement,
                    struct ttm_operation_ctx *ctx)
{
        int ret;

        dma_resv_assert_held(bo->base.resv);

        /*
         * Remove the backing store if no placement is given.
         */
        if (!placement->num_placement && !placement->num_busy_placement)
                return ttm_bo_pipeline_gutting(bo);

        /*
         * Check whether we need to move the buffer.
         */
        if (!bo->resource || !ttm_resource_compat(bo->resource, placement)) {
                ret = ttm_bo_move_buffer(bo, placement, ctx);
                if (ret)
                        return ret;
        }
        /*
         * We might need to add a TTM.
         */
        if (bo->resource->mem_type == TTM_PL_SYSTEM) {
                ret = ttm_tt_create(bo, true);
                if (ret)
                        return ret;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
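
/*
 * Example (illustrative sketch): validating a BO back into the system
 * domain with an interruptible, waiting context, reservation lock held:
 *
 *      static const struct ttm_place sys = { .mem_type = TTM_PL_SYSTEM };
 *      struct ttm_placement placement = {
 *              .num_placement = 1, .placement = &sys,
 *              .num_busy_placement = 1, .busy_placement = &sys,
 *      };
 *      struct ttm_operation_ctx ctx = { .interruptible = true };
 *
 *      ret = ttm_bo_validate(bo, &placement, &ctx);
 */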
/**
 * ttm_bo_init_reserved
 *
 * @bdev: Pointer to a ttm_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for buffer object.
 * @alignment: Data alignment in pages.
 * @ctx: TTM operation context for memory allocation.
 * @sg: Scatter-gather table.
 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function, enables driver-specific objects
 * derived from a ttm_buffer_object.
 *
 * On successful return, the caller owns an object kref to @bo. The kref and
 * list_kref are usually set to 1, but note that in some situations, other
 * tasks may already be holding references to @bo as well.
 * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
 * and it is the caller's responsibility to call ttm_bo_unreserve.
 *
 * If a failure occurs, the function will call the @destroy function. Thus,
 * after a failure, dereferencing @bo is illegal and will likely cause memory
 * corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
                         enum ttm_bo_type type, struct ttm_placement *placement,
                         uint32_t alignment, struct ttm_operation_ctx *ctx,
                         struct sg_table *sg, struct dma_resv *resv,
                         void (*destroy) (struct ttm_buffer_object *))
{
        static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
        int ret;

        kref_init(&bo->kref);
        INIT_LIST_HEAD(&bo->ddestroy);
        bo->bdev = bdev;
        bo->type = type;
        bo->page_alignment = alignment;
        bo->destroy = destroy;
        bo->pin_count = 0;
        bo->sg = sg;
        bo->bulk_move = NULL;
        if (resv)
                bo->base.resv = resv;
        else
                bo->base.resv = &bo->base._resv;
        atomic_inc(&ttm_glob.bo_count);

        ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
        if (unlikely(ret)) {
                ttm_bo_put(bo);
                return ret;
        }

        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */
        if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) {
                ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
                                         PFN_UP(bo->base.size));
                if (ret)
                        goto err_put;
        }

        /* passed reservation objects should already be locked,
         * since otherwise lockdep will be angered in radeon.
         */
        if (!resv)
                WARN_ON(!dma_resv_trylock(bo->base.resv));
        else
                dma_resv_assert_held(resv);

        ret = ttm_bo_validate(bo, placement, ctx);
        if (ret)
                goto err_unlock;

        return 0;

err_unlock:
        if (!resv)
                dma_resv_unlock(bo->base.resv);

err_put:
        ttm_bo_put(bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);
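
/*
 * Example (illustrative sketch): a driver object embedding the BO, with a
 * matching @destroy callback. "struct my_bo" and my_bo_destroy() are
 * hypothetical, and bo->base.size is assumed to have been set up first
 * (e.g. via drm_gem_object_init()):
 *
 *      struct my_bo {
 *              struct ttm_buffer_object tbo;
 *      };
 *
 *      static void my_bo_destroy(struct ttm_buffer_object *tbo)
 *      {
 *              kfree(container_of(tbo, struct my_bo, tbo));
 *      }
 *
 *      ret = ttm_bo_init_reserved(bdev, &mybo->tbo, ttm_bo_type_device,
 *                                 &placement, 0, &ctx, NULL, NULL,
 *                                 my_bo_destroy);
 *      if (!ret)
 *              ttm_bo_unreserve(&mybo->tbo);
 */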
/**
 * ttm_bo_init_validate
 *
 * @bdev: Pointer to a ttm_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for buffer object.
 * @alignment: Data alignment in pages.
 * @interruptible: If needing to sleep to wait for GPU resources,
 * sleep interruptible.
 * @sg: Scatter-gather table.
 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function,
 * enables driver-specific objects derived from a ttm_buffer_object.
 *
 * On successful return, the caller owns an object kref to @bo. The kref and
 * list_kref are usually set to 1, but note that in some situations, other
 * tasks may already be holding references to @bo as well.
 *
 * If a failure occurs, the function will call the @destroy function. Thus,
 * after a failure, dereferencing @bo is illegal and will likely cause memory
 * corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */
int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
                         enum ttm_bo_type type, struct ttm_placement *placement,
                         uint32_t alignment, bool interruptible,
                         struct sg_table *sg, struct dma_resv *resv,
                         void (*destroy) (struct ttm_buffer_object *))
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        int ret;

        ret = ttm_bo_init_reserved(bdev, bo, type, placement, alignment, &ctx,
                                   sg, resv, destroy);
        if (ret)
                return ret;

        if (!resv)
                ttm_bo_unreserve(bo);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_init_validate);
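
/*
 * Example (illustrative sketch): same setup as above, but letting TTM take
 * and drop the reservation internally; on success the BO comes back
 * unreserved:
 *
 *      ret = ttm_bo_init_validate(bdev, &mybo->tbo, ttm_bo_type_device,
 *                                 &placement, 0, true, NULL, NULL,
 *                                 my_bo_destroy);
 */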
/*
 * buffer object vm functions.
 */

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
        struct ttm_device *bdev = bo->bdev;

        drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
        ttm_mem_io_free(bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
int ttm_bo_wait(struct ttm_buffer_object *bo,
                bool interruptible, bool no_wait)
{
        long timeout = 15 * HZ;

        if (no_wait) {
                if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP))
                        return 0;
                else
                        return -EBUSY;
        }

        timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
                                        interruptible, timeout);
        if (timeout < 0)
                return timeout;

        if (timeout == 0)
                return -EBUSY;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
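
/*
 * Example (illustrative sketch): blocking until all GPU work on a BO has
 * finished before CPU access, with the reservation lock held. A negative
 * return is -ERESTARTSYS when interrupted, -EBUSY on timeout:
 *
 *      ret = ttm_bo_wait(bo, true, false);
 *      if (ret)
 *              return ret;
 */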
int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
                   gfp_t gfp_flags)
{
        struct ttm_place place;
        bool locked;
        long ret;

        /*
         * While the bo may already reside in SYSTEM placement, set
         * SYSTEM as new placement to cover also the move further below.
         * The driver may use the fact that we're moving from SYSTEM
         * as an indication that we're about to swap out.
         */
        memset(&place, 0, sizeof(place));
        place.mem_type = bo->resource->mem_type;
        if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
                return -EBUSY;

        if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
            bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
            bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
            !ttm_bo_get_unless_zero(bo)) {
                if (locked)
                        dma_resv_unlock(bo->base.resv);
                return -EBUSY;
        }

        if (bo->deleted) {
                ret = ttm_bo_cleanup_refs(bo, false, false, locked);
                ttm_bo_put(bo);
                return ret == -EBUSY ? -ENOSPC : ret;
        }

        /* TODO: Cleanup the locking */
        spin_unlock(&bo->bdev->lru_lock);

        /*
         * Move to system cached
         */
        if (bo->resource->mem_type != TTM_PL_SYSTEM) {
                struct ttm_operation_ctx ctx = { false, false };
                struct ttm_resource *evict_mem;
                struct ttm_place hop;

                memset(&hop, 0, sizeof(hop));
                place.mem_type = TTM_PL_SYSTEM;
                ret = ttm_resource_alloc(bo, &place, &evict_mem);
                if (unlikely(ret))
                        goto out;

                ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
                if (unlikely(ret != 0)) {
                        WARN(ret == -EMULTIHOP,
                             "Unexpected multihop in swapout - likely driver bug.\n");
                        goto out;
                }
        }

        /*
         * Make sure BO is idle.
         */
        ret = ttm_bo_wait(bo, false, false);
        if (unlikely(ret != 0))
                goto out;

        ttm_bo_unmap_virtual(bo);

        /*
         * Swap out. Buffer will be swapped in again as soon as
         * anyone tries to access a ttm page.
         */
        if (bo->bdev->funcs->swap_notify)
                bo->bdev->funcs->swap_notify(bo);

        if (ttm_tt_is_populated(bo->ttm))
                ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
out:

        /*
         * Unreserve without putting on LRU to avoid swapping out an
         * already swapped buffer.
         */
        if (locked)
                dma_resv_unlock(bo->base.resv);
        ttm_bo_put(bo);
        return ret == -EBUSY ? -ENOSPC : ret;
}
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
        if (bo->ttm == NULL)
                return;

        ttm_tt_unpopulate(bo->bdev, bo->ttm);
        ttm_tt_destroy(bo->bdev, bo->ttm);
        bo->ttm = NULL;
}