1 // SPDX-License-Identifier: MIT
3 * Copyright © 2021 Intel Corporation
8 #include <linux/dma-buf.h>
10 #include <drm/drm_drv.h>
11 #include <drm/drm_gem_ttm_helper.h>
12 #include <drm/ttm/ttm_device.h>
13 #include <drm/ttm/ttm_placement.h>
14 #include <drm/ttm/ttm_tt.h>
15 #include <drm/xe_drm.h>
17 #include "xe_device.h"
18 #include "xe_dma_buf.h"
22 #include "xe_migrate.h"
23 #include "xe_preempt_fence.h"
24 #include "xe_res_cursor.h"
26 #include "xe_ttm_stolen_mgr.h"
29 static const struct ttm_place sys_placement_flags = {
32 .mem_type = XE_PL_SYSTEM,
36 static struct ttm_placement sys_placement = {
38 .placement = &sys_placement_flags,
39 .num_busy_placement = 1,
40 .busy_placement = &sys_placement_flags,
43 static const struct ttm_place tt_placement_flags = {
50 static struct ttm_placement tt_placement = {
52 .placement = &tt_placement_flags,
53 .num_busy_placement = 1,
54 .busy_placement = &sys_placement_flags,
57 bool mem_type_is_vram(u32 mem_type)
59 return mem_type >= XE_PL_VRAM0 && mem_type != XE_PL_STOLEN;
62 static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res)
64 return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe);
67 static bool resource_is_vram(struct ttm_resource *res)
69 return mem_type_is_vram(res->mem_type);
72 bool xe_bo_is_vram(struct xe_bo *bo)
74 return resource_is_vram(bo->ttm.resource) ||
75 resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource);
78 bool xe_bo_is_stolen(struct xe_bo *bo)
80 return bo->ttm.resource->mem_type == XE_PL_STOLEN;
83 static bool xe_bo_is_user(struct xe_bo *bo)
85 return bo->flags & XE_BO_CREATE_USER_BIT;
88 static struct xe_tile *
89 mem_type_to_tile(struct xe_device *xe, u32 mem_type)
91 XE_BUG_ON(mem_type != XE_PL_STOLEN && !mem_type_is_vram(mem_type));
93 return &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)];
97 * xe_bo_to_tile() - Get a tile from a BO's memory location
98 * @bo: The buffer object
100 * Get a tile from a BO's memory location; should only be called on BOs in VRAM.
102 * Return: xe_tile object which is closest to the BO
104 struct xe_tile *xe_bo_to_tile(struct xe_bo *bo)
106 return mem_type_to_tile(xe_bo_device(bo), bo->ttm.resource->mem_type);
109 static void try_add_system(struct xe_bo *bo, struct ttm_place *places,
110 u32 bo_flags, u32 *c)
112 if (bo_flags & XE_BO_CREATE_SYSTEM_BIT) {
113 places[*c] = (struct ttm_place) {
114 .mem_type = XE_PL_TT,
118 if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
119 bo->props.preferred_mem_type = XE_PL_TT;
123 static void add_vram(struct xe_device *xe, struct xe_bo *bo,
124 struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
126 struct xe_tile *tile = mem_type_to_tile(xe, mem_type);
127 struct ttm_place place = { .mem_type = mem_type };
128 u64 io_size = tile->mem.vram.io_size;
130 XE_BUG_ON(!tile->mem.vram.usable_size);
133 * For eviction / restore on suspend / resume, objects
134 * pinned in VRAM must be contiguous
136 if (bo_flags & (XE_BO_CREATE_PINNED_BIT |
137 XE_BO_CREATE_GGTT_BIT))
138 place.flags |= TTM_PL_FLAG_CONTIGUOUS;
140 if (io_size < tile->mem.vram.usable_size) {
141 if (bo_flags & XE_BO_NEEDS_CPU_ACCESS) {
143 place.lpfn = io_size >> PAGE_SHIFT;
145 place.flags |= TTM_PL_FLAG_TOPDOWN;
151 if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
152 bo->props.preferred_mem_type = mem_type;
155 static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
156 struct ttm_place *places, u32 bo_flags, u32 *c)
158 if (bo->props.preferred_gt == XE_GT1) {
159 if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
160 add_vram(xe, bo, places, bo_flags, XE_PL_VRAM1, c);
161 if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
162 add_vram(xe, bo, places, bo_flags, XE_PL_VRAM0, c);
164 if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
165 add_vram(xe, bo, places, bo_flags, XE_PL_VRAM0, c);
166 if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
167 add_vram(xe, bo, places, bo_flags, XE_PL_VRAM1, c);
171 static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
172 struct ttm_place *places, u32 bo_flags, u32 *c)
174 if (bo_flags & XE_BO_CREATE_STOLEN_BIT) {
175 places[*c] = (struct ttm_place) {
176 .mem_type = XE_PL_STOLEN,
177 .flags = bo_flags & (XE_BO_CREATE_PINNED_BIT |
178 XE_BO_CREATE_GGTT_BIT) ?
179 TTM_PL_FLAG_CONTIGUOUS : 0,
185 static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
188 struct ttm_place *places = bo->placements;
191 bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
193 /* The order of placements should indicate preferred location */
195 if (bo->props.preferred_mem_class == XE_MEM_REGION_CLASS_SYSMEM) {
196 try_add_system(bo, places, bo_flags, &c);
197 try_add_vram(xe, bo, places, bo_flags, &c);
199 try_add_vram(xe, bo, places, bo_flags, &c);
200 try_add_system(bo, places, bo_flags, &c);
202 try_add_stolen(xe, bo, places, bo_flags, &c);
207 bo->placement = (struct ttm_placement) {
210 .num_busy_placement = c,
211 .busy_placement = places,
217 int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
220 xe_bo_assert_held(bo);
221 return __xe_bo_placement_for_flags(xe, bo, bo_flags);
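/*
 * Illustrative outcome (sketch, not from the original source): a BO
 * created with XE_BO_CREATE_VRAM0_BIT | XE_BO_CREATE_SYSTEM_BIT and no
 * sysmem preference ends up with places[0] = VRAM0 and places[1] = TT,
 * so TTM tries VRAM0 first and falls back to system/TT under pressure.
 */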
224 static void xe_evict_flags(struct ttm_buffer_object *tbo,
225 struct ttm_placement *placement)
229 if (!xe_bo_is_xe_bo(tbo)) {
230 /* Don't handle scatter gather BOs */
231 if (tbo->type == ttm_bo_type_sg) {
232 placement->num_placement = 0;
233 placement->num_busy_placement = 0;
237 *placement = sys_placement;
242 * For xe, sg bos that are evicted to system just trigger a
243 * rebind of the sg list upon subsequent validation to XE_PL_TT.
246 bo = ttm_to_xe_bo(tbo);
247 switch (tbo->resource->mem_type) {
251 *placement = tt_placement;
255 *placement = sys_placement;
267 static int xe_tt_map_sg(struct ttm_tt *tt)
269 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
270 unsigned long num_pages = tt->num_pages;
273 XE_BUG_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL);
278 ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
280 (u64)num_pages << PAGE_SHIFT,
281 xe_sg_segment_size(xe_tt->dev),
286 xe_tt->sg = &xe_tt->sgt;
287 ret = dma_map_sgtable(xe_tt->dev, xe_tt->sg, DMA_BIDIRECTIONAL,
288 DMA_ATTR_SKIP_CPU_SYNC);
290 sg_free_table(xe_tt->sg);
298 struct sg_table *xe_bo_get_sg(struct xe_bo *bo)
300 struct ttm_tt *tt = bo->ttm.ttm;
301 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
306 static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
309 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
310 struct xe_device *xe = xe_bo_device(bo);
311 struct xe_ttm_tt *tt;
312 unsigned long extra_pages;
313 enum ttm_caching caching = ttm_cached;
316 tt = kzalloc(sizeof(*tt), GFP_KERNEL);
320 tt->dev = xe->drm.dev;
323 if (xe_bo_needs_ccs_pages(bo))
324 extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size),
328 * Display scanout is always non-coherent with the CPU cache.
330 * For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and
331 * require a CPU:WC mapping.
333 if (bo->flags & XE_BO_SCANOUT_BIT ||
334 (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_PAGETABLE))
335 caching = ttm_write_combined;
337 err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
346 static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
347 struct ttm_operation_ctx *ctx)
352 * dma-bufs are not populated with pages, and the dma-
353 * addresses are set up when moved to XE_PL_TT.
355 if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
358 err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
362 /* A follow-up may move this to xe_bo_move() when the BO is moved to XE_PL_TT */
363 err = xe_tt_map_sg(tt);
365 ttm_pool_free(&ttm_dev->pool, tt);
370 static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
372 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
374 if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
378 dma_unmap_sgtable(xe_tt->dev, xe_tt->sg,
379 DMA_BIDIRECTIONAL, 0);
380 sg_free_table(xe_tt->sg);
384 return ttm_pool_free(&ttm_dev->pool, tt);
387 static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
393 static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
394 struct ttm_resource *mem)
396 struct xe_device *xe = ttm_to_xe_device(bdev);
398 switch (mem->mem_type) {
404 struct xe_tile *tile = mem_type_to_tile(xe, mem->mem_type);
405 struct xe_ttm_vram_mgr_resource *vres =
406 to_xe_ttm_vram_mgr_resource(mem);
408 if (vres->used_visible_size < mem->size)
411 mem->bus.offset = mem->start << PAGE_SHIFT;
413 if (tile->mem.vram.mapping &&
414 mem->placement & TTM_PL_FLAG_CONTIGUOUS)
415 mem->bus.addr = (u8 *)tile->mem.vram.mapping +
418 mem->bus.offset += tile->mem.vram.io_start;
419 mem->bus.is_iomem = true;
421 #if !defined(CONFIG_X86)
422 mem->bus.caching = ttm_write_combined;
426 return xe_ttm_stolen_io_mem_reserve(xe, mem);
432 static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
433 const struct ttm_operation_ctx *ctx)
435 struct dma_resv_iter cursor;
436 struct dma_fence *fence;
437 struct drm_gpuva *gpuva;
438 struct drm_gem_object *obj = &bo->ttm.base;
439 struct drm_gpuvm_bo *vm_bo;
442 dma_resv_assert_held(bo->ttm.base.resv);
444 if (!xe_device_in_fault_mode(xe) && !list_empty(&bo->vmas)) {
445 dma_resv_iter_begin(&cursor, bo->ttm.base.resv,
446 DMA_RESV_USAGE_BOOKKEEP);
447 dma_resv_for_each_fence_unlocked(&cursor, fence)
448 dma_fence_enable_sw_signaling(fence);
449 dma_resv_iter_end(&cursor);
452 drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
453 drm_gpuvm_bo_for_each_va(gpuva, vm_bo) {
454 struct xe_vma *vma = gpuva_to_vma(gpuva);
455 struct xe_vm *vm = xe_vma_vm(vma);
457 trace_xe_vma_evict(vma);
459 if (xe_vm_in_fault_mode(vm)) {
460 /* Wait for pending binds / unbinds. */
463 if (ctx->no_wait_gpu &&
464 !dma_resv_test_signaled(bo->ttm.base.resv,
465 DMA_RESV_USAGE_BOOKKEEP))
468 timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
469 DMA_RESV_USAGE_BOOKKEEP,
471 MAX_SCHEDULE_TIMEOUT);
473 ret = xe_vm_invalidate_vma(vma);
475 } else if (!timeout) {
482 bool vm_resv_locked = false;
485 * We need to put the vma on the vm's rebind_list,
486 * but need the vm resv to do so. If we can't verify
487 * that we indeed have it locked, put the vma on the
488 * vm's notifier.rebind_list instead and scoop later.
490 if (dma_resv_trylock(xe_vm_resv(vm)))
491 vm_resv_locked = true;
492 else if (ctx->resv != xe_vm_resv(vm)) {
493 spin_lock(&vm->notifier.list_lock);
494 list_move_tail(&vma->notifier.rebind_link,
495 &vm->notifier.rebind_list);
496 spin_unlock(&vm->notifier.list_lock);
500 xe_vm_assert_held(vm);
501 if (list_empty(&vma->combined_links.rebind) &&
503 list_add_tail(&vma->combined_links.rebind,
507 dma_resv_unlock(xe_vm_resv(vm));
516 * The dma-buf map_attachment() / unmap_attachment() is hooked up here.
517 * Note that unmapping the attachment is deferred to the next
518 * map_attachment time, or to bo destroy (after idling), whichever comes first.
519 * This is to avoid syncing before unmap_attachment(), assuming that the
520 * caller relies on idling the reservation object before moving the
521 * backing store out. Should that assumption not hold, then we will be able
522 * to unconditionally call unmap_attachment() when moving out to system.
524 static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
525 struct ttm_resource *new_res)
527 struct dma_buf_attachment *attach = ttm_bo->base.import_attach;
528 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt,
533 XE_BUG_ON(!ttm_bo->ttm);
535 if (new_res->mem_type == XE_PL_SYSTEM)
539 dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
543 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
551 ttm_bo_move_null(ttm_bo, new_res);
557 * xe_bo_move_notify - Notify subsystems of a pending move
558 * @bo: The buffer object
559 * @ctx: The struct ttm_operation_ctx controlling locking and waits.
561 * This function notifies subsystems of an upcoming buffer move.
562 * Upon receiving such a notification, subsystems should schedule
563 * halting access to the underlying pages and optionally add a fence
564 * to the buffer object's dma_resv object, that signals when access is
565 * stopped. The caller will wait on all dma_resv fences before
568 * A subsystem may commence access to the object after obtaining
569 * bindings to the new backing memory under the object lock.
571 * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
572 * negative error code on error.
574 static int xe_bo_move_notify(struct xe_bo *bo,
575 const struct ttm_operation_ctx *ctx)
577 struct ttm_buffer_object *ttm_bo = &bo->ttm;
578 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
582 * If this starts to call into many components, consider
583 * using a notification chain here.
586 if (xe_bo_is_pinned(bo))
590 ret = xe_bo_trigger_rebind(xe, bo, ctx);
594 /* Don't call move_notify() for imported dma-bufs. */
595 if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach)
596 dma_buf_move_notify(ttm_bo->base.dma_buf);
601 static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
602 struct ttm_operation_ctx *ctx,
603 struct ttm_resource *new_mem,
604 struct ttm_place *hop)
606 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
607 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
608 struct ttm_resource *old_mem = ttm_bo->resource;
609 u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
610 struct ttm_tt *ttm = ttm_bo->ttm;
611 struct xe_tile *tile = NULL;
612 struct dma_fence *fence;
613 bool move_lacks_source;
618 /* Bo creation path, moving to system or TT. No clearing required. */
619 if (!old_mem && ttm) {
620 ttm_bo_move_null(ttm_bo, new_mem);
624 if (ttm_bo->type == ttm_bo_type_sg) {
625 ret = xe_bo_move_notify(bo, ctx);
627 ret = xe_bo_move_dmabuf(ttm_bo, new_mem);
631 tt_has_data = ttm && (ttm_tt_is_populated(ttm) ||
632 (ttm->page_flags & TTM_TT_FLAG_SWAPPED));
634 move_lacks_source = !mem_type_is_vram(old_mem_type) && !tt_has_data;
636 needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) ||
637 (!ttm && ttm_bo->type == ttm_bo_type_device);
639 if ((move_lacks_source && !needs_clear) ||
640 (old_mem_type == XE_PL_SYSTEM &&
641 new_mem->mem_type == XE_PL_TT)) {
642 ttm_bo_move_null(ttm_bo, new_mem);
647 * Failed multi-hop where the old_mem is still marked as
648 * TTM_PL_FLAG_TEMPORARY, should just be a dummy move.
650 if (old_mem_type == XE_PL_TT &&
651 new_mem->mem_type == XE_PL_TT) {
652 ttm_bo_move_null(ttm_bo, new_mem);
656 if (!move_lacks_source && !xe_bo_is_pinned(bo)) {
657 ret = xe_bo_move_notify(bo, ctx);
662 if (old_mem_type == XE_PL_TT &&
663 new_mem->mem_type == XE_PL_SYSTEM) {
664 long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
665 DMA_RESV_USAGE_BOOKKEEP,
667 MAX_SCHEDULE_TIMEOUT);
672 ttm_bo_move_null(ttm_bo, new_mem);
676 if (!move_lacks_source &&
677 ((old_mem_type == XE_PL_SYSTEM && resource_is_vram(new_mem)) ||
678 (mem_type_is_vram(old_mem_type) &&
679 new_mem->mem_type == XE_PL_SYSTEM))) {
682 hop->mem_type = XE_PL_TT;
683 hop->flags = TTM_PL_FLAG_TEMPORARY;
690 else if (resource_is_vram(new_mem))
691 tile = mem_type_to_tile(xe, new_mem->mem_type);
692 else if (mem_type_is_vram(old_mem_type))
693 tile = mem_type_to_tile(xe, old_mem_type);
696 XE_BUG_ON(!tile->migrate);
698 trace_xe_bo_move(bo);
699 xe_device_mem_access_get(xe);
701 if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
703 * Kernel memory that is pinned should only be moved on suspend
704 * / resume; some of the pinned memory is required for the
705 * device to resume / use the GPU to move other evicted memory
706 * (user memory) around. This likely could be optimized a bit
707 * further where we find the minimum set of pinned memory
708 * required for resume, but for simplicity doing a memcpy for all
711 ret = xe_bo_vmap(bo);
713 ret = ttm_bo_move_memcpy(ttm_bo, ctx, new_mem);
715 /* Create a new VMAP once the kernel BO is back in VRAM */
716 if (!ret && resource_is_vram(new_mem)) {
717 void *new_addr = tile->mem.vram.mapping +
718 (new_mem->start << PAGE_SHIFT);
720 if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
722 xe_device_mem_access_put(xe);
726 XE_BUG_ON(new_mem->start !=
727 bo->placements->fpfn);
729 iosys_map_set_vaddr_iomem(&bo->vmap, new_addr);
733 if (move_lacks_source)
734 fence = xe_migrate_clear(tile->migrate, bo, new_mem);
736 fence = xe_migrate_copy(tile->migrate,
737 bo, bo, old_mem, new_mem);
739 ret = PTR_ERR(fence);
740 xe_device_mem_access_put(xe);
743 if (!move_lacks_source) {
744 ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict,
747 dma_fence_wait(fence, false);
748 ttm_bo_move_null(ttm_bo, new_mem);
753 * ttm_bo_move_accel_cleanup() may blow up if
754 * bo->resource == NULL, so just attach the
755 * fence and set the new resource.
757 dma_resv_add_fence(ttm_bo->base.resv, fence,
758 DMA_RESV_USAGE_KERNEL);
759 ttm_bo_move_null(ttm_bo, new_mem);
762 dma_fence_put(fence);
765 xe_device_mem_access_put(xe);
766 trace_printk("new_mem->mem_type=%d\n", new_mem->mem_type);
774 * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
775 * @bo: The buffer object to move.
777 * On successful completion, the object memory will be moved to system memory.
778 * This function blocks until the object has been fully moved.
780 * This is needed for special handling of pinned VRAM objects during
783 * Return: 0 on success. Negative error code on failure.
785 int xe_bo_evict_pinned(struct xe_bo *bo)
787 struct ttm_place place = {
788 .mem_type = XE_PL_TT,
790 struct ttm_placement placement = {
794 struct ttm_operation_ctx ctx = {
795 .interruptible = false,
797 struct ttm_resource *new_mem;
800 xe_bo_assert_held(bo);
802 if (WARN_ON(!bo->ttm.resource))
805 if (WARN_ON(!xe_bo_is_pinned(bo)))
808 if (WARN_ON(!xe_bo_is_vram(bo)))
811 ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx);
816 bo->ttm.ttm = xe_ttm_tt_create(&bo->ttm, 0);
823 ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
827 ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
831 ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
835 dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
836 false, MAX_SCHEDULE_TIMEOUT);
841 ttm_resource_free(&bo->ttm, &new_mem);
846 * xe_bo_restore_pinned() - Restore a pinned VRAM object
847 * @bo: The buffer object to move.
849 * On successful completion, the object memory will be moved back to VRAM.
850 * This function blocks until the object has been fully moved.
852 * This is needed for special handling of pinned VRAM objects during
855 * Return: 0 on success. Negative error code on failure.
857 int xe_bo_restore_pinned(struct xe_bo *bo)
859 struct ttm_operation_ctx ctx = {
860 .interruptible = false,
862 struct ttm_resource *new_mem;
865 xe_bo_assert_held(bo);
867 if (WARN_ON(!bo->ttm.resource))
870 if (WARN_ON(!xe_bo_is_pinned(bo)))
873 if (WARN_ON(xe_bo_is_vram(bo) || !bo->ttm.ttm))
876 ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx);
880 ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
884 ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
888 ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
892 dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
893 false, MAX_SCHEDULE_TIMEOUT);
898 ttm_resource_free(&bo->ttm, &new_mem);
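/*
 * Illustrative suspend / resume use of the evict / restore pair (sketch
 * only, not code from this file; the surrounding locking context is an
 * assumption):
 *
 *	Suspend:
 *		xe_bo_lock(bo, &ww, 0, false);
 *		ret = xe_bo_evict_pinned(bo);	(VRAM -> system)
 *		xe_bo_unlock(bo, &ww);
 *
 *	Resume:
 *		xe_bo_lock(bo, &ww, 0, false);
 *		ret = xe_bo_restore_pinned(bo);	(system -> same VRAM offset)
 *		xe_bo_unlock(bo, &ww);
 */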
902 static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
903 unsigned long page_offset)
905 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
906 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
907 struct xe_tile *tile = mem_type_to_tile(xe, ttm_bo->resource->mem_type);
908 struct xe_res_cursor cursor;
910 if (ttm_bo->resource->mem_type == XE_PL_STOLEN)
911 return xe_ttm_stolen_io_offset(bo, page_offset << PAGE_SHIFT) >> PAGE_SHIFT;
913 xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
914 return (tile->mem.vram.io_start + cursor.start) >> PAGE_SHIFT;
917 static void __xe_bo_vunmap(struct xe_bo *bo);
920 * TODO: Move this function to TTM so we don't rely on how TTM does its
921 * locking, thereby abusing TTM internals.
923 static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
927 XE_WARN_ON(kref_read(&ttm_bo->kref));
930 * We can typically only race with TTM trylocking under the
931 * lru_lock, which will immediately be unlocked again since
932 * the ttm_bo refcount is zero at this point. So trylocking *should*
933 * always succeed here, as long as we hold the lru lock.
935 spin_lock(&ttm_bo->bdev->lru_lock);
936 locked = dma_resv_trylock(ttm_bo->base.resv);
937 spin_unlock(&ttm_bo->bdev->lru_lock);
943 static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
945 struct dma_resv_iter cursor;
946 struct dma_fence *fence;
947 struct dma_fence *replacement = NULL;
950 if (!xe_bo_is_xe_bo(ttm_bo))
953 bo = ttm_to_xe_bo(ttm_bo);
954 XE_WARN_ON(bo->created && kref_read(&ttm_bo->base.refcount));
957 * Corner case where TTM fails to allocate memory and this BO's resv
958 * still points to the VM's resv
960 if (ttm_bo->base.resv != &ttm_bo->base._resv)
963 if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
967 * Scrub the preempt fences if any. The unbind fence is already
968 * attached to the resv.
969 * TODO: Don't do this for external bos once we scrub them after
972 dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
973 DMA_RESV_USAGE_BOOKKEEP, fence) {
974 if (xe_fence_is_xe_preempt(fence) &&
975 !dma_fence_is_signaled(fence)) {
977 replacement = dma_fence_get_stub();
979 dma_resv_replace_fences(ttm_bo->base.resv,
982 DMA_RESV_USAGE_BOOKKEEP);
985 dma_fence_put(replacement);
987 dma_resv_unlock(ttm_bo->base.resv);
990 static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
992 if (!xe_bo_is_xe_bo(ttm_bo))
996 * Object is idle and about to be destroyed. Release the
997 * dma-buf attachment.
999 if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
1000 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm,
1001 struct xe_ttm_tt, ttm);
1003 dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg,
1010 struct ttm_device_funcs xe_ttm_funcs = {
1011 .ttm_tt_create = xe_ttm_tt_create,
1012 .ttm_tt_populate = xe_ttm_tt_populate,
1013 .ttm_tt_unpopulate = xe_ttm_tt_unpopulate,
1014 .ttm_tt_destroy = xe_ttm_tt_destroy,
1015 .evict_flags = xe_evict_flags,
1017 .io_mem_reserve = xe_ttm_io_mem_reserve,
1018 .io_mem_pfn = xe_ttm_io_mem_pfn,
1019 .release_notify = xe_ttm_bo_release_notify,
1020 .eviction_valuable = ttm_bo_eviction_valuable,
1021 .delete_mem_notify = xe_ttm_bo_delete_mem_notify,
1024 static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
1026 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
1028 if (bo->ttm.base.import_attach)
1029 drm_prime_gem_destroy(&bo->ttm.base, NULL);
1030 drm_gem_object_release(&bo->ttm.base);
1032 WARN_ON(!list_empty(&bo->vmas));
1034 if (bo->ggtt_node.size)
1035 xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo);
1037 if (bo->vm && xe_bo_is_user(bo))
1043 static void xe_gem_object_free(struct drm_gem_object *obj)
1045 /* Our BO reference counting scheme works as follows:
1047 * The gem object kref is typically used throughout the driver,
1048 * and the gem object holds a ttm_buffer_object refcount, so
1049 * that when the last gem object reference is put, which is when
1050 * we end up in this function, we put also that ttm_buffer_object
1051 * refcount. Anything using gem interfaces is then no longer
1052 * allowed to access the object in a way that requires a gem
1053 * refcount, including locking the object.
1055 * driver ttm callbacks are allowed to use the ttm_buffer_object
1056 * refcount directly if needed.
1058 __xe_bo_vunmap(gem_to_xe_bo(obj));
1059 ttm_bo_put(container_of(obj, struct ttm_buffer_object, base));
1062 static void xe_gem_object_close(struct drm_gem_object *obj,
1063 struct drm_file *file_priv)
1065 struct xe_bo *bo = gem_to_xe_bo(obj);
1067 if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) {
1068 struct ww_acquire_ctx ww;
1070 XE_WARN_ON(!xe_bo_is_user(bo));
1072 xe_bo_lock(bo, &ww, 0, false);
1073 ttm_bo_set_bulk_move(&bo->ttm, NULL);
1074 xe_bo_unlock(bo, &ww);
1078 static bool should_migrate_to_system(struct xe_bo *bo)
1080 struct xe_device *xe = xe_bo_device(bo);
1082 return xe_device_in_fault_mode(xe) && bo->props.cpu_atomic;
1085 static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
1087 struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
1088 struct drm_device *ddev = tbo->base.dev;
1092 ret = ttm_bo_vm_reserve(tbo, vmf);
1096 if (drm_dev_enter(ddev, &idx)) {
1097 struct xe_bo *bo = ttm_to_xe_bo(tbo);
1099 trace_xe_bo_cpu_fault(bo);
1101 if (should_migrate_to_system(bo)) {
1102 r = xe_bo_migrate(bo, XE_PL_TT);
1103 if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
1104 ret = VM_FAULT_NOPAGE;
1106 ret = VM_FAULT_SIGBUS;
1109 ret = ttm_bo_vm_fault_reserved(vmf,
1110 vmf->vma->vm_page_prot,
1111 TTM_BO_VM_NUM_PREFAULT);
1115 ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
1117 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
1120 dma_resv_unlock(tbo->base.resv);
1124 static const struct vm_operations_struct xe_gem_vm_ops = {
1125 .fault = xe_gem_fault,
1126 .open = ttm_bo_vm_open,
1127 .close = ttm_bo_vm_close,
1128 .access = ttm_bo_vm_access
1131 static const struct drm_gem_object_funcs xe_gem_object_funcs = {
1132 .free = xe_gem_object_free,
1133 .close = xe_gem_object_close,
1134 .mmap = drm_gem_ttm_mmap,
1135 .export = xe_gem_prime_export,
1136 .vm_ops = &xe_gem_vm_ops,
1140 * xe_bo_alloc - Allocate storage for a struct xe_bo
1142 * This function is intended to allocate storage to be used for input
1143 * to __xe_bo_create_locked(), in the case a pointer to the bo to be
1144 * created is needed before the call to __xe_bo_create_locked().
1145 * If __xe_bo_create_locked() ends up never being called, then the
1146 * storage allocated with this function needs to be freed using
1149 * Return: A pointer to an uninitialized struct xe_bo on success,
1150 * ERR_PTR(-ENOMEM) on error.
1152 struct xe_bo *xe_bo_alloc(void)
1154 struct xe_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1157 return ERR_PTR(-ENOMEM);
1163 * xe_bo_free - Free storage allocated using xe_bo_alloc()
1164 * @bo: The buffer object storage.
1166 * Refer to xe_bo_alloc() documentation for valid use-cases.
1168 void xe_bo_free(struct xe_bo *bo)
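/*
 * Illustrative two-step creation using xe_bo_alloc() (sketch only, not
 * code from this file; setup_that_needs_the_pointer() is a hypothetical
 * step and error handling is trimmed):
 *
 *	bo = xe_bo_alloc();
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	err = setup_that_needs_the_pointer(bo);
 *	if (err) {
 *		xe_bo_free(bo);	(creation was never attempted)
 *		return err;
 *	}
 *
 *	bo = __xe_bo_create_locked(xe, bo, tile, resv, bulk, size,
 *				   ttm_bo_type_kernel, flags);
 */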
1173 struct xe_bo *__xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
1174 struct xe_tile *tile, struct dma_resv *resv,
1175 struct ttm_lru_bulk_move *bulk, size_t size,
1176 enum ttm_bo_type type, u32 flags)
1178 struct ttm_operation_ctx ctx = {
1179 .interruptible = true,
1180 .no_wait_gpu = false,
1182 struct ttm_placement *placement;
1186 /* Only kernel objects should set a tile */
1187 XE_BUG_ON(tile && type != ttm_bo_type_kernel);
1189 if (XE_WARN_ON(!size))
1190 return ERR_PTR(-EINVAL);
1198 if (flags & (XE_BO_CREATE_VRAM_MASK | XE_BO_CREATE_STOLEN_BIT) &&
1199 !(flags & XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT) &&
1200 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) {
1201 size = ALIGN(size, SZ_64K);
1202 flags |= XE_BO_INTERNAL_64K;
1203 alignment = SZ_64K >> PAGE_SHIFT;
1205 alignment = SZ_4K >> PAGE_SHIFT;
1211 bo->ttm.base.funcs = &xe_gem_object_funcs;
1212 bo->props.preferred_mem_class = XE_BO_PROPS_INVALID;
1213 bo->props.preferred_gt = XE_BO_PROPS_INVALID;
1214 bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
1215 bo->ttm.priority = DRM_XE_VMA_PRIORITY_NORMAL;
1216 INIT_LIST_HEAD(&bo->vmas);
1217 INIT_LIST_HEAD(&bo->pinned_link);
1219 drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
1222 ctx.allow_res_evict = !(flags & XE_BO_CREATE_NO_RESV_EVICT);
1226 if (!(flags & XE_BO_FIXED_PLACEMENT_BIT)) {
1227 err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
1229 return ERR_PTR(err);
1232 /* Defer populating type_sg bos */
1233 placement = (type == ttm_bo_type_sg ||
1234 bo->flags & XE_BO_DEFER_BACKING) ? &sys_placement :
1236 err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type,
1237 placement, alignment,
1238 &ctx, NULL, resv, xe_ttm_bo_destroy);
1240 return ERR_PTR(err);
1244 ttm_bo_set_bulk_move(&bo->ttm, bulk);
1246 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
1251 static int __xe_bo_fixed_placement(struct xe_device *xe,
1254 u64 start, u64 end, u64 size)
1256 struct ttm_place *place = bo->placements;
1258 if (flags & (XE_BO_CREATE_USER_BIT|XE_BO_CREATE_SYSTEM_BIT))
1261 place->flags = TTM_PL_FLAG_CONTIGUOUS;
1262 place->fpfn = start >> PAGE_SHIFT;
1263 place->lpfn = end >> PAGE_SHIFT;
1265 switch (flags & (XE_BO_CREATE_STOLEN_BIT | XE_BO_CREATE_VRAM_MASK)) {
1266 case XE_BO_CREATE_VRAM0_BIT:
1267 place->mem_type = XE_PL_VRAM0;
1269 case XE_BO_CREATE_VRAM1_BIT:
1270 place->mem_type = XE_PL_VRAM1;
1272 case XE_BO_CREATE_STOLEN_BIT:
1273 place->mem_type = XE_PL_STOLEN;
1277 /* 0 or multiple of the above set */
1281 bo->placement = (struct ttm_placement) {
1284 .num_busy_placement = 1,
1285 .busy_placement = place,
1292 xe_bo_create_locked_range(struct xe_device *xe,
1293 struct xe_tile *tile, struct xe_vm *vm,
1294 size_t size, u64 start, u64 end,
1295 enum ttm_bo_type type, u32 flags)
1297 struct xe_bo *bo = NULL;
1301 xe_vm_assert_held(vm);
1303 if (start || end != ~0ULL) {
1308 flags |= XE_BO_FIXED_PLACEMENT_BIT;
1309 err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size);
1312 return ERR_PTR(err);
1316 bo = __xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
1317 vm && !xe_vm_in_fault_mode(vm) &&
1318 flags & XE_BO_CREATE_USER_BIT ?
1319 &vm->lru_bulk_move : NULL, size,
1325 * Note that instead of taking a reference on the drm_gpuvm_resv_bo(),
1326 * to ensure the shared resv doesn't disappear under the bo, the bo
1327 * will keep a reference to the vm, and avoid circular references
1328 * by having all the vm's bo references released at vm close
1331 if (vm && xe_bo_is_user(bo))
1335 if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
1336 if (!tile && flags & XE_BO_CREATE_STOLEN_BIT)
1337 tile = xe_device_get_root_tile(xe);
1341 if (flags & XE_BO_CREATE_STOLEN_BIT &&
1342 flags & XE_BO_FIXED_PLACEMENT_BIT) {
1343 err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo, start);
1345 err = xe_ggtt_insert_bo(tile->mem.ggtt, bo);
1348 goto err_unlock_put_bo;
1354 __xe_bo_unset_bulk_move(bo);
1355 xe_bo_unlock_vm_held(bo);
1357 return ERR_PTR(err);
1360 struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
1361 struct xe_vm *vm, size_t size,
1362 enum ttm_bo_type type, u32 flags)
1364 return xe_bo_create_locked_range(xe, tile, vm, size, 0, ~0ULL, type, flags);
1367 struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
1368 struct xe_vm *vm, size_t size,
1369 enum ttm_bo_type type, u32 flags)
1371 struct xe_bo *bo = xe_bo_create_locked(xe, tile, vm, size, type, flags);
1374 xe_bo_unlock_vm_held(bo);
1379 struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
1381 size_t size, u64 offset,
1382 enum ttm_bo_type type, u32 flags)
1386 u64 start = offset == ~0ull ? 0 : offset;
1387 u64 end = offset == ~0ull ? offset : start + size;
1389 if (flags & XE_BO_CREATE_STOLEN_BIT &&
1390 xe_ttm_stolen_cpu_access_needs_ggtt(xe))
1391 flags |= XE_BO_CREATE_GGTT_BIT;
1393 bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
1394 flags | XE_BO_NEEDS_CPU_ACCESS);
1398 err = xe_bo_pin(bo);
1402 err = xe_bo_vmap(bo);
1406 xe_bo_unlock_vm_held(bo);
1413 xe_bo_unlock_vm_held(bo);
1415 return ERR_PTR(err);
1418 struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
1419 struct xe_vm *vm, size_t size,
1420 enum ttm_bo_type type, u32 flags)
1422 return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags);
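/*
 * Illustrative kernel BO creation (sketch only; the size and flag
 * combination below are example values, not a requirement of this API):
 *
 *	bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K, ttm_bo_type_kernel,
 *				  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
 *				  XE_BO_CREATE_GGTT_BIT);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */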
1425 struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
1426 const void *data, size_t size,
1427 enum ttm_bo_type type, u32 flags)
1429 struct xe_bo *bo = xe_bo_create_pin_map(xe, tile, NULL,
1430 ALIGN(size, PAGE_SIZE),
1435 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);
1441 * XXX: This is in the VM bind data path, likely should calculate this once and
1442 * store, with a recalculation if the BO is moved.
1444 uint64_t vram_region_gpu_offset(struct ttm_resource *res)
1446 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
1447 struct xe_tile *tile = mem_type_to_tile(xe, res->mem_type);
1449 if (res->mem_type == XE_PL_STOLEN)
1450 return xe_ttm_stolen_gpu_offset(xe);
1452 return xe->mem.vram.base + tile->mem.vram.base;
1456 * xe_bo_pin_external - pin an external BO
1457 * @bo: buffer object to be pinned
1459 * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
1460 * BO. Unique call compared to xe_bo_pin as this function has its own set of
1461 * asserts and code to ensure evict / restore on suspend / resume.
1463 * Return: 0 on success, negative error code otherwise.
1465 int xe_bo_pin_external(struct xe_bo *bo)
1467 struct xe_device *xe = xe_bo_device(bo);
1471 XE_BUG_ON(!xe_bo_is_user(bo));
1473 if (!xe_bo_is_pinned(bo)) {
1474 err = xe_bo_validate(bo, NULL, false);
1478 if (xe_bo_is_vram(bo)) {
1479 spin_lock(&xe->pinned.lock);
1480 list_add_tail(&bo->pinned_link,
1481 &xe->pinned.external_vram);
1482 spin_unlock(&xe->pinned.lock);
1486 ttm_bo_pin(&bo->ttm);
1489 * FIXME: If we always use the reserve / unreserve functions for locking
1490 * we do not need this.
1492 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
1497 int xe_bo_pin(struct xe_bo *bo)
1499 struct xe_device *xe = xe_bo_device(bo);
1502 /* We currently don't expect user BOs to be pinned */
1503 XE_BUG_ON(xe_bo_is_user(bo));
1505 /* Pinned object must be in GGTT or have pinned flag */
1506 XE_BUG_ON(!(bo->flags & (XE_BO_CREATE_PINNED_BIT |
1507 XE_BO_CREATE_GGTT_BIT)));
1510 * No reason we can't support pinning imported dma-bufs; we just don't
1511 * expect to pin an imported dma-buf.
1513 XE_BUG_ON(bo->ttm.base.import_attach);
1515 /* We only expect at most 1 pin */
1516 XE_BUG_ON(xe_bo_is_pinned(bo));
1518 err = xe_bo_validate(bo, NULL, false);
1523 * For pinned objects on DGFX, which are also in VRAM, we expect
1524 * these to be in contiguous VRAM memory. This is required for eviction /
1525 * restore during suspend / resume (force restore to the same physical address).
1527 if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
1528 bo->flags & XE_BO_INTERNAL_TEST)) {
1529 struct ttm_place *place = &(bo->placements[0]);
1532 if (mem_type_is_vram(place->mem_type)) {
1533 XE_BUG_ON(!(place->flags & TTM_PL_FLAG_CONTIGUOUS));
1535 place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE, &vram) -
1536 vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
1537 place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
1539 spin_lock(&xe->pinned.lock);
1540 list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
1541 spin_unlock(&xe->pinned.lock);
1545 ttm_bo_pin(&bo->ttm);
1548 * FIXME: If we always use the reserve / unreserve functions for locking
1549 * we do not need this.
1551 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
1557 * xe_bo_unpin_external - unpin an external BO
1558 * @bo: buffer object to be unpinned
1560 * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD)
1561 * BO. Unique call compared to xe_bo_unpin as this function has its own set of
1562 * asserts and code to ensure evict / restore on suspend / resume.
1566 void xe_bo_unpin_external(struct xe_bo *bo)
1568 struct xe_device *xe = xe_bo_device(bo);
1571 XE_BUG_ON(!xe_bo_is_pinned(bo));
1572 XE_BUG_ON(!xe_bo_is_user(bo));
1574 if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link)) {
1575 spin_lock(&xe->pinned.lock);
1576 list_del_init(&bo->pinned_link);
1577 spin_unlock(&xe->pinned.lock);
1580 ttm_bo_unpin(&bo->ttm);
1583 * FIXME: If we always use the reserve / unreserve functions for locking
1584 * we do not need this.
1586 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
1589 void xe_bo_unpin(struct xe_bo *bo)
1591 struct xe_device *xe = xe_bo_device(bo);
1593 XE_BUG_ON(bo->ttm.base.import_attach);
1594 XE_BUG_ON(!xe_bo_is_pinned(bo));
1596 if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
1597 bo->flags & XE_BO_INTERNAL_TEST)) {
1598 struct ttm_place *place = &(bo->placements[0]);
1600 if (mem_type_is_vram(place->mem_type)) {
1601 XE_BUG_ON(list_empty(&bo->pinned_link));
1603 spin_lock(&xe->pinned.lock);
1604 list_del_init(&bo->pinned_link);
1605 spin_unlock(&xe->pinned.lock);
1609 ttm_bo_unpin(&bo->ttm);
1613 * xe_bo_validate() - Make sure the bo is in an allowed placement
1615 * @vm: Pointer to the vm the bo shares a locked dma_resv object with, or
1616 * NULL. Used together with @allow_res_evict.
1617 * @allow_res_evict: Whether it's allowed to evict bos sharing @vm's
1618 * reservation object.
1620 * Make sure the bo is in allowed placement, migrating it if necessary. If
1621 * needed, other bos will be evicted. If bos selected for eviction share
1622 * the @vm's reservation object, they can be evicted iff @allow_res_evict is
1623 * set to true, otherwise they will be bypassed.
1625 * Return: 0 on success, negative error code on failure. May return
1626 * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
1628 int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
1630 struct ttm_operation_ctx ctx = {
1631 .interruptible = true,
1632 .no_wait_gpu = false,
1636 lockdep_assert_held(&vm->lock);
1637 xe_vm_assert_held(vm);
1639 ctx.allow_res_evict = allow_res_evict;
1640 ctx.resv = xe_vm_resv(vm);
1643 return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
1646 bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo)
1648 if (bo->destroy == &xe_ttm_bo_destroy)
1655 * Resolve a BO address. There is no assert to check if the proper lock is held,
1656 * so it should only be used in cases where it is not fatal to get the wrong
1657 * address, such as printing debug information, but not in cases where memory is
1658 * written based on this result.
1660 dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset,
1661 size_t page_size, bool *is_vram)
1663 struct xe_res_cursor cur;
1666 XE_BUG_ON(page_size > PAGE_SIZE);
1667 page = offset >> PAGE_SHIFT;
1668 offset &= (PAGE_SIZE - 1);
1670 *is_vram = xe_bo_is_vram(bo);
1672 if (!*is_vram && !xe_bo_is_stolen(bo)) {
1673 XE_BUG_ON(!bo->ttm.ttm);
1675 xe_res_first_sg(xe_bo_get_sg(bo), page << PAGE_SHIFT,
1677 return xe_res_dma(&cur) + offset;
1679 struct xe_res_cursor cur;
1681 xe_res_first(bo->ttm.resource, page << PAGE_SHIFT,
1683 return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource);
1687 dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset,
1688 size_t page_size, bool *is_vram)
1690 if (!READ_ONCE(bo->ttm.pin_count))
1691 xe_bo_assert_held(bo);
1692 return __xe_bo_addr(bo, offset, page_size, is_vram);
1695 int xe_bo_vmap(struct xe_bo *bo)
1701 xe_bo_assert_held(bo);
1703 if (!(bo->flags & XE_BO_NEEDS_CPU_ACCESS))
1706 if (!iosys_map_is_null(&bo->vmap))
1710 * We use this more or less deprecated interface for now since
1711 * ttm_bo_vmap() doesn't offer the optimization of kmapping
1712 * single page bos, which is done here.
1713 * TODO: Fix up ttm_bo_vmap to do that, or fix up ttm_bo_kmap
1714 * to use struct iosys_map.
1716 ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap);
1720 virtual = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
1722 iosys_map_set_vaddr_iomem(&bo->vmap, (void __iomem *)virtual);
1724 iosys_map_set_vaddr(&bo->vmap, virtual);
1729 static void __xe_bo_vunmap(struct xe_bo *bo)
1731 if (!iosys_map_is_null(&bo->vmap)) {
1732 iosys_map_clear(&bo->vmap);
1733 ttm_bo_kunmap(&bo->kmap);
1737 void xe_bo_vunmap(struct xe_bo *bo)
1739 xe_bo_assert_held(bo);
1743 int xe_gem_create_ioctl(struct drm_device *dev, void *data,
1744 struct drm_file *file)
1746 struct xe_device *xe = to_xe_device(dev);
1747 struct xe_file *xef = to_xe_file(file);
1748 struct drm_xe_gem_create *args = data;
1749 struct ww_acquire_ctx ww;
1750 struct xe_vm *vm = NULL;
1752 unsigned int bo_flags = XE_BO_CREATE_USER_BIT;
1756 if (XE_IOCTL_DBG(xe, args->extensions) || XE_IOCTL_DBG(xe, args->pad) ||
1757 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1760 if (XE_IOCTL_DBG(xe, args->flags &
1761 ~(XE_GEM_CREATE_FLAG_DEFER_BACKING |
1762 XE_GEM_CREATE_FLAG_SCANOUT |
1763 xe->info.mem_region_mask)))
1766 /* at least one memory type must be specified */
1767 if (XE_IOCTL_DBG(xe, !(args->flags & xe->info.mem_region_mask)))
1770 if (XE_IOCTL_DBG(xe, args->handle))
1773 if (XE_IOCTL_DBG(xe, !args->size))
1776 if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX))
1779 if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK))
1783 vm = xe_vm_lookup(xef, args->vm_id);
1784 if (XE_IOCTL_DBG(xe, !vm))
1786 err = xe_vm_lock(vm, &ww, 0, true);
1793 if (args->flags & XE_GEM_CREATE_FLAG_DEFER_BACKING)
1794 bo_flags |= XE_BO_DEFER_BACKING;
1796 if (args->flags & XE_GEM_CREATE_FLAG_SCANOUT)
1797 bo_flags |= XE_BO_SCANOUT_BIT;
1799 bo_flags |= args->flags << (ffs(XE_BO_CREATE_SYSTEM_BIT) - 1);
1800 bo = xe_bo_create(xe, NULL, vm, args->size, ttm_bo_type_device,
1807 err = drm_gem_handle_create(file, &bo->ttm.base, &handle);
1811 args->handle = handle;
1815 if (vm && !xe_vm_in_fault_mode(vm))
1816 __xe_bo_unset_bulk_move(bo);
1821 xe_vm_unlock(vm, &ww);
1827 int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
1828 struct drm_file *file)
1830 struct xe_device *xe = to_xe_device(dev);
1831 struct drm_xe_gem_mmap_offset *args = data;
1832 struct drm_gem_object *gem_obj;
1834 if (XE_IOCTL_DBG(xe, args->extensions) ||
1835 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1838 if (XE_IOCTL_DBG(xe, args->flags))
1841 gem_obj = drm_gem_object_lookup(file, args->handle);
1842 if (XE_IOCTL_DBG(xe, !gem_obj))
1845 /* The mmap offset was set up at BO allocation time. */
1846 args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
1848 xe_bo_put(gem_to_xe_bo(gem_obj));
1852 int xe_bo_lock(struct xe_bo *bo, struct ww_acquire_ctx *ww,
1853 int num_resv, bool intr)
1855 struct ttm_validate_buffer tv_bo;
1861 tv_bo.num_shared = num_resv;
1862 tv_bo.bo = &bo->ttm;
1863 list_add_tail(&tv_bo.head, &objs);
1865 return ttm_eu_reserve_buffers(ww, &objs, intr, &dups);
1868 void xe_bo_unlock(struct xe_bo *bo, struct ww_acquire_ctx *ww)
1870 dma_resv_unlock(bo->ttm.base.resv);
1871 ww_acquire_fini(ww);
1875 * xe_bo_can_migrate - Whether a buffer object likely can be migrated
1876 * @bo: The buffer object to migrate
1877 * @mem_type: The TTM memory type intended to migrate to
1879 * Check whether the buffer object supports migration to the
1880 * given memory type. Note that pinning may affect the ability to migrate as
1881 * returned by this function.
1883 * This function is primarily intended as a helper for checking the
1884 * possibility to migrate buffer objects and can be called without
1885 * the object lock held.
1887 * Return: true if migration is possible, false otherwise.
1889 bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type)
1891 unsigned int cur_place;
1893 if (bo->ttm.type == ttm_bo_type_kernel)
1896 if (bo->ttm.type == ttm_bo_type_sg)
1899 for (cur_place = 0; cur_place < bo->placement.num_placement;
1901 if (bo->placements[cur_place].mem_type == mem_type)
1908 static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
1910 memset(place, 0, sizeof(*place));
1911 place->mem_type = mem_type;
1915 * xe_bo_migrate - Migrate an object to the desired region id
1916 * @bo: The buffer object to migrate.
1917 * @mem_type: The TTM region type to migrate to.
1919 * Attempt to migrate the buffer object to the desired memory region. The
1920 * buffer object may not be pinned, and must be locked.
1921 * On successful completion, the object memory type will be updated,
1922 * but an async migration task may not have completed yet, and to
1923 * accomplish that, the object's kernel fences must be signaled with
1924 * the object lock held.
1926 * Return: 0 on success. Negative error code on failure. In particular may
1927 * return -EINTR or -ERESTARTSYS if signal pending.
1929 int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
1931 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
1932 struct ttm_operation_ctx ctx = {
1933 .interruptible = true,
1934 .no_wait_gpu = false,
1936 struct ttm_placement placement;
1937 struct ttm_place requested;
1939 xe_bo_assert_held(bo);
1941 if (bo->ttm.resource->mem_type == mem_type)
1944 if (xe_bo_is_pinned(bo))
1947 if (!xe_bo_can_migrate(bo, mem_type))
1950 xe_place_from_ttm_type(mem_type, &requested);
1951 placement.num_placement = 1;
1952 placement.num_busy_placement = 1;
1953 placement.placement = &requested;
1954 placement.busy_placement = &requested;
1957 * Stolen needs to be handled like below VRAM handling if we ever need
1960 drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN);
1962 if (mem_type_is_vram(mem_type)) {
1965 add_vram(xe, bo, &requested, bo->flags, mem_type, &c);
1968 return ttm_bo_validate(&bo->ttm, &placement, &ctx);
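/*
 * Illustrative migration to XE_PL_TT with a blocking wait for the
 * migration fences (sketch only; the calling context is an assumption):
 *
 *	xe_bo_lock(bo, &ww, 0, true);
 *	err = xe_bo_migrate(bo, XE_PL_TT);
 *	if (!err)
 *		dma_resv_wait_timeout(bo->ttm.base.resv,
 *				      DMA_RESV_USAGE_KERNEL, false,
 *				      MAX_SCHEDULE_TIMEOUT);
 *	xe_bo_unlock(bo, &ww);
 */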
1972 * xe_bo_evict - Evict an object to evict placement
1973 * @bo: The buffer object to migrate.
1974 * @force_alloc: Set force_alloc in ttm_operation_ctx
1976 * On successful completion, the object memory will be moved to evict
1977 * placement. This function blocks until the object has been fully moved.
1979 * Return: 0 on success. Negative error code on failure.
1981 int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
1983 struct ttm_operation_ctx ctx = {
1984 .interruptible = false,
1985 .no_wait_gpu = false,
1986 .force_alloc = force_alloc,
1988 struct ttm_placement placement;
1991 xe_evict_flags(&bo->ttm, &placement);
1992 ret = ttm_bo_validate(&bo->ttm, &placement, &ctx);
1996 dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
1997 false, MAX_SCHEDULE_TIMEOUT);
2003 * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when
2004 * placed in system memory.
2007 * If a bo has an allowable placement in XE_PL_TT memory, it can't use
2008 * flat CCS compression, because the GPU then has no way to access the
2009 * CCS metadata using relevant commands. For the opposite case, we need to
2010 * allocate storage for the CCS metadata when the BO is not resident in
2013 * Return: true if extra pages need to be allocated, false otherwise.
2015 bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
2017 return bo->ttm.type == ttm_bo_type_device &&
2018 !(bo->flags & XE_BO_CREATE_SYSTEM_BIT) &&
2019 (bo->flags & XE_BO_CREATE_VRAM_MASK);
2023 * __xe_bo_release_dummy() - Dummy kref release function
2024 * @kref: The embedded struct kref.
2026 * Dummy release function for xe_bo_put_deferred(). Keep off.
2028 void __xe_bo_release_dummy(struct kref *kref)
2033 * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred().
2034 * @deferred: The lockless list used for the call to xe_bo_put_deferred().
2036 * Puts all bos whose put was deferred by xe_bo_put_deferred().
2037 * The @deferred list can be either an onstack local list or a global
2038 * shared list used by a workqueue.
2040 void xe_bo_put_commit(struct llist_head *deferred)
2042 struct llist_node *freed;
2043 struct xe_bo *bo, *next;
2048 freed = llist_del_all(deferred);
2052 llist_for_each_entry_safe(bo, next, freed, freed)
2053 drm_gem_object_free(&bo->ttm.base.refcount);
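/*
 * Illustrative deferred put (sketch only; the loop context is an
 * assumption). xe_bo_put_deferred() only queues the final free, which is
 * then performed for all queued bos by xe_bo_put_commit():
 *
 *	LLIST_HEAD(deferred);
 *
 *	list_for_each_entry(bo, &some_list, link)	(hypothetical list)
 *		xe_bo_put_deferred(bo, &deferred);
 *	xe_bo_put_commit(&deferred);
 */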
2057 * xe_bo_dumb_create - Create a dumb bo as backing for a fb
2062 * See dumb_create() hook in include/drm/drm_drv.h
2066 int xe_bo_dumb_create(struct drm_file *file_priv,
2067 struct drm_device *dev,
2068 struct drm_mode_create_dumb *args)
2070 struct xe_device *xe = to_xe_device(dev);
2073 int cpp = DIV_ROUND_UP(args->bpp, 8);
2075 u32 page_size = max_t(u32, PAGE_SIZE,
2076 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K);
2078 args->pitch = ALIGN(args->width * cpp, 64);
2079 args->size = ALIGN(mul_u32_u32(args->pitch, args->height),
2082 bo = xe_bo_create(xe, NULL, NULL, args->size, ttm_bo_type_device,
2083 XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
2084 XE_BO_CREATE_USER_BIT | XE_BO_SCANOUT_BIT);
2088 err = drm_gem_handle_create(file_priv, &bo->ttm.base, &handle);
2089 /* drop reference from allocate - handle holds it now */
2090 drm_gem_object_put(&bo->ttm.base);
2092 args->handle = handle;
2096 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
2097 #include "tests/xe_bo.c"