1 // SPDX-License-Identifier: MIT
3 * Copyright © 2020 Intel Corporation
6 #include "xe_migrate.h"
8 #include <linux/bitfield.h>
9 #include <linux/sizes.h>
11 #include <drm/drm_managed.h>
12 #include <drm/ttm/ttm_tt.h>
13 #include <drm/xe_drm.h>
15 #include <generated/xe_wa_oob.h>
17 #include "instructions/xe_mi_commands.h"
18 #include "regs/xe_gpu_commands.h"
19 #include "tests/xe_test.h"
20 #include "xe_assert.h"
23 #include "xe_exec_queue.h"
26 #include "xe_hw_engine.h"
31 #include "xe_res_cursor.h"
32 #include "xe_sched_job.h"
39 * struct xe_migrate - migrate context.
42 /** @q: Default exec queue used for migration */
43 struct xe_exec_queue *q;
44 /** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
46 /** @job_mutex: Timeline mutex for @q. */
47 struct mutex job_mutex;
48 /** @pt_bo: Page-table buffer object. */
50 /** @batch_base_ofs: VM offset of the migration batch buffer */
52 /** @usm_batch_base_ofs: VM offset of the usm batch buffer */
53 u64 usm_batch_base_ofs;
54 /** @cleared_mem_ofs: VM offset of the NULL mapping used as the zero source when clearing CCS metadata. */
57 * @fence: dma-fence representing the last migration job batch.
58 * Protected by @job_mutex.
60 struct dma_fence *fence;
62 * @vm_update_sa: For integrated, used to suballocate page-tables
65 struct drm_suballoc_manager vm_update_sa;
66 /** @min_chunk_size: For dgfx, minimum chunk size used when splitting migration blits */
70 #define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */
71 #define MAX_CCS_LIMITED_TRANSFER SZ_4M /* XE_PAGE_SIZE * (FIELD_MAX(XE2_CCS_SIZE_MASK) + 1) */
72 #define NUM_KERNEL_PDE 17
73 #define NUM_PT_SLOTS 32
74 #define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
75 #define MAX_NUM_PTE 512
78 * Although MI_STORE_DATA_IMM's "length" field is 10 bits, 0x3FE is the largest
79 * legal value accepted. Since that instruction field is always stored in
80 * (val-2) format, this translates to 0x400 dwords for the true maximum length
81 * of the instruction. Subtracting the instruction header (1 dword) and
82 * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values.
84 #define MAX_PTE_PER_SDI 0x1FE
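/*
 * Worked example of the arithmetic above: the encoded length 0x3FE + 2 =
 * 0x400 total dwords; 0x400 - 1 (header) - 2 (address) = 0x3FD dwords of
 * payload, i.e. 0x3FD / 2 = 0x1FE qwords, so at most 510 PTEs can be
 * written per MI_STORE_DATA_IMM.
 */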
87 * xe_tile_migrate_engine() - Get this tile's migrate engine.
90 * Returns the default migrate engine of this tile.
91 * TODO: Perhaps this function is slightly misplaced, and even unneeded?
93 * Return: The default migrate engine
95 struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile)
97 return tile->migrate->q;
100 static void xe_migrate_fini(struct drm_device *dev, void *arg)
102 struct xe_migrate *m = arg;
104 xe_vm_lock(m->q->vm, false);
105 xe_bo_unpin(m->pt_bo);
106 xe_vm_unlock(m->q->vm);
108 dma_fence_put(m->fence);
110 drm_suballoc_manager_fini(&m->vm_update_sa);
111 mutex_destroy(&m->job_mutex);
112 xe_vm_close_and_put(m->q->vm);
113 xe_exec_queue_put(m->q);
116 static u64 xe_migrate_vm_addr(u64 slot, u32 level)
118 XE_WARN_ON(slot >= NUM_PT_SLOTS);
120 /* First slot is reserved for mapping of PT bo and bb, start from 1 */
121 return (slot + 1ULL) << xe_pt_shift(level + 1);
124 static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr)
127 * Remove the DPA to get a correct offset into identity table for the
130 addr -= xe->mem.vram.dpa_base;
131 return addr + (256ULL << xe_pt_shift(2));
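/*
 * Illustrative example, assuming the usual 4 KiB paging geometry where
 * xe_pt_shift(2) == 30: a VRAM page at device physical address dpa becomes
 * visible at GPU VA (dpa - dpa_base) + (256ULL << 30), i.e. inside the
 * 1 GiB-page identity map that xe_migrate_prepare_vm() builds at the
 * 256 GiB offset.
 */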
134 static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
137 struct xe_device *xe = tile_to_xe(tile);
138 u16 pat_index = xe->pat.idx[XE_CACHE_WB];
140 u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
141 u32 map_ofs, level, i;
142 struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
145 /* Can't bump NUM_PT_SLOTS too high */
146 BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE);
147 /* Must be a multiple of 64K to support all platforms */
148 BUILD_BUG_ON(NUM_PT_SLOTS * XE_PAGE_SIZE % SZ_64K);
149 /* And one slot reserved for the 4KiB page table updates */
150 BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
152 /* Need to be sure everything fits in the first PT, or create more */
153 xe_tile_assert(tile, m->batch_base_ofs + batch->size < SZ_2M);
155 bo = xe_bo_create_pin_map(vm->xe, tile, vm,
156 num_entries * XE_PAGE_SIZE,
158 XE_BO_CREATE_VRAM_IF_DGFX(tile) |
159 XE_BO_CREATE_PINNED_BIT);
163 entry = vm->pt_ops->pde_encode_bo(bo, bo->size - XE_PAGE_SIZE, pat_index);
164 xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
166 map_ofs = (num_entries - num_level) * XE_PAGE_SIZE;
168 /* Map the entire BO in our level 0 pt */
169 for (i = 0, level = 0; i < num_entries; level++) {
170 entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE,
173 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
175 if (vm->flags & XE_VM_FLAG_64K)
182 /* Write out batch too */
183 m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
184 for (i = 0; i < batch->size;
185 i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
187 entry = vm->pt_ops->pte_encode_bo(batch, i,
190 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
194 if (xe->info.has_usm) {
195 xe_tile_assert(tile, batch->size == SZ_1M);
197 batch = tile->primary_gt->usm.bb_pool->bo;
198 m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
199 xe_tile_assert(tile, batch->size == SZ_512K);
201 for (i = 0; i < batch->size;
202 i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
204 entry = vm->pt_ops->pte_encode_bo(batch, i,
207 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
213 u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
215 m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
217 if (xe->info.has_usm) {
218 batch = tile->primary_gt->usm.bb_pool->bo;
219 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
220 m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
224 for (level = 1; level < num_level; level++) {
227 if (vm->flags & XE_VM_FLAG_64K && level == 1)
230 entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
231 XE_PAGE_SIZE, pat_index);
232 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
236 /* Write PDEs that point to our BO. */
237 for (i = 0; i < num_entries - num_level; i++) {
238 entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE,
241 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
242 (i + 1) * 8, u64, entry);
245 /* Set up a 1GiB NULL mapping at 255GiB offset. */
247 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64,
248 vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0)
250 m->cleared_mem_ofs = (255ULL << xe_pt_shift(level));
252 /* Identity map the entire vram at 256GiB offset */
257 ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
258 flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
262 * Use 1GiB pages; it doesn't matter that the physical amount of
263 * VRAM may be smaller, since the excess range is never accessed.
265 for (pos = xe->mem.vram.dpa_base;
266 pos < xe->mem.vram.actual_physical_size + xe->mem.vram.dpa_base;
267 pos += SZ_1G, ofs += 8)
268 xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
272 * Example layout created above, with root level = 3:
273 * [PT0...PT7]: kernel PTs for copy/clear; 64 KiB or 4 KiB PTEs
274 * [PT8]: Kernel PT for VM_BIND, 4 KiB PTEs
275 * [PT9...PT28]: Userspace PTs for VM_BIND, 4 KiB PTEs
276 * [PT29 = PDE 0] [PT30 = PDE 1] [PT31 = PDE 2]
278 * This makes the lowest part of the VM point to the pagetables.
279 * Hence the lowest 2M in the VM points to itself; with a few writes
280 * and flushes, other parts of the VM can be used for copying and clearing.
283 * For performance, the kernel reserves PDEs, so about 20 are left
284 * for async VM updates.
286 * To make this easier to work with, each scratch PT is put in slot (1 + PT #)
287 * everywhere; this allows lockless updates of scratch pages by using
288 * different addresses in the VM.
290 #define NUM_VMUSA_UNIT_PER_PAGE 32
291 #define VM_SA_UPDATE_UNIT_SIZE (XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
292 #define NUM_VMUSA_WRITES_PER_UNIT (VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
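/*
 * Worked numbers, assuming XE_PAGE_SIZE == SZ_4K: each suballocation
 * unit is 4096 / 32 = 128 bytes, which holds 128 / 8 = 16 qword PTE
 * writes.
 */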
293 drm_suballoc_manager_init(&m->vm_update_sa,
294 (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
295 NUM_VMUSA_UNIT_PER_PAGE, 0);
302 * Due to workaround 16017236439, odd instance hardware copy engines are
303 * faster than even instance ones.
304 * This function returns the mask of all fast copy engines plus the
305 * reserved copy engine, to be used as the logical mask for the migrate engine.
306 * Including the reserved copy engine is required to avoid deadlocks caused by
307 * migrate jobs servicing faults getting stuck behind the job that faulted.
309 static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
311 u32 logical_mask = 0;
312 struct xe_hw_engine *hwe;
313 enum xe_hw_engine_id id;
315 for_each_hw_engine(hwe, gt, id) {
316 if (hwe->class != XE_ENGINE_CLASS_COPY)
319 if (!XE_WA(gt, 16017236439) ||
320 xe_gt_is_usm_hwe(gt, hwe) || hwe->instance & 1)
321 logical_mask |= BIT(hwe->logical_instance);
328 * xe_migrate_init() - Initialize a migrate context
329 * @tile: Back-pointer to the tile we're initializing for.
331 * Return: Pointer to a migrate context on success. Error pointer on error.
333 struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
335 struct xe_device *xe = tile_to_xe(tile);
336 struct xe_gt *primary_gt = tile->primary_gt;
337 struct xe_migrate *m;
341 m = drmm_kzalloc(&xe->drm, sizeof(*m), GFP_KERNEL);
343 return ERR_PTR(-ENOMEM);
347 /* Special layout, prepared below.. */
348 vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
349 XE_VM_FLAG_SET_TILE_ID(tile));
353 xe_vm_lock(vm, false);
354 err = xe_migrate_prepare_vm(tile, m, vm);
357 xe_vm_close_and_put(vm);
361 if (xe->info.has_usm) {
362 struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
363 XE_ENGINE_CLASS_COPY,
364 primary_gt->usm.reserved_bcs_instance,
366 u32 logical_mask = xe_migrate_usm_logical_mask(primary_gt);
368 if (!hwe || !logical_mask)
369 return ERR_PTR(-EINVAL);
371 m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
372 EXEC_QUEUE_FLAG_KERNEL |
373 EXEC_QUEUE_FLAG_PERMANENT |
374 EXEC_QUEUE_FLAG_HIGH_PRIORITY, 0);
376 m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
377 XE_ENGINE_CLASS_COPY,
378 EXEC_QUEUE_FLAG_KERNEL |
379 EXEC_QUEUE_FLAG_PERMANENT);
382 xe_vm_close_and_put(vm);
383 return ERR_CAST(m->q);
386 mutex_init(&m->job_mutex);
388 err = drmm_add_action_or_reset(&xe->drm, xe_migrate_fini, m);
393 if (xe_device_has_flat_ccs(xe))
394 /* min chunk size corresponds to 4K of CCS Metadata */
395 m->min_chunk_size = SZ_4K * SZ_64K /
396 xe_device_ccs_bytes(xe, SZ_64K);
398 /* Somewhat arbitrary to avoid a huge amount of blits */
399 m->min_chunk_size = SZ_64K;
400 m->min_chunk_size = roundup_pow_of_two(m->min_chunk_size);
401 drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n",
402 (unsigned long long)m->min_chunk_size);
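/*
 * Worked example, assuming for illustration one byte of CCS metadata per
 * 256 bytes of main memory: xe_device_ccs_bytes(xe, SZ_64K) == 256, so
 * min_chunk_size = SZ_4K * SZ_64K / 256 = 1 MiB, i.e. the smallest chunk
 * whose CCS metadata fills 4K; roundup_pow_of_two() then leaves it
 * unchanged.
 */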
408 static u64 max_mem_transfer_per_pass(struct xe_device *xe)
410 if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
411 return MAX_CCS_LIMITED_TRANSFER;
413 return MAX_PREEMPTDISABLE_TRANSFER;
416 static u64 xe_migrate_res_sizes(struct xe_migrate *m, struct xe_res_cursor *cur)
418 struct xe_device *xe = tile_to_xe(m->tile);
419 u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining);
421 if (mem_type_is_vram(cur->mem_type)) {
423 * For VRAM we want to blit in chunks with sizes aligned to
424 * min_chunk_size so that the offset to the CCS metadata stays
425 * page-aligned. The last chunk may be smaller.
427 * Another constraint is that we need to limit the blit to
428 * the VRAM block size, unless size is smaller than
431 u64 chunk = max_t(u64, cur->size, m->min_chunk_size);
433 size = min_t(u64, size, chunk);
434 if (size > m->min_chunk_size)
435 size = round_down(size, m->min_chunk_size);
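/*
 * Worked example, assuming min_chunk_size == 1 MiB: with 8 MiB still to
 * transfer but only a 3.5 MiB contiguous VRAM block under the cursor,
 * chunk = max(3.5M, 1M) = 3.5 MiB and size = min(8M, 3.5M) = 3.5 MiB,
 * which is then rounded down to 3 MiB so the next chunk's CCS metadata
 * offset stays page-aligned.
 */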
441 static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
443 /* If the chunk is not fragmented, allow identity map. */
444 return cur->size >= size;
447 static u32 pte_update_size(struct xe_migrate *m,
449 struct ttm_resource *res,
450 struct xe_res_cursor *cur,
451 u64 *L0, u64 *L0_ofs, u32 *L0_pt,
452 u32 cmd_size, u32 pt_ofs, u32 avail_pts)
457 if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
458 /* Offset into identity map. */
459 *L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
460 cur->start + vram_region_gpu_offset(res));
463 /* Clip L0 to available size */
464 u64 size = min(*L0, (u64)avail_pts * SZ_2M);
465 u32 num_4k_pages = (size + XE_PAGE_SIZE - 1) >> XE_PTE_SHIFT;
468 *L0_ofs = xe_migrate_vm_addr(pt_ofs, 0);
470 /* MI_STORE_DATA_IMM */
471 cmds += 3 * DIV_ROUND_UP(num_4k_pages, MAX_PTE_PER_SDI);
474 cmds += num_4k_pages * 2;
476 /* Each chunk has a single blit command */
483 static void emit_pte(struct xe_migrate *m,
484 struct xe_bb *bb, u32 at_pt,
485 bool is_vram, bool is_comp_pte,
486 struct xe_res_cursor *cur,
487 u32 size, struct ttm_resource *res)
489 struct xe_device *xe = tile_to_xe(m->tile);
490 struct xe_vm *vm = m->q->vm;
493 u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
496 /* Indirect access needs compression enabled uncached PAT index */
497 if (GRAPHICS_VERx100(xe) >= 2000)
498 pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
499 xe->pat.idx[XE_CACHE_WB];
501 pat_index = xe->pat.idx[XE_CACHE_WB];
503 ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
506 u32 chunk = min(MAX_PTE_PER_SDI, ptes);
508 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
509 bb->cs[bb->len++] = ofs;
510 bb->cs[bb->len++] = 0;
520 addr = xe_res_dma(cur) & PAGE_MASK;
522 if (vm->flags & XE_VM_FLAG_64K) {
523 u64 va = cur_ofs * XE_PAGE_SIZE / 8;
525 xe_assert(xe, (va & (SZ_64K - 1)) ==
526 (addr & (SZ_64K - 1)));
528 flags |= XE_PTE_PS64;
531 addr += vram_region_gpu_offset(res);
535 addr = vm->pt_ops->pte_encode_addr(m->tile->xe,
538 bb->cs[bb->len++] = lower_32_bits(addr);
539 bb->cs[bb->len++] = upper_32_bits(addr);
541 xe_res_next(cur, min_t(u32, size, PAGE_SIZE));
547 #define EMIT_COPY_CCS_DW 5
548 static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
549 u64 dst_ofs, bool dst_is_indirect,
550 u64 src_ofs, bool src_is_indirect,
553 struct xe_device *xe = gt_to_xe(gt);
554 u32 *cs = bb->cs + bb->len;
560 if (GRAPHICS_VERx100(xe) >= 2000) {
561 num_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
562 xe_gt_assert(gt, FIELD_FIT(XE2_CCS_SIZE_MASK, num_pages - 1));
564 ccs_copy_size = REG_FIELD_PREP(XE2_CCS_SIZE_MASK, num_pages - 1);
565 mocs = FIELD_PREP(XE2_XY_CTRL_SURF_MOCS_INDEX_MASK, gt->mocs.uc_index);
568 num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size),
569 NUM_CCS_BYTES_PER_BLOCK);
570 xe_gt_assert(gt, FIELD_FIT(CCS_SIZE_MASK, num_ccs_blks - 1));
572 ccs_copy_size = REG_FIELD_PREP(CCS_SIZE_MASK, num_ccs_blks - 1);
573 mocs = FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, gt->mocs.uc_index);
576 *cs++ = XY_CTRL_SURF_COPY_BLT |
577 (src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
578 (dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
580 *cs++ = lower_32_bits(src_ofs);
581 *cs++ = upper_32_bits(src_ofs) | mocs;
582 *cs++ = lower_32_bits(dst_ofs);
583 *cs++ = upper_32_bits(dst_ofs) | mocs;
585 bb->len = cs - bb->cs;
588 #define EMIT_COPY_DW 10
589 static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
590 u64 src_ofs, u64 dst_ofs, unsigned int size,
593 struct xe_device *xe = gt_to_xe(gt);
597 xe_gt_assert(gt, size / pitch <= S16_MAX);
598 xe_gt_assert(gt, pitch / 4 <= S16_MAX);
599 xe_gt_assert(gt, pitch <= U16_MAX);
601 if (GRAPHICS_VER(xe) >= 20)
602 mocs = FIELD_PREP(XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index);
604 if (GRAPHICS_VERx100(xe) >= 1250)
605 tile_y = XY_FAST_COPY_BLT_D1_SRC_TILE4 | XY_FAST_COPY_BLT_D1_DST_TILE4;
607 bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
608 bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs;
609 bb->cs[bb->len++] = 0;
610 bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;
611 bb->cs[bb->len++] = lower_32_bits(dst_ofs);
612 bb->cs[bb->len++] = upper_32_bits(dst_ofs);
613 bb->cs[bb->len++] = 0;
614 bb->cs[bb->len++] = pitch | mocs;
615 bb->cs[bb->len++] = lower_32_bits(src_ofs);
616 bb->cs[bb->len++] = upper_32_bits(src_ofs);
619 static int job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
620 enum dma_resv_usage usage)
622 return drm_sched_job_add_resv_dependencies(&job->drm, resv, usage);
625 static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
627 return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
630 static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
632 u64 src_ofs, bool src_is_indirect,
633 u64 dst_ofs, bool dst_is_indirect, u32 dst_size,
634 u64 ccs_ofs, bool copy_ccs)
636 struct xe_gt *gt = m->tile->primary_gt;
639 if (xe_device_has_flat_ccs(gt_to_xe(gt)) && !copy_ccs && dst_is_indirect) {
641 * If the src is already in vram, then it should already
642 * have been cleared by us, or has been populated by the
643 * user. Make sure we copy the CCS aux state as-is.
645 * Otherwise if the bo doesn't have any CCS metadata attached,
646 * we still need to clear it for security reasons.
648 u64 ccs_src_ofs = src_is_indirect ? src_ofs : m->cleared_mem_ofs;
650 emit_copy_ccs(gt, bb,
652 ccs_src_ofs, src_is_indirect, dst_size);
654 flush_flags = MI_FLUSH_DW_CCS;
655 } else if (copy_ccs) {
656 if (!src_is_indirect)
658 else if (!dst_is_indirect)
661 xe_gt_assert(gt, src_is_indirect || dst_is_indirect);
663 emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs,
664 src_is_indirect, dst_size);
666 flush_flags = MI_FLUSH_DW_CCS;
673 * xe_migrate_copy() - Copy content of TTM resources.
674 * @m: The migration context.
675 * @src_bo: The buffer object @src is currently bound to.
676 * @dst_bo: If copying between resources created for the same bo, set this to
677 * the same value as @src_bo. If copying between buffer objects, set it to
678 * the buffer object @dst is currently bound to.
679 * @src: The source TTM resource.
680 * @dst: The destination TTM resource.
681 * @copy_only_ccs: If true, copy only the CCS metadata
683 * Copies the contents of @src to @dst: On flat CCS devices,
684 * the CCS metadata is copied as well if needed, or if not present,
685 * the CCS metadata of @dst is cleared for security reasons.
687 * Return: Pointer to a dma_fence representing the last copy batch, or
688 * an error pointer on failure. If there is a failure, any copy operation
689 * started by the function call has been synced.
691 struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
692 struct xe_bo *src_bo,
693 struct xe_bo *dst_bo,
694 struct ttm_resource *src,
695 struct ttm_resource *dst,
698 struct xe_gt *gt = m->tile->primary_gt;
699 struct xe_device *xe = gt_to_xe(gt);
700 struct dma_fence *fence = NULL;
701 u64 size = src_bo->size;
702 struct xe_res_cursor src_it, dst_it, ccs_it;
703 u64 src_L0_ofs, dst_L0_ofs;
704 u32 src_L0_pt, dst_L0_pt;
708 bool src_is_pltt = src->mem_type == XE_PL_TT;
709 bool dst_is_pltt = dst->mem_type == XE_PL_TT;
710 bool src_is_vram = mem_type_is_vram(src->mem_type);
711 bool dst_is_vram = mem_type_is_vram(dst->mem_type);
712 bool copy_ccs = xe_device_has_flat_ccs(xe) &&
713 xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
714 bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
716 /* Copying CCS between two different BOs is not supported yet. */
717 if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
718 return ERR_PTR(-EINVAL);
720 if (src_bo != dst_bo && XE_WARN_ON(src_bo->size != dst_bo->size))
721 return ERR_PTR(-EINVAL);
724 xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
726 xe_res_first(src, 0, size, &src_it);
728 xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
730 xe_res_first(dst, 0, size, &dst_it);
733 xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
734 PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
738 u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
739 struct xe_sched_job *job;
743 u64 ccs_ofs, ccs_size;
746 bool usm = xe->info.has_usm;
747 u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
749 src_L0 = xe_migrate_res_sizes(m, &src_it);
750 dst_L0 = xe_migrate_res_sizes(m, &dst_it);
752 drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
753 pass++, src_L0, dst_L0);
755 src_L0 = min(src_L0, dst_L0);
757 batch_size += pte_update_size(m, src_is_vram, src, &src_it, &src_L0,
758 &src_L0_ofs, &src_L0_pt, 0, 0,
761 batch_size += pte_update_size(m, dst_is_vram, dst, &dst_it, &src_L0,
762 &dst_L0_ofs, &dst_L0_pt, 0,
763 avail_pts, avail_pts);
765 if (copy_system_ccs) {
766 ccs_size = xe_device_ccs_bytes(xe, src_L0);
767 batch_size += pte_update_size(m, false, NULL, &ccs_it, &ccs_size,
768 &ccs_ofs, &ccs_pt, 0,
771 xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
774 /* Add copy commands size here */
775 batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
776 ((xe_device_has_flat_ccs(xe) ? EMIT_COPY_CCS_DW : 0));
778 bb = xe_bb_new(gt, batch_size, usm);
784 if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
785 xe_res_next(&src_it, src_L0);
787 emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs,
788 &src_it, src_L0, src);
790 if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
791 xe_res_next(&dst_it, src_L0);
793 emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
794 &dst_it, src_L0, dst);
797 emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
799 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
800 update_idx = bb->len;
803 emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
805 flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
806 IS_DGFX(xe) ? src_is_vram : src_is_pltt,
808 IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
809 src_L0, ccs_ofs, copy_ccs);
811 mutex_lock(&m->job_mutex);
812 job = xe_bb_create_migration_job(m->q, bb,
813 xe_migrate_batch_base(m, usm),
820 xe_sched_job_add_migrate_flush(job, flush_flags);
822 err = job_add_deps(job, src_bo->ttm.base.resv,
823 DMA_RESV_USAGE_BOOKKEEP);
824 if (!err && src_bo != dst_bo)
825 err = job_add_deps(job, dst_bo->ttm.base.resv,
826 DMA_RESV_USAGE_BOOKKEEP);
831 xe_sched_job_arm(job);
832 dma_fence_put(fence);
833 fence = dma_fence_get(&job->drm.s_fence->finished);
834 xe_sched_job_push(job);
836 dma_fence_put(m->fence);
837 m->fence = dma_fence_get(fence);
839 mutex_unlock(&m->job_mutex);
841 xe_bb_free(bb, fence);
846 xe_sched_job_put(job);
848 mutex_unlock(&m->job_mutex);
849 xe_bb_free(bb, NULL);
852 /* Sync partial copy if any. FIXME: under job_mutex? */
854 dma_fence_wait(fence, false);
855 dma_fence_put(fence);
864 static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
867 struct xe_device *xe = gt_to_xe(gt);
868 u32 *cs = bb->cs + bb->len;
869 u32 len = PVC_MEM_SET_CMD_LEN_DW;
871 *cs++ = PVC_MEM_SET_CMD | PVC_MEM_SET_MATRIX | (len - 2);
873 *cs++ = (size / pitch) - 1;
875 *cs++ = lower_32_bits(src_ofs);
876 *cs++ = upper_32_bits(src_ofs);
877 if (GRAPHICS_VERx100(xe) >= 2000)
878 *cs++ = FIELD_PREP(XE2_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
880 *cs++ = FIELD_PREP(PVC_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
882 xe_gt_assert(gt, cs - bb->cs == len + bb->len);
887 static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
888 u64 src_ofs, u32 size, u32 pitch, bool is_vram)
890 struct xe_device *xe = gt_to_xe(gt);
891 u32 *cs = bb->cs + bb->len;
892 u32 len = XY_FAST_COLOR_BLT_DW;
894 if (GRAPHICS_VERx100(xe) < 1250)
897 *cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
899 if (GRAPHICS_VERx100(xe) >= 2000)
900 *cs++ = FIELD_PREP(XE2_XY_FAST_COLOR_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index) |
903 *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, gt->mocs.uc_index) |
906 *cs++ = (size / pitch) << 16 | pitch / 4;
907 *cs++ = lower_32_bits(src_ofs);
908 *cs++ = upper_32_bits(src_ofs);
909 *cs++ = (is_vram ? 0x0 : 0x1) << XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
923 xe_gt_assert(gt, cs - bb->cs == len + bb->len);
928 static bool has_service_copy_support(struct xe_gt *gt)
931 * What we care about is whether the architecture was designed with
932 * service copy functionality (specifically the new MEM_SET / MEM_COPY
933 * instructions) so check the architectural engine list rather than the
934 * actual list since these instructions are usable on BCS0 even if
935 * all of the actual service copy engines (BCS1-BCS8) have been fused off.
938 return gt->info.__engine_mask & GENMASK(XE_HW_ENGINE_BCS8,
942 static u32 emit_clear_cmd_len(struct xe_gt *gt)
944 if (has_service_copy_support(gt))
945 return PVC_MEM_SET_CMD_LEN_DW;
947 return XY_FAST_COLOR_BLT_DW;
950 static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
951 u32 size, u32 pitch, bool is_vram)
953 if (has_service_copy_support(gt))
954 emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
956 emit_clear_main_copy(gt, bb, src_ofs, size, pitch,
961 * xe_migrate_clear() - Clear content of TTM resources.
962 * @m: The migration context.
963 * @bo: The buffer object @dst is currently bound to.
964 * @dst: The dst TTM resource to be cleared.
966 * Clear the contents of @dst to zero. On flat CCS devices,
967 * the CCS metadata is cleared to zero as well on VRAM destinations.
968 * TODO: Eliminate the @bo argument.
970 * Return: Pointer to a dma_fence representing the last clear batch, or
971 * an error pointer on failure. If there is a failure, any clear operation
972 * started by the function call has been synced.
974 struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
976 struct ttm_resource *dst)
978 bool clear_vram = mem_type_is_vram(dst->mem_type);
979 struct xe_gt *gt = m->tile->primary_gt;
980 struct xe_device *xe = gt_to_xe(gt);
981 bool clear_system_ccs = xe_bo_needs_ccs_pages(bo) && !IS_DGFX(xe);
982 struct dma_fence *fence = NULL;
984 struct xe_res_cursor src_it;
985 struct ttm_resource *src = dst;
990 xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it);
992 xe_res_first(src, 0, bo->size, &src_it);
999 struct xe_sched_job *job;
1001 u32 batch_size, update_idx;
1003 bool usm = xe->info.has_usm;
1004 u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1006 clear_L0 = xe_migrate_res_sizes(m, &src_it);
1008 drm_dbg(&xe->drm, "Pass %u, size: %llu\n", pass++, clear_L0);
1010 /* Calculate final sizes and batch size.. */
1012 pte_update_size(m, clear_vram, src, &src_it,
1013 &clear_L0, &clear_L0_ofs, &clear_L0_pt,
1014 clear_system_ccs ? 0 : emit_clear_cmd_len(gt), 0,
1017 if (xe_device_has_flat_ccs(xe))
1018 batch_size += EMIT_COPY_CCS_DW;
1020 /* Clear commands */
1022 if (WARN_ON_ONCE(!clear_L0))
1025 bb = xe_bb_new(gt, batch_size, usm);
1032 /* Preemption is enabled again by the ring ops. */
1033 if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it))
1034 xe_res_next(&src_it, clear_L0);
1036 emit_pte(m, bb, clear_L0_pt, clear_vram, clear_system_ccs,
1037 &src_it, clear_L0, dst);
1039 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1040 update_idx = bb->len;
1042 if (!clear_system_ccs)
1043 emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
1045 if (xe_device_has_flat_ccs(xe)) {
1046 emit_copy_ccs(gt, bb, clear_L0_ofs, true,
1047 m->cleared_mem_ofs, false, clear_L0);
1048 flush_flags = MI_FLUSH_DW_CCS;
1051 mutex_lock(&m->job_mutex);
1052 job = xe_bb_create_migration_job(m->q, bb,
1053 xe_migrate_batch_base(m, usm),
1060 xe_sched_job_add_migrate_flush(job, flush_flags);
1063 * There can't be anything userspace related at this
1064 * point, so we just need to respect any potential move
1065 * fences, which are always tracked as
1066 * DMA_RESV_USAGE_KERNEL.
1068 err = job_add_deps(job, bo->ttm.base.resv,
1069 DMA_RESV_USAGE_KERNEL);
1074 xe_sched_job_arm(job);
1075 dma_fence_put(fence);
1076 fence = dma_fence_get(&job->drm.s_fence->finished);
1077 xe_sched_job_push(job);
1079 dma_fence_put(m->fence);
1080 m->fence = dma_fence_get(fence);
1082 mutex_unlock(&m->job_mutex);
1084 xe_bb_free(bb, fence);
1088 xe_sched_job_put(job);
1090 mutex_unlock(&m->job_mutex);
1091 xe_bb_free(bb, NULL);
1093 /* Sync partial copies if any. FIXME: job_mutex? */
1095 dma_fence_wait(m->fence, false);
1096 dma_fence_put(fence);
1099 return ERR_PTR(err);
1102 if (clear_system_ccs)
1103 bo->ccs_cleared = true;
1108 static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
1109 const struct xe_vm_pgtable_update *update,
1110 struct xe_migrate_pt_update *pt_update)
1112 const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1114 u32 ofs = update->ofs, size = update->qwords;
1117 * If we have 512 entries (max), we would populate it ourselves,
1118 * and update the PDE above it to the new pointer.
1119 * The only time this can happen is if we have to update the top
1120 * PDE. This requires a BO that is almost vm->size big.
1122 * This shouldn't be possible in practice; it might change when 16K
1123 * pages are used. Hence the assert.
1125 xe_tile_assert(tile, update->qwords < MAX_NUM_PTE);
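/*
 * Illustrative example: given the assert above, an update is at most 511
 * qwords, so the loop below emits at most two MI_STORE_DATA_IMM chunks
 * (e.g. 510 + 1 qwords), each with its own header and destination address,
 * optionally preceded by an MI_NOOP so the qword payload stays aligned for
 * memset64() in the populate callback.
 */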
1127 ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
1128 xe_bo_addr(update->pt_bo, 0,
1132 u64 addr = ppgtt_ofs + ofs * 8;
1134 chunk = min(size, MAX_PTE_PER_SDI);
1136 /* Ensure populatefn can do memset64 by aligning bb->cs */
1138 bb->cs[bb->len++] = MI_NOOP;
1140 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
1141 bb->cs[bb->len++] = lower_32_bits(addr);
1142 bb->cs[bb->len++] = upper_32_bits(addr);
1143 ops->populate(pt_update, tile, NULL, bb->cs + bb->len, ofs, chunk,
1146 bb->len += chunk * 2;
1152 struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m)
1154 return xe_vm_get(m->q->vm);
1157 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1158 struct migrate_test_params {
1159 struct xe_test_priv base;
1163 #define to_migrate_test_params(_priv) \
1164 container_of(_priv, struct migrate_test_params, base)
1167 static struct dma_fence *
1168 xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
1169 struct xe_vm *vm, struct xe_bo *bo,
1170 const struct xe_vm_pgtable_update *updates,
1171 u32 num_updates, bool wait_vm,
1172 struct xe_migrate_pt_update *pt_update)
1174 XE_TEST_DECLARE(struct migrate_test_params *test =
1175 to_migrate_test_params
1176 (xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
1177 const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1178 struct dma_fence *fence;
1182 if (XE_TEST_ONLY(test && test->force_gpu))
1183 return ERR_PTR(-ETIME);
1185 if (bo && !dma_resv_test_signaled(bo->ttm.base.resv,
1186 DMA_RESV_USAGE_KERNEL))
1187 return ERR_PTR(-ETIME);
1189 if (wait_vm && !dma_resv_test_signaled(xe_vm_resv(vm),
1190 DMA_RESV_USAGE_BOOKKEEP))
1191 return ERR_PTR(-ETIME);
1193 if (ops->pre_commit) {
1194 pt_update->job = NULL;
1195 err = ops->pre_commit(pt_update);
1197 return ERR_PTR(err);
1199 for (i = 0; i < num_updates; i++) {
1200 const struct xe_vm_pgtable_update *update = &updates[i];
1202 ops->populate(pt_update, m->tile, &update->pt_bo->vmap, NULL,
1203 update->ofs, update->qwords, update);
1207 trace_xe_vm_cpu_bind(vm);
1208 xe_device_wmb(vm->xe);
1211 fence = dma_fence_get_stub();
1216 static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q,
1217 struct xe_sync_entry *syncs, u32 num_syncs)
1219 struct dma_fence *fence;
1222 for (i = 0; i < num_syncs; i++) {
1223 fence = syncs[i].fence;
1225 if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
1230 fence = xe_exec_queue_last_fence_get(q, vm);
1231 if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1232 dma_fence_put(fence);
1235 dma_fence_put(fence);
1242 * xe_migrate_update_pgtables() - Pipelined page-table update
1243 * @m: The migrate context.
1244 * @vm: The vm we'll be updating.
1245 * @bo: The bo whose dma-resv we will await before updating, or NULL if userptr.
1246 * @q: The exec queue to be used for the update or NULL if the default
1247 * migration engine is to be used.
1248 * @updates: An array of update descriptors.
1249 * @num_updates: Number of descriptors in @updates.
1250 * @syncs: Array of xe_sync_entry to await before updating. Note that waits
1251 * will block the engine timeline.
1252 * @num_syncs: Number of entries in @syncs.
1253 * @pt_update: Pointer to a struct xe_migrate_pt_update, which contains
1254 * pointers to callback functions and, if subclassed, private arguments to
1257 * Perform a pipelined page-table update. The update descriptors are typically
1258 * built under the same lock critical section as a call to this function. If
1259 * using the default engine for the updates, they will be performed in the
1260 * order they grab the job_mutex. If different engines are used, external
1261 * synchronization is needed for overlapping updates to maintain page-table
1262 * consistency. Note that the meaning of "overlapping" is that the updates
1263 * touch the same page-table, which might be a higher-level page-directory.
1264 * If no pipelining is needed, then updates may be performed by the CPU.
1266 * Return: A dma_fence that, when signaled, indicates the update completion.
1269 xe_migrate_update_pgtables(struct xe_migrate *m,
1272 struct xe_exec_queue *q,
1273 const struct xe_vm_pgtable_update *updates,
1275 struct xe_sync_entry *syncs, u32 num_syncs,
1276 struct xe_migrate_pt_update *pt_update)
1278 const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1279 struct xe_tile *tile = m->tile;
1280 struct xe_gt *gt = tile->primary_gt;
1281 struct xe_device *xe = tile_to_xe(tile);
1282 struct xe_sched_job *job;
1283 struct dma_fence *fence;
1284 struct drm_suballoc *sa_bo = NULL;
1285 struct xe_vma *vma = pt_update->vma;
1287 u32 i, batch_size, ppgtt_ofs, update_idx, page_ofs = 0;
1290 bool usm = !q && xe->info.has_usm;
1291 bool first_munmap_rebind = vma &&
1292 vma->gpuva.flags & XE_VMA_FIRST_REBIND;
1293 struct xe_exec_queue *q_override = !q ? m->q : q;
1294 u16 pat_index = xe->pat.idx[XE_CACHE_WB];
1296 /* Use the CPU if there are no in-syncs and the engine is idle */
1297 if (no_in_syncs(vm, q, syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
1298 fence = xe_migrate_update_pgtables_cpu(m, vm, bo, updates,
1300 first_munmap_rebind,
1302 if (!IS_ERR(fence) || fence == ERR_PTR(-EAGAIN))
1306 /* fixed + PTE entries */
1310 batch_size = 6 + num_updates * 2;
1312 for (i = 0; i < num_updates; i++) {
1313 u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, MAX_PTE_PER_SDI);
1315 /* align noop + MI_STORE_DATA_IMM cmd prefix */
1316 batch_size += 4 * num_cmds + updates[i].qwords * 2;
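/*
 * Worked example: two updates of 400 and 511 qwords need 1 and 2
 * MI_STORE_DATA_IMM commands respectively (MAX_PTE_PER_SDI == 510), so
 * batch_size = 6 + 2 * 2 + (4 * 1 + 800) + (4 * 2 + 1022) = 1844 dwords,
 * well below the SZ_128K sanity assert further down.
 */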
1320 * XXX: Create temp bo to copy from, if batch_size becomes too big?
1322 * Worst case: Sum(2 * (each lower level page size) + (top level page size))
1323 * Should be reasonably bound..
1325 xe_tile_assert(tile, batch_size < SZ_128K);
1327 bb = xe_bb_new(gt, batch_size, !q && xe->info.has_usm);
1329 return ERR_CAST(bb);
1331 /* For sysmem PTEs, we need to map them in our hole. */
1333 ppgtt_ofs = NUM_KERNEL_PDE - 1;
1335 xe_tile_assert(tile, num_updates <= NUM_VMUSA_WRITES_PER_UNIT);
1337 sa_bo = drm_suballoc_new(&m->vm_update_sa, 1,
1338 GFP_KERNEL, true, 0);
1339 if (IS_ERR(sa_bo)) {
1340 err = PTR_ERR(sa_bo);
1344 ppgtt_ofs = NUM_KERNEL_PDE +
1345 (drm_suballoc_soffset(sa_bo) /
1346 NUM_VMUSA_UNIT_PER_PAGE);
1347 page_ofs = (drm_suballoc_soffset(sa_bo) %
1348 NUM_VMUSA_UNIT_PER_PAGE) *
1349 VM_SA_UPDATE_UNIT_SIZE;
1352 /* Map our PTs to GTT */
1353 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(num_updates);
1354 bb->cs[bb->len++] = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
1355 bb->cs[bb->len++] = 0; /* upper_32_bits */
1357 for (i = 0; i < num_updates; i++) {
1358 struct xe_bo *pt_bo = updates[i].pt_bo;
1360 xe_tile_assert(tile, pt_bo->size == SZ_4K);
1362 addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, pat_index, 0);
1363 bb->cs[bb->len++] = lower_32_bits(addr);
1364 bb->cs[bb->len++] = upper_32_bits(addr);
1367 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1368 update_idx = bb->len;
1370 addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
1371 (page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
1372 for (i = 0; i < num_updates; i++)
1373 write_pgtable(tile, bb, addr + i * XE_PAGE_SIZE,
1374 &updates[i], pt_update);
1376 /* phys pages, no preamble required */
1377 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1378 update_idx = bb->len;
1380 for (i = 0; i < num_updates; i++)
1381 write_pgtable(tile, bb, 0, &updates[i], pt_update);
1385 mutex_lock(&m->job_mutex);
1387 job = xe_bb_create_migration_job(q ?: m->q, bb,
1388 xe_migrate_batch_base(m, usm),
1395 /* Wait on BO move */
1397 err = job_add_deps(job, bo->ttm.base.resv,
1398 DMA_RESV_USAGE_KERNEL);
1404 * Munmap-style VM unbind: we need to wait for all jobs to complete /
1405 * trigger preempts before moving forward.
1407 if (first_munmap_rebind) {
1408 err = job_add_deps(job, xe_vm_resv(vm),
1409 DMA_RESV_USAGE_BOOKKEEP);
1414 err = xe_sched_job_last_fence_add_dep(job, vm);
1415 for (i = 0; !err && i < num_syncs; i++)
1416 err = xe_sync_entry_add_deps(&syncs[i], job);
1421 if (ops->pre_commit) {
1422 pt_update->job = job;
1423 err = ops->pre_commit(pt_update);
1427 xe_sched_job_arm(job);
1428 fence = dma_fence_get(&job->drm.s_fence->finished);
1429 xe_sched_job_push(job);
1432 mutex_unlock(&m->job_mutex);
1434 xe_bb_free(bb, fence);
1435 drm_suballoc_free(sa_bo, fence);
1440 xe_sched_job_put(job);
1443 mutex_unlock(&m->job_mutex);
1444 xe_bb_free(bb, NULL);
1446 drm_suballoc_free(sa_bo, NULL);
1447 return ERR_PTR(err);
1451 * xe_migrate_wait() - Complete all operations using the xe_migrate context
1452 * @m: Migrate context to wait for.
1454 * Waits until the GPU no longer uses the migrate context's default engine
1455 * or its page-table objects. FIXME: What about separate page-table update engines?
1458 void xe_migrate_wait(struct xe_migrate *m)
1461 dma_fence_wait(m->fence, false);
1464 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1465 #include "tests/xe_migrate.c"