/* First part of the test: are we updating our pagetable bo with a new entry? */
xe_map_wr(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64,
0xdeaddeadbeefbeef);
- expected = gen8_pte_encode(NULL, pt, 0, XE_CACHE_WB, 0, 0);
+ expected = xe_pte_encode(NULL, pt, 0, XE_CACHE_WB, 0, 0);
if (m->eng->vm->flags & XE_VM_FLAGS_64K)
expected |= XE_PTE_PS64;
if (xe_bo_is_vram(pt))
return ret;
}
- entry = gen8_pde_encode(bo, bo->size - XE_PAGE_SIZE, XE_CACHE_WB);
+ entry = xe_pde_encode(bo, bo->size - XE_PAGE_SIZE, XE_CACHE_WB);
xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
map_ofs = (num_entries - num_level) * XE_PAGE_SIZE;
/* Map the entire BO in our level 0 pt */
for (i = 0, level = 0; i < num_entries; level++) {
- entry = gen8_pte_encode(NULL, bo, i * XE_PAGE_SIZE,
- XE_CACHE_WB, 0, 0);
+ entry = xe_pte_encode(NULL, bo, i * XE_PAGE_SIZE,
+ XE_CACHE_WB, 0, 0);
xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
for (i = 0; i < batch->size;
i += vm->flags & XE_VM_FLAGS_64K ? XE_64K_PAGE_SIZE :
XE_PAGE_SIZE) {
- entry = gen8_pte_encode(NULL, batch, i,
- XE_CACHE_WB, 0, 0);
+ entry = xe_pte_encode(NULL, batch, i,
+ XE_CACHE_WB, 0, 0);
xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
entry);
if (vm->flags & XE_VM_FLAGS_64K && level == 1)
flags = XE_PDE_64K;
- entry = gen8_pde_encode(bo, map_ofs + (level - 1) *
+ entry = xe_pde_encode(bo, map_ofs + (level - 1) *
XE_PAGE_SIZE, XE_CACHE_WB);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
entry | flags);
/* Write PDEs that point to our BO. */
for (i = 0; i < num_entries - num_level; i++) {
- entry = gen8_pde_encode(bo, i * XE_PAGE_SIZE,
- XE_CACHE_WB);
+ entry = xe_pde_encode(bo, i * XE_PAGE_SIZE,
+ XE_CACHE_WB);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
(i + 1) * 8, u64, entry);
BUG_ON(pt_bo->size != SZ_4K);
- addr = gen8_pte_encode(NULL, pt_bo, 0, XE_CACHE_WB,
- 0, 0);
+ addr = xe_pte_encode(NULL, pt_bo, 0, XE_CACHE_WB,
+ 0, 0);
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
}
}
/**
- * gen8_pde_encode() - Encode a page-table directory entry pointing to
+ * xe_pde_encode() - Encode a page-table directory entry pointing to
* another page-table.
* @bo: The page-table bo of the page-table to point to.
* @bo_offset: Offset in the page-table bo to point to.
* @level: The cache level to use when encoding the entry.
*
* Return: An encoded page directory entry. No errors.
*/
-u64 gen8_pde_encode(struct xe_bo *bo, u64 bo_offset,
- const enum xe_cache_level level)
+u64 xe_pde_encode(struct xe_bo *bo, u64 bo_offset,
+ const enum xe_cache_level level)
{
u64 pde;
bool is_vram;
}
}
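/*
 * Aside (not part of the patch): a minimal standalone sketch of the kind of
 * directory entry xe_pde_encode() builds, assuming the usual scheme of a
 * page-aligned child-table address with flag bits OR'ed into the low bits.
 * The SKETCH_* macros are placeholders, not the real XE_* definitions.
 */
#include <stdint.h>

#define SKETCH_PAGE_PRESENT	(1ull << 0)	/* placeholder for XE_PAGE_PRESENT */
#define SKETCH_PAGE_RW		(1ull << 1)	/* placeholder for XE_PAGE_RW */

static uint64_t sketch_pde_encode(uint64_t child_table_addr)
{
	/* The child table is page aligned, so bits 11:0 are free for flags. */
	return child_table_addr | SKETCH_PAGE_PRESENT | SKETCH_PAGE_RW;
}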
-static u64 __gen8_pte_encode(u64 pte, enum xe_cache_level cache, u32 flags,
- u32 pt_level)
+static u64 __pte_encode(u64 pte, enum xe_cache_level cache, u32 flags,
+ u32 pt_level)
{
pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
}
/**
- * gen8_pte_encode() - Encode a page-table entry pointing to memory.
+ * xe_pte_encode() - Encode a page-table entry pointing to memory.
* @vma: The vma representing the memory to point to.
* @bo: If @vma is NULL, representing the memory to point to.
* @offset: The offset into @vma or @bo.
* @cache: The cache level of the memory pointed to.
* @flags: Flags modifying the PTE encoding.
* @pt_level: The page-table level of the page-table into which the entry
* is to be inserted.
*
- * TODO: Rename.
- *
* Return: An encoded page-table entry. No errors.
*/
-u64 gen8_pte_encode(struct xe_vma *vma, struct xe_bo *bo,
- u64 offset, enum xe_cache_level cache,
- u32 flags, u32 pt_level)
+u64 xe_pte_encode(struct xe_vma *vma, struct xe_bo *bo,
+ u64 offset, enum xe_cache_level cache,
+ u32 flags, u32 pt_level)
{
u64 pte;
bool is_vram;
pte |= XE_USM_PPGTT_PTE_AE;
}
- return __gen8_pte_encode(pte, cache, flags, pt_level);
+ return __pte_encode(pte, cache, flags, pt_level);
}
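/*
 * Aside (not part of the patch): a hedged, standalone sketch of the leaf
 * encoding path. Per the kerneldoc above, the caller passes either @vma or
 * (@bo, @offset); the resulting address is then combined with present/RW,
 * cache and flag bits, much like the XE_PAGE_PRESENT | XE_PAGE_RW OR visible
 * in __pte_encode(). The SKETCH_* names and the large-page bit below are
 * placeholders, not the driver's definitions.
 */
#include <stdint.h>

#define SKETCH_PAGE_PRESENT	(1ull << 0)	/* placeholder for XE_PAGE_PRESENT */
#define SKETCH_PAGE_RW		(1ull << 1)	/* placeholder for XE_PAGE_RW */
#define SKETCH_HUGE_ENTRY	(1ull << 7)	/* placeholder large-page bit */

static uint64_t sketch_pte_encode(uint64_t addr, uint64_t cache_bits,
				  uint64_t flags, unsigned int pt_level)
{
	uint64_t pte = addr | SKETCH_PAGE_PRESENT | SKETCH_PAGE_RW;

	pte |= cache_bits | flags;
	if (pt_level > 0)	/* an entry above level 0 maps a larger page */
		pte |= SKETCH_HUGE_ENTRY;
	return pte;
}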
static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
return 0;
if (level == 0) {
- u64 empty = gen8_pte_encode(NULL, vm->scratch_bo[id], 0,
- XE_CACHE_WB, 0, 0);
+ u64 empty = xe_pte_encode(NULL, vm->scratch_bo[id], 0,
+ XE_CACHE_WB, 0, 0);
return empty;
} else {
- return gen8_pde_encode(vm->scratch_pt[id][level - 1]->bo, 0,
- XE_CACHE_WB);
+ return xe_pde_encode(vm->scratch_pt[id][level - 1]->bo, 0,
+ XE_CACHE_WB);
}
}
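/*
 * Aside (not part of the patch): __xe_pt_empty_pte() above fills unused
 * slots so that every unmapped address still resolves somewhere harmless.
 * A standalone sketch of that idea, assuming one shared scratch page and one
 * scratch page table per level; all names here are hypothetical and the
 * SKETCH_* bits are placeholders.
 */
#include <stdint.h>

#define SKETCH_PAGE_PRESENT	(1ull << 0)	/* placeholder for XE_PAGE_PRESENT */
#define SKETCH_PAGE_RW		(1ull << 1)	/* placeholder for XE_PAGE_RW */

static uint64_t sketch_empty_entry(unsigned int level,
				   uint64_t scratch_page_addr,
				   const uint64_t *scratch_pt_addr)
{
	if (level == 0)
		/* Leaf slot: a PTE to the shared scratch page. */
		return scratch_page_addr | SKETCH_PAGE_PRESENT | SKETCH_PAGE_RW;
	/* Directory slot: a PDE to the next-lower scratch page table. */
	return scratch_pt_addr[level - 1] | SKETCH_PAGE_PRESENT | SKETCH_PAGE_RW;
}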
XE_WARN_ON(xe_walk->va_curs_start != addr);
- pte = __gen8_pte_encode(xe_res_dma(curs) + xe_walk->dma_offset,
- xe_walk->cache, xe_walk->pte_flags,
- level);
+ pte = __pte_encode(xe_res_dma(curs) + xe_walk->dma_offset,
+ xe_walk->cache, xe_walk->pte_flags,
+ level);
pte |= xe_walk->default_pte;
/*
xe_child->is_compact = true;
}
- pte = gen8_pde_encode(xe_child->bo, 0, xe_walk->cache) | flags;
+ pte = xe_pde_encode(xe_child->bo, 0, xe_walk->cache) | flags;
ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, xe_child,
pte);
}
bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma);
-u64 gen8_pde_encode(struct xe_bo *bo, u64 bo_offset,
- const enum xe_cache_level level);
+u64 xe_pde_encode(struct xe_bo *bo, u64 bo_offset,
+ const enum xe_cache_level level);
-u64 gen8_pte_encode(struct xe_vma *vma, struct xe_bo *bo,
- u64 offset, enum xe_cache_level cache,
- u32 flags, u32 pt_level);
+u64 xe_pte_encode(struct xe_vma *vma, struct xe_bo *bo,
+ u64 offset, enum xe_cache_level cache,
+ u32 flags, u32 pt_level);
#endif
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
{
- return gen8_pde_encode(vm->pt_root[tile->id]->bo, 0,
- XE_CACHE_WB);
+ return xe_pde_encode(vm->pt_root[tile->id]->bo, 0,
+ XE_CACHE_WB);
}
static struct dma_fence *