level = 2;
ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
- flags = vm->pt_ops->pte_encode_addr(0, XE_CACHE_WB, level, true, 0);
+ flags = vm->pt_ops->pte_encode_addr(xe, 0, XE_CACHE_WB, level,
+ true, 0);
/*
* Use 1GB pages, it shouldn't matter the physical amount of
* vram is less, when we don't access it.
*/
devmem = true;
}
- addr = m->q->vm->pt_ops->pte_encode_addr(addr, XE_CACHE_WB,
+ addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
+ addr, XE_CACHE_WB,
0, devmem, flags);
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
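
Reviewer note: in the hunk above, pte_encode_addr() is deliberately called with addr == 0, so the returned value carries only flag and PAT bits; the 1GiB identity-map loop that follows in xe_migrate.c then ORs each gigabyte-aligned physical address into those precomputed flags. A minimal userspace sketch of the trick; the flag names and bit positions below are illustrative assumptions, not the driver's headers:

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in flag bits; the real values come from the xe headers. */
	#define PAGE_PRESENT	(1ull << 0)
	#define PAGE_RW		(1ull << 1)
	#define PS_1G		(1ull << 7)	/* models pte_encode_ps() at level 2 */

	/* Models pte_encode_addr(xe, 0, ...): addr == 0 leaves only flag bits. */
	static uint64_t encode_flags_only(void)
	{
		return 0 /* addr */ | PAGE_PRESENT | PAGE_RW | PS_1G;
	}

	int main(void)
	{
		uint64_t flags = encode_flags_only();
		uint64_t sz_1g = 1ull << 30;

		/* Identity-map 4 GiB of "vram" with 1GiB entries: each PTE is
		 * just pos | flags, which is safe because a 1GiB-aligned pos
		 * never overlaps the low flag bits. */
		for (uint64_t pos = 0; pos < 4 * sz_1g; pos += sz_1g)
			printf("pte for %#llx = %#llx\n",
			       (unsigned long long)pos,
			       (unsigned long long)(pos | flags));
		return 0;
	}
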
#include "xe_pt_walk.h"
struct xe_bo;
+struct xe_device;
struct xe_vma;
enum xe_cache_level {
enum xe_cache_level cache, u32 pt_level);
u64 (*pte_encode_vma)(u64 pte, struct xe_vma *vma,
enum xe_cache_level cache, u32 pt_level);
- u64 (*pte_encode_addr)(u64 addr, enum xe_cache_level cache,
+ u64 (*pte_encode_addr)(struct xe_device *xe, u64 addr,
+ enum xe_cache_level cache,
u32 pt_level, bool devmem, u64 flags);
u64 (*pde_encode_bo)(struct xe_bo *bo, u64 bo_offset,
const enum xe_cache_level cache);
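
This signature change is the crux of the series: unlike the bo- and vma-based encoders, pte_encode_addr() has no object from which to derive the device, so the device pointer must be passed explicitly, letting the encoder consult the per-device cache-level-to-PAT-index table (xe->pat.idx[]) instead of hardcoded PPAT values. A rough standalone model of that indirection; the struct layout and table contents are made-up stand-ins:

	#include <stdint.h>
	#include <stdio.h>

	enum cache_level { CACHE_NONE, CACHE_WT, CACHE_WB, CACHE_LAST };

	/* Per-device cache-level -> PAT-index table, standing in for xe->pat.idx. */
	struct device_model {
		uint32_t pat_idx[CACHE_LAST];
	};

	struct pt_ops_model {
		uint64_t (*pte_encode_addr)(struct device_model *dev, uint64_t addr,
					    enum cache_level cache);
	};

	static uint64_t model_encode_addr(struct device_model *dev, uint64_t addr,
					  enum cache_level cache)
	{
		/* Contiguous placement is a simplification; the real encoders
		 * scatter the index bits, as the hunks below show. */
		return addr | ((uint64_t)dev->pat_idx[cache] << 3);
	}

	int main(void)
	{
		/* Two "platforms" with different (made-up) PAT tables: the same
		 * cache level yields different PTE bits, which is exactly why
		 * the device must be threaded through the vfunc. */
		struct device_model xelp = { .pat_idx = { 3, 2, 0 } };
		struct device_model mtl  = { .pat_idx = { 2, 1, 3 } };
		struct pt_ops_model ops  = { .pte_encode_addr = model_encode_addr };

		printf("xelp WB pte: %#llx\n", (unsigned long long)
		       ops.pte_encode_addr(&xelp, 0x10000, CACHE_WB));
		printf("mtl  WB pte: %#llx\n", (unsigned long long)
		       ops.pte_encode_addr(&mtl, 0x10000, CACHE_WB));
		return 0;
	}
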
.vm_free = xe_vm_free,
};
-static u64 pde_encode_cache(enum xe_cache_level cache)
+static u64 pde_encode_cache(struct xe_device *xe, enum xe_cache_level cache)
{
- /* FIXME: I don't think the PPAT handling is correct for MTL */
+ u32 pat_index = xe->pat.idx[cache];
+ u64 pte = 0;
- if (cache != XE_CACHE_NONE)
- return PPAT_CACHED_PDE;
+ if (pat_index & BIT(0))
+ pte |= XE_PPGTT_PTE_PAT0;
- return PPAT_UNCACHED;
+ if (pat_index & BIT(1))
+ pte |= XE_PPGTT_PTE_PAT1;
+
+ return pte;
}
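
Note that the PDE variant consumes only the two low index bits: as the hunk itself shows, a page-directory entry has just PAT0 and PAT1 to work with, so PAT indices above 3 are not representable at the directory level. A tiny model making that limit explicit, assuming the conventional PDE PAT positions (bit 3 and bit 4):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Assumed PDE PAT positions: PAT0 = bit 3, PAT1 = bit 4. */
	#define PDE_PAT0 (1ull << 3)
	#define PDE_PAT1 (1ull << 4)

	static uint64_t model_pde_pat(uint32_t pat_index)
	{
		uint64_t pde = 0;

		/* Only two PAT bits exist in a PDE; higher indices would be
		 * silently truncated, so make the limit visible here. */
		assert(pat_index < 4);

		if (pat_index & 1u)
			pde |= PDE_PAT0;
		if (pat_index & 2u)
			pde |= PDE_PAT1;

		return pde;
	}

	int main(void)
	{
		for (uint32_t i = 0; i < 4; i++)
			printf("pat_index %u -> pde bits %#llx\n", (unsigned)i,
			       (unsigned long long)model_pde_pat(i));
		return 0;
	}
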
-static u64 pte_encode_cache(enum xe_cache_level cache)
+static u64 pte_encode_cache(struct xe_device *xe, enum xe_cache_level cache)
{
- /* FIXME: I don't think the PPAT handling is correct for MTL */
- switch (cache) {
- case XE_CACHE_NONE:
- return PPAT_UNCACHED;
- case XE_CACHE_WT:
- return PPAT_DISPLAY_ELLC;
- default:
- return PPAT_CACHED;
- }
+ u32 pat_index = xe->pat.idx[cache];
+ u64 pte = 0;
+
+ if (pat_index & BIT(0))
+ pte |= XE_PPGTT_PTE_PAT0;
+
+ if (pat_index & BIT(1))
+ pte |= XE_PPGTT_PTE_PAT1;
+
+ if (pat_index & BIT(2))
+ pte |= XE_PPGTT_PTE_PAT2;
+
+ if (pat_index & BIT(3))
+ pte |= XELPG_PPGTT_PTE_PAT3;
+
+ return pte;
}
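
The leaf-PTE variant widens this to a four-bit index: the low three bits land in the legacy PAT0-PAT2 positions, while bit 3 maps to a high PTE bit that only Xe_LPG (Meteor Lake) defines, hence the XELPG_ prefix on that one macro. A worked standalone sketch, assuming the commonly documented positions (PAT0 = bit 3, PAT1 = bit 4, PAT2 = bit 7, PAT3 = bit 62):

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed PTE PAT positions; the xe register headers are authoritative. */
	#define PTE_PAT0 (1ull << 3)
	#define PTE_PAT1 (1ull << 4)
	#define PTE_PAT2 (1ull << 7)
	#define PTE_PAT3 (1ull << 62)	/* Xe_LPG-only high bit */

	static uint64_t model_pte_pat(uint32_t pat_index)
	{
		uint64_t pte = 0;

		if (pat_index & 1u)
			pte |= PTE_PAT0;
		if (pat_index & 2u)
			pte |= PTE_PAT1;
		if (pat_index & 4u)
			pte |= PTE_PAT2;
		if (pat_index & 8u)
			pte |= PTE_PAT3;

		return pte;
	}

	int main(void)
	{
		/* E.g. index 3 sets PAT0|PAT1 (0x18); index 8 sets only bit 62. */
		for (uint32_t i = 0; i < 16; i++)
			printf("pat_index %2u -> pte bits %#llx\n", (unsigned)i,
			       (unsigned long long)model_pte_pat(i));
		return 0;
	}
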
static u64 pte_encode_ps(u32 pt_level)
static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
const enum xe_cache_level cache)
{
+ struct xe_device *xe = xe_bo_device(bo);
u64 pde;
pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pde |= pde_encode_cache(cache);
+ pde |= pde_encode_cache(xe, cache);
return pde;
}
static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
enum xe_cache_level cache, u32 pt_level)
{
+ struct xe_device *xe = xe_bo_device(bo);
u64 pte;
pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pte |= pte_encode_cache(cache);
+ pte |= pte_encode_cache(xe, cache);
pte |= pte_encode_ps(pt_level);
if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
enum xe_cache_level cache, u32 pt_level)
{
+ struct xe_device *xe = xe_vma_vm(vma)->xe;
+
pte |= XE_PAGE_PRESENT;
if (likely(!xe_vma_read_only(vma)))
pte |= XE_PAGE_RW;
- pte |= pte_encode_cache(cache);
+ pte |= pte_encode_cache(xe, cache);
pte |= pte_encode_ps(pt_level);
if (unlikely(xe_vma_is_null(vma)))
pte |= XE_PTE_NULL;

return pte;
}
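
The vma and bo encoders recover the device from the object at hand (xe_vma_vm(vma)->xe here, xe_bo_device(bo) above), which is why their signatures stay put and only pte_encode_addr() grows a parameter. The control flow is otherwise untouched: present always, RW unless the vma is read-only, then PAT and page-size bits, and a null-marker bit for null vmas. A condensed model with stubbed predicates; all names here are stand-ins, not the driver's:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in flag bits for the sketch. */
	#define PAGE_PRESENT	(1ull << 0)
	#define PAGE_RW		(1ull << 1)
	#define PTE_NULL_BIT	(1ull << 9)

	struct vma_model {
		bool read_only;	/* models xe_vma_read_only() */
		bool is_null;	/* models xe_vma_is_null() */
	};

	static uint64_t model_pte_encode_vma(uint64_t pte, const struct vma_model *vma)
	{
		pte |= PAGE_PRESENT;

		if (!vma->read_only)
			pte |= PAGE_RW;

		/* PAT and page-size bits would be ORed in here, as above */

		if (vma->is_null)
			pte |= PTE_NULL_BIT;	/* null binding: no backing memory */

		return pte;
	}

	int main(void)
	{
		struct vma_model null_vma = { .is_null = true };
		struct vma_model ro_vma = { .read_only = true };

		printf("null pte: %#llx\n",
		       (unsigned long long)model_pte_encode_vma(0, &null_vma));
		printf("ro   pte: %#llx\n",
		       (unsigned long long)model_pte_encode_vma(0, &ro_vma));
		return 0;
	}
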
-static u64 xelp_pte_encode_addr(u64 addr, enum xe_cache_level cache,
+static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
+ enum xe_cache_level cache,
u32 pt_level, bool devmem, u64 flags)
{
u64 pte;
pte = addr;
pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pte |= pte_encode_cache(cache);
+ pte |= pte_encode_cache(xe, cache);
pte |= pte_encode_ps(pt_level);
if (devmem)