drm/xe: s/XE_PTE_READ_ONLY/XE_PTE_FLAG_READ_ONLY
author		Matthew Brost <matthew.brost@intel.com>
		Wed, 7 Jun 2023 18:43:52 +0000 (11:43 -0700)
committer	Rodrigo Vivi <rodrigo.vivi@intel.com>
		Tue, 19 Dec 2023 23:35:21 +0000 (18:35 -0500)
This define is for internal PTE flags rather than fields in the hardware
PTEs, so rename it as such. This will help avoid further confusion in an
upcoming patch.
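
To make the distinction concrete: XE_PAGE_PRESENT and XE_PAGE_RW are fields
written into the hardware PTE, while XE_PTE_FLAG_READ_ONLY is an internal
flag that only steers how those fields are set (see __gen8_pte_encode() in
the diff below). A minimal standalone sketch of that relationship follows;
pte_encode() is a simplified, hypothetical stand-in for __gen8_pte_encode(),
which also takes further arguments (e.g. the cache level) omitted here.

#include <stdio.h>
#include <stdint.h>

/* Fields in the hardware PTE itself (values from xe_bo.h). */
#define XE_PAGE_PRESENT		(1ULL << 0)
#define XE_PAGE_RW		(1ULL << 1)

/* Internal PTE flag (from xe_bo.h); never stored in the hardware PTE. */
#define XE_PTE_FLAG_READ_ONLY	(1U << 0)

/* Simplified stand-in for __gen8_pte_encode(). */
static uint64_t pte_encode(uint64_t pte, uint32_t flags)
{
	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;

	/* The internal flag is only ever translated into hardware bits. */
	if (flags & XE_PTE_FLAG_READ_ONLY)
		pte &= ~XE_PAGE_RW;

	return pte;
}

int main(void)
{
	/* A read-only mapping is present but has the RW bit cleared. */
	printf("0x%llx\n",
	       (unsigned long long)pte_encode(0x1000, XE_PTE_FLAG_READ_ONLY));
	return 0;
}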

Reviewed-by: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/xe_bo.h
drivers/gpu/drm/xe/xe_pt.c
drivers/gpu/drm/xe/xe_vm.c

diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 29eb7474f0185d704318eb60be5e2605564406d6..552fe073e9c5b04afcad9ac5da16bfd0f0af7d47 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -65,7 +65,7 @@
 #define XE_PAGE_PRESENT                        BIT_ULL(0)
 #define XE_PAGE_RW                     BIT_ULL(1)
 
-#define XE_PTE_READ_ONLY               BIT(0)
+#define XE_PTE_FLAG_READ_ONLY          BIT(0)
 
 #define XE_PL_SYSTEM           TTM_PL_SYSTEM
 #define XE_PL_TT               TTM_PL_TT
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 2c472fafc8112d656d50538e210e06e33ddc220c..1ba93c2861aba13850eb99d9b1f954157486dfc4 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -102,7 +102,7 @@ static u64 __gen8_pte_encode(u64 pte, enum xe_cache_level cache, u32 flags,
 {
        pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
 
-       if (unlikely(flags & XE_PTE_READ_ONLY))
+       if (unlikely(flags & XE_PTE_FLAG_READ_ONLY))
                pte &= ~XE_PAGE_RW;
 
        /* FIXME: I don't think the PPAT handling is correct for MTL */
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index be629783050f13d67de51909b724b1672f4d711c..51daa5fd78212dc7e7097bec5b2fe6f85a01bd06 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -61,7 +61,7 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
        bool in_kthread = !current->mm;
        unsigned long notifier_seq;
        int pinned, ret, i;
-       bool read_only = vma->pte_flags & XE_PTE_READ_ONLY;
+       bool read_only = vma->pte_flags & XE_PTE_FLAG_READ_ONLY;
 
        lockdep_assert_held(&vm->lock);
        XE_BUG_ON(!xe_vma_is_userptr(vma));
@@ -869,7 +869,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
        vma->start = start;
        vma->end = end;
        if (read_only)
-               vma->pte_flags = XE_PTE_READ_ONLY;
+               vma->pte_flags = XE_PTE_FLAG_READ_ONLY;
 
        if (tile_mask) {
                vma->tile_mask = tile_mask;
@@ -923,7 +923,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
 {
        struct xe_vm *vm = vma->vm;
        struct xe_device *xe = vm->xe;
-       bool read_only = vma->pte_flags & XE_PTE_READ_ONLY;
+       bool read_only = vma->pte_flags & XE_PTE_FLAG_READ_ONLY;
 
        if (xe_vma_is_userptr(vma)) {
                if (vma->userptr.sg) {
@@ -2643,7 +2643,8 @@ static struct xe_vma *vm_unbind_lookup_vmas(struct xe_vm *vm,
                                          first->userptr.ptr,
                                          first->start,
                                          lookup->start - 1,
-                                         (first->pte_flags & XE_PTE_READ_ONLY),
+                                         (first->pte_flags &
+                                          XE_PTE_FLAG_READ_ONLY),
                                          first->tile_mask);
                if (first->bo)
                        xe_bo_unlock(first->bo, &ww);
@@ -2674,7 +2675,8 @@ static struct xe_vma *vm_unbind_lookup_vmas(struct xe_vm *vm,
                                         last->userptr.ptr + chunk,
                                         last->start + chunk,
                                         last->end,
-                                        (last->pte_flags & XE_PTE_READ_ONLY),
+                                        (last->pte_flags &
+                                         XE_PTE_FLAG_READ_ONLY),
                                         last->tile_mask);
                if (last->bo)
                        xe_bo_unlock(last->bo, &ww);