drm/xe: s/lmem/vram/
Author: Matthew Auld <matthew.auld@intel.com>
Wed, 8 Mar 2023 12:30:08 +0000 (12:30 +0000)
Committer: Rodrigo Vivi <rodrigo.vivi@intel.com>
Tue, 19 Dec 2023 23:29:45 +0000 (18:29 -0500)
This seems to be the preferred nomenclature in xe. Currently we are
intermixing vram and lmem, which is confusing.

v2 (Gwan-gyeong Mun & Lucas):
  - Rather apply to the entire driver

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Gwan-gyeong Mun <gwan-gyeong.mun@intel.com>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Acked-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/Kconfig.debug
drivers/gpu/drm/xe/tests/xe_migrate.c
drivers/gpu/drm/xe/xe_bo.c
drivers/gpu/drm/xe/xe_bo.h
drivers/gpu/drm/xe/xe_ggtt.c
drivers/gpu/drm/xe/xe_migrate.c
drivers/gpu/drm/xe/xe_mmio.c
drivers/gpu/drm/xe/xe_module.c
drivers/gpu/drm/xe/xe_module.h
drivers/gpu/drm/xe/xe_pt.c
drivers/gpu/drm/xe/xe_vm.c

index 565be3f6b9b961e303a8e47a227d6a7e62e072aa..93b284cdd0a2e7f148bb9830cf5b3bbc153e7bf8 100644 (file)
@@ -41,7 +41,7 @@ config DRM_XE_DEBUG_VM
          If in doubt, say "N".
 
 config DRM_XE_DEBUG_MEM
-       bool "Enable passing SYS/LMEM addresses to user space"
+       bool "Enable passing SYS/VRAM addresses to user space"
        default n
        help
          Pass object location trough uapi. Intended for extended
index b7e4a126e8b736870212ee208167c7695fc684fc..ac659b94e7f5237bebb3d6972e52b0df40982f3e 100644 (file)
@@ -129,7 +129,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
        }
        dma_fence_put(fence);
 
-       /* Try to copy 0xc0 from sysmem to lmem with 2MB or 64KiB/4KiB pages */
+       /* Try to copy 0xc0 from sysmem to vram with 2MB or 64KiB/4KiB pages */
        xe_map_memset(xe, &sysmem->vmap, 0, 0xc0, sysmem->size);
        xe_map_memset(xe, &bo->vmap, 0, 0xd0, bo->size);
 
index 09b8db6d7ba363b8c11028dd032522bbcb8463f5..cfb79519b6731528ecda3b8a250ff0895af28cd3 100644 (file)
@@ -1299,12 +1299,12 @@ int xe_bo_pin(struct xe_bo *bo)
        if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
            bo->flags & XE_BO_INTERNAL_TEST)) {
                struct ttm_place *place = &(bo->placements[0]);
-               bool lmem;
+               bool vram;
 
                if (mem_type_is_vram(place->mem_type)) {
                        XE_BUG_ON(!(place->flags & TTM_PL_FLAG_CONTIGUOUS));
 
-                       place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE, &lmem) -
+                       place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE, &vram) -
                                       vram_region_io_offset(bo)) >> PAGE_SHIFT;
                        place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
 
@@ -1424,7 +1424,7 @@ bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo)
 }
 
 dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset,
-                     size_t page_size, bool *is_lmem)
+                     size_t page_size, bool *is_vram)
 {
        struct xe_res_cursor cur;
        u64 page;
@@ -1436,9 +1436,9 @@ dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset,
        page = offset >> PAGE_SHIFT;
        offset &= (PAGE_SIZE - 1);
 
-       *is_lmem = xe_bo_is_vram(bo);
+       *is_vram = xe_bo_is_vram(bo);
 
-       if (!*is_lmem && !xe_bo_is_stolen(bo)) {
+       if (!*is_vram && !xe_bo_is_stolen(bo)) {
                XE_BUG_ON(!bo->ttm.ttm);
 
                xe_res_first_sg(xe_bo_get_sg(bo), page << PAGE_SHIFT,
index 8c2cdbe51ab5396440842f8e499fcb31e94b79d3..4350845542bf4ad90b0e1c92d135095ece3186db 100644 (file)
@@ -196,14 +196,14 @@ static inline void xe_bo_unpin_map_no_vm(struct xe_bo *bo)
 
 bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo);
 dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset,
-                     size_t page_size, bool *is_lmem);
+                     size_t page_size, bool *is_vram);
 
 static inline dma_addr_t
 xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
 {
-       bool is_lmem;
+       bool is_vram;
 
-       return xe_bo_addr(bo, 0, page_size, &is_lmem);
+       return xe_bo_addr(bo, 0, page_size, &is_vram);
 }
 
 static inline u32
index d6ebc1d77f4d5ecac805ebf258f71ae561d941d9..99bc9036c7a0d3a82965c1ab03e296a6498e29a3 100644 (file)
@@ -28,12 +28,12 @@ u64 xe_ggtt_pte_encode(struct xe_bo *bo, u64 bo_offset)
 {
        struct xe_device *xe = xe_bo_device(bo);
        u64 pte;
-       bool is_lmem;
+       bool is_vram;
 
-       pte = xe_bo_addr(bo, bo_offset, GEN8_PAGE_SIZE, &is_lmem);
+       pte = xe_bo_addr(bo, bo_offset, GEN8_PAGE_SIZE, &is_vram);
        pte |= GEN8_PAGE_PRESENT;
 
-       if (is_lmem)
+       if (is_vram)
                pte |= GEN12_GGTT_PTE_LM;
 
        /* FIXME: vfunc + pass in caching rules */
index 79aa3508ae3e7cf4bd693358f9784495189d9002..4a9fe1f7128d18c66b1fe3134eade303618d98ae 100644 (file)
@@ -222,15 +222,15 @@ static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
                        level++;
                }
        } else {
-               bool is_lmem;
-               u64 batch_addr = xe_bo_addr(batch, 0, GEN8_PAGE_SIZE, &is_lmem);
+               bool is_vram;
+               u64 batch_addr = xe_bo_addr(batch, 0, GEN8_PAGE_SIZE, &is_vram);
 
                m->batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
 
                if (xe->info.supports_usm) {
                        batch = gt->usm.bb_pool.bo;
                        batch_addr = xe_bo_addr(batch, 0, GEN8_PAGE_SIZE,
-                                               &is_lmem);
+                                               &is_vram);
                        m->usm_batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
                }
        }
@@ -933,12 +933,12 @@ static void write_pgtable(struct xe_gt *gt, struct xe_bb *bb, u64 ppgtt_ofs,
         */
        XE_BUG_ON(update->qwords > 0x1ff);
        if (!ppgtt_ofs) {
-               bool is_lmem;
+               bool is_vram;
 
                ppgtt_ofs = xe_migrate_vram_ofs(xe_bo_addr(update->pt_bo, 0,
                                                           GEN8_PAGE_SIZE,
-                                                          &is_lmem));
-               XE_BUG_ON(!is_lmem);
+                                                          &is_vram));
+               XE_BUG_ON(!is_vram);
        }
 
        do {
index 65b0df9bb5796bcc967a1db2728bb1d9e251d0f5..e5bd4609aaeed93bedc18d3867dd7eb28b358ee2 100644 (file)
@@ -68,7 +68,7 @@ _resize_bar(struct xe_device *xe, int resno, resource_size_t size)
        return 1;
 }
 
-static int xe_resize_lmem_bar(struct xe_device *xe, resource_size_t lmem_size)
+static int xe_resize_vram_bar(struct xe_device *xe, resource_size_t vram_size)
 {
        struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
        struct pci_bus *root = pdev->bus;
@@ -78,31 +78,31 @@ static int xe_resize_lmem_bar(struct xe_device *xe, resource_size_t lmem_size)
        u32 pci_cmd;
        int i;
        int ret;
-       u64 force_lmem_bar_size = xe_force_lmem_bar_size;
+       u64 force_vram_bar_size = xe_force_vram_bar_size;
 
        current_size = roundup_pow_of_two(pci_resource_len(pdev, GEN12_LMEM_BAR));
 
-       if (force_lmem_bar_size) {
+       if (force_vram_bar_size) {
                u32 bar_sizes;
 
-               rebar_size = force_lmem_bar_size * (resource_size_t)SZ_1M;
+               rebar_size = force_vram_bar_size * (resource_size_t)SZ_1M;
                bar_sizes = pci_rebar_get_possible_sizes(pdev, GEN12_LMEM_BAR);
 
                if (rebar_size == current_size)
                        return 0;
 
                if (!(bar_sizes & BIT(pci_rebar_bytes_to_size(rebar_size))) ||
-                   rebar_size >= roundup_pow_of_two(lmem_size)) {
-                       rebar_size = lmem_size;
+                   rebar_size >= roundup_pow_of_two(vram_size)) {
+                       rebar_size = vram_size;
                        drm_info(&xe->drm,
                                 "Given bar size is not within supported size, setting it to default: %llu\n",
-                                (u64)lmem_size >> 20);
+                                (u64)vram_size >> 20);
                }
        } else {
                rebar_size = current_size;
 
-               if (rebar_size != roundup_pow_of_two(lmem_size))
-                       rebar_size = lmem_size;
+               if (rebar_size != roundup_pow_of_two(vram_size))
+                       rebar_size = vram_size;
                else
                        return 0;
        }
@@ -117,7 +117,7 @@ static int xe_resize_lmem_bar(struct xe_device *xe, resource_size_t lmem_size)
        }
 
        if (!root_res) {
-               drm_info(&xe->drm, "Can't resize LMEM BAR - platform support is missing\n");
+               drm_info(&xe->drm, "Can't resize VRAM BAR - platform support is missing\n");
                return -1;
        }
 
@@ -168,7 +168,7 @@ int xe_mmio_total_vram_size(struct xe_device *xe, u64 *vram_size, u64 *usable_si
        if (usable_size) {
                reg = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
                *usable_size = (u64)REG_FIELD_GET(GENMASK(31, 8), reg) * SZ_64K;
-               drm_info(&xe->drm, "lmem_size: 0x%llx usable_size: 0x%llx\n",
+               drm_info(&xe->drm, "vram_size: 0x%llx usable_size: 0x%llx\n",
                         *vram_size, *usable_size);
        }
 
@@ -180,7 +180,7 @@ int xe_mmio_probe_vram(struct xe_device *xe)
        struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
        struct xe_gt *gt;
        u8 id;
-       u64 lmem_size;
+       u64 vram_size;
        u64 original_size;
        u64 current_size;
        u64 usable_size;
@@ -207,29 +207,29 @@ int xe_mmio_probe_vram(struct xe_device *xe)
        gt = xe_device_get_gt(xe, 0);
        original_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
 
-       err = xe_mmio_total_vram_size(xe, &lmem_size, &usable_size);
+       err = xe_mmio_total_vram_size(xe, &vram_size, &usable_size);
        if (err)
                return err;
 
-       resize_result = xe_resize_lmem_bar(xe, lmem_size);
+       resize_result = xe_resize_vram_bar(xe, vram_size);
        current_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
        xe->mem.vram.io_start = pci_resource_start(pdev, GEN12_LMEM_BAR);
 
-       xe->mem.vram.size = min(current_size, lmem_size);
+       xe->mem.vram.size = min(current_size, vram_size);
 
        if (!xe->mem.vram.size)
                return -EIO;
 
        if (resize_result > 0)
-               drm_info(&xe->drm, "Successfully resize LMEM from %lluMiB to %lluMiB\n",
+               drm_info(&xe->drm, "Successfully resize VRAM from %lluMiB to %lluMiB\n",
                         (u64)original_size >> 20,
                         (u64)current_size >> 20);
-       else if (xe->mem.vram.size < lmem_size && !xe_force_lmem_bar_size)
+       else if (xe->mem.vram.size < vram_size && !xe_force_vram_bar_size)
                drm_info(&xe->drm, "Using a reduced BAR size of %lluMiB. Consider enabling 'Resizable BAR' support in your BIOS.\n",
                         (u64)xe->mem.vram.size >> 20);
-       if (xe->mem.vram.size < lmem_size)
+       if (xe->mem.vram.size < vram_size)
                drm_warn(&xe->drm, "Restricting VRAM size to PCI resource size (0x%llx->0x%llx)\n",
-                        lmem_size, (u64)xe->mem.vram.size);
+                        vram_size, (u64)xe->mem.vram.size);
 
        xe->mem.vram.mapping = ioremap_wc(xe->mem.vram.io_start, xe->mem.vram.size);
        xe->mem.vram.size = min_t(u64, xe->mem.vram.size, usable_size);
@@ -360,7 +360,7 @@ int xe_mmio_init(struct xe_device *xe)
         * and we should not continue with driver initialization.
         */
        if (IS_DGFX(xe) && !(xe_mmio_read32(gt, GU_CNTL.reg) & LMEM_INIT)) {
-               drm_err(&xe->drm, "LMEM not initialized by firmware\n");
+               drm_err(&xe->drm, "VRAM not initialized by firmware\n");
                return -ENODEV;
        }
 
index 3f5d03a586968072d777a83039fca384ac6bb963..e8ee7a9b08781b45b4a9bdf14b92c36f9c1cfbb6 100644 (file)
@@ -18,9 +18,9 @@ bool enable_guc = true;
 module_param_named_unsafe(enable_guc, enable_guc, bool, 0444);
 MODULE_PARM_DESC(enable_guc, "Enable GuC submission");
 
-u32 xe_force_lmem_bar_size;
-module_param_named(lmem_bar_size, xe_force_lmem_bar_size, uint, 0600);
-MODULE_PARM_DESC(lmem_bar_size, "Set the lmem bar size(in MiB)");
+u32 xe_force_vram_bar_size;
+module_param_named(vram_bar_size, xe_force_vram_bar_size, uint, 0600);
+MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size(in MiB)");
 
 int xe_guc_log_level = 5;
 module_param_named(guc_log_level, xe_guc_log_level, int, 0600);
index 2c6ee46f55953d2da4f54987ec7dcca41204f369..86916c176382572e603ea8d26f93b0f0cd6e3957 100644 (file)
@@ -8,6 +8,6 @@
 /* Module modprobe variables */
 extern bool enable_guc;
 extern bool enable_display;
-extern u32 xe_force_lmem_bar_size;
+extern u32 xe_force_vram_bar_size;
 extern int xe_guc_log_level;
 extern char *xe_param_force_probe;
index 00d9fff538284febc9c5c08ec7f64518b283acd0..64da9815245597ba17651dbb7bea781f1204fc7e 100644 (file)
@@ -61,12 +61,12 @@ u64 gen8_pde_encode(struct xe_bo *bo, u64 bo_offset,
                    const enum xe_cache_level level)
 {
        u64 pde;
-       bool is_lmem;
+       bool is_vram;
 
-       pde = xe_bo_addr(bo, bo_offset, GEN8_PAGE_SIZE, &is_lmem);
+       pde = xe_bo_addr(bo, bo_offset, GEN8_PAGE_SIZE, &is_vram);
        pde |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
 
-       XE_WARN_ON(IS_DGFX(xe_bo_device(bo)) && !is_lmem);
+       XE_WARN_ON(IS_DGFX(xe_bo_device(bo)) && !is_vram);
 
        /* FIXME: I don't think the PPAT handling is correct for MTL */
 
@@ -79,13 +79,13 @@ u64 gen8_pde_encode(struct xe_bo *bo, u64 bo_offset,
 }
 
 static dma_addr_t vma_addr(struct xe_vma *vma, u64 offset,
-                          size_t page_size, bool *is_lmem)
+                          size_t page_size, bool *is_vram)
 {
        if (xe_vma_is_userptr(vma)) {
                struct xe_res_cursor cur;
                u64 page;
 
-               *is_lmem = false;
+               *is_vram = false;
                page = offset >> PAGE_SHIFT;
                offset &= (PAGE_SIZE - 1);
 
@@ -93,7 +93,7 @@ static dma_addr_t vma_addr(struct xe_vma *vma, u64 offset,
                                &cur);
                return xe_res_dma(&cur) + offset;
        } else {
-               return xe_bo_addr(vma->bo, offset, page_size, is_lmem);
+               return xe_bo_addr(vma->bo, offset, page_size, is_vram);
        }
 }
 
index fcac31f11706f13f767e83ee2df5b88475076e20..a8254a4148f703150d5c93aaf575819bd489459b 100644 (file)
@@ -3379,7 +3379,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
 {
        struct rb_node *node;
-       bool is_lmem;
+       bool is_vram;
        uint64_t addr;
 
        if (!down_read_trylock(&vm->lock)) {
@@ -3387,8 +3387,8 @@ int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
                return 0;
        }
        if (vm->pt_root[gt_id]) {
-               addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, GEN8_PAGE_SIZE, &is_lmem);
-               drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_lmem ? "LMEM" : "SYS");
+               addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, GEN8_PAGE_SIZE, &is_vram);
+               drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_vram ? "VRAM" : "SYS");
        }
 
        for (node = rb_first(&vm->vmas); node; node = rb_next(node)) {
@@ -3401,11 +3401,11 @@ int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
                        xe_res_first_sg(vma->userptr.sg, 0, GEN8_PAGE_SIZE, &cur);
                        addr = xe_res_dma(&cur);
                } else {
-                       addr = xe_bo_addr(vma->bo, 0, GEN8_PAGE_SIZE, &is_lmem);
+                       addr = xe_bo_addr(vma->bo, 0, GEN8_PAGE_SIZE, &is_vram);
                }
                drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
                           vma->start, vma->end, vma->end - vma->start + 1ull,
-                          addr, is_userptr ? "USR" : is_lmem ? "VRAM" : "SYS");
+                          addr, is_userptr ? "USR" : is_vram ? "VRAM" : "SYS");
        }
        up_read(&vm->lock);