If in doubt, say "N".
config DRM_XE_DEBUG_MEM
- bool "Enable passing SYS/LMEM addresses to user space"
+ bool "Enable passing SYS/VRAM addresses to user space"
default n
help
Pass object location through uapi. Intended for extended
}
dma_fence_put(fence);
- /* Try to copy 0xc0 from sysmem to lmem with 2MB or 64KiB/4KiB pages */
+ /* Try to copy 0xc0 from sysmem to vram with 2MB or 64KiB/4KiB pages */
xe_map_memset(xe, &sysmem->vmap, 0, 0xc0, sysmem->size);
xe_map_memset(xe, &bo->vmap, 0, 0xd0, bo->size);
if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
bo->flags & XE_BO_INTERNAL_TEST)) {
struct ttm_place *place = &(bo->placements[0]);
- bool lmem;
+ bool vram;
if (mem_type_is_vram(place->mem_type)) {
XE_BUG_ON(!(place->flags & TTM_PL_FLAG_CONTIGUOUS));
- place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE, &lmem) -
+ place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE, &vram) -
vram_region_io_offset(bo)) >> PAGE_SHIFT;
place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
}
dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset,
- size_t page_size, bool *is_lmem)
+ size_t page_size, bool *is_vram)
{
struct xe_res_cursor cur;
u64 page;
page = offset >> PAGE_SHIFT;
offset &= (PAGE_SIZE - 1);
- *is_lmem = xe_bo_is_vram(bo);
+ *is_vram = xe_bo_is_vram(bo);
- if (!*is_lmem && !xe_bo_is_stolen(bo)) {
+ if (!*is_vram && !xe_bo_is_stolen(bo)) {
XE_BUG_ON(!bo->ttm.ttm);
xe_res_first_sg(xe_bo_get_sg(bo), page << PAGE_SHIFT,
bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo);
dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset,
- size_t page_size, bool *is_lmem);
+ size_t page_size, bool *is_vram);
static inline dma_addr_t
xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
{
- bool is_lmem;
+ bool is_vram;
- return xe_bo_addr(bo, 0, page_size, &is_lmem);
+ return xe_bo_addr(bo, 0, page_size, &is_vram);
}
static inline u32
{
struct xe_device *xe = xe_bo_device(bo);
u64 pte;
- bool is_lmem;
+ bool is_vram;
- pte = xe_bo_addr(bo, bo_offset, GEN8_PAGE_SIZE, &is_lmem);
+ pte = xe_bo_addr(bo, bo_offset, GEN8_PAGE_SIZE, &is_vram);
pte |= GEN8_PAGE_PRESENT;
- if (is_lmem)
+ if (is_vram)
pte |= GEN12_GGTT_PTE_LM;
/* FIXME: vfunc + pass in caching rules */
level++;
}
} else {
- bool is_lmem;
- u64 batch_addr = xe_bo_addr(batch, 0, GEN8_PAGE_SIZE, &is_lmem);
+ bool is_vram;
+ u64 batch_addr = xe_bo_addr(batch, 0, GEN8_PAGE_SIZE, &is_vram);
m->batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
if (xe->info.supports_usm) {
batch = gt->usm.bb_pool.bo;
batch_addr = xe_bo_addr(batch, 0, GEN8_PAGE_SIZE,
- &is_lmem);
+ &is_vram);
m->usm_batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
}
}
*/
XE_BUG_ON(update->qwords > 0x1ff);
if (!ppgtt_ofs) {
- bool is_lmem;
+ bool is_vram;
ppgtt_ofs = xe_migrate_vram_ofs(xe_bo_addr(update->pt_bo, 0,
GEN8_PAGE_SIZE,
- &is_lmem));
- XE_BUG_ON(!is_lmem);
+ &is_vram));
+ XE_BUG_ON(!is_vram);
}
do {
return 1;
}
-static int xe_resize_lmem_bar(struct xe_device *xe, resource_size_t lmem_size)
+static int xe_resize_vram_bar(struct xe_device *xe, resource_size_t vram_size)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
struct pci_bus *root = pdev->bus;
u32 pci_cmd;
int i;
int ret;
- u64 force_lmem_bar_size = xe_force_lmem_bar_size;
+ u64 force_vram_bar_size = xe_force_vram_bar_size;
current_size = roundup_pow_of_two(pci_resource_len(pdev, GEN12_LMEM_BAR));
- if (force_lmem_bar_size) {
+ if (force_vram_bar_size) {
u32 bar_sizes;
- rebar_size = force_lmem_bar_size * (resource_size_t)SZ_1M;
+ rebar_size = force_vram_bar_size * (resource_size_t)SZ_1M;
bar_sizes = pci_rebar_get_possible_sizes(pdev, GEN12_LMEM_BAR);
if (rebar_size == current_size)
return 0;
if (!(bar_sizes & BIT(pci_rebar_bytes_to_size(rebar_size))) ||
- rebar_size >= roundup_pow_of_two(lmem_size)) {
- rebar_size = lmem_size;
+ rebar_size >= roundup_pow_of_two(vram_size)) {
+ rebar_size = vram_size;
drm_info(&xe->drm,
"Given bar size is not within supported size, setting it to default: %llu\n",
- (u64)lmem_size >> 20);
+ (u64)vram_size >> 20);
}
} else {
rebar_size = current_size;
- if (rebar_size != roundup_pow_of_two(lmem_size))
- rebar_size = lmem_size;
+ if (rebar_size != roundup_pow_of_two(vram_size))
+ rebar_size = vram_size;
else
return 0;
}
}
if (!root_res) {
- drm_info(&xe->drm, "Can't resize LMEM BAR - platform support is missing\n");
+ drm_info(&xe->drm, "Can't resize VRAM BAR - platform support is missing\n");
return -1;
}
if (usable_size) {
reg = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
*usable_size = (u64)REG_FIELD_GET(GENMASK(31, 8), reg) * SZ_64K;
- drm_info(&xe->drm, "lmem_size: 0x%llx usable_size: 0x%llx\n",
+ drm_info(&xe->drm, "vram_size: 0x%llx usable_size: 0x%llx\n",
*vram_size, *usable_size);
}
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
struct xe_gt *gt;
u8 id;
- u64 lmem_size;
+ u64 vram_size;
u64 original_size;
u64 current_size;
u64 usable_size;
gt = xe_device_get_gt(xe, 0);
original_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
- err = xe_mmio_total_vram_size(xe, &lmem_size, &usable_size);
+ err = xe_mmio_total_vram_size(xe, &vram_size, &usable_size);
if (err)
return err;
- resize_result = xe_resize_lmem_bar(xe, lmem_size);
+ resize_result = xe_resize_vram_bar(xe, vram_size);
current_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
xe->mem.vram.io_start = pci_resource_start(pdev, GEN12_LMEM_BAR);
- xe->mem.vram.size = min(current_size, lmem_size);
+ xe->mem.vram.size = min(current_size, vram_size);
if (!xe->mem.vram.size)
return -EIO;
if (resize_result > 0)
- drm_info(&xe->drm, "Successfully resize LMEM from %lluMiB to %lluMiB\n",
+ drm_info(&xe->drm, "Successfully resized VRAM from %lluMiB to %lluMiB\n",
(u64)original_size >> 20,
(u64)current_size >> 20);
- else if (xe->mem.vram.size < lmem_size && !xe_force_lmem_bar_size)
+ else if (xe->mem.vram.size < vram_size && !xe_force_vram_bar_size)
drm_info(&xe->drm, "Using a reduced BAR size of %lluMiB. Consider enabling 'Resizable BAR' support in your BIOS.\n",
(u64)xe->mem.vram.size >> 20);
- if (xe->mem.vram.size < lmem_size)
+ if (xe->mem.vram.size < vram_size)
drm_warn(&xe->drm, "Restricting VRAM size to PCI resource size (0x%llx->0x%llx)\n",
- lmem_size, (u64)xe->mem.vram.size);
+ vram_size, (u64)xe->mem.vram.size);
xe->mem.vram.mapping = ioremap_wc(xe->mem.vram.io_start, xe->mem.vram.size);
xe->mem.vram.size = min_t(u64, xe->mem.vram.size, usable_size);
* and we should not continue with driver initialization.
*/
if (IS_DGFX(xe) && !(xe_mmio_read32(gt, GU_CNTL.reg) & LMEM_INIT)) {
- drm_err(&xe->drm, "LMEM not initialized by firmware\n");
+ drm_err(&xe->drm, "VRAM not initialized by firmware\n");
return -ENODEV;
}
module_param_named_unsafe(enable_guc, enable_guc, bool, 0444);
MODULE_PARM_DESC(enable_guc, "Enable GuC submission");
-u32 xe_force_lmem_bar_size;
-module_param_named(lmem_bar_size, xe_force_lmem_bar_size, uint, 0600);
-MODULE_PARM_DESC(lmem_bar_size, "Set the lmem bar size(in MiB)");
+u32 xe_force_vram_bar_size;
+module_param_named(vram_bar_size, xe_force_vram_bar_size, uint, 0600);
+MODULE_PARM_DESC(vram_bar_size, "Set the VRAM bar size (in MiB)");
int xe_guc_log_level = 5;
module_param_named(guc_log_level, xe_guc_log_level, int, 0600);
/* Module modprobe variables */
extern bool enable_guc;
extern bool enable_display;
-extern u32 xe_force_lmem_bar_size;
+extern u32 xe_force_vram_bar_size;
extern int xe_guc_log_level;
extern char *xe_param_force_probe;
const enum xe_cache_level level)
{
u64 pde;
- bool is_lmem;
+ bool is_vram;
- pde = xe_bo_addr(bo, bo_offset, GEN8_PAGE_SIZE, &is_lmem);
+ pde = xe_bo_addr(bo, bo_offset, GEN8_PAGE_SIZE, &is_vram);
pde |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
- XE_WARN_ON(IS_DGFX(xe_bo_device(bo)) && !is_lmem);
+ XE_WARN_ON(IS_DGFX(xe_bo_device(bo)) && !is_vram);
/* FIXME: I don't think the PPAT handling is correct for MTL */
}
static dma_addr_t vma_addr(struct xe_vma *vma, u64 offset,
- size_t page_size, bool *is_lmem)
+ size_t page_size, bool *is_vram)
{
if (xe_vma_is_userptr(vma)) {
struct xe_res_cursor cur;
u64 page;
- *is_lmem = false;
+ *is_vram = false;
page = offset >> PAGE_SHIFT;
offset &= (PAGE_SIZE - 1);
&cur);
return xe_res_dma(&cur) + offset;
} else {
- return xe_bo_addr(vma->bo, offset, page_size, is_lmem);
+ return xe_bo_addr(vma->bo, offset, page_size, is_vram);
}
}
int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
{
struct rb_node *node;
- bool is_lmem;
+ bool is_vram;
uint64_t addr;
if (!down_read_trylock(&vm->lock)) {
return 0;
}
if (vm->pt_root[gt_id]) {
- addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, GEN8_PAGE_SIZE, &is_lmem);
- drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_lmem ? "LMEM" : "SYS");
+ addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, GEN8_PAGE_SIZE, &is_vram);
+ drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_vram ? "VRAM" : "SYS");
}
for (node = rb_first(&vm->vmas); node; node = rb_next(node)) {
xe_res_first_sg(vma->userptr.sg, 0, GEN8_PAGE_SIZE, &cur);
addr = xe_res_dma(&cur);
} else {
- addr = xe_bo_addr(vma->bo, 0, GEN8_PAGE_SIZE, &is_lmem);
+ addr = xe_bo_addr(vma->bo, 0, GEN8_PAGE_SIZE, &is_vram);
}
drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
vma->start, vma->end, vma->end - vma->start + 1ull,
- addr, is_userptr ? "USR" : is_lmem ? "VRAM" : "SYS");
+ addr, is_userptr ? "USR" : is_vram ? "VRAM" : "SYS");
}
up_read(&vm->lock);