* @immediate: immediate submission in a page fault
* @unlocked: unlocked invalidation during MM callback
* @flush_tlb: trigger tlb invalidation after update completed
+ * @allow_override: change MTYPE for local NUMA nodes
* @resv: fences we need to sync to
* @start: start of mapped range
* @last: last mapped entry
* 0 for success, negative error code for failure.
*/
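To illustrate the widened signature, a minimal caller sketch follows (example_map and its arguments are hypothetical, not part of the patch; locking, fencing and error handling are elided). It maps [start, last] with a TLB flush and lets the MTYPE override apply only to cacheable memory:

static int example_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct dma_resv *resv, uint64_t start, uint64_t last,
		       uint64_t flags, bool uncached)
{
	/* allow_override = !uncached: uncached BOs must keep their MTYPE */
	return amdgpu_vm_update_range(adev, vm, false, false, true,
				      !uncached, resv, start, last,
				      flags, 0, 0, NULL, NULL, NULL);
}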
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- bool immediate, bool unlocked, bool flush_tlb,
+ bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
struct dma_resv *resv, uint64_t start, uint64_t last,
uint64_t flags, uint64_t offset, uint64_t vram_base,
struct ttm_resource *res, dma_addr_t *pages_addr,
params.immediate = immediate;
params.pages_addr = pages_addr;
params.unlocked = unlocked;
+ params.allow_override = allow_override;
/* Implicitly sync to command submissions in the same VM before
* unmapping. Sync to moving fences before mapping.
struct ttm_resource *mem;
struct dma_fence **last_update;
bool flush_tlb = clear;
+ bool uncached;
struct dma_resv *resv;
uint64_t vram_base;
uint64_t flags;
bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
vram_base = bo_adev->vm_manager.vram_base_offset;
+ uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
} else {
flags = 0x0;
vram_base = 0;
+ uncached = false;
}
if (clear || (bo && bo->tbo.base.resv ==
trace_amdgpu_vm_bo_update(mapping);
r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
- resv, mapping->start, mapping->last,
+ !uncached, resv, mapping->start, mapping->last,
update_flags, mapping->offset,
vram_base, mem, pages_addr,
last_update);
mapping->start < AMDGPU_GMC_HOLE_START)
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
- r = amdgpu_vm_update_range(adev, vm, false, false, true, resv,
- mapping->start, mapping->last,
+ r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
+ resv, mapping->start, mapping->last,
init_pte_value, 0, 0, NULL, NULL,
&f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
goto error_unlock;
}
- r = amdgpu_vm_update_range(adev, vm, true, false, false, NULL, addr,
- addr, flags, value, 0, NULL, NULL, NULL);
+ r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
+ NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
if (r)
goto error_unlock;
* @table_freed: return true if page table is freed when updating
*/
bool table_freed;
+
+ /**
+ * @allow_override: true for memory that is not uncached; allows MTYPE
+ * to be overridden for NUMA-local memory.
+ */
+ bool allow_override;
};
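For orientation, the call sites touched by this change choose allow_override as follows; this is a summary of the hunks in this series, not code to apply:

/* allow_override at the call sites in this series:
 *   amdgpu_vm_bo_update()            -> !uncached (cacheable BOs only)
 *   PTE clear, unmap and fault paths -> false (no backing BO to inspect)
 *   SVM range mapping                -> true (NUMA locality checked per page)
 */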
struct amdgpu_vm_update_funcs {
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
struct amdgpu_vm *vm, struct amdgpu_bo *bo);
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- bool immediate, bool unlocked, bool flush_tlb,
+ bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
struct dma_resv *resv, uint64_t start, uint64_t last,
uint64_t flags, uint64_t offset, uint64_t vram_base,
struct ttm_resource *res, dma_addr_t *pages_addr,
*/
if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) &&
adev->gmc.gmc_funcs->override_vm_pte_flags &&
- num_possible_nodes() > 1 && !params->pages_addr)
+ num_possible_nodes() > 1 && !params->pages_addr && params->allow_override)
amdgpu_gmc_override_vm_pte_flags(adev, params->vm, addr, &flags);
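Restated as a hypothetical predicate (example_can_override is illustrative; the patch keeps the check inline as above): the override is only attempted for system pages on multi-node APUs, for linear mappings without a pages_addr array, and only when the caller opted in.

static bool example_can_override(struct amdgpu_device *adev,
				 struct amdgpu_vm_update_params *params,
				 uint64_t flags)
{
	return (flags & AMDGPU_PTE_SYSTEM) &&
	       (adev->flags & AMD_IS_APU) &&
	       adev->gmc.gmc_funcs->override_vm_pte_flags &&
	       num_possible_nodes() > 1 &&
	       !params->pages_addr &&
	       params->allow_override;
}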
params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
return;
}
- /* Only override mappings with MTYPE_NC, which is the safe default for
- * cacheable memory.
+ /* MTYPE_NC is the safe default for cacheable memory and can be
+ * overridden. MTYPE_UC will be present if the memory is
+ * extended-coherent and can also be overridden.
*/
if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
- AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)) {
- dev_dbg_ratelimited(adev->dev, "MTYPE is not NC\n");
+ AMDGPU_PTE_MTYPE_VG10(MTYPE_NC) &&
+ (*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
+ AMDGPU_PTE_MTYPE_VG10(MTYPE_UC)) {
+ dev_dbg_ratelimited(adev->dev, "MTYPE is not NC or UC\n");
return;
}
vm->mem_id, local_node, nid);
if (nid == local_node) {
uint64_t old_flags = *flags;
- unsigned int mtype_local = MTYPE_RW;
+ if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) ==
+ AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)) {
+ unsigned int mtype_local = MTYPE_RW;
- if (amdgpu_mtype_local == 1)
- mtype_local = MTYPE_NC;
- else if (amdgpu_mtype_local == 2)
- mtype_local = MTYPE_CC;
+ if (amdgpu_mtype_local == 1)
+ mtype_local = MTYPE_NC;
+ else if (amdgpu_mtype_local == 2)
+ mtype_local = MTYPE_CC;
+
+ *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
+ AMDGPU_PTE_MTYPE_VG10(mtype_local);
+ } else {
+ /* MTYPE_UC case */
+ *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
+ AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
+ }
- *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
- AMDGPU_PTE_MTYPE_VG10(mtype_local);
dev_dbg_ratelimited(adev->dev, "flags updated from %llx to %llx\n",
old_flags, *flags);
}
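Putting the hunks together, the per-page outcome can be summarized as follows (a sketch derived from the code above; amdgpu_mtype_local is the module parameter referenced there):

/* Outcome of the override for pages on the local NUMA node:
 *   incoming MTYPE_NC -> mtype_local: MTYPE_RW by default,
 *                        MTYPE_NC if amdgpu_mtype_local == 1,
 *                        MTYPE_CC if amdgpu_mtype_local == 2
 *   incoming MTYPE_UC -> MTYPE_CC (extended-coherent memory)
 *   any other MTYPE   -> left untouched (early return above)
 * Pages on remote nodes keep the MTYPE chosen at map time.
 */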
if (num_possible_nodes() <= 1)
mapping_flags |= mtype_local;
else
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
+ mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
/* system memory accessed by the dGPU */
} else {
mapping_flags |= AMDGPU_VM_MTYPE_UC;
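This seeding pairs with the override above: on multi-node systems the PTE starts with a conservative MTYPE and is promoted per page once its NUMA locality is known. A comment-form sketch of the two steps (the enclosing function is assumed to be the KFD SVM PTE-flag helper, which is not named in these hunks):

/* Two-step flow on NUMA systems (sketch, not patch code):
 *   1. map time: seed ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC
 *   2. amdgpu_gmc_override_vm_pte_flags(): promote NUMA-local pages to
 *      mtype_local (NC case) or MTYPE_CC (UC case)
 */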
pr_debug("[0x%llx 0x%llx]\n", start, last);
- return amdgpu_vm_update_range(adev, vm, false, true, true, NULL, start,
+ return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, start,
last, init_pte_value, 0, 0, NULL, NULL,
fence);
}
* different memory partitions based on fpfn/lpfn, we should use the
* same vm_manager.vram_base_offset regardless of the memory partition.
*/
- r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
- last_start, prange->start + i,
+ r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, true,
+ NULL, last_start, prange->start + i,
pte_flags,
(last_start - prange->start) << PAGE_SHIFT,
bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
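As a follow-up note, the SVM path opts in with allow_override=true unconditionally; the gate shown earlier still restricts the override to system pages on multi-node APUs. A small worked example for the offset argument, assuming PAGE_SHIFT == 12 and hypothetical page numbers:

/* If prange->start == 0x100 and last_start == 0x108 (in pages), the offset
 * passed is (0x108 - 0x100) << 12 == 0x8000 bytes into the range.
 */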