#include "xe_vm.h"
#include "xe_vm_types.h"
+static bool xe_svm_range_in_vram(struct xe_svm_range *range)
+{
+ /* Not reliable without notifier lock */
+ return range->base.flags.has_devmem_pages;
+}
+
+static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
+{
+ /* Not reliable without notifier lock */
+ return xe_svm_range_in_vram(range) && range->tile_present;
+}
+
static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
{
 return container_of(gpusvm, struct xe_vm, svm.gpusvm);
}

static unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
 return drm_gpusvm_range_size(&range->base);
}
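+
+/*
+ * range_debug() - emit a one-line vm_dbg() trace for an SVM range operation:
+ * asid, gpusvm pointer, best-effort vram presence/binding flags (see the
+ * helpers above), notifier seqno, and the range start/end/size.
+ */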
+#define range_debug(r__, operation__) \
+ vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
+ "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
+ "start=0x%014lx, end=0x%014lx, size=%lu", \
+ (operation__), range_to_vm(&(r__)->base)->usm.asid, \
+ (r__)->base.gpusvm, \
+ xe_svm_range_in_vram((r__)) ? 1 : 0, \
+ xe_svm_range_has_vram_binding((r__)) ? 1 : 0, \
+ (r__)->base.notifier_seq, \
+ xe_svm_range_start((r__)), xe_svm_range_end((r__)), \
+ xe_svm_range_size((r__)))
+
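+/**
+ * xe_svm_range_debug() - SVM range debug
+ * @range: SVM range
+ * @operation: Operation string printed in the debug output
+ */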
+void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
+{
+ range_debug(range, operation);
+}
+
static void *xe_svm_devm_owner(struct xe_device *xe)
{
return xe;
{
struct xe_device *xe = vm->xe;
+ range_debug(range, "GARBAGE COLLECTOR ADD");
+
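+ /* Mark the range unmapped, then queue it under the garbage collector lock */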
drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
spin_lock(&vm->svm.garbage_collector.lock);
xe_svm_assert_in_notifier(vm);
+ range_debug(range, "NOTIFIER");
+
/* Skip if already unmapped or if no binding exists */
if (range->base.flags.unmapped || !range->tile_present)
return 0;
+ range_debug(range, "NOTIFIER - EXECUTE");
+
/* Adjust invalidation to range boundaries */
*adj_start = min(xe_svm_range_start(range), mmu_range->start);
*adj_end = max(xe_svm_range_end(range), mmu_range->end);
xe_svm_assert_in_notifier(vm);
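+ /* Log the incoming MMU notifier invalidation range before it is clamped */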
+ vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
+ "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
+ vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
+ mmu_range->start, mmu_range->end, mmu_range->event);
+
/* Adjust invalidation to notifier boundaries */
adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
{
struct dma_fence *fence;
+ range_debug(range, "GARBAGE COLLECTOR");
+
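+ /* Tear down the GPU binding for the collected range under the VM lock */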
xe_vm_lock(vm, false);
fence = xe_vm_range_unbind(vm, range);
xe_vm_unlock(vm);
int incr = (match && last) ? 1 : 0;
if (vram_addr != XE_VRAM_ADDR_INVALID) {
- if (sram)
+ if (sram) {
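+ /* Trace this contiguous VRAM -> system copy chunk before launching it */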
+ vm_dbg(&tile->xe->drm,
+ "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
+ vram_addr, (u64)dma_addr[pos], i - pos + incr);
__fence = xe_migrate_from_vram(tile->migrate,
i - pos + incr,
vram_addr,
dma_addr + pos);
- else
+ } else {
+ vm_dbg(&tile->xe->drm,
+ "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
+ (u64)dma_addr[pos], vram_addr, i - pos + incr);
__fence = xe_migrate_to_vram(tile->migrate,
i - pos + incr,
dma_addr + pos,
vram_addr);
+ }
if (IS_ERR(__fence)) {
err = PTR_ERR(__fence);
goto err_out;
/* Extra mismatched device page, copy it */
if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
- if (sram)
+ if (sram) {
+ vm_dbg(&tile->xe->drm,
+ "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
+ vram_addr, (u64)dma_addr[pos], 1);
__fence = xe_migrate_from_vram(tile->migrate, 1,
vram_addr,
dma_addr + pos);
- else
+ } else {
+ vm_dbg(&tile->xe->drm,
+ "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
+ (u64)dma_addr[pos], vram_addr, 1);
__fence = xe_migrate_to_vram(tile->migrate, 1,
dma_addr + pos,
vram_addr);
+ }
if (IS_ERR(__fence)) {
err = PTR_ERR(__fence);
goto err_out;
ktime_t end = 0;
int err;
+ range_debug(range, "ALLOCATE VRAM");
+
if (!mmget_not_zero(mm))
return -EFAULT;
mmap_read_lock(mm);
if (xe_svm_range_is_valid(range, tile))
return 0;
+ range_debug(range, "PAGE FAULT");
+
/* XXX: Add migration policy, for now migrate range once */
if (!range->skip_migrate && range->base.flags.migrate_devmem &&
xe_svm_range_size(range) >= SZ_64K) {
}
}
+ range_debug(range, "GET PAGES");
err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
/* Corner where CPU mappings have changed */
if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
- if (err == -EOPNOTSUPP)
+ if (err == -EOPNOTSUPP) {
+ range_debug(range, "PAGE FAULT - EVICT PAGES");
drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
+ }
drm_dbg(&vm->xe->drm,
"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+ range_debug(range, "PAGE FAULT - RETRY PAGES");
goto retry;
}
- if (err)
+ if (err) {
+ range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
goto err_out;
+ }
+
+ range_debug(range, "PAGE FAULT - BIND");
retry_bind:
drm_exec_init(&exec, 0, 0);
if (IS_ERR(fence)) {
drm_exec_fini(&exec);
err = PTR_ERR(fence);
- if (err == -EAGAIN)
+ if (err == -EAGAIN) {
+ range_debug(range, "PAGE FAULT - RETRY BIND");
goto retry;
+ }
if (xe_vm_validate_should_retry(&exec, err, &end))
goto retry_bind;
goto err_out;