#include "intel_fb_pin.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
+#include "xe_pm.h"
#include <drm/ttm/ttm_bo.h>
/* TODO: Consider sharing framebuffer mapping?
* embed i915_vma inside intel_framebuffer
*/
- xe_device_mem_access_get(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
ret = mutex_lock_interruptible(&ggtt->lock);
if (ret)
goto out;
out_unlock:
mutex_unlock(&ggtt->lock);
out:
- xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_put(tile_to_xe(ggtt->tile));
return ret;
}
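
Every call site in this series follows the same bracket: take a runtime PM reference that does not wake the device (legal only because the caller guarantees the device is already awake), do the work, and drop the reference on every exit path. A minimal sketch of that shape, with the elided body filled in by assumption rather than taken from the driver:

/* Sketch only: the function name and body are illustrative, not the
 * driver's actual code.
 */
static int ggtt_op_sketch(struct xe_ggtt *ggtt)
{
	struct xe_device *xe = tile_to_xe(ggtt->tile);
	int ret;

	/* Ref without resume: caller must guarantee the device is awake. */
	xe_pm_runtime_get_noresume(xe);

	ret = mutex_lock_interruptible(&ggtt->lock);
	if (ret)
		goto out;

	/* ... GGTT work under the lock ... */

	mutex_unlock(&ggtt->lock);
out:
	xe_pm_runtime_put(xe);	/* balanced on every exit path */
	return ret;
}
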
addr_out_off = PAGE_SIZE;
host_session_id = xe_gsc_create_host_session_id();
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get_noresume(xe);
addr_in_wr_off = xe_gsc_emit_header(xe, &hdcp_message->hdcp_bo->vmap,
addr_in_wr_off, HECI_MEADDRESS_HDCP,
host_session_id, msg_in_len);
msg_out_len);
out:
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return ret;
}
xe_assert(xe, migrate);
trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get_noresume(xe);
if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
/*
if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
ret = -EINVAL;
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
goto out;
}
new_mem, handle_system_ccs);
if (IS_ERR(fence)) {
ret = PTR_ERR(fence);
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
goto out;
}
if (!move_lacks_source) {
dma_fence_put(fence);
}
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
out:
return ret;
xe_assert(xe, !xe_pm_runtime_suspended(xe));
}
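
With the refcount gone, the assertion above is all that remains of the old "access ongoing" bookkeeping: touching device memory is only legal while the device is not runtime suspended. Reconstructed in full (the signature is elided from this excerpt), the helper is believed to reduce to:

void xe_device_assert_mem_access(struct xe_device *xe)
{
	/* Any memory access implies the device is awake. */
	xe_assert(xe, !xe_pm_runtime_suspended(xe));
}
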
-void xe_device_mem_access_get(struct xe_device *xe)
-{
- int ref;
-
- /*
- * This looks racy, but should be fine since the pm_callback_task only
- * transitions from NULL -> current (and back to NULL again), during the
- * runtime_resume() or runtime_suspend() callbacks, for which there can
- * only be a single one running for our device. We only need to prevent
- * recursively calling the runtime_get or runtime_put from those
- * callbacks, as well as preventing triggering any access_ongoing
- * asserts.
- */
- if (xe_pm_read_callback_task(xe) == current)
- return;
-
- xe_pm_runtime_get_noresume(xe);
- ref = atomic_inc_return(&xe->mem_access.ref);
-
- xe_assert(xe, ref != S32_MAX);
-
-}
-
-void xe_device_mem_access_put(struct xe_device *xe)
-{
- int ref;
-
- if (xe_pm_read_callback_task(xe) == current)
- return;
-
- ref = atomic_dec_return(&xe->mem_access.ref);
- xe_pm_runtime_put(xe);
-
- xe_assert(xe, ref >= 0);
-}
-
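
The deleted comment above documents the one non-trivial thing the wrappers did: skip the get/put when called from inside the runtime_resume()/runtime_suspend() callbacks themselves, identified via pm_callback_task, so the PM core is never re-entered. Removing the wrappers only works if that guard survives somewhere; the assumption of this series is that it lives inside the xe_pm_runtime_* helpers, roughly along these lines (a sketch, not necessarily the exact upstream code):

void xe_pm_runtime_get(struct xe_device *xe)
{
	/* Always safe: takes the ref without triggering a resume. */
	pm_runtime_get_noresume(xe->drm.dev);

	/* In our own resume/suspend callback: don't re-enter the PM core. */
	if (xe_pm_read_callback_task(xe) == current)
		return;

	pm_runtime_resume(xe->drm.dev);
}

void xe_pm_runtime_put(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current)
		pm_runtime_put_noidle(xe->drm.dev);
	else
		pm_runtime_put(xe->drm.dev);
}
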
void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
{
struct xe_gt *gt;
return &gt->mmio.fw;
}
-void xe_device_mem_access_get(struct xe_device *xe);
-void xe_device_mem_access_put(struct xe_device *xe);
-
void xe_device_assert_mem_access(struct xe_device *xe);
static inline bool xe_device_in_fault_mode(struct xe_device *xe)
* triggering additional actions when they occur.
*/
struct {
- /** @mem_access.ref: ref count of memory accesses */
- atomic_t ref;
-
/**
* @mem_access.vram_userfault: Encapsulate vram_userfault
* related stuff
return -EINVAL;
/* The migration vm doesn't hold rpm ref */
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get_noresume(xe);
flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);
args->width, hwe, flags,
args->extensions);
- xe_device_mem_access_put(xe); /* now held by engine */
+ xe_pm_runtime_put(xe); /* now held by engine */
xe_vm_put(migrate_vm);
if (IS_ERR(new)) {
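
The two comments in this hunk describe a reference handoff: the migration VM deliberately holds no runtime PM reference of its own, so the creation path takes a temporary one and drops it as soon as the newly created exec queue, which takes its own reference, is responsible for the hardware. The pattern in isolation (create_queue() and struct queue are hypothetical stand-ins, not driver API):

static struct queue *bridge_pm_ref(struct xe_device *xe)
{
	struct queue *q;

	xe_pm_runtime_get_noresume(xe);	/* migration VM holds no ref */
	q = create_queue(xe);		/* queue takes its own PM ref */
	xe_pm_runtime_put(xe);		/* ref now held by the queue */

	return q;
}
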
#include "xe_gt_printk.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_map.h"
+#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_wopcm.h"
if (err)
return err;
- xe_device_mem_access_get(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
mutex_lock(&ggtt->lock);
err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node, bo->size,
alignment, 0, start, end, 0);
if (!err && bo->flags & XE_BO_FLAG_GGTT_INVALIDATE)
xe_ggtt_invalidate(ggtt);
- xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_put(tile_to_xe(ggtt->tile));
return err;
}
void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
bool invalidate)
{
- xe_device_mem_access_get(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
mutex_lock(&ggtt->lock);
xe_ggtt_clear(ggtt, node->start, node->size);
if (invalidate)
xe_ggtt_invalidate(ggtt);
- xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_put(tile_to_xe(ggtt->tile));
}
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
#include "xe_gt_sriov_printk.h"
#include "xe_guc_ct.h"
#include "xe_guc_klv_helpers.h"
+#include "xe_pm.h"
/*
* Return: number of KLVs that were successfully parsed and saved,
{
int err = 0;
- xe_device_mem_access_get(gt_to_xe(gt));
+ xe_pm_runtime_get_noresume(gt_to_xe(gt));
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
if (reset)
err |= pf_reprovision_sample_period(gt);
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
- xe_device_mem_access_put(gt_to_xe(gt));
+ xe_pm_runtime_put(gt_to_xe(gt));
return err ? -ENXIO : 0;
}
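
Worth noting in this hunk: the reprovisioning steps accumulate failures with |= and the result is flattened to a single -ENXIO, so the individual errno values are deliberately discarded. The idiom in isolation (step_a()/step_b() are hypothetical):

static int reprovision_all(struct xe_gt *gt)
{
	int err = 0;

	err |= step_a(gt);	/* each returns 0 or a negative errno */
	err |= step_b(gt);

	return err ? -ENXIO : 0;
}
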
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
+#include "xe_pm.h"
#include "xe_sync_types.h"
#include "xe_trace.h"
#include "xe_vm.h"
/* All other jobs require a VM to be open which has a ref */
if (unlikely(q->flags & EXEC_QUEUE_FLAG_KERNEL))
- xe_device_mem_access_get(job_to_xe(job));
+ xe_pm_runtime_get_noresume(job_to_xe(job));
xe_device_assert_mem_access(job_to_xe(job));
trace_xe_sched_job_create(job);
container_of(ref, struct xe_sched_job, refcount);
if (unlikely(job->q->flags & EXEC_QUEUE_FLAG_KERNEL))
- xe_device_mem_access_put(job_to_xe(job));
+ xe_pm_runtime_put(job_to_xe(job));
xe_exec_queue_put(job->q);
dma_fence_put(job->fence);
drm_sched_job_cleanup(&job->drm);
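
The job hunks pair a get at job creation with a put when the job's refcount drops to zero, but only for EXEC_QUEUE_FLAG_KERNEL queues; as the comment notes, every other job runs under an open VM that already holds a reference. The pairing, reduced to its essentials (hypothetical wrapper names):

/* Sketch: only kernel-queue jobs pin the device themselves. */
static void job_pm_get(struct xe_sched_job *job)
{
	if (unlikely(job->q->flags & EXEC_QUEUE_FLAG_KERNEL))
		xe_pm_runtime_get_noresume(job_to_xe(job));
}

/* Called from the job's release path, mirroring job_pm_get(). */
static void job_pm_put(struct xe_sched_job *job)
{
	if (unlikely(job->q->flags & EXEC_QUEUE_FLAG_KERNEL))
		xe_pm_runtime_put(job_to_xe(job));
}
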
vm->pt_ops = &xelp_pt_ops;
if (!(flags & XE_VM_FLAG_MIGRATION))
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get_noresume(xe);
vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
if (!vm_resv_obj) {
xe_range_fence_tree_fini(&vm->rftree[id]);
kfree(vm);
if (!(flags & XE_VM_FLAG_MIGRATION))
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return ERR_PTR(err);
}
mutex_destroy(&vm->snap_mutex);
if (!(vm->flags & XE_VM_FLAG_MIGRATION))
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
for_each_tile(tile, xe, id)
XE_WARN_ON(vm->pt_root[id]);
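
Finally, the VM hunks close the loop on the migration-VM special case above: an ordinary VM takes a runtime PM reference at creation, releases it on the creation error path, and releases it at destruction, so the device stays awake for as long as any user VM exists. A migration VM skips all three sites, which is exactly why the exec-queue path earlier had to bridge the gap with its own temporary reference. Condensed to just the PM calls (hypothetical wrappers, not driver API):

static void vm_pm_ref_get(struct xe_device *xe, u32 flags)
{
	if (!(flags & XE_VM_FLAG_MIGRATION))
		xe_pm_runtime_get_noresume(xe);	/* held for the VM's lifetime */
}

static void vm_pm_ref_put(struct xe_device *xe, u32 flags)
{
	if (!(flags & XE_VM_FLAG_MIGRATION))
		xe_pm_runtime_put(xe);	/* creation failed or VM destroyed */
}
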