// SPDX-License-Identifier: MIT
 * Copyright © 2022 Intel Corporation

#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>

#include "display/xe_display.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
 * DOC: Xe Power Management
 *
 * Xe PM implements the main routines for both system level suspend states and
 * for the opportunistic runtime suspend states.
 *
 * System Level Suspend (S-States) - In general this is OS initiated suspend
 * driven by ACPI for achieving S0ix (a.k.a. S2idle, freeze), S3 (suspend to ram),
 * S4 (disk). The main functions here are `xe_pm_suspend` and `xe_pm_resume`. They
 * are the main entry points for suspending to and resuming from these states.
 *
 * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low power
 * state D3, controlled by the PCI subsystem and ACPI, with help from the
 * runtime_pm infrastructure.
 * PCI D3 is special and can mean D3hot, where Vcc power stays on to keep memory
 * alive and allow a quicker, low latency resume, or D3cold, where Vcc power is
 * off for better power savings.
 * The Vcc power of the PCI hierarchy can only be controlled at the PCI root port
 * level, while the device driver can be behind multiple bridges/switches and
 * paired with other devices. For this reason, the PCI subsystem cannot perform
 * the transition towards D3Cold. The lowest runtime PM state possible from the
 * PCI subsystem is D3hot. Then, if all the paired devices in the same root port
 * are in D3hot, ACPI will assist here and run its own methods (_PR3 and _OFF)
 * to perform the transition from D3hot to D3cold. Xe may disallow this
 * transition by calling pci_d3cold_disable(root_pdev) before going to runtime
 * suspend. That decision is based on runtime conditions such as VRAM usage, for
 * instance to guarantee a quick, low latency resume.
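 *
 * As an illustrative sketch only (not the exact flow used by this driver;
 * vram_is_in_use() is a hypothetical helper and pdev stands for the device's
 * struct pci_dev), the D3cold decision could be expressed as:
 *
 *	struct pci_dev *root_pdev = pcie_find_root_port(pdev);
 *
 *	if (vram_is_in_use(xe))
 *		pci_d3cold_disable(root_pdev);
 *	else
 *		pci_d3cold_enable(root_pdev);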
 *
 * Runtime PM - This infrastructure provided by the Linux kernel allows the
 * device drivers to indicate when they can be runtime suspended, so the device
 * can be put in D3 (if supported), or allow deeper package sleep states
 * (PC-states), and/or other low level power states. The Xe PM component provides
 * `xe_pm_runtime_suspend` and `xe_pm_runtime_resume` functions that the PCI
 * subsystem will call before the transition to/from runtime suspend.
 *
 * Also, Xe PM provides get and put functions that the Xe driver will use to
 * indicate activity. In order to avoid locking complications with the memory
 * management, whenever possible, these get and put functions need to be called
 * from the higher/outer levels.
 * The main cases that need to be protected from the outer levels are: IOCTL,
 * sysfs, debugfs, dma-buf sharing, GPU execution.
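 *
 * A minimal sketch of such outer-level protection, using a hypothetical ioctl
 * handler xe_foo_ioctl() and inner helper xe_foo_do_work() (neither exists in
 * this driver; xe_pm_runtime_get_ioctl() and xe_pm_runtime_put() are the real
 * entry points defined below):
 *
 *	static int xe_foo_ioctl(struct drm_device *dev, void *data,
 *				struct drm_file *file)
 *	{
 *		struct xe_device *xe = to_xe_device(dev);
 *		int ret;
 *
 *		ret = xe_pm_runtime_get_ioctl(xe);
 *		if (ret >= 0)
 *			ret = xe_foo_do_work(xe, data);
 *		xe_pm_runtime_put(xe);
 *
 *		return ret;
 *	}
 *
 * Note that the reference is put back even if the get failed, since
 * xe_pm_runtime_get_ioctl() always bumps the usage counter.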
 *
 * This component is not responsible for GT idleness (RC6) nor GT frequency
 * management.
static struct lockdep_map xe_pm_runtime_d3cold_map = {
	.name = "xe_rpm_d3cold_map"

static struct lockdep_map xe_pm_runtime_nod3cold_map = {
	.name = "xe_rpm_nod3cold_map"

 * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
 *
 * Return: true if it is safe to runtime resume from reclaim context.
bool xe_rpm_reclaim_safe(const struct xe_device *xe)
	return !xe->d3cold.capable;

static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
	lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);

static void xe_rpm_lockmap_release(const struct xe_device *xe)
	lock_map_release(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Return: 0 on success
int xe_pm_suspend(struct xe_device *xe)
	drm_dbg(&xe->drm, "Suspending device\n");
	trace_xe_pm_suspend(xe, __builtin_return_address(0));
	err = xe_pxp_pm_suspend(xe->pxp);
	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);
	xe_display_pm_suspend(xe);
	/* FIXME: Super racy... */
	err = xe_bo_evict_all(xe);
	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
	xe_display_pm_suspend_late(xe);
	drm_dbg(&xe->drm, "Device suspended\n");
	xe_display_pm_resume(xe);
	xe_pxp_pm_resume(xe->pxp);
	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Return: 0 on success
int xe_pm_resume(struct xe_device *xe)
	struct xe_tile *tile;
	drm_dbg(&xe->drm, "Resuming device\n");
	trace_xe_pm_resume(xe, __builtin_return_address(0));
	for_each_tile(tile, xe, id)
		xe_wa_apply_tile_workarounds(tile);
	err = xe_pcode_ready(xe, true);
	xe_display_pm_resume_early(xe);
	 * This only restores pinned memory, which is the memory required for the
	 * GT(s) to resume.
	err = xe_bo_restore_early(xe);
	for_each_gt(gt, xe, id)
	xe_display_pm_resume(xe);
	err = xe_bo_restore_late(xe);
	xe_pxp_pm_resume(xe->pxp);
	drm_dbg(&xe->drm, "Device resumed\n");
	drm_dbg(&xe->drm, "Device resume failed %d\n", err);
static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *root_pdev;
	root_pdev = pcie_find_root_port(pdev);
	/* D3Cold requires PME capability */
	if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
		drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
	/* D3Cold requires _PR3 power resource */
	if (!pci_pr3_present(root_pdev)) {
		drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
static void xe_pm_runtime_init(struct xe_device *xe)
	struct device *dev = xe->drm.dev;
	 * Disable the system suspend direct complete optimization.
	 * We need to ensure that the regular device suspend/resume functions
	 * are called since our runtime_pm cannot guarantee local memory
	 * eviction for d3cold.
	 * TODO: Check HDA audio dependencies claimed by i915, and then extend
	 * this option to integrated graphics as well.
	dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
int xe_pm_init_early(struct xe_device *xe)
	INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);
	err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
	err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
	xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */

static u32 vram_threshold_value(struct xe_device *xe)
	/* FIXME: D3Cold temporarily disabled by default on BMG */
	if (xe->info.platform == XE_BATTLEMAGE)
		return 0;
	return DEFAULT_VRAM_THRESHOLD;
static int xe_pm_notifier_callback(struct notifier_block *nb,
				   unsigned long action, void *data)
	struct xe_device *xe = container_of(nb, struct xe_device, pm_notifier);
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		xe_pm_runtime_get(xe);
		err = xe_bo_evict_all_user(xe);
			drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err);
			xe_pm_runtime_put(xe);
		err = xe_bo_notifier_prepare_all_pinned(xe);
			drm_dbg(&xe->drm, "Notifier prepare pin failed (%d)\n", err);
			xe_pm_runtime_put(xe);
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		xe_bo_notifier_unprepare_all_pinned(xe);
		xe_pm_runtime_put(xe);
 * xe_pm_init - Initialize Xe Power Management
 * @xe: xe device instance
 *
 * This component is responsible for System and Device sleep states.
 *
 * Returns 0 for success, negative error code otherwise.
int xe_pm_init(struct xe_device *xe)
	xe->pm_notifier.notifier_call = xe_pm_notifier_callback;
	err = register_pm_notifier(&xe->pm_notifier);
	/* For now suspend/resume is only allowed with GuC */
	if (!xe_device_uc_enabled(xe))
	if (xe->d3cold.capable) {
		vram_threshold = vram_threshold_value(xe);
		err = xe_pm_set_vram_threshold(xe, vram_threshold);
	xe_pm_runtime_init(xe);
	unregister_pm_notifier(&xe->pm_notifier);
static void xe_pm_runtime_fini(struct xe_device *xe)
	struct device *dev = xe->drm.dev;
	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);

 * xe_pm_fini - Finalize PM
 * @xe: xe device instance
void xe_pm_fini(struct xe_device *xe)
	if (xe_device_uc_enabled(xe))
		xe_pm_runtime_fini(xe);
	unregister_pm_notifier(&xe->pm_notifier);
static void xe_pm_write_callback_task(struct xe_device *xe,
				      struct task_struct *task)
	WRITE_ONCE(xe->pm_callback_task, task);
	 * Just in case it's somehow possible for our writes to be reordered to
	 * the extent that something else re-uses the task written in
	 * pm_callback_task. For example after returning from the callback, but
	 * before the reordered write that resets pm_callback_task back to NULL.
	smp_mb(); /* pairs with xe_pm_read_callback_task */

struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
	smp_mb(); /* pairs with xe_pm_write_callback_task */
	return READ_ONCE(xe->pm_callback_task);
 * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
 * @xe: xe device instance
 *
 * This does not provide any guarantee that the device is going to remain
 * suspended as it might be racing with the runtime state transitions.
 * It can be used only as a non-reliable assertion, to ensure that we are not in
 * the sleep state while trying to access some memory for instance.
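 *
 * For instance, an inner code path could assert on it. This is a hedged
 * illustration only, assuming the xe_assert() macro from xe_assert.h:
 *
 *	xe_assert(xe, !xe_pm_runtime_suspended(xe));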
 *
 * Returns true if the PCI device is suspended, false otherwise.
bool xe_pm_runtime_suspended(struct xe_device *xe)
	return pm_runtime_suspended(xe->drm.dev);
 * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
int xe_pm_runtime_suspend(struct xe_device *xe)
	struct xe_bo *bo, *on;
	trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);
	 * The actual xe_pm_runtime_put() is always async underneath, so
	 * exactly where that is called should make no difference to us. However
	 * we still need to be very careful with the locks that this callback
	 * acquires and the locks that are acquired and held by any callers of
	 * xe_runtime_pm_get(). We already have the matching annotation
	 * on that side, but we also need it here. For example lockdep should be
	 * able to tell us if the following scenario is in theory possible:
	 *
	 * CPU0                          | CPU1 (kworker)
	 * lock(A)                       |
	 *                               | xe_pm_runtime_suspend()
	 *                               |      lock(A)
	 * xe_pm_runtime_get()           |
	 *
	 * This will clearly deadlock since rpm core needs to wait for
	 * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
	 * on CPU0 which prevents CPU1 making forward progress. With the
	 * annotation here and in xe_pm_runtime_get() lockdep will see
	 * the potential lock inversion and give us a nice splat.
	xe_rpm_lockmap_acquire(xe);
	err = xe_pxp_pm_suspend(xe->pxp);
	 * Hold the lock for the entire list operation, as xe_ttm_bo_destroy() and
	 * xe_bo_move_notify() also check and delete the bo entry from the user
	 * fault list.
	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(bo, on,
				 &xe->mem_access.vram_userfault.list, vram_userfault_link)
		xe_bo_runtime_pm_release_mmap_offset(bo);
	mutex_unlock(&xe->mem_access.vram_userfault.lock);
	xe_display_pm_runtime_suspend(xe);
	if (xe->d3cold.allowed) {
		err = xe_bo_evict_all(xe);
	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
	xe_display_pm_runtime_suspend_late(xe);
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	xe_display_pm_runtime_resume(xe);
	xe_pxp_pm_resume(xe->pxp);
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
 * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
int xe_pm_runtime_resume(struct xe_device *xe)
	trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);
	xe_rpm_lockmap_acquire(xe);
	if (xe->d3cold.allowed) {
		err = xe_pcode_ready(xe, true);
		xe_display_pm_resume_early(xe);
		 * This only restores pinned memory which is the memory
		 * required for the GT(s) to resume.
		err = xe_bo_restore_early(xe);
	for_each_gt(gt, xe, id)
	xe_display_pm_runtime_resume(xe);
	if (xe->d3cold.allowed) {
		err = xe_bo_restore_late(xe);
	xe_pxp_pm_resume(xe->pxp);
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
 * For places where resume is synchronous it can be quite easy to deadlock
 * if we are not careful. Also in practice it might be quite timing
 * sensitive to ever see the 0 -> 1 transition with the callers' locks
 * held, so deadlocks might exist but are hard for lockdep to ever see.
 * With this in mind, help lockdep learn about the potentially scary
 * stuff that can happen inside the runtime_resume callback by acquiring
 * a dummy lock (it doesn't protect anything and gets compiled out on
 * non-debug builds). Lockdep then only needs to see the
 * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can
 * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map.
 * For example if the (callers_locks) are ever grabbed in the
 * runtime_resume callback, lockdep should give us a nice splat.
static void xe_rpm_might_enter_cb(const struct xe_device *xe)
	xe_rpm_lockmap_acquire(xe);
	xe_rpm_lockmap_release(xe);
 * Prime the lockdep maps for known locking orders that need to
 * be supported but that may not always occur on all systems.
static void xe_pm_runtime_lockdep_prime(void)
	struct dma_resv lockdep_resv;
	dma_resv_init(&lockdep_resv);
	lock_map_acquire(&xe_pm_runtime_d3cold_map);
	/* D3Cold takes the dma_resv locks to evict bos */
	dma_resv_lock(&lockdep_resv, NULL);
	dma_resv_unlock(&lockdep_resv);
	lock_map_release(&xe_pm_runtime_d3cold_map);
	/* Shrinkers might like to wake up the device under reclaim. */
	fs_reclaim_acquire(GFP_KERNEL);
	lock_map_acquire(&xe_pm_runtime_nod3cold_map);
	lock_map_release(&xe_pm_runtime_nod3cold_map);
	fs_reclaim_release(GFP_KERNEL);
 * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
 * @xe: xe device instance
void xe_pm_runtime_get(struct xe_device *xe)
	trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
	pm_runtime_get_noresume(xe->drm.dev);
	if (xe_pm_read_callback_task(xe) == current)
		return;
	xe_rpm_might_enter_cb(xe);
	pm_runtime_resume(xe->drm.dev);
 * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
 * @xe: xe device instance
void xe_pm_runtime_put(struct xe_device *xe)
	trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
	if (xe_pm_read_callback_task(xe) == current) {
		pm_runtime_put_noidle(xe->drm.dev);
	} else {
		pm_runtime_mark_last_busy(xe->drm.dev);
		pm_runtime_put(xe->drm.dev);
 * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
 * @xe: xe device instance
 *
 * Returns: Any number greater than or equal to 0 for success, negative error
 * code otherwise.
int xe_pm_runtime_get_ioctl(struct xe_device *xe)
	trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
	xe_rpm_might_enter_cb(xe);
	return pm_runtime_get_sync(xe->drm.dev);
 * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
 * @xe: xe device instance
 *
 * Return: True if device is awake (regardless of the previous number of
 * references) and a new reference was taken, false otherwise.
bool xe_pm_runtime_get_if_active(struct xe_device *xe)
	return pm_runtime_get_if_active(xe->drm.dev) > 0;
 * xe_pm_runtime_get_if_in_use - Get a new reference if device is active with previous ref taken
 * @xe: xe device instance
 *
 * Return: True if device is awake, a previous reference had already been taken,
 * and a new reference was now taken, false otherwise.
bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
	return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
 * Very unreliable! Should only be used to suppress the false positive case
 * in the missing outer rpm protection warning.
static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
	struct device *dev = xe->drm.dev;
	return dev->power.runtime_status == RPM_SUSPENDING ||
		dev->power.runtime_status == RPM_RESUMING ||
		pm_suspend_in_progress();
 * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
 * @xe: xe device instance
 *
 * This function should be used in inner places where it is surely already
 * protected by outer-bound callers of `xe_pm_runtime_get`.
 * It will warn if not protected.
 * The reference should always be put back after this function, since it
 * always bumps the usage counter.
void xe_pm_runtime_get_noresume(struct xe_device *xe)
	ref = xe_pm_runtime_get_if_in_use(xe);
		pm_runtime_get_noresume(xe->drm.dev);
		drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe),
			 "Missing outer runtime PM protection\n");
 * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
 * @xe: xe device instance
 *
 * Returns: True if device is awake and the reference was taken, false otherwise.
bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
	xe_rpm_might_enter_cb(xe);
	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
 * xe_pm_assert_unbounded_bridge - Disable PM if the PCIe parent bridge has no driver bound
 * @xe: xe device instance
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *bridge = pci_upstream_bridge(pdev);
	if (!bridge->driver) {
		drm_warn(&xe->drm, "unbound parent PCI bridge, device won't support any PM\n");
		device_set_pm_not_required(&pdev->dev);
 * xe_pm_set_vram_threshold - Set a vram threshold for allowing/blocking D3Cold
 * @xe: xe device instance
 * @threshold: VRAM size in MiB for the D3cold threshold
 *
 * Returns 0 for success, negative error code otherwise.
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
	struct ttm_resource_manager *man;
	u32 vram_total_mb = 0;
	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);
	if (threshold > vram_total_mb)
	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.vram_threshold = threshold;
	mutex_unlock(&xe->d3cold.lock);
 * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
 * @xe: xe device instance
 *
 * To be called from the runtime_pm idle callback.
 * Checks all the D3Cold conditions ahead of runtime suspend.
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
	struct ttm_resource_manager *man;
	u32 total_vram_used_mb = 0;
	if (!xe->d3cold.capable) {
		xe->d3cold.allowed = false;
	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		vram_used = ttm_resource_manager_usage(man);
		total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
	mutex_lock(&xe->d3cold.lock);
	if (total_vram_used_mb < xe->d3cold.vram_threshold)
		xe->d3cold.allowed = true;
	else
		xe->d3cold.allowed = false;
	mutex_unlock(&xe->d3cold.lock);
 * xe_pm_module_init() - Perform xe_pm specific module initialization.
 *
 * Return: 0 on success. Currently doesn't fail.
int __init xe_pm_module_init(void)
	xe_pm_runtime_lockdep_prime();