// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pm.h"

#include <linux/pm_runtime.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>

#include "display/xe_display.h"
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_device_sysfs.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_pcode.h"
#include "xe_wa.h"

#define DEFAULT_VRAM_THRESHOLD 300 /* in MB */

/**
 * DOC: Xe Power Management
 *
 * Xe PM should be guided by simplicity:
 * use the simplest hook options whenever possible,
 * do not reinvent runtime_pm references and hooks, and
 * keep a clear separation between display and GT underneath this component.
 *
 * What's next:
 *
 * For now, s2idle and S3 only work on integrated devices. The next step
 * is to iterate through all VRAM BOs, backing them up into system memory,
 * before allowing system suspend.
 *
 * Also, runtime_pm needs to be here from the beginning.
 *
 * RC6/RPS are also critical PM features. Start with GuC RC and GuC SLPC
 * and no wait boost; frequency optimizations can come in a later stage.
 */

/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_suspend(struct xe_device *xe)
{
        struct xe_gt *gt;
        u8 id;
        int err;

        for_each_gt(gt, xe, id)
                xe_gt_suspend_prepare(gt);

        /* FIXME: Super racy... */
        err = xe_bo_evict_all(xe);
        if (err)
                return err;

        xe_display_pm_suspend(xe);

        for_each_gt(gt, xe, id) {
                err = xe_gt_suspend(gt);
                if (err) {
                        /* Undo the display suspend before bailing out */
                        xe_display_pm_resume(xe);
                        return err;
                }
        }

        xe_display_pm_suspend_late(xe);

        return 0;
}

/**
 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_resume(struct xe_device *xe)
{
        struct xe_tile *tile;
        struct xe_gt *gt;
        u8 id;
        int err;

        for_each_tile(tile, xe, id)
                xe_wa_apply_tile_workarounds(tile);

        for_each_gt(gt, xe, id) {
                err = xe_pcode_init(gt);
                if (err)
                        return err;
        }

        xe_display_pm_resume_early(xe);

        /*
         * This only restores pinned memory, which is the memory required for
         * the GT(s) to resume.
         */
        err = xe_bo_restore_kernel(xe);
        if (err)
                return err;

        xe_display_pm_resume(xe);

        for_each_gt(gt, xe, id)
                xe_gt_resume(gt);

        err = xe_bo_restore_user(xe);
        if (err)
                return err;

        return 0;
}

static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
{
        struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
        struct pci_dev *root_pdev;

        root_pdev = pcie_find_root_port(pdev);
        if (!root_pdev)
                return false;

        /* D3Cold requires PME capability */
        if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
                drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
                return false;
        }

        /* D3Cold requires _PR3 power resource */
        if (!pci_pr3_present(root_pdev)) {
                drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
                return false;
        }

        return true;
}

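/*
 * Note: D3cold means the card may have its power fully removed while runtime
 * suspended, so resuming from it has to re-init pcode and restore pinned VRAM
 * contents; see xe_pm_runtime_resume() below. D3hot keeps power, so none of
 * that work is needed there.
 */
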
static void xe_pm_runtime_init(struct xe_device *xe)
{
        struct device *dev = xe->drm.dev;

        /*
         * Disable the system suspend direct complete optimization.
         * We need to ensure that the regular device suspend/resume functions
         * are called, since our runtime_pm cannot guarantee local memory
         * eviction for d3cold.
         * TODO: Check the HDA audio dependencies claimed by i915, and then
         * extend this to integrated graphics as well.
         */
        if (IS_DGFX(xe))
                dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);

        /* Runtime suspend roughly 1s after the last runtime PM reference is dropped */
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_autosuspend_delay(dev, 1000);
        pm_runtime_set_active(dev);
        pm_runtime_allow(dev);
        pm_runtime_mark_last_busy(dev);
}

void xe_pm_init_early(struct xe_device *xe)
{
        INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);
        drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
}

void xe_pm_init(struct xe_device *xe)
{
        /* For now suspend/resume is only allowed with GuC */
        if (!xe_device_uc_enabled(xe))
                return;

        drmm_mutex_init(&xe->drm, &xe->d3cold.lock);

        xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);

        if (xe->d3cold.capable) {
                xe_device_sysfs_init(xe);
                xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
        }

        xe_pm_runtime_init(xe);
}

void xe_pm_runtime_fini(struct xe_device *xe)
{
        struct device *dev = xe->drm.dev;

        pm_runtime_get_sync(dev);
        pm_runtime_forbid(dev);
}

static void xe_pm_write_callback_task(struct xe_device *xe,
                                      struct task_struct *task)
{
        WRITE_ONCE(xe->pm_callback_task, task);

        /*
         * Just in case it's somehow possible for our writes to be reordered to
         * the extent that something else re-uses the task written in
         * pm_callback_task, for example after returning from the callback but
         * before the reordered write that resets pm_callback_task back to NULL.
         */
        smp_mb(); /* pairs with xe_pm_read_callback_task */
}

struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
{
        smp_mb(); /* pairs with xe_pm_write_callback_task */

        return READ_ONCE(xe->pm_callback_task);
}

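/*
 * Illustrative sketch, not part of the original file: the callback-task
 * tracking above is meant to let helpers such as xe_device_mem_access_get()
 * detect that they are being called from inside one of our own runtime PM
 * callbacks and avoid taking another runtime PM reference, roughly:
 *
 *      if (xe_pm_read_callback_task(xe) == current)
 *              return; // already inside xe_pm_runtime_suspend()/resume()
 */
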
int xe_pm_runtime_suspend(struct xe_device *xe)
{
        struct xe_bo *bo, *on;
        struct xe_gt *gt;
        u8 id;
        int err = 0;

        if (xe->d3cold.allowed && xe_device_mem_access_ongoing(xe))
                return -EBUSY;

        /* Disable access_ongoing asserts and prevent recursive pm calls */
        xe_pm_write_callback_task(xe, current);

        /*
         * The actual xe_device_mem_access_put() is always async underneath, so
         * exactly where that is called should make no difference to us. However
         * we still need to be very careful with the locks that this callback
         * acquires and the locks that are acquired and held by any callers of
         * xe_device_mem_access_get(). We already have the matching annotation
         * on that side, but we also need it here. For example lockdep should be
         * able to tell us if the following scenario is in theory possible:
         *
         * CPU0                          | CPU1 (kworker)
         * lock(A)                       |
         *                               | xe_pm_runtime_suspend()
         *                               |      lock(A)
         * xe_device_mem_access_get()    |
         *
         * This will clearly deadlock, since the rpm core needs to wait for
         * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
         * on CPU0, which prevents CPU1 from making forward progress. With the
         * annotation here and in xe_device_mem_access_get() lockdep will see
         * the potential lock inversion and give us a nice splat.
         */
        lock_map_acquire(&xe_device_mem_access_lockdep_map);

        /*
         * Hold the lock for the entire list walk, as xe_ttm_bo_destroy and
         * xe_bo_move_notify also check for and delete BO entries from the
         * user fault list.
         */
        mutex_lock(&xe->mem_access.vram_userfault.lock);
        list_for_each_entry_safe(bo, on,
                                 &xe->mem_access.vram_userfault.list, vram_userfault_link)
                xe_bo_runtime_pm_release_mmap_offset(bo);
        mutex_unlock(&xe->mem_access.vram_userfault.lock);

        if (xe->d3cold.allowed) {
                err = xe_bo_evict_all(xe);
                if (err)
                        goto out;
        }

        for_each_gt(gt, xe, id) {
                err = xe_gt_suspend(gt);
                if (err)
                        goto out;
        }

out:
        lock_map_release(&xe_device_mem_access_lockdep_map);
        xe_pm_write_callback_task(xe, NULL);
        return err;
}

int xe_pm_runtime_resume(struct xe_device *xe)
{
        struct xe_gt *gt;
        u8 id;
        int err = 0;

        /* Disable access_ongoing asserts and prevent recursive pm calls */
        xe_pm_write_callback_task(xe, current);

        lock_map_acquire(&xe_device_mem_access_lockdep_map);

        /*
         * It is possible that xe has allowed d3cold but other PCIe devices in
         * the gfx card SoC have blocked it, so the card has not really lost
         * power. Checking the primary GT is sufficient to detect this.
         */
        gt = xe_device_get_gt(xe, 0);
        xe->d3cold.power_lost = xe_guc_in_reset(&gt->uc.guc);

        if (xe->d3cold.allowed && xe->d3cold.power_lost) {
                for_each_gt(gt, xe, id) {
                        err = xe_pcode_init(gt);
                        if (err)
                                goto out;
                }

                /*
                 * This only restores pinned memory, which is the memory
                 * required for the GT(s) to resume.
                 */
                err = xe_bo_restore_kernel(xe);
                if (err)
                        goto out;
        }

        for_each_gt(gt, xe, id)
                xe_gt_resume(gt);

        if (xe->d3cold.allowed && xe->d3cold.power_lost) {
                err = xe_bo_restore_user(xe);
                if (err)
                        goto out;
        }

out:
        lock_map_release(&xe_device_mem_access_lockdep_map);
        xe_pm_write_callback_task(xe, NULL);
        return err;
}

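/*
 * Illustrative sketch, not from this file: these helpers are expected to be
 * wired into the PCI driver's dev_pm_ops, roughly along these lines (the
 * xe_pci_* callback names below are hypothetical placeholders):
 *
 *      static const struct dev_pm_ops xe_pm_ops = {
 *              SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
 *              SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend,
 *                                 xe_pci_runtime_resume,
 *                                 xe_pci_runtime_idle)
 *      };
 *
 * with those callbacks delegating to xe_pm_suspend()/xe_pm_resume() and
 * xe_pm_runtime_suspend()/xe_pm_runtime_resume() respectively.
 */
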
int xe_pm_runtime_get(struct xe_device *xe)
{
        return pm_runtime_get_sync(xe->drm.dev);
}

int xe_pm_runtime_put(struct xe_device *xe)
{
        pm_runtime_mark_last_busy(xe->drm.dev);
        return pm_runtime_put(xe->drm.dev);
}

int xe_pm_runtime_get_if_active(struct xe_device *xe)
{
        return pm_runtime_get_if_active(xe->drm.dev);
}

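/*
 * Usage sketch (illustrative only): callers are expected to bracket hardware
 * access with a runtime PM reference, e.g.:
 *
 *      xe_pm_runtime_get(xe);
 *      ... touch registers / VRAM ...
 *      xe_pm_runtime_put(xe);
 *
 * xe_pm_runtime_get_if_active() is the opportunistic variant for paths that
 * must not wake the device if it is already runtime suspended.
 */
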
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
        struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
        struct pci_dev *bridge = pci_upstream_bridge(pdev);

        if (!bridge)
                return;

        if (!bridge->driver) {
                drm_warn(&xe->drm, "unbounded parent pci bridge, device won't support any PM\n");
                device_set_pm_not_required(&pdev->dev);
        }
}

int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
        struct ttm_resource_manager *man;
        u32 vram_total_mb = 0;
        int i;

        for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
                man = ttm_manager_type(&xe->ttm, i);
                if (man)
                        vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
        }

        drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);

        if (threshold > vram_total_mb)
                return -EINVAL;

        mutex_lock(&xe->d3cold.lock);
        xe->d3cold.vram_threshold = threshold;
        mutex_unlock(&xe->d3cold.lock);

        return 0;
}

void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
        struct ttm_resource_manager *man;
        u32 total_vram_used_mb = 0;
        u64 vram_used;
        int i;

        if (!xe->d3cold.capable) {
                xe->d3cold.allowed = false;
                return;
        }

        for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
                man = ttm_manager_type(&xe->ttm, i);
                if (man) {
                        vram_used = ttm_resource_manager_usage(man);
                        total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
                }
        }

        mutex_lock(&xe->d3cold.lock);

        if (total_vram_used_mb < xe->d3cold.vram_threshold)
                xe->d3cold.allowed = true;
        else
                xe->d3cold.allowed = false;

        mutex_unlock(&xe->d3cold.lock);

        drm_dbg(&xe->drm,
                "d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
}