// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pm.h"

#include <linux/pm_runtime.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>

#include "display/xe_display.h"
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_device_sysfs.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_irq.h"
#include "xe_pcode.h"
#include "xe_wa.h"

/**
 * DOC: Xe Power Management
 *
 * Xe PM implements the main routines for both system level suspend states and
 * for the opportunistic runtime suspend states.
 *
 * System Level Suspend (S-States) - In general this is OS initiated suspend
 * driven by ACPI for achieving S0ix (a.k.a. S2idle, freeze), S3 (suspend to ram),
 * S4 (disk). The main functions here are `xe_pm_suspend` and `xe_pm_resume`. They
 * are the main entry points for suspending to and resuming from these states.
 *
 * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low power
 * state D3, controlled by the PCI subsystem and ACPI with the help from the
 * runtime_pm infrastructure.
 * PCI D3 is special and can mean D3hot, where Vcc power is on for keeping memory
 * alive and allowing a quicker, low latency resume, or D3Cold, where Vcc power
 * is off for better power savings.
 * Vcc for the PCI hierarchy can only be controlled at the PCI root port
 * level, while the device driver can be behind multiple bridges/switches and
 * paired with other devices. For this reason, the PCI subsystem cannot perform
 * the transition towards D3Cold. The lowest runtime PM state possible from the
 * PCI subsystem is D3hot. Then, if all the paired devices under the same root
 * port are in D3hot, ACPI will assist and run its own methods (_PR3 and _OFF)
 * to perform the transition from D3hot to D3cold. Xe may disallow this
 * transition by calling pci_d3cold_disable(root_pdev) before going to runtime
 * suspend, based on runtime conditions such as VRAM usage, for instance to keep
 * resume quick and low latency.
 *
 * Runtime PM - This infrastructure provided by the Linux kernel allows the
 * device drivers to indicate when they can be runtime suspended, so the device
 * could be put at D3 (if supported), or allow deeper package sleep states
 * (PC-states), and/or other low level power states. The Xe PM component provides
 * `xe_pm_runtime_suspend` and `xe_pm_runtime_resume` functions that the PCI
 * subsystem will call before the transition to/from runtime suspend.
 *
 * Also, Xe PM provides get and put functions that the Xe driver will use to
 * indicate activity. In order to avoid locking complications with the memory
 * management, whenever possible, these get and put functions need to be called
 * from the higher/outer levels.
 * The main cases that need to be protected from the outer levels are: IOCTL,
 * sysfs, debugfs, dma-buf sharing, GPU execution.
 *
 * This component is not responsible for GT idleness (RC6) nor GT frequency
 * management (RPS).
 */
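/*
 * Illustrative sketch only (not part of the driver): an outer-level caller
 * such as an ioctl or sysfs handler brackets its hardware access with a
 * runtime PM reference, so the inner layers never have to wake the device
 * themselves. do_hw_access() is a hypothetical helper used only here:
 *
 *	xe_pm_runtime_get(xe);
 *	err = do_hw_access(xe);
 *	xe_pm_runtime_put(xe);
 */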
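/*
 * Dummy lockdep map used to teach lockdep about the ordering between the
 * runtime PM callbacks and any locks held by callers of the xe_pm_runtime_*
 * get/put helpers; see pm_runtime_lockdep_prime() and xe_pm_runtime_suspend()
 * below. It protects nothing and exists only on lockdep-enabled builds.
 */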
#ifdef CONFIG_LOCKDEP
static struct lockdep_map xe_pm_runtime_lockdep_map = {
	.name = "xe_pm_runtime_lockdep_map"
};
#endif

/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Suspending device\n");

	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);

	xe_display_pm_suspend(xe, false);

	/* FIXME: Super racy... */
	err = xe_bo_evict_all(xe);
	if (err)
		goto err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err) {
			xe_display_pm_resume(xe, false);
			goto err;
		}
	}

	xe_irq_suspend(xe);

	xe_display_pm_suspend_late(xe);

	drm_dbg(&xe->drm, "Device suspended\n");
	return 0;
err:
	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
	return err;
}

/**
 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_resume(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Resuming device\n");

	for_each_tile(tile, xe, id)
		xe_wa_apply_tile_workarounds(tile);

	err = xe_pcode_ready(xe, true);
	if (err)
		return err;

	xe_display_pm_resume_early(xe);

	/*
	 * This only restores pinned memory which is the memory required for the
	 * GT(s) to resume.
	 */
	err = xe_bo_restore_kernel(xe);
	if (err)
		goto err;

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	xe_display_pm_resume(xe, false);

	err = xe_bo_restore_user(xe);
	if (err)
		goto err;

	drm_dbg(&xe->drm, "Device resumed\n");
	return 0;
err:
	drm_dbg(&xe->drm, "Device resume failed %d\n", err);
	return err;
}

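/*
 * D3Cold is only reported as possible when the root port can signal PME#
 * from D3cold and exposes an ACPI _PR3 power resource; otherwise runtime PM
 * stops at D3hot.
 */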
static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *root_pdev;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return false;

	/* D3Cold requires PME capability */
	if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
		drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
		return false;
	}

	/* D3Cold requires _PR3 power resource */
	if (!pci_pr3_present(root_pdev)) {
		drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
		return false;
	}

	return true;
}

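/*
 * Default runtime PM policy for the device: mark it active and let it
 * autosuspend after 1 second of inactivity.
 */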
static void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	/*
	 * Disable the system suspend direct complete optimization.
	 * We need to ensure that the regular device suspend/resume functions
	 * are called since our runtime_pm cannot guarantee local memory
	 * eviction for d3cold.
	 * TODO: Check HDA audio dependencies claimed by i915, and then extend
	 * this to integrated graphics as well.
	 */
	if (IS_DGFX(xe))
		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);
}

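/**
 * xe_pm_init_early - Set up PM state needed before the rest of the driver
 * @xe: xe device instance
 *
 * Initializes the VRAM userfault list and the mutexes protecting it and the
 * d3cold state.
 *
 * Returns 0 for success, negative error code otherwise.
 */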
int xe_pm_init_early(struct xe_device *xe)
{
	int err;

	INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);

	err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
	if (err)
		return err;

	err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
	if (err)
		return err;

	return 0;
}

/**
 * xe_pm_init - Initialize Xe Power Management
 * @xe: xe device instance
 *
 * This component is responsible for System and Device sleep states.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_init(struct xe_device *xe)
{
	int err;

	/* For now suspend/resume is only allowed with GuC */
	if (!xe_device_uc_enabled(xe))
		return 0;

	xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);

	if (xe->d3cold.capable) {
		err = xe_device_sysfs_init(xe);
		if (err)
			return err;

		err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
		if (err)
			return err;
	}

	xe_pm_runtime_init(xe);

	return 0;
}

/**
 * xe_pm_runtime_fini - Finalize Runtime PM
 * @xe: xe device instance
 */
void xe_pm_runtime_fini(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);
}

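/*
 * Track which task is currently executing a runtime PM callback so that
 * get/put calls issued from that same task (i.e. from inside the callback)
 * can short-circuit instead of recursing into runtime PM.
 */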
static void xe_pm_write_callback_task(struct xe_device *xe,
				      struct task_struct *task)
{
	WRITE_ONCE(xe->pm_callback_task, task);

	/*
	 * Just in case it's somehow possible for our writes to be reordered to
	 * the extent that something else re-uses the task written in
	 * pm_callback_task. For example after returning from the callback, but
	 * before the reordered write that resets pm_callback_task back to NULL.
	 */
	smp_mb(); /* pairs with xe_pm_read_callback_task */
}

struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
{
	smp_mb(); /* pairs with xe_pm_write_callback_task */

	return READ_ONCE(xe->pm_callback_task);
}

/**
 * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
 * @xe: xe device instance
 *
 * This does not provide any guarantee that the device is going to remain
 * suspended as it might be racing with the runtime state transitions.
 * It can be used only as a non-reliable assertion, to ensure that we are not in
 * the sleep state while trying to access some memory for instance.
 *
 * Returns true if PCI device is suspended, false otherwise.
 */
bool xe_pm_runtime_suspended(struct xe_device *xe)
{
	return pm_runtime_suspended(xe->drm.dev);
}

/**
 * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_suspend(struct xe_device *xe)
{
	struct xe_bo *bo, *on;
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	/*
	 * The actual xe_pm_runtime_put() is always async underneath, so
	 * exactly where that is called should make no difference to us. However
	 * we still need to be very careful with the locks that this callback
	 * acquires and the locks that are acquired and held by any callers of
	 * xe_pm_runtime_get(). We already have the matching annotation
	 * on that side, but we also need it here. For example lockdep should be
	 * able to tell us if the following scenario is in theory possible:
	 *
	 * CPU0                          | CPU1 (kworker)
	 * lock(A)                       |
	 *                               | xe_pm_runtime_suspend()
	 *                               | lock(A)
	 * xe_pm_runtime_get()           |
	 *
	 * This will clearly deadlock since rpm core needs to wait for
	 * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
	 * on CPU0 which prevents CPU1 making forward progress. With the
	 * annotation here and in xe_pm_runtime_get() lockdep will see
	 * the potential lock inversion and give us a nice splat.
	 */
	lock_map_acquire(&xe_pm_runtime_lockdep_map);

	/*
	 * Hold the lock for the entire list walk, since xe_ttm_bo_destroy and
	 * xe_bo_move_notify also check and delete bo entries from the user
	 * fault list.
	 */
	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(bo, on,
				 &xe->mem_access.vram_userfault.list, vram_userfault_link)
		xe_bo_runtime_pm_release_mmap_offset(bo);
	mutex_unlock(&xe->mem_access.vram_userfault.lock);

	if (xe->d3cold.allowed) {
		xe_display_pm_suspend(xe, true);

		err = xe_bo_evict_all(xe);
		if (err)
			goto out;
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto out;
	}

	xe_irq_suspend(xe);

	if (xe->d3cold.allowed)
		xe_display_pm_suspend_late(xe);
out:
	if (err)
		xe_display_pm_resume(xe, true);
	lock_map_release(&xe_pm_runtime_lockdep_map);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/**
 * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	lock_map_acquire(&xe_pm_runtime_lockdep_map);

	if (xe->d3cold.allowed) {
		err = xe_pcode_ready(xe, true);
		if (err)
			goto out;

		xe_display_pm_resume_early(xe);

		/*
		 * This only restores pinned memory which is the memory
		 * required for the GT(s) to resume.
		 */
		err = xe_bo_restore_kernel(xe);
		if (err)
			goto out;
	}

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	if (xe->d3cold.allowed) {
		xe_display_pm_resume(xe, true);
		err = xe_bo_restore_user(xe);
		if (err)
			goto out;
	}
out:
	lock_map_release(&xe_pm_runtime_lockdep_map);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/*
 * For places where resume is synchronous it can be quite easy to deadlock
 * if we are not careful. Also in practice it might be quite timing
 * sensitive to ever see the 0 -> 1 transition with the caller's locks
 * held, so deadlocks might exist but are hard for lockdep to ever see.
 * With this in mind, help lockdep learn about the potentially scary
 * stuff that can happen inside the runtime_resume callback by acquiring
 * a dummy lock (it doesn't protect anything and gets compiled out on
 * non-debug builds). Lockdep then only needs to see the
 * xe_pm_runtime_lockdep_map -> runtime_resume callback once, and then can
 * hopefully validate all the (callers_locks) -> xe_pm_runtime_lockdep_map
 * orderings. For example if the (callers_locks) are ever grabbed in the
 * runtime_resume callback, lockdep should give us a nice splat.
 */
static void pm_runtime_lockdep_prime(void)
{
	lock_map_acquire(&xe_pm_runtime_lockdep_map);
	lock_map_release(&xe_pm_runtime_lockdep_map);
}

/**
 * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
 * @xe: xe device instance
 */
void xe_pm_runtime_get(struct xe_device *xe)
{
	pm_runtime_get_noresume(xe->drm.dev);

	if (xe_pm_read_callback_task(xe) == current)
		return;

	pm_runtime_lockdep_prime();
	pm_runtime_resume(xe->drm.dev);
}

/**
 * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
 * @xe: xe device instance
 */
void xe_pm_runtime_put(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		pm_runtime_put_noidle(xe->drm.dev);
	} else {
		pm_runtime_mark_last_busy(xe->drm.dev);
		pm_runtime_put(xe->drm.dev);
	}
}

/**
 * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
 * @xe: xe device instance
 *
 * Returns: Any number greater than or equal to 0 for success, negative error
 * code otherwise.
 */
int xe_pm_runtime_get_ioctl(struct xe_device *xe)
{
	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
		return -ELOOP;

	pm_runtime_lockdep_prime();
	return pm_runtime_get_sync(xe->drm.dev);
}
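/*
 * Illustrative sketch only: an ioctl entry point would typically do
 *
 *	ret = xe_pm_runtime_get_ioctl(xe);
 *	if (ret >= 0)
 *		ret = handle_the_ioctl(xe);	(hypothetical handler)
 *	xe_pm_runtime_put(xe);
 *
 * The reference is taken even when the synchronous resume fails, so it is
 * put back unconditionally.
 */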

/**
 * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
 * @xe: xe device instance
 *
 * Return: True if device is awake (regardless of the previous number of
 * references) and a new reference was taken, false otherwise.
 */
bool xe_pm_runtime_get_if_active(struct xe_device *xe)
{
	return pm_runtime_get_if_active(xe->drm.dev) > 0;
}

/**
 * xe_pm_runtime_get_if_in_use - Get a new reference if device is active with previous ref taken
 * @xe: xe device instance
 *
 * Return: True if the device is awake, a previous reference was already held,
 * and a new reference was taken; false otherwise.
 */
bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
}

/**
 * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
 * @xe: xe device instance
 *
 * This function should only be used in inner places that are already protected
 * by outer callers of `xe_pm_runtime_get`; it warns if that protection is
 * missing. The reference must still be put back afterwards, since the usage
 * counter is always bumped regardless.
 */
void xe_pm_runtime_get_noresume(struct xe_device *xe)
{
	bool ref;

	ref = xe_pm_runtime_get_if_in_use(xe);

	if (drm_WARN(&xe->drm, !ref, "Missing outer runtime PM protection\n"))
		pm_runtime_get_noresume(xe->drm.dev);
}

/**
 * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
 * @xe: xe device instance
 *
 * Returns: True if device is awake and the reference was taken, false otherwise.
 */
bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	pm_runtime_lockdep_prime();
	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
}

/**
 * xe_pm_assert_unbounded_bridge - Disable PM if the parent PCIe bridge has no driver bound
 * @xe: xe device instance
 */
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	if (!bridge)
		return;

	if (!bridge->driver) {
		drm_warn(&xe->drm, "unbounded parent pci bridge, device won't support any PM\n");
		device_set_pm_not_required(&pdev->dev);
	}
}

/**
 * xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3Cold
 * @xe: xe device instance
 * @threshold: VRAM size in MiB for the D3cold threshold
 *
 * D3Cold is only allowed while the total VRAM usage stays below this threshold.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
	struct ttm_resource_manager *man;
	u32 vram_total_mb = 0;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man)
			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
	}

	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);

	if (threshold > vram_total_mb)
		return -EINVAL;

	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.vram_threshold = threshold;
	mutex_unlock(&xe->d3cold.lock);

	return 0;
}

/**
 * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
 * @xe: xe device instance
 *
 * To be called during the runtime_pm idle callback.
 * Check for all the D3Cold conditions ahead of runtime suspend.
 */
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
	struct ttm_resource_manager *man;
	u32 total_vram_used_mb = 0;
	u64 vram_used;
	int i;

	if (!xe->d3cold.capable) {
		xe->d3cold.allowed = false;
		return;
	}

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			vram_used = ttm_resource_manager_usage(man);
			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
		}
	}

	mutex_lock(&xe->d3cold.lock);

	if (total_vram_used_mb < xe->d3cold.vram_threshold)
		xe->d3cold.allowed = true;
	else
		xe->d3cold.allowed = false;

	mutex_unlock(&xe->d3cold.lock);

	drm_dbg(&xe->drm,
		"d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
}