// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/reboot.h>

#include "vpu_boot_api.h"
#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_pm.h"

static bool ivpu_disable_recovery;
module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644);
MODULE_PARM_DESC(disable_recovery, "Disables recovery when NPU hang is detected");

static unsigned long ivpu_tdr_timeout_ms;
module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");

#define PM_RESCHEDULE_LIMIT	5
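
/*
 * Cold boot: drop all FW state - reset command queue and IPC state, reload
 * the FW image and restart it from the cold boot entry point.
 */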
static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;

	ivpu_cmdq_reset_all_contexts(vdev);
	ivpu_ipc_reset(vdev);
	ivpu_fw_load(vdev);
	fw->entry_point = fw->cold_boot_entry_point;
}
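
/*
 * Warm boot: resume the FW from the save/restore address it reported in the
 * boot params. Fall back to a cold boot if no such address was published.
 */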
static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;
	struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem);

	if (!bp->save_restore_ret_address) {
		ivpu_pm_prepare_cold_boot(vdev);
		return;
	}

	ivpu_dbg(vdev, FW_BOOT, "Save/restore entry point %llx", bp->save_restore_ret_address);
	fw->entry_point = bp->save_restore_ret_address;
}

static int ivpu_suspend(struct ivpu_device *vdev)
{
	int ret;

	ivpu_prepare_for_reset(vdev);

	ret = ivpu_shutdown(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to shutdown NPU: %d\n", ret);

	return ret;
}
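
/*
 * Bring the NPU back to D0: restore PCI state, power up the HW, re-enable
 * the MMU and boot the FW. On failure, fall back to a cold boot and retry
 * once before giving up.
 */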
static int ivpu_resume(struct ivpu_device *vdev)
{
	int ret;

retry:
	pci_restore_state(to_pci_dev(vdev->drm.dev));
	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);

	ret = ivpu_hw_power_up(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
		goto err_power_down;
	}

	ret = ivpu_mmu_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
		goto err_power_down;
	}

	ret = ivpu_boot(vdev);
	if (ret)
		goto err_mmu_disable;

	return 0;

err_mmu_disable:
	ivpu_mmu_disable(vdev);
err_power_down:
	ivpu_hw_power_down(vdev);
	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

	if (!ivpu_fw_is_cold_boot(vdev)) {
		ivpu_pm_prepare_cold_boot(vdev);
		goto retry;
	} else {
		ivpu_err(vdev, "Failed to resume the FW: %d\n", ret);
	}

	return ret;
}
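
/*
 * Recovery worker: dump the FW log, abort all submitted jobs, cold boot the
 * FW and notify user space with an IVPU_PM_EVENT=IVPU_RECOVER uevent.
 */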
static void ivpu_pm_recovery_work(struct work_struct *work)
{
	struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
	struct ivpu_device *vdev = pm->vdev;
	char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
	int ret;

	ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));

	ret = pm_runtime_resume_and_get(vdev->drm.dev);
	if (ret)
		ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);

	ivpu_fw_log_dump(vdev);

	atomic_inc(&vdev->pm->reset_counter);
	atomic_set(&vdev->pm->reset_pending, 1);
	down_write(&vdev->pm->reset_lock);

	ivpu_suspend(vdev);
	ivpu_pm_prepare_cold_boot(vdev);
	ivpu_jobs_abort_all(vdev);

	ret = ivpu_resume(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);

	up_write(&vdev->pm->reset_lock);
	atomic_set(&vdev->pm->reset_pending, 0);

	kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
	pm_runtime_mark_last_busy(vdev->drm.dev);
	pm_runtime_put_autosuspend(vdev->drm.dev);
}
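
/*
 * Request device recovery. The reset_pending flag is set with a cmpxchg so
 * only one recovery is queued at a time; IRQs are disabled early to avoid
 * an interrupt storm from a misbehaving device.
 */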
void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
{
	ivpu_err(vdev, "Recovery triggered by %s\n", reason);

	if (ivpu_disable_recovery) {
		ivpu_err(vdev, "Recovery not available when disable_recovery param is set\n");
		return;
	}

	if (ivpu_is_fpga(vdev)) {
		ivpu_err(vdev, "Recovery not available on FPGA\n");
		return;
	}

	/* Trigger recovery if it's not in progress */
	if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) {
		ivpu_hw_diagnose_failure(vdev);
		ivpu_hw_irq_disable(vdev); /* Disable IRQ early to protect from IRQ storm */
		queue_work(system_long_wq, &vdev->pm->recovery_work);
	}
}
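
/* TDR handler: a job did not complete within the TDR timeout, trigger recovery. */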
static void ivpu_job_timeout_work(struct work_struct *work)
{
	struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
	struct ivpu_device *vdev = pm->vdev;

	ivpu_pm_trigger_recovery(vdev, "TDR");
}

void ivpu_start_job_timeout_detection(struct ivpu_device *vdev)
{
	unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;

	/* No-op if already queued */
	queue_delayed_work(system_wq, &vdev->pm->job_timeout_work, msecs_to_jiffies(timeout_ms));
}

void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev)
{
	cancel_delayed_work_sync(&vdev->pm->job_timeout_work);
}
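
/*
 * System suspend: wait up to the TDR timeout for the NPU to go idle, request
 * D0i3 entry and prepare a warm boot for the following resume.
 */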
int ivpu_pm_suspend_cb(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct ivpu_device *vdev = to_ivpu_device(drm);
	unsigned long timeout;

	ivpu_dbg(vdev, PM, "Suspend..\n");

	timeout = jiffies + msecs_to_jiffies(vdev->timeout.tdr);
	while (!ivpu_hw_is_idle(vdev)) {
		cond_resched();
		if (time_after_eq(jiffies, timeout)) {
			ivpu_err(vdev, "Failed to enter idle on system suspend\n");
			return -EBUSY;
		}
	}

	ivpu_jsm_pwr_d0i3_enter(vdev);

	ivpu_suspend(vdev);
	ivpu_pm_prepare_warm_boot(vdev);

	ivpu_dbg(vdev, PM, "Suspend done.\n");

	return 0;
}

int ivpu_pm_resume_cb(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct ivpu_device *vdev = to_ivpu_device(drm);
	int ret;

	ivpu_dbg(vdev, PM, "Resume..\n");

	ret = ivpu_resume(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to resume: %d\n", ret);

	ivpu_dbg(vdev, PM, "Resume done.\n");

	return ret;
}
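
/*
 * Runtime suspend: if the NPU is still busy, reschedule the suspend up to
 * PM_RESCHEDULE_LIMIT times. Prepare a warm boot when idle/D0i3 entry
 * succeeded, otherwise force a cold boot on the next resume.
 */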
int ivpu_pm_runtime_suspend_cb(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct ivpu_device *vdev = to_ivpu_device(drm);
	bool hw_is_idle = true;
	int ret;

	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
	drm_WARN_ON(&vdev->drm, work_pending(&vdev->pm->recovery_work));

	ivpu_dbg(vdev, PM, "Runtime suspend..\n");

	if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) {
		ivpu_dbg(vdev, PM, "Failed to enter idle, rescheduling suspend, retries left %d\n",
			 vdev->pm->suspend_reschedule_counter);
		pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend);
		vdev->pm->suspend_reschedule_counter--;
		return -EAGAIN;
	}

	if (!vdev->pm->suspend_reschedule_counter)
		hw_is_idle = false;
	else if (ivpu_jsm_pwr_d0i3_enter(vdev))
		hw_is_idle = false;

	ret = ivpu_suspend(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to suspend NPU: %d\n", ret);

	if (!hw_is_idle) {
		ivpu_err(vdev, "NPU failed to enter idle, force suspended.\n");
		ivpu_fw_log_dump(vdev);
		ivpu_pm_prepare_cold_boot(vdev);
	} else {
		ivpu_pm_prepare_warm_boot(vdev);
	}

	vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;

	ivpu_dbg(vdev, PM, "Runtime suspend done.\n");

	return 0;
}

int ivpu_pm_runtime_resume_cb(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct ivpu_device *vdev = to_ivpu_device(drm);
	int ret;

	ivpu_dbg(vdev, PM, "Runtime resume..\n");

	ret = ivpu_resume(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);

	ivpu_dbg(vdev, PM, "Runtime resume done.\n");

	return ret;
}
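
/* Take a runtime PM reference, resuming the NPU if it is suspended. */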
int ivpu_rpm_get(struct ivpu_device *vdev)
{
	int ret;

	ret = pm_runtime_resume_and_get(vdev->drm.dev);
	if (!drm_WARN_ON(&vdev->drm, ret < 0))
		vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;

	return ret;
}

int ivpu_rpm_get_if_active(struct ivpu_device *vdev)
{
	int ret;

	ret = pm_runtime_get_if_in_use(vdev->drm.dev);
	drm_WARN_ON(&vdev->drm, ret < 0);

	return ret;
}

void ivpu_rpm_put(struct ivpu_device *vdev)
{
	pm_runtime_mark_last_busy(vdev->drm.dev);
	pm_runtime_put_autosuspend(vdev->drm.dev);
}
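
/*
 * PCI reset callbacks: quiesce the NPU and abort outstanding jobs before the
 * bus reset, then resume and release the reset lock once the reset is done.
 */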
void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);

	ivpu_dbg(vdev, PM, "Pre-reset..\n");
	atomic_inc(&vdev->pm->reset_counter);
	atomic_set(&vdev->pm->reset_pending, 1);

	pm_runtime_get_sync(vdev->drm.dev);
	down_write(&vdev->pm->reset_lock);
	ivpu_prepare_for_reset(vdev);
	ivpu_hw_reset(vdev);
	ivpu_pm_prepare_cold_boot(vdev);
	ivpu_jobs_abort_all(vdev);
	ivpu_dbg(vdev, PM, "Pre-reset done.\n");
}

void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);
	int ret;

	ivpu_dbg(vdev, PM, "Post-reset..\n");
	ret = ivpu_resume(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
	up_write(&vdev->pm->reset_lock);
	atomic_set(&vdev->pm->reset_pending, 0);
	ivpu_dbg(vdev, PM, "Post-reset done.\n");

	pm_runtime_mark_last_busy(vdev->drm.dev);
	pm_runtime_put_autosuspend(vdev->drm.dev);
}
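
/*
 * One-time PM setup: initialize reset bookkeeping, the recovery and job
 * timeout work items, and the runtime PM autosuspend delay (autosuspend is
 * effectively disabled when recovery is disabled).
 */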
void ivpu_pm_init(struct ivpu_device *vdev)
{
	struct device *dev = vdev->drm.dev;
	struct ivpu_pm_info *pm = vdev->pm;
	int delay;

	pm->vdev = vdev;
	pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;

	init_rwsem(&pm->reset_lock);
	atomic_set(&pm->reset_pending, 0);
	atomic_set(&pm->reset_counter, 0);

	INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work);
	INIT_DELAYED_WORK(&pm->job_timeout_work, ivpu_job_timeout_work);

	if (ivpu_disable_recovery)
		delay = -1;
	else
		delay = vdev->timeout.autosuspend;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, delay);

	ivpu_dbg(vdev, PM, "Autosuspend delay = %d\n", delay);
}

void ivpu_pm_cancel_recovery(struct ivpu_device *vdev)
{
	drm_WARN_ON(&vdev->drm, delayed_work_pending(&vdev->pm->job_timeout_work));
	cancel_work_sync(&vdev->pm->recovery_work);
}

void ivpu_pm_enable(struct ivpu_device *vdev)
{
	struct device *dev = vdev->drm.dev;

	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

void ivpu_pm_disable(struct ivpu_device *vdev)
{
	pm_runtime_get_noresume(vdev->drm.dev);
	pm_runtime_forbid(vdev->drm.dev);
}