aa6cf0152ce74e8bcc31ffe5019b13013584536c
[linux-2.6-block.git] / drivers / gpu / drm / i915 / gt / intel_gt_pm.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2019 Intel Corporation
5  */
6
7 #include "i915_drv.h"
8 #include "i915_params.h"
9 #include "intel_context.h"
10 #include "intel_engine_pm.h"
11 #include "intel_gt.h"
12 #include "intel_gt_pm.h"
13 #include "intel_pm.h"
14 #include "intel_wakeref.h"
15
16 static void pm_notify(struct drm_i915_private *i915, int state)
17 {
18         blocking_notifier_call_chain(&i915->gt.pm_notifications, state, i915);
19 }
20
/*
 * Bring the GT out of its parked state: grab the display power reference,
 * re-enable powersave/RPS and PMU accounting, kick off hangcheck and
 * notify PM listeners.  Installed as the wakeref .get callback (see
 * wf_ops), so it runs when the GT wakeref is first acquired; always
 * returns 0.
 */
static int __gt_unpark(struct intel_wakeref *wf)
{
	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
	struct drm_i915_private *i915 = gt->i915;

	GEM_TRACE("\n");

	/*
	 * It seems that the DMC likes to transition between the DC states a lot
	 * when there are no connected displays (no active power domains) during
	 * command submission.
	 *
	 * This activity has negative impact on the performance of the chip with
	 * huge latencies observed in the interrupt handler and elsewhere.
	 *
	 * Work around it by grabbing a GT IRQ power domain whilst there is any
	 * GT activity, preventing any DC state transitions.
	 */
	gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
	GEM_BUG_ON(!gt->awake);

	intel_enable_gt_powersave(i915);

	i915_update_gfx_val(i915);
	if (INTEL_GEN(i915) >= 6)
		gen6_rps_busy(i915);

	/* Resume PMU busyness tracking now that the GT is active again */
	i915_pmu_gt_unparked(i915);

	intel_gt_queue_hangcheck(gt);

	pm_notify(i915, INTEL_GT_UNPARK);

	return 0;
}
56
57 static int __gt_park(struct intel_wakeref *wf)
58 {
59         struct drm_i915_private *i915 =
60                 container_of(wf, typeof(*i915), gt.wakeref);
61         intel_wakeref_t wakeref = fetch_and_zero(&i915->gt.awake);
62
63         GEM_TRACE("\n");
64
65         pm_notify(i915, INTEL_GT_PARK);
66
67         i915_pmu_gt_parked(i915);
68         if (INTEL_GEN(i915) >= 6)
69                 gen6_rps_idle(i915);
70
71         /* Everything switched off, flush any residual interrupt just in case */
72         intel_synchronize_irq(i915);
73
74         GEM_BUG_ON(!wakeref);
75         intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
76
77         return 0;
78 }
79
/*
 * Wakeref callbacks tying GT activity to park/unpark transitions.
 * NOTE(review): INTEL_WAKEREF_PUT_ASYNC presumably lets the final put
 * (parking) run asynchronously rather than in the caller's context —
 * confirm against intel_wakeref.h.
 */
static const struct intel_wakeref_ops wf_ops = {
	.get = __gt_unpark,
	.put = __gt_park,
	.flags = INTEL_WAKEREF_PUT_ASYNC,
};
85
/*
 * Early (pre-hardware) PM setup: initialise the GT wakeref, wiring it to
 * the device's runtime-pm and the park/unpark callbacks above, and set up
 * the PM notifier chain used by pm_notify().
 */
void intel_gt_pm_init_early(struct intel_gt *gt)
{
	intel_wakeref_init(&gt->wakeref, &gt->i915->runtime_pm, &wf_ops);

	BLOCKING_INIT_NOTIFIER_HEAD(&gt->pm_notifications);
}
92
93 static bool reset_engines(struct intel_gt *gt)
94 {
95         if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
96                 return false;
97
98         return __intel_gt_reset(gt, ALL_ENGINES) == 0;
99 }
100
/**
 * intel_gt_sanitize - called after the GPU has lost power
 * @gt: the i915 GT container
 * @force: ignore a failed reset and sanitize engine state anyway
 *
 * Anytime we reset the GPU, either with an explicit GPU reset or through a
 * PCI power cycle, the GPU loses state and we must reset our state tracking
 * to match. Note that calling intel_gt_sanitize() if the GPU has not
 * been reset results in much confusion!
 */
void intel_gt_sanitize(struct intel_gt *gt, bool force)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	GEM_TRACE("\n");

	intel_uc_sanitize(&gt->uc);

	/*
	 * If the device reset failed (or was declined because it would
	 * clobber the display), skip the per-engine sanitization too —
	 * unless the caller insists via @force.
	 */
	if (!reset_engines(gt) && !force)
		return;

	for_each_engine(engine, gt->i915, id)
		__intel_engine_reset(engine, false);
}
126
/*
 * Restore the GT after suspend: for each engine, reset its pinned kernel
 * context state and call the engine's resume hook.  Stops at the first
 * engine whose resume fails (logging the error) and returns that error
 * code; returns 0 when every engine restarted.
 */
int intel_gt_resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * After resume, we may need to poke into the pinned kernel
	 * contexts to paper over any damage caused by the sudden suspend.
	 * Only the kernel contexts should remain pinned over suspend,
	 * allowing us to fixup the user contexts on their first pin.
	 */
	intel_gt_pm_get(gt);
	for_each_engine(engine, gt->i915, id) {
		struct intel_context *ce;

		intel_engine_pm_get(engine);

		ce = engine->kernel_context;
		if (ce) {
			GEM_BUG_ON(!intel_context_is_pinned(ce));

			/*
			 * NOTE(review): this looks like a lockdep-only
			 * annotation — telling lockdep pin_mutex is held
			 * around ce->ops->reset() without actually taking
			 * it (the context is already pinned).  Confirm.
			 */
			mutex_acquire(&ce->pin_mutex.dep_map, 0, 0, _THIS_IP_);
			ce->ops->reset(ce);
			mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
		}

		engine->serial++; /* kernel context lost */
		err = engine->resume(engine);

		intel_engine_pm_put(engine);
		if (err) {
			dev_err(gt->i915->drm.dev,
				"Failed to restart %s (%d)\n",
				engine->name, err);
			break;
		}
	}
	intel_gt_pm_put(gt);

	return err;
}
168
/* Runtime-PM suspend hook for the GT: forwards to the uC layer. */
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
	intel_uc_runtime_suspend(&gt->uc);
}
173
/*
 * Runtime-PM resume hook for the GT: re-initialise swizzling, then bring
 * the uC back up.  Returns intel_uc_runtime_resume()'s status (0 on
 * success).
 */
int intel_gt_runtime_resume(struct intel_gt *gt)
{
	intel_gt_init_swizzling(gt);

	return intel_uc_runtime_resume(&gt->uc);
}