2 * Copyright © 2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
27 #include "display/intel_dp.h"
30 #include "intel_atomic.h"
31 #include "intel_crtc.h"
33 #include "intel_display_types.h"
34 #include "intel_dp_aux.h"
35 #include "intel_hdmi.h"
36 #include "intel_psr.h"
37 #include "intel_snps_phy.h"
38 #include "skl_universal_plane.h"
41 * DOC: Panel Self Refresh (PSR/SRD)
43 * Since Haswell, the display controller supports Panel Self-Refresh on display
44 * panels which have a remote frame buffer (RFB) implemented according to the PSR
45 * spec in eDP 1.3. The PSR feature allows the display to go to lower standby states
46 * when the system is idle but the display is on, as it eliminates display refresh
47 * requests to DDR memory completely as long as the frame buffer for that
48 * display is unchanged.
50 * Panel Self Refresh must be supported by both Hardware (source) and
53 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
54 * to power down the link and memory controller. For DSI panels the same idea
55 * is called "manual mode".
57 * The implementation uses the hardware-based PSR support which automatically
58 * enters/exits self-refresh mode. The hardware takes care of sending the
59 * required DP aux message and could even retrain the link (that part isn't
60 * enabled yet though). The hardware also keeps track of any frontbuffer
61 * changes to know when to exit self-refresh mode again. Unfortunately that
62 * part doesn't work too well, which is why the i915 PSR support uses
63 * software frontbuffer tracking to make sure it doesn't miss a screen
64 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
65 * get called by the frontbuffer tracking code. Note that because of locking
66 * issues the self-refresh re-enable code is done from a work queue, which
67 * must be correctly synchronized/cancelled when shutting down the pipe.
69 * DC3CO (DC3 clock off)
71 * On top of PSR2, GEN12 adds an intermediate power-saving state that turns
72 * the clock off automatically during the PSR2 idle state.
73 * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
74 * entry/exit allows the HW to enter a low-power state even when page flipping
75 * periodically (for instance a 30fps video playback scenario).
77 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was in
78 * it), DC3CO is enabled, and tgl_dc3co_disable_work is scheduled to run after 6
79 * frames; if no other flip occurs and that work is executed, DC3CO is
80 * disabled and PSR2 is configured to enter deep sleep, resetting again in case
82 * Front buffer modifications do not trigger DC3CO activation on purpose as it
83 * would bring a lot of complexity and most modern systems will only
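/*
 * Illustrative sketch (not part of the driver; all example_* names are
 * hypothetical): the DC3CO scheme described above boils down to rearming a
 * delayed work on every flip, so the disable work only runs once the display
 * has been flip-idle for 6 frames. Assumes <linux/workqueue.h>.
 */
struct example_dc3co_ctx {
	struct delayed_work disable_work;	/* disables DC3CO when idle */
	unsigned long frame_jiffies;		/* one frame time, in jiffies */
};

static void example_dc3co_flip_hook(struct example_dc3co_ctx *ctx)
{
	/*
	 * A flip occurred: push the disable work 6 frames into the future.
	 * mod_delayed_work() rearms the timer if the work was already queued.
	 */
	mod_delayed_work(system_wq, &ctx->disable_work,
			 6 * ctx->frame_jiffies);
}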
87 static bool psr_global_enabled(struct intel_dp *intel_dp)
89 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
91 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
92 case I915_PSR_DEBUG_DEFAULT:
93 return i915->params.enable_psr;
94 case I915_PSR_DEBUG_DISABLE:
101 static bool psr2_global_enabled(struct intel_dp *intel_dp)
103 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
105 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
106 case I915_PSR_DEBUG_DISABLE:
107 case I915_PSR_DEBUG_FORCE_PSR1:
110 if (i915->params.enable_psr == 1)
116 static void psr_irq_control(struct intel_dp *intel_dp)
118 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
119 enum transcoder trans_shift;
124 * gen12+ has one register per transcoder, all using the same bit
125 * definitions: handle it as TRANSCODER_EDP to force a
126 * 0 shift in the bit definitions
128 if (DISPLAY_VER(dev_priv) >= 12) {
130 imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
132 trans_shift = intel_dp->psr.transcoder;
133 imr_reg = EDP_PSR_IMR;
136 mask = EDP_PSR_ERROR(trans_shift);
137 if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
138 mask |= EDP_PSR_POST_EXIT(trans_shift) |
139 EDP_PSR_PRE_ENTRY(trans_shift);
141 /* Warning: it is masking/setting reserved bits too */
142 val = intel_de_read(dev_priv, imr_reg);
143 val &= ~EDP_PSR_TRANS_MASK(trans_shift);
145 intel_de_write(dev_priv, imr_reg, val);
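/*
 * Sketch of the addressing difference the comment above describes
 * (example_* is hypothetical): pre-gen12 packs all transcoders into one
 * register, so the bit group is shifted by the transcoder number; gen12+
 * has one register per transcoder, so the shift is always 0.
 */
static u32 example_psr_error_bit(int display_ver, enum transcoder cpu_transcoder)
{
	unsigned int trans_shift = display_ver >= 12 ? 0 : cpu_transcoder;

	return EDP_PSR_ERROR(trans_shift);
}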
148 static void psr_event_print(struct drm_i915_private *i915,
149 u32 val, bool psr2_enabled)
151 drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
152 if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
153 drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
154 if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
155 drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
156 if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
157 drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
158 if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
159 drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
160 if (val & PSR_EVENT_GRAPHICS_RESET)
161 drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
162 if (val & PSR_EVENT_PCH_INTERRUPT)
163 drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
164 if (val & PSR_EVENT_MEMORY_UP)
165 drm_dbg_kms(&i915->drm, "\tMemory up\n");
166 if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
167 drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
168 if (val & PSR_EVENT_WD_TIMER_EXPIRE)
169 drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
170 if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
171 drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
172 if (val & PSR_EVENT_REGISTER_UPDATE)
173 drm_dbg_kms(&i915->drm, "\tRegister updated\n");
174 if (val & PSR_EVENT_HDCP_ENABLE)
175 drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
176 if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
177 drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
178 if (val & PSR_EVENT_VBI_ENABLE)
179 drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
180 if (val & PSR_EVENT_LPSP_MODE_EXIT)
181 drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
182 if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
183 drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
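/*
 * Equivalent table-driven form of the decode above, as a sketch; only two
 * of the event bits are shown and the example_* name is hypothetical.
 */
static void example_psr_event_print(struct drm_i915_private *i915, u32 val)
{
	static const struct {
		u32 bit;
		const char *name;
	} events[] = {
		{ PSR_EVENT_MEMORY_UP, "Memory up" },
		{ PSR_EVENT_VBI_ENABLE, "VBI enabled" },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(events); i++)
		if (val & events[i].bit)
			drm_dbg_kms(&i915->drm, "\t%s\n", events[i].name);
}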
186 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
188 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
189 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
190 ktime_t time_ns = ktime_get();
191 enum transcoder trans_shift;
194 if (DISPLAY_VER(dev_priv) >= 12) {
196 imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
198 trans_shift = intel_dp->psr.transcoder;
199 imr_reg = EDP_PSR_IMR;
202 if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
203 intel_dp->psr.last_entry_attempt = time_ns;
204 drm_dbg_kms(&dev_priv->drm,
205 "[transcoder %s] PSR entry attempt in 2 vblanks\n",
206 transcoder_name(cpu_transcoder));
209 if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
210 intel_dp->psr.last_exit = time_ns;
211 drm_dbg_kms(&dev_priv->drm,
212 "[transcoder %s] PSR exit completed\n",
213 transcoder_name(cpu_transcoder));
215 if (DISPLAY_VER(dev_priv) >= 9) {
216 u32 val = intel_de_read(dev_priv,
217 PSR_EVENT(cpu_transcoder));
218 bool psr2_enabled = intel_dp->psr.psr2_enabled;
220 intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
222 psr_event_print(dev_priv, val, psr2_enabled);
226 if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
229 drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
230 transcoder_name(cpu_transcoder));
232 intel_dp->psr.irq_aux_error = true;
235 * If this interrupt is not masked it will keep
236 * firing so fast that it prevents the scheduled
238 * Also, after a PSR error we don't want to arm PSR
239 * again, so we don't care about unmasking the interrupt
240 * or clearing irq_aux_error.
242 val = intel_de_read(dev_priv, imr_reg);
243 val |= EDP_PSR_ERROR(trans_shift);
244 intel_de_write(dev_priv, imr_reg, val);
246 schedule_work(&intel_dp->psr.work);
250 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
254 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
257 return alpm_caps & DP_ALPM_CAP;
260 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
262 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
263 u8 val = 8; /* assume the worst if we can't read the value */
265 if (drm_dp_dpcd_readb(&intel_dp->aux,
266 DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
267 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
269 drm_dbg_kms(&i915->drm,
270 "Unable to get sink synchronization latency, assuming 8 frames\n");
274 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
276 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
281 /* If the sink doesn't have specific granularity requirements, set the legacy ones */
282 if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
283 /* As PSR2 HW sends full lines, we do not care about x granularity */
289 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
291 drm_dbg_kms(&i915->drm,
292 "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
294 * Spec says that if the value read is 0 the default granularity should
297 if (r != 2 || w == 0)
300 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
302 drm_dbg_kms(&i915->drm,
303 "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
310 intel_dp->psr.su_w_granularity = w;
311 intel_dp->psr.su_y_granularity = y;
314 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
316 struct drm_i915_private *dev_priv =
317 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
319 drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
320 sizeof(intel_dp->psr_dpcd));
322 if (!intel_dp->psr_dpcd[0])
324 drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
325 intel_dp->psr_dpcd[0]);
327 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
328 drm_dbg_kms(&dev_priv->drm,
329 "PSR support not currently available for this panel\n");
333 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
334 drm_dbg_kms(&dev_priv->drm,
335 "Panel lacks power state control, PSR cannot be enabled\n");
339 intel_dp->psr.sink_support = true;
340 intel_dp->psr.sink_sync_latency =
341 intel_dp_get_sink_sync_latency(intel_dp);
343 if (DISPLAY_VER(dev_priv) >= 9 &&
344 (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
345 bool y_req = intel_dp->psr_dpcd[1] &
346 DP_PSR2_SU_Y_COORDINATE_REQUIRED;
347 bool alpm = intel_dp_get_alpm_status(intel_dp);
350 * All panels that support PSR version 03h (PSR2 +
351 * Y-coordinate) can handle Y-coordinates in VSC, but we are
352 * only sure that it is going to be used when required by the
353 * panel. This way the panel is capable of doing selective updates
354 * without an aux frame sync.
356 * To support PSR version 02h and PSR version 03h panels without the
357 * Y-coordinate requirement we would need to enable
360 intel_dp->psr.sink_psr2_support = y_req && alpm;
361 drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
362 intel_dp->psr.sink_psr2_support ? "" : "not ");
364 if (intel_dp->psr.sink_psr2_support) {
365 intel_dp->psr.colorimetry_support =
366 intel_dp_get_colorimetry_status(intel_dp);
367 intel_dp_get_su_granularity(intel_dp);
372 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
374 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
375 u8 dpcd_val = DP_PSR_ENABLE;
377 /* Enable ALPM at sink for psr2 */
378 if (intel_dp->psr.psr2_enabled) {
379 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
381 DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
383 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
385 if (intel_dp->psr.link_standby)
386 dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
388 if (DISPLAY_VER(dev_priv) >= 8)
389 dpcd_val |= DP_PSR_CRC_VERIFICATION;
392 if (intel_dp->psr.req_psr2_sdp_prior_scanline)
393 dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
395 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
397 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
400 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
402 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
405 if (DISPLAY_VER(dev_priv) >= 11)
406 val |= EDP_PSR_TP4_TIME_0US;
408 if (dev_priv->params.psr_safest_params) {
409 val |= EDP_PSR_TP1_TIME_2500us;
410 val |= EDP_PSR_TP2_TP3_TIME_2500us;
414 if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
415 val |= EDP_PSR_TP1_TIME_0us;
416 else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
417 val |= EDP_PSR_TP1_TIME_100us;
418 else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
419 val |= EDP_PSR_TP1_TIME_500us;
421 val |= EDP_PSR_TP1_TIME_2500us;
423 if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
424 val |= EDP_PSR_TP2_TP3_TIME_0us;
425 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
426 val |= EDP_PSR_TP2_TP3_TIME_100us;
427 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
428 val |= EDP_PSR_TP2_TP3_TIME_500us;
430 val |= EDP_PSR_TP2_TP3_TIME_2500us;
433 if (intel_dp_source_supports_tps3(dev_priv) &&
434 drm_dp_tps3_supported(intel_dp->dpcd))
435 val |= EDP_PSR_TP1_TP3_SEL;
437 val |= EDP_PSR_TP1_TP2_SEL;
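/*
 * Worked example: a VBT tp1 wakeup time of 250 us falls into the
 * (100, 500] bucket above, so EDP_PSR_TP1_TIME_500us is chosen: the
 * programmed wait must be at least as long as the panel needs.
 */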
442 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
444 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
447 /* Let's use 6 as the minimum to cover all known cases including the
448 * off-by-one issue that HW has in some cases.
450 idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
451 idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
453 if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
459 static void hsw_activate_psr1(struct intel_dp *intel_dp)
461 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
462 u32 max_sleep_time = 0x1f;
463 u32 val = EDP_PSR_ENABLE;
465 val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;
467 val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
468 if (IS_HASWELL(dev_priv))
469 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
471 if (intel_dp->psr.link_standby)
472 val |= EDP_PSR_LINK_STANDBY;
474 val |= intel_psr1_get_tp_time(intel_dp);
476 if (DISPLAY_VER(dev_priv) >= 8)
477 val |= EDP_PSR_CRC_ENABLE;
479 val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
480 EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
481 intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
484 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
486 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
489 if (dev_priv->params.psr_safest_params)
490 return EDP_PSR2_TP2_TIME_2500us;
492 if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
493 dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
494 val |= EDP_PSR2_TP2_TIME_50us;
495 else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
496 val |= EDP_PSR2_TP2_TIME_100us;
497 else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
498 val |= EDP_PSR2_TP2_TIME_500us;
500 val |= EDP_PSR2_TP2_TIME_2500us;
505 static void hsw_activate_psr2(struct intel_dp *intel_dp)
507 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
508 u32 val = EDP_PSR2_ENABLE;
510 val |= psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
512 if (!IS_ALDERLAKE_P(dev_priv))
513 val |= EDP_SU_TRACK_ENABLE;
515 if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
516 val |= EDP_Y_COORDINATE_ENABLE;
518 val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
519 val |= intel_psr2_get_tp_time(intel_dp);
521 /* Wa_22012278275:adl-p */
522 if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
523 static const u8 map[] = {
534 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
535 * comments below for more information
539 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
541 tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
542 tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
545 tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
546 tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
548 } else if (DISPLAY_VER(dev_priv) >= 12) {
550 * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are the default
551 * values from BSpec. To set an optimal power
552 * consumption, modes lower than 4k resolution need to decrease
553 * IO_BUFFER_WAKE and FAST_WAKE, and modes higher than 4K resolution
554 * need to increase IO_BUFFER_WAKE and FAST_WAKE.
556 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
557 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
558 val |= TGL_EDP_PSR2_FAST_WAKE(7);
559 } else if (DISPLAY_VER(dev_priv) >= 9) {
560 val |= EDP_PSR2_IO_BUFFER_WAKE(7);
561 val |= EDP_PSR2_FAST_WAKE(7);
564 if (intel_dp->psr.req_psr2_sdp_prior_scanline)
565 val |= EDP_PSR2_SU_SDP_SCANLINE;
567 if (intel_dp->psr.psr2_sel_fetch_enabled) {
571 if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
572 intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
573 DIS_RAM_BYPASS_PSR2_MAN_TRACK,
574 DIS_RAM_BYPASS_PSR2_MAN_TRACK);
576 tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
577 drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
578 } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
579 intel_de_write(dev_priv,
580 PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
584 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
585 * recommends keeping this bit unset while PSR2 is enabled.
587 intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);
589 intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
593 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
595 if (IS_ALDERLAKE_P(dev_priv))
596 return trans == TRANSCODER_A || trans == TRANSCODER_B;
597 else if (DISPLAY_VER(dev_priv) >= 12)
598 return trans == TRANSCODER_A;
600 return trans == TRANSCODER_EDP;
603 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
605 if (!cstate || !cstate->hw.active)
608 return DIV_ROUND_UP(1000 * 1000,
609 drm_mode_vrefresh(&cstate->hw.adjusted_mode));
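/* Worked example: a 60 Hz mode gives DIV_ROUND_UP(1000000, 60) = 16667 us per frame. */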
612 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
615 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
618 idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
619 val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
620 val &= ~EDP_PSR2_IDLE_FRAME_MASK;
622 intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
625 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
627 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
629 psr2_program_idle_frames(intel_dp, 0);
630 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
633 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
635 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
637 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
638 psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
641 static void tgl_dc3co_disable_work(struct work_struct *work)
643 struct intel_dp *intel_dp =
644 container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
646 mutex_lock(&intel_dp->psr.lock);
647 /* If delayed work is pending, it is not idle */
648 if (delayed_work_pending(&intel_dp->psr.dc3co_work))
651 tgl_psr2_disable_dc3co(intel_dp);
653 mutex_unlock(&intel_dp->psr.lock);
656 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
658 if (!intel_dp->psr.dc3co_exitline)
661 cancel_delayed_work(&intel_dp->psr.dc3co_work);
662 /* Before PSR2 exit disallow DC3CO */
663 tgl_psr2_disable_dc3co(intel_dp);
667 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
668 struct intel_crtc_state *crtc_state)
670 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
671 enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
672 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
673 enum port port = dig_port->base.port;
675 if (IS_ALDERLAKE_P(dev_priv))
676 return pipe <= PIPE_B && port <= PORT_B;
678 return pipe == PIPE_A && port == PORT_A;
682 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
683 struct intel_crtc_state *crtc_state)
685 const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
686 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
690 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
691 * disable DC3CO until the changed dc3co activating/deactivating sequence
692 * is applied. B.Specs:49196
697 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
698 * TODO: when the issue is addressed, this restriction should be removed.
700 if (crtc_state->enable_psr2_sel_fetch)
703 if (!(dev_priv->dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
706 if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
709 /* Wa_16011303918:adl-p */
710 if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
714 * DC3CO Exit time 200us B.Spec 49196
715 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
718 intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
720 if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
723 crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
726 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
727 struct intel_crtc_state *crtc_state)
729 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
731 if (!dev_priv->params.enable_psr2_sel_fetch &&
732 intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
733 drm_dbg_kms(&dev_priv->drm,
734 "PSR2 sel fetch not enabled, disabled by parameter\n");
738 if (crtc_state->uapi.async_flip) {
739 drm_dbg_kms(&dev_priv->drm,
740 "PSR2 sel fetch not enabled, async flip enabled\n");
744 /* Wa_14010254185 Wa_14010103792 */
745 if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
746 drm_dbg_kms(&dev_priv->drm,
747 "PSR2 sel fetch not enabled, missing the implementation of WAs\n");
751 return crtc_state->enable_psr2_sel_fetch = true;
754 static bool psr2_granularity_check(struct intel_dp *intel_dp,
755 struct intel_crtc_state *crtc_state)
757 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
758 const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
759 const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
760 u16 y_granularity = 0;
762 /* PSR2 HW only sends full lines so we only need to validate the width */
763 if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
766 if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
769 /* HW tracking is only aligned to 4 lines */
770 if (!crtc_state->enable_psr2_sel_fetch)
771 return intel_dp->psr.su_y_granularity == 4;
774 * adl_p has 1 line granularity. For other platforms with SW tracking we
775 * can adjust the y coordinates to match the sink requirement if a multiple of
778 if (IS_ALDERLAKE_P(dev_priv))
779 y_granularity = intel_dp->psr.su_y_granularity;
780 else if (intel_dp->psr.su_y_granularity <= 2)
782 else if ((intel_dp->psr.su_y_granularity % 4) == 0)
783 y_granularity = intel_dp->psr.su_y_granularity;
785 if (y_granularity == 0 || crtc_vdisplay % y_granularity)
788 crtc_state->su_y_granularity = y_granularity;
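/*
 * Worked example: with selective fetch enabled on a non-ADL-P platform, a
 * 1080 line mode with su_y_granularity = 8 passes: 8 % 4 == 0 selects
 * y_granularity = 8, and 1080 % 8 == 0, so crtc_state->su_y_granularity
 * becomes 8.
 */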
792 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
793 struct intel_crtc_state *crtc_state)
795 const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
796 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
797 u32 hblank_total, hblank_ns, req_ns;
799 hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
800 hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
802 /* From spec: (72 / number of lanes) * 1000 / symbol clock frequency MHz */
803 req_ns = (72 / crtc_state->lane_count) * 1000 / (crtc_state->port_clock / 1000);
805 if ((hblank_ns - req_ns) > 100)
808 if (DISPLAY_VER(dev_priv) < 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
811 crtc_state->req_psr2_sdp_prior_scanline = true;
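/*
 * Worked example: 4 lanes at HBR2 (port_clock = 540000 kHz, i.e. a 540 MHz
 * symbol clock) gives req_ns = (72 / 4) * 1000 / 540 = 33 ns, while an
 * 80 pixel hblank on a 148500 kHz pixel clock gives hblank_ns =
 * 80 * 1000000 / 148500 = 538 ns, so the SDP indication fits easily.
 */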
815 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
816 struct intel_crtc_state *crtc_state)
818 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
819 int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
820 int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
821 int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
823 if (!intel_dp->psr.sink_psr2_support)
826 /* JSL and EHL only support eDP 1.3 */
827 if (IS_JSL_EHL(dev_priv)) {
828 drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
833 if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
835 drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
839 if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
840 drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
844 if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
845 drm_dbg_kms(&dev_priv->drm,
846 "PSR2 not supported in transcoder %s\n",
847 transcoder_name(crtc_state->cpu_transcoder));
851 if (!psr2_global_enabled(intel_dp)) {
852 drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
857 * DSC and PSR2 cannot be enabled simultaneously. If a requested
858 * resolution requires DSC to be enabled, priority is given to DSC
861 if (crtc_state->dsc.compression_enable) {
862 drm_dbg_kms(&dev_priv->drm,
863 "PSR2 cannot be enabled since DSC is enabled\n");
867 if (crtc_state->crc_enabled) {
868 drm_dbg_kms(&dev_priv->drm,
869 "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
873 if (DISPLAY_VER(dev_priv) >= 12) {
877 } else if (DISPLAY_VER(dev_priv) >= 10) {
881 } else if (DISPLAY_VER(dev_priv) == 9) {
887 if (crtc_state->pipe_bpp > max_bpp) {
888 drm_dbg_kms(&dev_priv->drm,
889 "PSR2 not enabled, pipe bpp %d > max supported %d\n",
890 crtc_state->pipe_bpp, max_bpp);
894 /* Wa_16011303918:adl-p */
895 if (crtc_state->vrr.enable &&
896 IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
897 drm_dbg_kms(&dev_priv->drm,
898 "PSR2 not enabled, not compatible with HW stepping + VRR\n");
902 if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
903 drm_dbg_kms(&dev_priv->drm,
904 "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
908 if (HAS_PSR2_SEL_FETCH(dev_priv)) {
909 if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
910 !HAS_PSR_HW_TRACKING(dev_priv)) {
911 drm_dbg_kms(&dev_priv->drm,
912 "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
918 if (!crtc_state->enable_psr2_sel_fetch &&
919 IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
920 drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported in this display stepping\n");
924 if (!psr2_granularity_check(intel_dp, crtc_state)) {
925 drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
929 if (!crtc_state->enable_psr2_sel_fetch &&
930 (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
931 drm_dbg_kms(&dev_priv->drm,
932 "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
933 crtc_hdisplay, crtc_vdisplay,
934 psr_max_h, psr_max_v);
938 tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
942 crtc_state->enable_psr2_sel_fetch = false;
946 void intel_psr_compute_config(struct intel_dp *intel_dp,
947 struct intel_crtc_state *crtc_state,
948 struct drm_connector_state *conn_state)
950 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
951 const struct drm_display_mode *adjusted_mode =
952 &crtc_state->hw.adjusted_mode;
956 * Current PSR panels don't work reliably with VRR enabled,
957 * so if VRR is enabled, do not enable PSR.
959 if (crtc_state->vrr.enable)
962 if (!CAN_PSR(intel_dp))
965 if (!psr_global_enabled(intel_dp)) {
966 drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
970 if (intel_dp->psr.sink_not_reliable) {
971 drm_dbg_kms(&dev_priv->drm,
972 "PSR sink implementation is not reliable\n");
976 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
977 drm_dbg_kms(&dev_priv->drm,
978 "PSR condition failed: Interlaced mode enabled\n");
982 psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
983 if (psr_setup_time < 0) {
984 drm_dbg_kms(&dev_priv->drm,
985 "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
986 intel_dp->psr_dpcd[1]);
990 if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
991 adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
992 drm_dbg_kms(&dev_priv->drm,
993 "PSR condition failed: PSR setup time (%d us) too long\n",
998 crtc_state->has_psr = true;
999 crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1001 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1002 intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1003 &crtc_state->psr_vsc);
1006 void intel_psr_get_config(struct intel_encoder *encoder,
1007 struct intel_crtc_state *pipe_config)
1009 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1010 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1011 struct intel_dp *intel_dp;
1017 intel_dp = &dig_port->dp;
1018 if (!CAN_PSR(intel_dp))
1021 mutex_lock(&intel_dp->psr.lock);
1022 if (!intel_dp->psr.enabled)
1026 * Not possible to read the EDP_PSR/PSR2_CTL registers as they get
1027 * enabled/disabled because of frontbuffer tracking and other mechanisms.
1029 pipe_config->has_psr = true;
1030 pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1031 pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1033 if (!intel_dp->psr.psr2_enabled)
1036 if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1037 val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
1038 if (val & PSR2_MAN_TRK_CTL_ENABLE)
1039 pipe_config->enable_psr2_sel_fetch = true;
1042 if (DISPLAY_VER(dev_priv) >= 12) {
1043 val = intel_de_read(dev_priv, EXITLINE(intel_dp->psr.transcoder));
1044 val &= EXITLINE_MASK;
1045 pipe_config->dc3co_exitline = val;
1048 mutex_unlock(&intel_dp->psr.lock);
1051 static void intel_psr_activate(struct intel_dp *intel_dp)
1053 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1054 enum transcoder transcoder = intel_dp->psr.transcoder;
1056 if (transcoder_has_psr2(dev_priv, transcoder))
1057 drm_WARN_ON(&dev_priv->drm,
1058 intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);
1060 drm_WARN_ON(&dev_priv->drm,
1061 intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
1062 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1063 lockdep_assert_held(&intel_dp->psr.lock);
1065 /* psr1 and psr2 are mutually exclusive. */
1066 if (intel_dp->psr.psr2_enabled)
1067 hsw_activate_psr2(intel_dp);
1069 hsw_activate_psr1(intel_dp);
1071 intel_dp->psr.active = true;
1074 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1076 switch (intel_dp->psr.pipe) {
1078 return LATENCY_REPORTING_REMOVED_PIPE_A;
1080 return LATENCY_REPORTING_REMOVED_PIPE_B;
1082 return LATENCY_REPORTING_REMOVED_PIPE_C;
1084 MISSING_CASE(intel_dp->psr.pipe);
1089 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1090 const struct intel_crtc_state *crtc_state)
1092 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1093 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1097 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
1098 * mask LPSP to avoid a dependency on other drivers that might block
1099 * runtime_pm, besides preventing other HW tracking issues, now that we
1100 * can rely on frontbuffer tracking.
1102 mask = EDP_PSR_DEBUG_MASK_MEMUP |
1103 EDP_PSR_DEBUG_MASK_HPD |
1104 EDP_PSR_DEBUG_MASK_LPSP |
1105 EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1107 if (DISPLAY_VER(dev_priv) < 11)
1108 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1110 intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
1113 psr_irq_control(intel_dp);
1115 if (intel_dp->psr.dc3co_exitline) {
1119 * TODO: if a future platform supports DC3CO in more than one
1120 * transcoder, EXITLINE will need to be unset when disabling PSR
1122 val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
1123 val &= ~EXITLINE_MASK;
1124 val |= intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT;
1125 val |= EXITLINE_ENABLE;
1126 intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
1129 if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1130 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1131 intel_dp->psr.psr2_sel_fetch_enabled ?
1132 IGNORE_PSR2_HW_TRACKING : 0);
1134 if (intel_dp->psr.psr2_enabled) {
1135 if (DISPLAY_VER(dev_priv) == 9)
1136 intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1137 PSR2_VSC_ENABLE_PROG_HEADER |
1138 PSR2_ADD_VERTICAL_LINE_COUNT);
1141 * Wa_16014451276:adlp
1142 * All supported adlp panels have 1-based X granularity; this may
1143 * cause issues if unsupported panels are used.
1145 if (IS_ALDERLAKE_P(dev_priv))
1146 intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1147 ADLP_1_BASED_X_GRANULARITY);
1149 /* Wa_16011168373:adl-p */
1150 if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1151 intel_de_rmw(dev_priv,
1152 TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
1153 TRANS_SET_CONTEXT_LATENCY_MASK,
1154 TRANS_SET_CONTEXT_LATENCY_VALUE(1));
1156 /* Wa_16012604467:adlp */
1157 if (IS_ALDERLAKE_P(dev_priv))
1158 intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1159 CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1161 /* Wa_16013835468:tgl[b0+], dg1 */
1162 if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
1166 vtotal = crtc_state->uapi.adjusted_mode.crtc_vtotal -
1167 crtc_state->uapi.adjusted_mode.crtc_vdisplay;
1168 vblank = crtc_state->uapi.adjusted_mode.crtc_vblank_end -
1169 crtc_state->uapi.adjusted_mode.crtc_vblank_start;
1170 if (vblank > vtotal)
1171 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0,
1172 wa_16013835468_bit_get(intel_dp));
1177 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1179 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1183 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1184 * will still keep the error set even after the reset done in the
1185 * irq_preinstall and irq_uninstall hooks.
1186 * And enabling PSR in this situation causes the screen to freeze the
1187 * first time that the PSR HW tries to activate, so let's keep PSR disabled
1188 * to avoid any rendering problems.
1190 if (DISPLAY_VER(dev_priv) >= 12) {
1191 val = intel_de_read(dev_priv,
1192 TRANS_PSR_IIR(intel_dp->psr.transcoder));
1193 val &= EDP_PSR_ERROR(0);
1195 val = intel_de_read(dev_priv, EDP_PSR_IIR);
1196 val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
1199 intel_dp->psr.sink_not_reliable = true;
1200 drm_dbg_kms(&dev_priv->drm,
1201 "PSR interruption error set, not enabling PSR\n");
1208 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1209 const struct intel_crtc_state *crtc_state)
1211 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1212 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1213 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1214 struct intel_encoder *encoder = &dig_port->base;
1217 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1219 intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1220 intel_dp->psr.busy_frontbuffer_bits = 0;
1221 intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1222 intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1223 /* DC5/DC6 requires at least 6 idle frames */
1224 val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1225 intel_dp->psr.dc3co_exit_delay = val;
1226 intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1227 intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1228 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1229 intel_dp->psr.req_psr2_sdp_prior_scanline =
1230 crtc_state->req_psr2_sdp_prior_scanline;
1232 if (!psr_interrupt_error_check(intel_dp))
1235 drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1236 intel_dp->psr.psr2_enabled ? "2" : "1");
1237 intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
1238 intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1239 intel_psr_enable_sink(intel_dp);
1240 intel_psr_enable_source(intel_dp, crtc_state);
1241 intel_dp->psr.enabled = true;
1242 intel_dp->psr.paused = false;
1244 intel_psr_activate(intel_dp);
1247 static void intel_psr_exit(struct intel_dp *intel_dp)
1249 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1252 if (!intel_dp->psr.active) {
1253 if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
1254 val = intel_de_read(dev_priv,
1255 EDP_PSR2_CTL(intel_dp->psr.transcoder));
1256 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1259 val = intel_de_read(dev_priv,
1260 EDP_PSR_CTL(intel_dp->psr.transcoder));
1261 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1266 if (intel_dp->psr.psr2_enabled) {
1267 tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1268 val = intel_de_read(dev_priv,
1269 EDP_PSR2_CTL(intel_dp->psr.transcoder));
1270 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1271 val &= ~EDP_PSR2_ENABLE;
1272 intel_de_write(dev_priv,
1273 EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
1275 val = intel_de_read(dev_priv,
1276 EDP_PSR_CTL(intel_dp->psr.transcoder));
1277 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1278 val &= ~EDP_PSR_ENABLE;
1279 intel_de_write(dev_priv,
1280 EDP_PSR_CTL(intel_dp->psr.transcoder), val);
1282 intel_dp->psr.active = false;
1285 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1287 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1288 i915_reg_t psr_status;
1289 u32 psr_status_mask;
1291 if (intel_dp->psr.psr2_enabled) {
1292 psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
1293 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1295 psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
1296 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1299 /* Wait till PSR is idle */
1300 if (intel_de_wait_for_clear(dev_priv, psr_status,
1301 psr_status_mask, 2000))
1302 drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1305 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1307 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1308 enum phy phy = intel_port_to_phy(dev_priv,
1309 dp_to_dig_port(intel_dp)->base.port);
1311 lockdep_assert_held(&intel_dp->psr.lock);
1313 if (!intel_dp->psr.enabled)
1316 drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1317 intel_dp->psr.psr2_enabled ? "2" : "1");
1319 intel_psr_exit(intel_dp);
1320 intel_psr_wait_exit_locked(intel_dp);
1323 if (intel_dp->psr.psr2_sel_fetch_enabled &&
1324 IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1325 intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
1326 DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
1328 if (intel_dp->psr.psr2_enabled) {
1329 /* Wa_16011168373:adl-p */
1330 if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1331 intel_de_rmw(dev_priv,
1332 TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
1333 TRANS_SET_CONTEXT_LATENCY_MASK, 0);
1335 /* Wa_16012604467:adlp */
1336 if (IS_ALDERLAKE_P(dev_priv))
1337 intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1338 CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1340 /* Wa_16013835468:tgl[b0+], dg1 */
1341 if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
1343 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1344 wa_16013835468_bit_get(intel_dp), 0);
1347 intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1349 /* Disable PSR on Sink */
1350 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1352 if (intel_dp->psr.psr2_enabled)
1353 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1355 intel_dp->psr.enabled = false;
1359 * intel_psr_disable - Disable PSR
1360 * @intel_dp: Intel DP
1361 * @old_crtc_state: old CRTC state
1363 * This function needs to be called before disabling the pipe.
1365 void intel_psr_disable(struct intel_dp *intel_dp,
1366 const struct intel_crtc_state *old_crtc_state)
1368 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1370 if (!old_crtc_state->has_psr)
1373 if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1376 mutex_lock(&intel_dp->psr.lock);
1378 intel_psr_disable_locked(intel_dp);
1380 mutex_unlock(&intel_dp->psr.lock);
1381 cancel_work_sync(&intel_dp->psr.work);
1382 cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1386 * intel_psr_pause - Pause PSR
1387 * @intel_dp: Intel DP
1389 * This function needs to be called after enabling PSR.
1391 void intel_psr_pause(struct intel_dp *intel_dp)
1393 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1394 struct intel_psr *psr = &intel_dp->psr;
1396 if (!CAN_PSR(intel_dp))
1399 mutex_lock(&psr->lock);
1401 if (!psr->enabled) {
1402 mutex_unlock(&psr->lock);
1406 /* If we ever hit this, we will need to add a refcount to pause/resume */
1407 drm_WARN_ON(&dev_priv->drm, psr->paused);
1409 intel_psr_exit(intel_dp);
1410 intel_psr_wait_exit_locked(intel_dp);
1413 mutex_unlock(&psr->lock);
1415 cancel_work_sync(&psr->work);
1416 cancel_delayed_work_sync(&psr->dc3co_work);
1420 * intel_psr_resume - Resume PSR
1421 * @intel_dp: Intel DP
1423 * This function needs to be called after pausing PSR.
1425 void intel_psr_resume(struct intel_dp *intel_dp)
1427 struct intel_psr *psr = &intel_dp->psr;
1429 if (!CAN_PSR(intel_dp))
1432 mutex_lock(&psr->lock);
1437 psr->paused = false;
1438 intel_psr_activate(intel_dp);
1441 mutex_unlock(&psr->lock);
1444 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1446 return IS_ALDERLAKE_P(dev_priv) ? 0 : PSR2_MAN_TRK_CTL_ENABLE;
1449 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1451 return IS_ALDERLAKE_P(dev_priv) ?
1452 ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1453 PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1456 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1458 return IS_ALDERLAKE_P(dev_priv) ?
1459 ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1460 PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1463 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1465 return IS_ALDERLAKE_P(dev_priv) ?
1466 ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1467 PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
1470 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1472 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1474 if (intel_dp->psr.psr2_sel_fetch_enabled)
1475 intel_de_write(dev_priv,
1476 PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
1477 man_trk_ctl_enable_bit_get(dev_priv) |
1478 man_trk_ctl_partial_frame_bit_get(dev_priv) |
1479 man_trk_ctl_single_full_frame_bit_get(dev_priv));
1482 * Display WA #0884: skl+
1483 * This documented WA for bxt can be safely applied
1484 * broadly so we can force HW tracking to exit PSR
1485 * instead of disabling and re-enabling.
1486 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
1487 * but it makes more sense to write to the currently active
1490 * This workaround does not exist for platforms with display 10 or newer,
1491 * but testing proved that it works up to display 13; for anything newer
1492 * than that, testing will be needed.
1494 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1497 void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
1498 const struct intel_crtc_state *crtc_state)
1500 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1501 enum pipe pipe = plane->pipe;
1503 if (!crtc_state->enable_psr2_sel_fetch)
1506 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
1509 void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
1510 const struct intel_crtc_state *crtc_state,
1511 const struct intel_plane_state *plane_state,
1514 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1515 enum pipe pipe = plane->pipe;
1516 const struct drm_rect *clip;
1520 if (!crtc_state->enable_psr2_sel_fetch)
1523 if (plane->id == PLANE_CURSOR) {
1524 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1529 clip = &plane_state->psr2_sel_fetch_area;
1531 val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
1532 val |= plane_state->uapi.dst.x1;
1533 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
1535 x = plane_state->view.color_plane[color_plane].x;
1538 * From Bspec: UV surface Start Y Position = half of Y plane Y
1542 y = plane_state->view.color_plane[color_plane].y + clip->y1;
1544 y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
1548 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
1551 /* Sizes are 0 based */
1552 val = (drm_rect_height(clip) - 1) << 16;
1553 val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
1554 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
1556 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1557 PLANE_SEL_FETCH_CTL_ENABLE);
1560 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1562 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1563 struct intel_encoder *encoder;
1565 if (!crtc_state->enable_psr2_sel_fetch)
1568 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1569 crtc_state->uapi.encoder_mask) {
1570 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1572 lockdep_assert_held(&intel_dp->psr.lock);
1573 if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
1578 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
1579 crtc_state->psr2_man_track_ctl);
1582 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1583 struct drm_rect *clip, bool full_update)
1585 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1586 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1587 u32 val = man_trk_ctl_enable_bit_get(dev_priv);
1589 /* SF partial frame enable has to be set even on full update */
1590 val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
1594 * Not applying Wa_14014971508:adlp as we do not support the
1595 * feature that requires this workaround.
1597 val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
1604 if (IS_ALDERLAKE_P(dev_priv)) {
1605 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
1606 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
1608 drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1610 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1611 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1614 crtc_state->psr2_man_track_ctl = val;
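/*
 * Worked example: a clip of y1 = 12, y2 = 52 (multiples of 4, as asserted
 * above) is programmed on non-ADL-P as start block 12 / 4 + 1 = 4 and end
 * block 52 / 4 + 1 = 14, while ADL-P takes raw scanlines (12 and 51).
 */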
1617 static void clip_area_update(struct drm_rect *overlap_damage_area,
1618 struct drm_rect *damage_area)
1620 if (overlap_damage_area->y1 == -1) {
1621 overlap_damage_area->y1 = damage_area->y1;
1622 overlap_damage_area->y2 = damage_area->y2;
1626 if (damage_area->y1 < overlap_damage_area->y1)
1627 overlap_damage_area->y1 = damage_area->y1;
1629 if (damage_area->y2 > overlap_damage_area->y2)
1630 overlap_damage_area->y2 = damage_area->y2;
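/*
 * Worked example: starting from the { .y1 = -1 } sentinel, merging damage
 * y-ranges [100, 200] and then [50, 120] leaves the overlap area at
 * [50, 200], the union of the two ranges.
 */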
1633 static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
1634 struct drm_rect *pipe_clip)
1636 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1637 const u16 y_alignment = crtc_state->su_y_granularity;
1639 pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
1640 if (pipe_clip->y2 % y_alignment)
1641 pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
1643 if (IS_ALDERLAKE_P(dev_priv) && crtc_state->dsc.compression_enable)
1644 drm_warn(&dev_priv->drm, "Missing PSR2 sel fetch alignment with DSC\n");
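/*
 * Worked example: with a 4 line SU granularity, a clip of y1 = 13, y2 = 50
 * is expanded to y1 = 12 (13 - 13 % 4) and y2 = 52 (rounded up to the next
 * multiple of 4).
 */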
1648 * TODO: Not clear how to handle planes with negative position;
1649 * also, planes are not updated if they have a negative X
1650 * position, so for now we do a full update in these cases.
1652 * Plane scaling and rotation are not supported by selective fetch and both
1653 * properties can change without a modeset, so they need to be checked at every
1656 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
1658 if (plane_state->uapi.dst.y1 < 0 ||
1659 plane_state->uapi.dst.x1 < 0 ||
1660 plane_state->scaler_id >= 0 ||
1661 plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
1668 * Check for pipe properties that are not supported by selective fetch.
1670 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
1671 * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
1672 * enabled and going to the full update path.
1674 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
1676 if (crtc_state->scaler_state.scaler_id >= 0)
1682 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
1683 struct intel_crtc *crtc)
1685 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1686 struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
1687 struct intel_plane_state *new_plane_state, *old_plane_state;
1688 struct intel_plane *plane;
1689 bool full_update = false;
1692 if (!crtc_state->enable_psr2_sel_fetch)
1695 if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
1697 goto skip_sel_fetch_set_loop;
1701 * Calculate the minimal selective fetch area of each plane and
1702 * the pipe damaged area.
1703 * In the next loop the plane selective fetch area will actually be set
1704 * using the whole pipe damaged area.
1706 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1707 new_plane_state, i) {
1708 struct drm_rect src, damaged_area = { .y1 = -1 };
1709 struct drm_atomic_helper_damage_iter iter;
1710 struct drm_rect clip;
1712 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
1715 if (!new_plane_state->uapi.visible &&
1716 !old_plane_state->uapi.visible)
1719 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
1725 * If visibility changed or the plane moved, mark the whole plane area as
1726 * damaged, as it needs a complete redraw in both the new and old
1729 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
1730 !drm_rect_equals(&new_plane_state->uapi.dst,
1731 &old_plane_state->uapi.dst)) {
1732 if (old_plane_state->uapi.visible) {
1733 damaged_area.y1 = old_plane_state->uapi.dst.y1;
1734 damaged_area.y2 = old_plane_state->uapi.dst.y2;
1735 clip_area_update(&pipe_clip, &damaged_area);
1738 if (new_plane_state->uapi.visible) {
1739 damaged_area.y1 = new_plane_state->uapi.dst.y1;
1740 damaged_area.y2 = new_plane_state->uapi.dst.y2;
1741 clip_area_update(&pipe_clip, &damaged_area);
1744 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
1745 /* If alpha changed mark the whole plane area as damaged */
1746 damaged_area.y1 = new_plane_state->uapi.dst.y1;
1747 damaged_area.y2 = new_plane_state->uapi.dst.y2;
1748 clip_area_update(&pipe_clip, &damaged_area);
1752 drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
1754 drm_atomic_helper_damage_iter_init(&iter,
1755 &old_plane_state->uapi,
1756 &new_plane_state->uapi);
1757 drm_atomic_for_each_plane_damage(&iter, &clip) {
1758 if (drm_rect_intersect(&clip, &src))
1759 clip_area_update(&damaged_area, &clip);
1762 if (damaged_area.y1 == -1)
1765 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
1766 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
1767 clip_area_update(&pipe_clip, &damaged_area);
1771 goto skip_sel_fetch_set_loop;
1773 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
1777 intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
1780 * Now that we have the pipe damaged area, check if it intersects with
1781 * each plane; if it does, set the plane selective fetch area.
1783 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1784 new_plane_state, i) {
1785 struct drm_rect *sel_fetch_area, inter;
1786 struct intel_plane *linked = new_plane_state->planar_linked_plane;
1788 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
1789 !new_plane_state->uapi.visible)
1793 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
1796 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
1801 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
1802 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
1803 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
1804 crtc_state->update_planes |= BIT(plane->id);
1807 * Sel_fetch_area is calculated for UV plane. Use
1808 * same area for Y plane as well.
1811 struct intel_plane_state *linked_new_plane_state;
1812 struct drm_rect *linked_sel_fetch_area;
1814 linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
1815 if (IS_ERR(linked_new_plane_state))
1816 return PTR_ERR(linked_new_plane_state);
1818 linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
1819 linked_sel_fetch_area->y1 = sel_fetch_area->y1;
1820 linked_sel_fetch_area->y2 = sel_fetch_area->y2;
1821 crtc_state->update_planes |= BIT(linked->id);
1825 skip_sel_fetch_set_loop:
1826 psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
1830 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
1831 struct intel_crtc *crtc)
1833 struct drm_i915_private *i915 = to_i915(state->base.dev);
1834 const struct intel_crtc_state *crtc_state =
1835 intel_atomic_get_new_crtc_state(state, crtc);
1836 struct intel_encoder *encoder;
1841 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
1842 crtc_state->uapi.encoder_mask) {
1843 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1844 struct intel_psr *psr = &intel_dp->psr;
1845 bool needs_to_disable = false;
1847 mutex_lock(&psr->lock);
1850 * Reasons to disable:
1851 * - PSR disabled in new state
1852 * - All planes will go inactive
1853 * - Changing between PSR versions
1855 needs_to_disable |= intel_crtc_needs_modeset(crtc_state);
1856 needs_to_disable |= !crtc_state->has_psr;
1857 needs_to_disable |= !crtc_state->active_planes;
1858 needs_to_disable |= crtc_state->has_psr2 != psr->psr2_enabled;
1860 if (psr->enabled && needs_to_disable)
1861 intel_psr_disable_locked(intel_dp);
1863 mutex_unlock(&psr->lock);
1867 static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
1868 const struct intel_crtc_state *crtc_state)
1870 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1871 struct intel_encoder *encoder;
1873 if (!crtc_state->has_psr)
1876 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
1877 crtc_state->uapi.encoder_mask) {
1878 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1879 struct intel_psr *psr = &intel_dp->psr;
1881 mutex_lock(&psr->lock);
1883 if (psr->sink_not_reliable)
1886 drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
1888 /* Only enable if there are active planes */
1889 if (!psr->enabled && crtc_state->active_planes)
1890 intel_psr_enable_locked(intel_dp, crtc_state);
1892 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
1893 if (crtc_state->crc_enabled && psr->enabled)
1894 psr_force_hw_tracking_exit(intel_dp);
1897 mutex_unlock(&psr->lock);
1901 void intel_psr_post_plane_update(const struct intel_atomic_state *state)
1903 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1904 struct intel_crtc_state *crtc_state;
1905 struct intel_crtc *crtc;
1908 if (!HAS_PSR(dev_priv))
1911 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
1912 _intel_psr_post_plane_update(state, crtc_state);
1915 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
1917 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1920 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
1921 * As all higher states have bit 4 of the PSR2 state set, we can just wait for
1922 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
1924 return intel_de_wait_for_clear(dev_priv,
1925 EDP_PSR2_STATUS(intel_dp->psr.transcoder),
1926 EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);

static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/*
	 * From bspec: Panel Self Refresh (BDW+)
	 * Max. time for PSR to idle = inverse of the refresh rate + 6 ms of
	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
	 * defensive enough to cover everything.
	 */
	return intel_de_wait_for_clear(dev_priv,
				       EDP_PSR_STATUS(intel_dp->psr.transcoder),
				       EDP_PSR_STATUS_STATE_MASK, 50);
}
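
/*
 * Worked example for the 50 ms bound (illustrative numbers only): at 60 Hz
 * the budget is 1000/60 + 6 + 1.5 ~= 24.2 ms, and even a 30 Hz panel stays
 * within 33.3 + 6 + 1.5 ~= 40.8 ms, so 50 ms leaves headroom in both cases.
 */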

/**
 * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
 * @new_crtc_state: new CRTC state
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 */
void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
	struct intel_encoder *encoder;

	if (!new_crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
					     new_crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		int ret;

		lockdep_assert_held(&intel_dp->psr.lock);

		if (!intel_dp->psr.enabled)
			continue;

		if (intel_dp->psr.psr2_enabled)
			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
		else
			ret = _psr1_ready_for_pipe_update_locked(intel_dp);

		if (ret)
			drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
	}
}

static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!intel_dp->psr.enabled)
		return false;

	if (intel_dp->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&intel_dp->psr.lock);

	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
	if (err)
		drm_err(&dev_priv->drm,
			"Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&intel_dp->psr.lock);
	return err == 0 && intel_dp->psr.enabled;
}
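
/*
 * Rationale for the unlocked wait above (inferred from the code, not from
 * bspec): the idle poll may take up to 50 ms, and holding psr.lock for that
 * long would stall the frontbuffer invalidate/flush callbacks. Dropping the
 * lock is what makes the final re-check of psr.enabled necessary.
 */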

static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	int err = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;

retry:
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct drm_crtc_state *crtc_state;

		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			break;
		}

		if (!conn_state->crtc)
			continue;

		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			break;
		}

		/* Mark mode as changed to trigger a pipe->update() */
		crtc_state->mode_changed = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (err == 0)
		err = drm_atomic_commit(state);

	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}
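
/*
 * Usage note (sketch): intel_psr_debug_set() below calls this whenever the
 * debug mode actually changes, so that the next atomic commit re-evaluates
 * the PSR state from scratch. The retry: label implements the standard DRM
 * -EDEADLK backoff dance for atomic commits issued from within the driver.
 */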

int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
	if (ret)
		return ret;

	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	intel_dp->psr.debug = val;

	/*
	 * Do it right away if it's already enabled, otherwise it will be done
	 * when enabling the source.
	 */
	if (intel_dp->psr.enabled)
		psr_irq_control(intel_dp);

	mutex_unlock(&intel_dp->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}

static void intel_psr_handle_irq(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	intel_psr_disable_locked(intel_dp);
	psr->sink_not_reliable = true;
	/* let's make sure that the sink is awake */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static void intel_psr_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.work);

	mutex_lock(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		goto unlock;

	if (READ_ONCE(intel_dp->psr.irq_aux_error))
		intel_psr_handle_irq(intel_dp);

	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle. PSR might take
	 * some time to get fully disabled and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(intel_dp))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}
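
/*
 * Scheduling note: _psr_flush_handle() below queues this work once PSR is
 * inactive and the last busy frontbuffer bit has cleared, so re-activation
 * always runs from process context where the idle wait is allowed to sleep.
 */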

static void _psr_invalidate_handle(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (intel_dp->psr.psr2_sel_fetch_enabled) {
		u32 val;

		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
			return;

		val = man_trk_ctl_enable_bit_get(dev_priv) |
		      man_trk_ctl_partial_frame_bit_get(dev_priv) |
		      man_trk_ctl_continuos_full_frame(dev_priv);
		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), val);
		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
		intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
	} else {
		intel_psr_exit(intel_dp);
	}
}
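
/*
 * Background for the two branches above (summary of this code, not bspec):
 * with selective fetch the hardware only fetches the programmed region, so
 * while the frontbuffer is dirty manual tracking is switched to continuous
 * full frame (CFF) fetch instead of fully exiting PSR; without selective
 * fetch a full PSR exit is the only safe option. The CURSURFLIVE write
 * appears to act as the dummy update that makes the hardware latch the new
 * PSR2_MAN_TRK_CTL value (an assumption based on its use elsewhere here).
 */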

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	if (origin == ORIGIN_FLIP)
		return;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;

		if (pipe_frontbuffer_bits)
			_psr_invalidate_handle(intel_dp);

		mutex_unlock(&intel_dp->psr.lock);
	}
}

/*
 * Once we completely rely on PSR2 software tracking, intel_psr_flush() will
 * also invalidate and flush PSR for ORIGIN_FLIP events, so
 * tgl_dc3co_flush_locked() will need to be changed accordingly.
 */
static void
tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
		       enum fb_op_origin origin)
{
	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
	    !intel_dp->psr.active)
		return;

	/*
	 * Every frontbuffer flush caused by a flip pushes the delayed work
	 * further out; if that work ever actually runs, the display has been
	 * idle for the whole delay.
	 */
	if (!(frontbuffer_bits &
	      INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
		return;

	tgl_psr2_enable_dc3co(intel_dp);
	mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
			 intel_dp->psr.dc3co_exit_delay);
}
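
/*
 * Flow sketch for DC3CO during playback-style workloads: every flip re-arms
 * dc3co_work with dc3co_exit_delay via mod_delayed_work(), so DC3CO stays
 * enabled across periodic flips and the disable work only runs once flips
 * stop for at least dc3co_exit_delay.
 */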

static void _psr_flush_handle(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (intel_dp->psr.psr2_sel_fetch_enabled) {
		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
			/* can we turn CFF off? */
			if (intel_dp->psr.busy_frontbuffer_bits == 0) {
				u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
					  man_trk_ctl_partial_frame_bit_get(dev_priv) |
					  man_trk_ctl_single_full_frame_bit_get(dev_priv);

				/*
				 * Turn continuous full frame off and do a
				 * single full frame update.
				 */
				intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
					       val);
				intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
			}
		} else {
			/*
			 * Continuous full frame is already disabled, only a
			 * single full frame update is required.
			 */
			psr_force_hw_tracking_exit(intel_dp);
		}
	} else {
		psr_force_hw_tracking_exit(intel_dp);

		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
			schedule_work(&intel_dp->psr.work);
	}
}
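
/*
 * Summary of the three flush paths above: (1) CFF armed and no busy bits
 * left - program a single full frame fetch and disarm CFF; (2) selective
 * fetch without CFF - a forced hardware tracking exit is enough; (3) no
 * selective fetch - force the exit and, once nothing is busy, schedule
 * intel_psr_work() to re-activate PSR.
 */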

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;

		/*
		 * If PSR is paused by an explicit intel_psr_pause() call, we
		 * have to ensure that PSR is not activated until
		 * intel_psr_resume() is called.
		 */
		if (intel_dp->psr.paused)
			goto unlock;

		if (origin == ORIGIN_FLIP ||
		    (origin == ORIGIN_CURSOR_UPDATE &&
		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
			goto unlock;
		}

		if (pipe_frontbuffer_bits == 0)
			goto unlock;

		/* By definition flush = invalidate + flush */
		_psr_flush_handle(intel_dp);
unlock:
		mutex_unlock(&intel_dp->psr.lock);
	}
}
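
/*
 * Typical pairing of the two hooks above (sketch; the real callers live in
 * the frontbuffer tracking code, see intel_frontbuffer.c):
 *
 *	intel_psr_invalidate(i915, bits, ORIGIN_CPU);  // rendering starts
 *	...CPU draws into the frontbuffer...
 *	intel_psr_flush(i915, bits, ORIGIN_CPU);       // rendering flushed
 *
 * After the flush, PSR is re-activated asynchronously from intel_psr_work()
 * once no busy frontbuffer bits remain.
 */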

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @intel_dp: Intel DP
 *
 * This function is called after initializing the connector (the connector
 * initialization handles the connector capabilities), and it initializes
 * the basic PSR state for each DP encoder.
 */
void intel_psr_init(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!HAS_PSR(dev_priv))
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of the PSR registers per
	 * transcoder, but BDW, GEN9 and GEN11 are only validated by the HW
	 * team on the eDP transcoder.
	 * For now only one instance of PSR is supported on BDW, GEN9 and
	 * GEN11, so keep it hardcoded to PORT_A there, while GEN12 onwards
	 * supports an instance of the PSR registers per transcoder.
	 */
	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Port not supported\n");
		return;
	}

	intel_dp->psr.source_support = true;

	if (dev_priv->params.enable_psr == -1)
		if (!dev_priv->vbt.psr.enable)
			dev_priv->params.enable_psr = 0;

	/* Set link_standby vs. link_off defaults */
	if (DISPLAY_VER(dev_priv) < 12)
		/* For platforms before TGL let's respect the VBT again */
		intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;

	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
	mutex_init(&intel_dp->psr.lock);
}
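
/*
 * Worked example of the enable_psr parameter handling above (illustrative):
 * with i915.enable_psr left at its default of -1, a VBT that disables PSR
 * forces the parameter to 0 (PSR off), while a VBT that enables PSR leaves
 * it at -1 so the driver's default policy applies.
 */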

static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
					   u8 *status, u8 *error_status)
{
	struct drm_dp_aux *aux = &intel_dp->aux;
	int ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
	if (ret != 1)
		return ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
	if (ret != 1)
		return ret;

	*status = *status & DP_PSR_SINK_STATE_MASK;

	return 0;
}

static void psr_alpm_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_dp_aux *aux = &intel_dp->aux;
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	if (!psr->psr2_enabled)
		return;

	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
		return;
	}

	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "ALPM lock timeout error, disabling PSR\n");

		/* Clearing error */
		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
	}
}

static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
		return;
	}

	if (val & DP_PSR_CAPS_CHANGE) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "Sink PSR capability changed, disabling PSR\n");

		/* Clearing it */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
	}
}

void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 status, error_status;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled)
		goto exit;

	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
		drm_err(&dev_priv->drm,
			"Error reading PSR status or error status\n");
		goto exit;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink internal error, disabling PSR\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR RFB storage error, disabling PSR\n");
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR Link CRC error, disabling PSR\n");

	if (error_status & ~errors)
		drm_err(&dev_priv->drm,
			"PSR_ERROR_STATUS unhandled errors %x\n",
			error_status & ~errors);
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);

	psr_alpm_check(intel_dp);
	psr_capability_changed_check(intel_dp);

exit:
	mutex_unlock(&psr->lock);
}

bool intel_psr_enabled(struct intel_dp *intel_dp)
{
	bool ret;

	if (!CAN_PSR(intel_dp))
		return false;

	mutex_lock(&intel_dp->psr.lock);
	ret = intel_dp->psr.enabled;
	mutex_unlock(&intel_dp->psr.lock);
	return ret;
}

/**
 * intel_psr_lock - grab PSR lock
 * @crtc_state: the crtc state
 *
 * This is initially meant to be used around the CRTC update, when
 * vblank-sensitive registers are updated and we need to grab the lock
 * beforehand to avoid vblank evasion.
 */
void intel_psr_lock(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_encoder *encoder;

	if (!crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		break;
	}
}

/**
 * intel_psr_unlock - release PSR lock
 * @crtc_state: the crtc state
 *
 * Release the PSR lock that was held during pipe update.
 */
void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_encoder *encoder;

	if (!crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_unlock(&intel_dp->psr.lock);
		break;
	}
}
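
/*
 * Intended pairing of the two helpers above (sketch; the actual call sites
 * are in the pipe update path):
 *
 *	intel_psr_lock(new_crtc_state);
 *	...vblank evasion + writes to vblank-sensitive registers...
 *	intel_psr_unlock(new_crtc_state);
 *
 * Note that only the first PSR-capable encoder's lock is taken (the break
 * in the loop body), presumably because a CRTC drives a single eDP sink.
 */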