// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_display.h"
#include "regs/xe_regs.h"

#include <linux/fb.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/xe_drm.h>

#include "soc/intel_dram.h"
#include "i915_drv.h" /* FIXME: HAS_DISPLAY() depends on this */
#include "intel_acpi.h"
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_display.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_fbdev.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_opregion.h"
#include "xe_module.h"

/* Xe device functions */

static bool has_display(struct xe_device *xe)
{
        return HAS_DISPLAY(xe);
}

/**
 * xe_display_driver_probe_defer - Detect if we need to wait for other drivers
 * early on
 * @pdev: PCI device
 *
 * Returns: true if probe needs to be deferred, false otherwise
 */
bool xe_display_driver_probe_defer(struct pci_dev *pdev)
{
        if (!xe_modparam.enable_display)
                return false;

        return intel_display_driver_probe_defer(pdev);
}

static void xe_display_last_close(struct drm_device *dev)
{
        struct xe_device *xe = to_xe_device(dev);

        if (xe->info.enable_display)
                intel_fbdev_restore_mode(xe);
}

/**
 * xe_display_driver_set_hooks - Add driver flags and hooks for display
 * @driver: DRM device driver
 *
 * Set features and function hooks in @driver that are needed for driving the
 * display IP. This sets the driver's capability of driving display, regardless
 * of whether the device has it enabled.
 */
void xe_display_driver_set_hooks(struct drm_driver *driver)
{
        if (!xe_modparam.enable_display)
                return;

        driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
        driver->lastclose = xe_display_last_close;
}

static void unset_display_features(struct xe_device *xe)
{
        xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
}

static void display_destroy(struct drm_device *dev, void *dummy)
{
        struct xe_device *xe = to_xe_device(dev);

        destroy_workqueue(xe->display.hotplug.dp_wq);
}

/**
 * xe_display_create - create display struct
 * @xe: XE device instance
 *
 * Initialize all fields used by the display part.
 *
 * TODO: once everything can be inside a single struct, make the struct opaque
 * to the rest of xe and return it to be xe->display.
 *
 * Returns: 0 on success, negative error code on failure
 */
int xe_display_create(struct xe_device *xe)
{
        int err;

        spin_lock_init(&xe->display.fb_tracking.lock);

        xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
        if (!xe->display.hotplug.dp_wq)
                return -ENOMEM;

        drmm_mutex_init(&xe->drm, &xe->sb_lock);
        xe->enabled_irq_mask = ~0;

        err = drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
        if (err)
                return err;

        return 0;
}

static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
{
        struct xe_device *xe = to_xe_device(dev);

        if (!xe->info.enable_display)
                return;

        intel_power_domains_cleanup(xe);
}

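/**
 * xe_display_init_nommio - early display init, before display MMIO is set up
 * @xe: XE device instance
 *
 * Sets up the fake uncore lock and detects the PCH. Cleanup is registered as
 * a drmm action (xe_display_fini_nommio).
 *
 * Returns: 0 on success, negative error code on failure
 */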
int xe_display_init_nommio(struct xe_device *xe)
{
        if (!xe->info.enable_display)
                return 0;

        /* Fake uncore lock */
        spin_lock_init(&xe->uncore.lock);

        /* This must be called before any calls to HAS_PCH_* */
        intel_detect_pch(xe);

        return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe);
}

static void xe_display_fini_noirq(struct drm_device *dev, void *dummy)
{
        struct xe_device *xe = to_xe_device(dev);

        if (!xe->info.enable_display)
                return;

        intel_display_driver_remove_noirq(xe);
        intel_power_domains_driver_remove(xe);
}

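/**
 * xe_display_init_noirq - display init that must run before IRQs are enabled
 * @xe: XE device instance
 *
 * Sets up the opregion, detects system DRAM for memory latency calculations
 * and runs the noirq part of the display driver probe. Cleanup is registered
 * as a drmm action (xe_display_fini_noirq).
 *
 * Returns: 0 on success, negative error code on failure
 */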
int xe_display_init_noirq(struct xe_device *xe)
{
        int err;

        if (!xe->info.enable_display)
                return 0;

        intel_display_driver_early_probe(xe);

        /* Early display init.. */
        intel_opregion_setup(xe);

        /*
         * Fill the dram structure to get the system dram info. This will be
         * used for memory latency calculation.
         */
        intel_dram_detect(xe);

        intel_bw_init_hw(xe);

        intel_display_device_info_runtime_init(xe);

        err = intel_display_driver_probe_noirq(xe);
        if (err)
                return err;

        return drmm_add_action_or_reset(&xe->drm, xe_display_fini_noirq, NULL);
}

static void xe_display_fini_noaccel(struct drm_device *dev, void *dummy)
{
        struct xe_device *xe = to_xe_device(dev);

        if (!xe->info.enable_display)
                return;

        intel_display_driver_remove_nogem(xe);
}

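/**
 * xe_display_init_noaccel - display init that can run before GEM/accel is up
 * @xe: XE device instance
 *
 * Runs the nogem part of the display driver probe. Cleanup is registered as a
 * drmm action (xe_display_fini_noaccel).
 *
 * Returns: 0 on success, negative error code on failure
 */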
int xe_display_init_noaccel(struct xe_device *xe)
{
        int err;

        if (!xe->info.enable_display)
                return 0;

        err = intel_display_driver_probe_nogem(xe);
        if (err)
                return err;

        return drmm_add_action_or_reset(&xe->drm, xe_display_fini_noaccel, NULL);
}

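/**
 * xe_display_init - final display init step of driver probe
 * @xe: XE device instance
 *
 * Returns: 0 on success, negative error code on failure
 */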
int xe_display_init(struct xe_device *xe)
{
        if (!xe->info.enable_display)
                return 0;

        return intel_display_driver_probe(xe);
}

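/**
 * xe_display_fini - tear down display state set up by xe_display_init
 * @xe: XE device instance
 */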
void xe_display_fini(struct xe_device *xe)
{
        if (!xe->info.enable_display)
                return;

        /* poll work can call into fbdev, hence clean that up afterwards */
        intel_hpd_poll_fini(xe);
        intel_fbdev_fini(xe);

        intel_hdcp_component_fini(xe);
        intel_audio_deinit(xe);
}

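/**
 * xe_display_register - register display with userspace and enable power domains
 * @xe: XE device instance
 */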
void xe_display_register(struct xe_device *xe)
{
        if (!xe->info.enable_display)
                return;

        intel_display_driver_register(xe);
        intel_register_dsm_handler();
        intel_power_domains_enable(xe);
}

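/**
 * xe_display_unregister - undo xe_display_register
 * @xe: XE device instance
 */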
void xe_display_unregister(struct xe_device *xe)
{
        if (!xe->info.enable_display)
                return;

        intel_unregister_dsm_handler();
        intel_power_domains_disable(xe);
        intel_display_driver_unregister(xe);
}

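/**
 * xe_display_driver_remove - remove the display driver and device state
 * @xe: XE device instance
 */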
void xe_display_driver_remove(struct xe_device *xe)
{
        if (!xe->info.enable_display)
                return;

        intel_display_driver_remove(xe);

        intel_display_device_remove(xe);
}

/* IRQ-related functions */

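/*
 * Dispatch display interrupts: called from the top-level Xe IRQ handler when
 * the master control register flags a display interrupt.
 */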
void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
{
        if (!xe->info.enable_display)
                return;

        if (master_ctl & DISPLAY_IRQ)
                gen11_display_irq_handler(xe);
}

void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
{
        if (!xe->info.enable_display)
                return;

        if (gu_misc_iir & GU_MISC_GSE)
                intel_opregion_asle_intr(xe);
}

void xe_display_irq_reset(struct xe_device *xe)
{
        if (!xe->info.enable_display)
                return;

        gen11_display_irq_reset(xe);
}

void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
{
        if (!xe->info.enable_display)
                return;

        if (gt->info.id == XE_GT0)
                gen11_de_irq_postinstall(xe);
}

static void intel_suspend_encoders(struct xe_device *xe)
{
        struct drm_device *dev = &xe->drm;
        struct intel_encoder *encoder;

        if (!has_display(xe))
                return;

        drm_modeset_lock_all(dev);
        for_each_intel_encoder(dev, encoder)
                if (encoder->suspend)
                        encoder->suspend(encoder);
        drm_modeset_unlock_all(dev);
}

static bool suspend_to_idle(void)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
        if (acpi_target_system_state() < ACPI_STATE_S3)
                return true;
#endif
        return false;
}

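/**
 * xe_display_pm_suspend - display part of system suspend
 * @xe: XE device instance
 *
 * Quiesces hotplug and poll work, suspends the display driver, encoders,
 * opregion and DMC, and notifies fbdev, before the power domains are
 * suspended in xe_display_pm_suspend_late().
 */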
void xe_display_pm_suspend(struct xe_device *xe)
{
        bool s2idle = suspend_to_idle();

        if (!xe->info.enable_display)
                return;

        /*
         * We do a lot of poking in a lot of registers, make sure they work
         * properly.
         */
        intel_power_domains_disable(xe);
        if (has_display(xe))
                drm_kms_helper_poll_disable(&xe->drm);

        intel_display_driver_suspend(xe);

        intel_dp_mst_suspend(xe);

        intel_hpd_cancel_work(xe);

        intel_suspend_encoders(xe);

        intel_opregion_suspend(xe, s2idle ? PCI_D1 : PCI_D3cold);

        intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);

        intel_dmc_suspend(xe);
}

void xe_display_pm_suspend_late(struct xe_device *xe)
{
        bool s2idle = suspend_to_idle();

        if (!xe->info.enable_display)
                return;

        intel_power_domains_suspend(xe, s2idle);

        intel_display_power_suspend_late(xe);
}

void xe_display_pm_resume_early(struct xe_device *xe)
{
        if (!xe->info.enable_display)
                return;

        intel_display_power_resume_early(xe);

        intel_power_domains_resume(xe);
}

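/**
 * xe_display_pm_resume - display part of system resume
 * @xe: XE device instance
 *
 * Mirrors xe_display_pm_suspend(): re-initializes display hardware, HPD, MST
 * and fbdev, resumes the opregion and re-enables the power domains.
 */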
void xe_display_pm_resume(struct xe_device *xe)
{
        if (!xe->info.enable_display)
                return;

        intel_dmc_resume(xe);

        if (has_display(xe))
                drm_mode_config_reset(&xe->drm);

        intel_display_driver_init_hw(xe);
        intel_hpd_init(xe);

        /* MST sideband requires HPD interrupts enabled */
        intel_dp_mst_resume(xe);
        intel_display_driver_resume(xe);

        intel_hpd_poll_disable(xe);
        if (has_display(xe))
                drm_kms_helper_poll_enable(&xe->drm);

        intel_opregion_resume(xe);

        intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);

        intel_power_domains_enable(xe);
}

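/**
 * xe_display_probe - probe the display device
 * @xe: XE device instance
 *
 * Probes the display IP and, if none is present (or display support is
 * disabled via the module parameter), clears the display driver features so
 * the rest of the driver runs in display-less mode.
 */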
void xe_display_probe(struct xe_device *xe)
{
        if (!xe->info.enable_display)
                goto no_display;

        intel_display_device_probe(xe);

        if (has_display(xe))
                return;

no_display:
        xe->info.enable_display = false;
        unset_display_features(xe);
}