// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_display.h"
#include "regs/xe_irq_regs.h"

#include <linux/fb.h>

#include <drm/drm_client.h>
#include <drm/drm_client_event.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <uapi/drm/xe_drm.h>

#include "soc/intel_dram.h"
#include "intel_acpi.h"
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_display.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dmc_wl.h"
#include "intel_dp.h"
#include "intel_encoder.h"
#include "intel_fbdev.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_opregion.h"
#include "skl_watermark.h"
#include "xe_module.h"

/* Xe device functions */

static bool has_display(struct xe_device *xe)
{
	return HAS_DISPLAY(&xe->display);
}

/**
 * xe_display_driver_probe_defer - Detect if we need to wait for other drivers
 * early on
 * @pdev: PCI device
 *
 * Returns: true if probe needs to be deferred, false otherwise
 */
bool xe_display_driver_probe_defer(struct pci_dev *pdev)
{
	if (!xe_modparam.probe_display)
		return false;

	return intel_display_driver_probe_defer(pdev);
}
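
/*
 * Example (illustrative sketch, not part of the original file): a PCI
 * ->probe() callback would typically consult this helper before binding and
 * defer if needed, along the lines of:
 *
 *	if (xe_display_driver_probe_defer(pdev))
 *		return -EPROBE_DEFER;
 *
 * The surrounding caller and error handling are assumptions made for
 * illustration; only xe_display_driver_probe_defer() comes from this file.
 */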

/**
 * xe_display_driver_set_hooks - Add driver flags and hooks for display
 * @driver: DRM device driver
 *
 * Set features and function hooks in @driver that are needed for driving the
 * display IP. This sets the driver's capability of driving display, regardless
 * of whether the device has it enabled.
 */
void xe_display_driver_set_hooks(struct drm_driver *driver)
{
	if (!xe_modparam.probe_display)
		return;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	driver->fbdev_probe = intel_fbdev_driver_fbdev_probe;
#endif

	driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
}
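
/*
 * Example (illustrative sketch, assumptions noted): since this only sets
 * capabilities and hooks, it would typically be applied to the static
 * struct drm_driver before the DRM device is allocated, e.g.:
 *
 *	xe_display_driver_set_hooks(&driver);
 *	xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm);
 *
 * The variable names and the devm_drm_dev_alloc() ordering are assumptions
 * for illustration, not taken from this file.
 */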

static void unset_display_features(struct xe_device *xe)
{
	xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
}

static void display_destroy(struct drm_device *dev, void *dummy)
{
	struct xe_device *xe = to_xe_device(dev);

	destroy_workqueue(xe->display.hotplug.dp_wq);
}

/**
 * xe_display_create - create display struct
 * @xe: XE device instance
 *
 * Initialize all fields used by the display part.
 *
 * TODO: once everything can be inside a single struct, make the struct opaque
 * to the rest of xe and return it to be xe->display.
 *
 * Returns: 0 on success, negative error code on failure
 */
int xe_display_create(struct xe_device *xe)
{
	spin_lock_init(&xe->display.fb_tracking.lock);

	xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
	if (!xe->display.hotplug.dp_wq)
		return -ENOMEM;

	return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
}
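
/*
 * Illustrative call order (an assumption based on the functions in this file,
 * not documented here): xe_display_create() runs once at device creation,
 * before any display hardware is touched, and is followed later in probe by
 *
 *	xe_display_init_early(xe);
 *	...
 *	xe_display_init(xe);
 *	xe_display_register(xe);
 *
 * with teardown handled through the drmm/devm actions registered along the
 * way.
 */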

static void xe_display_fini_early(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_driver_remove_nogem(display);
	intel_display_driver_remove_noirq(display);
	intel_opregion_cleanup(display);
	intel_power_domains_cleanup(display);
}

int xe_display_init_early(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;
	int err;

	if (!xe->info.probe_display)
		return 0;

	/* Fake uncore lock */
	spin_lock_init(&xe->uncore.lock);

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(xe);

	intel_display_driver_early_probe(display);

	/* Early display init.. */
	intel_opregion_setup(display);

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	intel_dram_detect(xe);

	intel_bw_init_hw(xe);

	intel_display_device_info_runtime_init(display);

	err = intel_display_driver_probe_noirq(display);
	if (err)
		goto err_opregion;

	err = intel_display_driver_probe_nogem(display);
	if (err)
		goto err_noirq;

	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_early, xe);

err_noirq:
	intel_display_driver_remove_noirq(display);
	intel_power_domains_cleanup(display);
err_opregion:
	intel_opregion_cleanup(display);
	return err;
}

static void xe_display_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct intel_display *display = &xe->display;

	intel_hpd_poll_fini(xe);
	intel_hdcp_component_fini(display);
	intel_audio_deinit(display);
	intel_display_driver_remove(display);
}

int xe_display_init(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;
	int err;

	if (!xe->info.probe_display)
		return 0;

	err = intel_display_driver_probe(display);
	if (err)
		return err;

	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini, xe);
}

void xe_display_register(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_driver_register(display);
	intel_power_domains_enable(display);
}

void xe_display_unregister(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_power_domains_disable(display);
	intel_display_driver_unregister(display);
}

/* IRQ-related functions */

void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
{
	if (!xe->info.probe_display)
		return;

	if (master_ctl & DISPLAY_IRQ)
		gen11_display_irq_handler(xe);
}

void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	if (gu_misc_iir & GU_MISC_GSE)
		intel_opregion_asle_intr(display);
}

void xe_display_irq_reset(struct xe_device *xe)
{
	if (!xe->info.probe_display)
		return;

	gen11_display_irq_reset(xe);
}

void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
{
	if (!xe->info.probe_display)
		return;

	if (gt->info.id == XE_GT0)
		gen11_de_irq_postinstall(xe);
}

static bool suspend_to_idle(void)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}

static void xe_display_flush_cleanup_work(struct xe_device *xe)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&xe->drm, crtc) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->base.commit_lock);
		commit = list_first_entry_or_null(&crtc->base.commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->base.commit_lock);

		if (commit) {
			wait_for_completion(&commit->cleanup_done);
			drm_crtc_commit_put(commit);
		}
	}
}

static void xe_display_enable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);

	xe_display_flush_cleanup_work(xe);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);

	if (has_display(xe))
		intel_hpd_poll_enable(xe);
}

static void xe_display_disable_d3cold(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	if (has_display(xe))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(display);

	intel_hpd_init(xe);

	if (has_display(xe))
		intel_hpd_poll_disable(xe);

	intel_opregion_resume(display);

	intel_power_domains_enable(display);
}

void xe_display_pm_suspend(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;
	bool s2idle = suspend_to_idle();

	if (!xe->info.probe_display)
		return;

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(display);
	drm_client_dev_suspend(&xe->drm, false);

	if (has_display(xe)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	xe_display_flush_cleanup_work(xe);

	intel_hpd_cancel_work(xe);

	if (has_display(xe)) {
		intel_display_driver_suspend_access(display);
		intel_encoder_suspend_all(&xe->display);
	}

	intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);

	intel_dmc_suspend(display);
}

void xe_display_pm_shutdown(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_power_domains_disable(display);
	drm_client_dev_suspend(&xe->drm, false);

	if (has_display(xe)) {
		drm_kms_helper_poll_disable(&xe->drm);
		intel_display_driver_disable_user_access(display);
		intel_display_driver_suspend(display);
	}

	xe_display_flush_cleanup_work(xe);
	intel_dp_mst_suspend(display);
	intel_hpd_cancel_work(xe);

	if (has_display(xe))
		intel_display_driver_suspend_access(display);

	intel_encoder_suspend_all(display);
	intel_encoder_shutdown_all(display);

	intel_opregion_suspend(display, PCI_D3cold);

	intel_dmc_suspend(display);
}

void xe_display_pm_runtime_suspend(struct xe_device *xe)
{
	if (!xe->info.probe_display)
		return;

	if (xe->d3cold.allowed) {
		xe_display_enable_d3cold(xe);
		return;
	}

	intel_hpd_poll_enable(xe);
}

void xe_display_pm_suspend_late(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;
	bool s2idle = suspend_to_idle();

	if (!xe->info.probe_display)
		return;

	intel_display_power_suspend_late(display, s2idle);
}

void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	if (xe->d3cold.allowed)
		xe_display_pm_suspend_late(xe);

	/*
	 * If xe_display_pm_suspend_late() is not called, it is likely
	 * that we will be on dynamic DC states with DMC wakelock enabled. We
	 * need to flush the release work in that case.
	 */
	intel_dmc_wl_flush_release_work(display);
}

void xe_display_pm_shutdown_late(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	/*
	 * The only requirement is to reboot with display DC states disabled,
	 * for now leaving all display power wells in the INIT power domain
	 * enabled.
	 */
	intel_power_domains_driver_remove(display);
}

void xe_display_pm_resume_early(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_display_power_resume_early(display);
}

void xe_display_pm_resume(struct xe_device *xe)
{
	struct intel_display *display = &xe->display;

	if (!xe->info.probe_display)
		return;

	intel_dmc_resume(display);

	if (has_display(xe))
		drm_mode_config_reset(&xe->drm);

	intel_display_driver_init_hw(display);

	if (has_display(xe))
		intel_display_driver_resume_access(display);

	intel_hpd_init(xe);

	if (has_display(xe)) {
		intel_display_driver_resume(display);
		drm_kms_helper_poll_enable(&xe->drm);
		intel_display_driver_enable_user_access(display);
	}

	if (has_display(xe))
		intel_hpd_poll_disable(xe);

	intel_opregion_resume(display);

	drm_client_dev_resume(&xe->drm, false);

	intel_power_domains_enable(display);
}

void xe_display_pm_runtime_resume(struct xe_device *xe)
{
	if (!xe->info.probe_display)
		return;

	if (xe->d3cold.allowed) {
		xe_display_disable_d3cold(xe);
		return;
	}

	intel_hpd_init(xe);
	intel_hpd_poll_disable(xe);
	skl_watermark_ipc_update(xe);
}

static void display_device_remove(struct drm_device *dev, void *arg)
{
	struct intel_display *display = arg;

	intel_display_device_remove(display);
}

int xe_display_probe(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct intel_display *display;
	int err;

	if (!xe->info.probe_display)
		goto no_display;

	display = intel_display_device_probe(pdev);

	err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
	if (err)
		return err;

	if (has_display(xe))
		return 0;

no_display:
	xe->info.probe_display = false;
	unset_display_features(xe);
	return 0;
}