/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"

/**
 * DOC: overview
 *
 * DM provides another layer of IRQ management on top of what the base driver
 * already provides. This is something that could be cleaned up, and is a
 * future TODO item.
 *
 * The base driver provides IRQ source registration with DRM, handler
 * registration into the base driver's IRQ table, and a handler callback
 * amdgpu_irq_handler(), which DRM calls when an interrupt fires. This
 * generic handler looks up the IRQ table, and calls the respective
 * &amdgpu_irq_src_funcs.process hooks.
 *
 * What DM provides on top are two IRQ tables specifically for top-half and
 * bottom-half IRQ handling, with the bottom-half implementing workqueues:
 *
 * - &amdgpu_display_manager.irq_handler_list_high_tab
 * - &amdgpu_display_manager.irq_handler_list_low_tab
 *
 * They override the base driver's IRQ table, and the effect can be seen
 * in the hooks that DM provides for &amdgpu_irq_src_funcs.process. They
 * are all set to the DM generic handler amdgpu_dm_irq_handler(), which looks
 * up DM's IRQ tables. However, in order for the base driver to recognize
 * this hook, DM still needs to register the IRQ with the base driver. See
 * dce110_register_irq_handlers() and dcn10_register_irq_handlers().
 *
 * To expose DC's hardware interrupt toggle to the base driver, DM implements
 * &amdgpu_irq_src_funcs.set hooks. The base driver calls them through
 * amdgpu_irq_update() to enable or disable the interrupt.
 */
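
/*
 * For illustration, a condensed sketch (not a verbatim call site; client_id
 * and src_id are placeholders) of how one of these hooks ends up in the base
 * driver's table. The funcs struct shown here is defined at the bottom of
 * this file; the actual registration with the base driver happens in the
 * dce110_register_irq_handlers()/dcn10_register_irq_handlers() helpers:
 *
 *	static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
 *		.set = amdgpu_dm_set_crtc_irq_state,
 *		.process = amdgpu_dm_irq_handler,
 *	};
 *
 *	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
 *	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
 *	amdgpu_irq_add_id(adev, client_id, src_id, &adev->crtc_irq);
 */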

/******************************************************************************
 * Private declarations.
 *****************************************************************************/

/**
 * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers.
 *
 * @list: Linked list entry referencing the next/previous handler
 * @handler: Handler function
 * @handler_arg: Argument passed to the handler when triggered
 * @dm: DM which this handler belongs to
 * @irq_source: DC interrupt source that this handler is registered for
 * @work: work struct for deferred handling in low IRQ context
 */
struct amdgpu_dm_irq_handler_data {
	struct list_head list;
	interrupt_handler handler;
	void *handler_arg;

	struct amdgpu_display_manager *dm;
	/* DAL IRQ source this handler is registered for. */
	enum dc_irq_source irq_source;
	struct work_struct work;
};

#define DM_IRQ_TABLE_LOCK(adev, flags) \
	spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)

#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
	spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)

/******************************************************************************
 * Private functions.
 *****************************************************************************/

static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
				     void (*ih)(void *),
				     void *args,
				     struct amdgpu_display_manager *dm)
{
	hcd->handler = ih;
	hcd->handler_arg = args;
	hcd->dm = dm;
}

/**
 * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper.
 *
 * @work: work struct
 */
static void dm_irq_work_func(struct work_struct *work)
{
	struct amdgpu_dm_irq_handler_data *handler_data =
		container_of(work, struct amdgpu_dm_irq_handler_data, work);

	/* Call a DAL subcomponent which registered for interrupt notification
	 * at INTERRUPT_LOW_IRQ_CONTEXT (the most common use is HPD handling).
	 */
	handler_data->handler(handler_data->handler_arg);
}
125 
126 /*
127  * Remove a handler and return a pointer to handler list from which the
128  * handler was removed.
129  */
130 static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
131 					    void *ih,
132 					    const struct dc_interrupt_params *int_params)
133 {
134 	struct list_head *hnd_list;
135 	struct list_head *entry, *tmp;
136 	struct amdgpu_dm_irq_handler_data *handler;
137 	unsigned long irq_table_flags;
138 	bool handler_removed = false;
139 	enum dc_irq_source irq_source;
140 
141 	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
142 
143 	irq_source = int_params->irq_source;
144 
145 	switch (int_params->int_context) {
146 	case INTERRUPT_HIGH_IRQ_CONTEXT:
147 		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
148 		break;
149 	case INTERRUPT_LOW_IRQ_CONTEXT:
150 	default:
151 		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
152 		break;
153 	}
154 
155 	list_for_each_safe(entry, tmp, hnd_list) {
156 
157 		handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
158 				     list);
159 
160 		if (handler == NULL)
161 			continue;
162 
163 		if (ih == handler->handler) {
164 			/* Found our handler. Remove it from the list. */
165 			list_del(&handler->list);
166 			handler_removed = true;
167 			break;
168 		}
169 	}
170 
171 	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
172 
173 	if (handler_removed == false) {
174 		/* Not necessarily an error - caller may not
175 		 * know the context. */
176 		return NULL;
177 	}
178 
179 	kfree(handler);
180 
181 	DRM_DEBUG_KMS(
182 	"DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
183 		ih, int_params->irq_source, int_params->int_context);
184 
185 	return hnd_list;
186 }
187 
188 static bool
189 validate_irq_registration_params(struct dc_interrupt_params *int_params,
190 				 void (*ih)(void *))
191 {
192 	if (NULL == int_params || NULL == ih) {
193 		DRM_ERROR("DM_IRQ: invalid input!\n");
194 		return false;
195 	}
196 
197 	if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
198 		DRM_ERROR("DM_IRQ: invalid context: %d!\n",
199 				int_params->int_context);
200 		return false;
201 	}
202 
203 	if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
204 		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
205 				int_params->irq_source);
206 		return false;
207 	}
208 
209 	return true;
210 }
211 
212 static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
213 					       irq_handler_idx handler_idx)
214 {
215 	if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
216 		DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
217 		return false;
218 	}
219 
220 	if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
221 		DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
222 		return false;
223 	}
224 
225 	return true;
226 }
/******************************************************************************
 * Public functions.
 *
 * Note: caller is responsible for input validation.
 *****************************************************************************/

/**
 * amdgpu_dm_irq_register_interrupt() - Register a handler within DM.
 * @adev: The base driver device containing the DM device.
 * @int_params: Interrupt parameters containing the source and handler context
 * @ih: Function pointer to the interrupt handler to register
 * @handler_args: Arguments passed to the handler when the interrupt occurs
 *
 * Register an interrupt handler for the given IRQ source, under the given
 * context. The context can either be high or low. High context handlers are
 * executed directly within ISR context, while low context handlers are
 * executed within a workqueue, thereby allowing operations that sleep.
 *
 * Registered handlers are called in FIFO order, i.e. the earliest registered
 * handler is called first.
 *
 * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ
 *         source, handler function, and args, or DAL_INVALID_IRQ_HANDLER_IDX
 *         on failure
 */
void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
				       struct dc_interrupt_params *int_params,
				       void (*ih)(void *),
				       void *handler_args)
{
	struct list_head *hnd_list;
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;
	enum dc_irq_source irq_source;

	if (!validate_irq_registration_params(int_params, ih))
		return DAL_INVALID_IRQ_HANDLER_IDX;

	handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
	if (!handler_data) {
		DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
		return DAL_INVALID_IRQ_HANDLER_IDX;
	}

	init_handler_common_data(handler_data, ih, handler_args, &adev->dm);

	irq_source = int_params->irq_source;

	handler_data->irq_source = irq_source;

	/* Lock the list, add the handler. */
	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
		INIT_WORK(&handler_data->work, dm_irq_work_func);
		break;
	}

	list_add_tail(&handler_data->list, hnd_list);

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	/* This pointer will be stored by the code which requested interrupt
	 * registration. The same pointer will be needed in order to
	 * unregister the interrupt.
	 */

	DRM_DEBUG_KMS(
		"DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
		handler_data,
		irq_source,
		int_params->int_context);

	return handler_data;
}
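
/*
 * Example (illustrative only, not a call site in this file): registering a
 * low-context hotplug handler, roughly as DM does for HPD. Here,
 * handle_hpd_irq and aconnector stand in for the caller's handler and
 * handler argument:
 *
 *	struct dc_interrupt_params int_params = {0};
 *
 *	int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 *	int_params.irq_source = dc_link->irq_source_hpd;
 *
 *	amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *					 handle_hpd_irq, (void *)aconnector);
 */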

/**
 * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table
 * @adev: The base driver device containing the DM device
 * @irq_source: IRQ source to remove the given handler from
 * @ih: Function pointer to the interrupt handler to unregister
 *
 * Go through both the low and high context IRQ tables, and find the given
 * handler for the given irq source. If found, remove it. Otherwise, do
 * nothing.
 */
void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
					enum dc_irq_source irq_source,
					void *ih)
{
	struct list_head *handler_list;
	struct dc_interrupt_params int_params;
	int i;

	if (!validate_irq_unregistration_params(irq_source, ih))
		return;

	memset(&int_params, 0, sizeof(int_params));

	int_params.irq_source = irq_source;

	for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {
		int_params.int_context = i;

		handler_list = remove_irq_handler(adev, ih, &int_params);

		if (handler_list != NULL)
			break;
	}

	if (handler_list == NULL) {
		/* If we got here, it means we searched all irq contexts
		 * for this irq source, but the handler was not found.
		 */
		DRM_ERROR(
			"DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
			ih, irq_source);
	}
}
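
/*
 * The matching teardown for the registration example above would be (again
 * illustrative; handle_hpd_irq is the same stand-in handler):
 *
 *	amdgpu_dm_irq_unregister_interrupt(adev, dc_link->irq_source_hpd,
 *					   handle_hpd_irq);
 */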

/**
 * amdgpu_dm_irq_init() - Initialize DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Initialize DM's high and low context IRQ tables.
 *
 * The N by M table contains N IRQ sources, each with M
 * &struct amdgpu_dm_irq_handler_data hooked together in a linked list. The
 * list_heads are initialized here. When an interrupt n is triggered, all m
 * handlers are called in sequence, FIFO according to registration order.
 *
 * Low context handlers are deferred to a workqueue; each handler's work
 * struct is initialized at registration time, in
 * amdgpu_dm_irq_register_interrupt().
 */
int amdgpu_dm_irq_init(struct amdgpu_device *adev)
{
	int src;
	struct list_head *lh;

	DRM_DEBUG_KMS("DM_IRQ\n");

	spin_lock_init(&adev->dm.irq_handler_list_table_lock);

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		/* low context handler list init */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		INIT_LIST_HEAD(lh);
		/* high context handler list init */
		INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
	}

	return 0;
}

/**
 * amdgpu_dm_irq_fini() - Tear down DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Flush all work within the low context IRQ table.
 */
void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
{
	int src;
	struct list_head *lh;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;

	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
		/* At this point no new work can be scheduled for this
		 * source, so it is safe to flush any outstanding work.
		 */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

		if (!list_empty(lh)) {
			list_for_each_safe(entry, tmp, lh) {
				handler = list_entry(
					entry,
					struct amdgpu_dm_irq_handler_data,
					list);
				flush_work(&handler->work);
			}
		}
	}
}

int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h;
	struct list_head *hnd_list_l;
	unsigned long irq_table_flags;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: suspend\n");

	/* Disable the HW interrupts for HPD and HPD RX only, since FLIP and
	 * VBLANK are disabled by manage_dm_interrupts() when the CRTC is
	 * disabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, false);

		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

		if (!list_empty(hnd_list_l)) {
			list_for_each_safe(entry, tmp, hnd_list_l) {
				handler = list_entry(
					entry,
					struct amdgpu_dm_irq_handler_data,
					list);
				flush_work(&handler->work);
			}
		}
		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
	return 0;
}

int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: early resume\n");

	/* Re-enable the HW interrupts for HPD RX (short pulse) only. */
	for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	return 0;
}

int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: resume\n");

	/* Re-enable the HW interrupts for HPD only, since FLIP and VBLANK
	 * are enabled by manage_dm_interrupts() when the CRTC is enabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
	return 0;
}

/*
 * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
 * "irq_source".
 */
static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
					enum dc_irq_source irq_source)
{
	struct list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source];
	struct amdgpu_dm_irq_handler_data *handler_data;
	bool work_queued = false;

	if (list_empty(handler_list))
		return;

	list_for_each_entry(handler_data, handler_list, list) {
		if (queue_work(system_highpri_wq, &handler_data->work)) {
			work_queued = true;
			break;
		}
	}

	if (!work_queued) {
		struct amdgpu_dm_irq_handler_data *handler_data_add;

		/* queue_work() failed because the work is still pending, i.e.
		 * a previous interrupt has not been handled yet. Clone the
		 * first handler's data into a new entry so this interrupt is
		 * not lost.
		 */

		/* Get the amdgpu_dm_irq_handler_data of the first item
		 * pointed to by handler_list.
		 */
		handler_data = container_of(handler_list->next,
					    struct amdgpu_dm_irq_handler_data,
					    list);

		/* Allocate a new amdgpu_dm_irq_handler_data. */
		handler_data_add = kzalloc(sizeof(*handler_data_add), GFP_KERNEL);
		if (!handler_data_add) {
			DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
			return;
		}

		/* Copy the members of handler_data into the new entry. */
		handler_data_add->handler	= handler_data->handler;
		handler_data_add->handler_arg	= handler_data->handler_arg;
		handler_data_add->dm		= handler_data->dm;
		handler_data_add->irq_source	= irq_source;

		list_add_tail(&handler_data_add->list, handler_list);

		INIT_WORK(&handler_data_add->work, dm_irq_work_func);

		if (queue_work(system_highpri_wq, &handler_data_add->work))
			DRM_DEBUG("Queued work for handling interrupt from display for IRQ source %d\n",
				  irq_source);
		else
			DRM_ERROR("Failed to queue work for handling interrupt from display for IRQ source %d\n",
				  irq_source);
	}
}

/*
 * amdgpu_dm_irq_immediate_work
 * Call all high IRQ work immediately, instead of sending it to a workqueue.
 */
static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
					 enum dc_irq_source irq_source)
{
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	list_for_each_entry(handler_data,
			    &adev->dm.irq_handler_list_high_tab[irq_source],
			    list) {
		/* Call a subcomponent which registered for immediate
		 * interrupt notification.
		 */
		handler_data->handler(handler_data->handler_arg);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

/**
 * amdgpu_dm_irq_handler - Generic DM IRQ handler
 * @adev: amdgpu base driver device containing the DM device
 * @source: Unused
 * @entry: Data about the triggered interrupt
 *
 * Calls all registered high irq work immediately, and schedules work for low
 * irq. The DM IRQ table is used to find the corresponding handlers.
 */
static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	enum dc_irq_source src =
		dc_interrupt_to_irq_source(
			adev->dm.dc,
			entry->src_id,
			entry->src_data[0]);

	dc_interrupt_ack(adev->dm.dc, src);

	/* Call high irq work immediately */
	amdgpu_dm_irq_immediate_work(adev, src);

	/* Schedule low irq work */
	amdgpu_dm_irq_schedule_work(adev, src);

	return 0;
}

static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned int type)
{
	switch (type) {
	case AMDGPU_HPD_1:
		return DC_IRQ_SOURCE_HPD1;
	case AMDGPU_HPD_2:
		return DC_IRQ_SOURCE_HPD2;
	case AMDGPU_HPD_3:
		return DC_IRQ_SOURCE_HPD3;
	case AMDGPU_HPD_4:
		return DC_IRQ_SOURCE_HPD4;
	case AMDGPU_HPD_5:
		return DC_IRQ_SOURCE_HPD5;
	case AMDGPU_HPD_6:
		return DC_IRQ_SOURCE_HPD6;
	default:
		return DC_IRQ_SOURCE_INVALID;
	}
}

static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned int type,
				       enum amdgpu_interrupt_state state)
{
	enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, src, st);
	return 0;
}

static inline int dm_irq_state(struct amdgpu_device *adev,
			       struct amdgpu_irq_src *source,
			       unsigned int crtc_id,
			       enum amdgpu_interrupt_state state,
			       const enum irq_type dal_irq_type,
			       const char *func)
{
	bool st;
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];

	if (!acrtc) {
		DRM_ERROR("%s: crtc is NULL at id :%d\n", func, crtc_id);
		return 0;
	}

	if (acrtc->otg_inst == -1)
		return 0;

	/* Per-OTG IRQ sources are laid out contiguously per type, so the
	 * source for this CRTC is the type's base source plus the OTG
	 * instance.
	 */
	irq_source = dal_irq_type + acrtc->otg_inst;

	st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 unsigned int crtc_id,
					 enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_PFLIP,
		__func__);
}

static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int crtc_id,
					enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_VBLANK,
		__func__);
}

static int amdgpu_dm_set_vline0_irq_state(struct amdgpu_device *adev,
					  struct amdgpu_irq_src *source,
					  unsigned int crtc_id,
					  enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_VLINE0,
		__func__);
}

static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned int crtc_id,
					   enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_VUPDATE,
		__func__);
}

static int amdgpu_dm_set_dmub_trace_irq_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned int type,
					      enum amdgpu_interrupt_state state)
{
	enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX0;
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
	.set = amdgpu_dm_set_crtc_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vline0_irq_funcs = {
	.set = amdgpu_dm_set_vline0_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
	.set = amdgpu_dm_set_vupdate_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_dmub_trace_irq_funcs = {
	.set = amdgpu_dm_set_dmub_trace_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
	.set = amdgpu_dm_set_pflip_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
	.set = amdgpu_dm_set_hpd_irq_state,
	.process = amdgpu_dm_irq_handler,
};

void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

	adev->vline0_irq.num_types = adev->mode_info.num_crtc;
	adev->vline0_irq.funcs = &dm_vline0_irq_funcs;

	adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
	adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;

	adev->dmub_trace_irq.num_types = 1;
	adev->dmub_trace_irq.funcs = &dm_dmub_trace_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}
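
/*
 * Once these hooks are installed, the base driver drives them. For example
 * (illustrative; irq_type is a placeholder index), taking a reference on the
 * page-flip interrupt for a CRTC, as manage_dm_interrupts() does, refcounts
 * the source and reaches amdgpu_dm_set_pflip_irq_state() through
 * amdgpu_irq_update():
 *
 *	amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type);
 *	...
 *	amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type);
 */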

/**
 * amdgpu_dm_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);

		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					dc_link->irq_source_hpd,
					true);
		}

		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					dc_link->irq_source_hpd_rx,
					true);
		}
	}
	drm_connector_list_iter_end(&iter);
}

/**
 * amdgpu_dm_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false);

		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					dc_link->irq_source_hpd_rx,
					false);
		}
	}
	drm_connector_list_iter_end(&iter);
}