1 /* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: AMD 23 * 24 */ 25 26 #include "dm_services_types.h" 27 #include "dc.h" 28 29 #include "amdgpu.h" 30 #include "amdgpu_dm.h" 31 #include "amdgpu_dm_irq.h" 32 33 /** 34 * DOC: overview 35 * 36 * DM provides another layer of IRQ management on top of what the base driver 37 * already provides. This is something that could be cleaned up, and is a 38 * future TODO item. 39 * 40 * The base driver provides IRQ source registration with DRM, handler 41 * registration into the base driver's IRQ table, and a handler callback 42 * amdgpu_irq_handler(), with which DRM calls on interrupts. This generic 43 * handler looks up the IRQ table, and calls the respective 44 * &amdgpu_irq_src_funcs.process hookups. 45 * 46 * What DM provides on top are two IRQ tables specifically for top-half and 47 * bottom-half IRQ handling, with the bottom-half implementing workqueues: 48 * 49 * - &amdgpu_display_manager.irq_handler_list_high_tab 50 * - &amdgpu_display_manager.irq_handler_list_low_tab 51 * 52 * They override the base driver's IRQ table, and the effect can be seen 53 * in the hooks that DM provides for &amdgpu_irq_src_funcs.process. They 54 * are all set to the DM generic handler amdgpu_dm_irq_handler(), which looks up 55 * DM's IRQ tables. However, in order for base driver to recognize this hook, DM 56 * still needs to register the IRQ with the base driver. See 57 * dce110_register_irq_handlers() and dcn10_register_irq_handlers(). 58 * 59 * To expose DC's hardware interrupt toggle to the base driver, DM implements 60 * &amdgpu_irq_src_funcs.set hooks. Base driver calls it through 61 * amdgpu_irq_update() to enable or disable the interrupt. 62 */ 63 64 /****************************************************************************** 65 * Private declarations. 66 *****************************************************************************/ 67 68 /** 69 * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers. 70 * 71 * @list: Linked list entry referencing the next/previous handler 72 * @handler: Handler function 73 * @handler_arg: Argument passed to the handler when triggered 74 * @dm: DM which this handler belongs to 75 * @irq_source: DC interrupt source that this handler is registered for 76 * @work: work struct 77 */ 78 struct amdgpu_dm_irq_handler_data { 79 struct list_head list; 80 interrupt_handler handler; 81 void *handler_arg; 82 83 struct amdgpu_display_manager *dm; 84 /* DAL irq source which registered for this interrupt. */ 85 enum dc_irq_source irq_source; 86 struct work_struct work; 87 }; 88 89 #define DM_IRQ_TABLE_LOCK(adev, flags) \ 90 spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags) 91 92 #define DM_IRQ_TABLE_UNLOCK(adev, flags) \ 93 spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags) 94 95 /****************************************************************************** 96 * Private functions. 97 *****************************************************************************/ 98 99 static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd, 100 void (*ih)(void *), 101 void *args, 102 struct amdgpu_display_manager *dm) 103 { 104 hcd->handler = ih; 105 hcd->handler_arg = args; 106 hcd->dm = dm; 107 } 108 109 /** 110 * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper. 111 * 112 * @work: work struct 113 */ 114 static void dm_irq_work_func(struct work_struct *work) 115 { 116 struct amdgpu_dm_irq_handler_data *handler_data = 117 container_of(work, struct amdgpu_dm_irq_handler_data, work); 118 119 handler_data->handler(handler_data->handler_arg); 120 121 /* Call a DAL subcomponent which registered for interrupt notification 122 * at INTERRUPT_LOW_IRQ_CONTEXT. 123 * (The most common use is HPD interrupt) 124 */ 125 } 126 127 /* 128 * Remove a handler and return a pointer to handler list from which the 129 * handler was removed. 130 */ 131 static struct list_head *remove_irq_handler(struct amdgpu_device *adev, 132 void *ih, 133 const struct dc_interrupt_params *int_params) 134 { 135 struct list_head *hnd_list; 136 struct list_head *entry, *tmp; 137 struct amdgpu_dm_irq_handler_data *handler; 138 unsigned long irq_table_flags; 139 bool handler_removed = false; 140 enum dc_irq_source irq_source; 141 142 DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 143 144 irq_source = int_params->irq_source; 145 146 switch (int_params->int_context) { 147 case INTERRUPT_HIGH_IRQ_CONTEXT: 148 hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source]; 149 break; 150 case INTERRUPT_LOW_IRQ_CONTEXT: 151 default: 152 hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source]; 153 break; 154 } 155 156 list_for_each_safe(entry, tmp, hnd_list) { 157 158 handler = list_entry(entry, struct amdgpu_dm_irq_handler_data, 159 list); 160 161 if (handler == NULL) 162 continue; 163 164 if (ih == handler->handler) { 165 /* Found our handler. Remove it from the list. */ 166 list_del(&handler->list); 167 handler_removed = true; 168 break; 169 } 170 } 171 172 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); 173 174 if (handler_removed == false) { 175 /* Not necessarily an error - caller may not 176 * know the context. 177 */ 178 return NULL; 179 } 180 181 kfree(handler); 182 183 DRM_DEBUG_KMS( 184 "DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n", 185 ih, int_params->irq_source, int_params->int_context); 186 187 return hnd_list; 188 } 189 190 /** 191 * unregister_all_irq_handlers() - Cleans up handlers from the DM IRQ table 192 * @adev: The base driver device containing the DM device 193 * 194 * Go through low and high context IRQ tables and deallocate handlers. 195 */ 196 static void unregister_all_irq_handlers(struct amdgpu_device *adev) 197 { 198 struct list_head *hnd_list_low; 199 struct list_head *hnd_list_high; 200 struct list_head *entry, *tmp; 201 struct amdgpu_dm_irq_handler_data *handler; 202 unsigned long irq_table_flags; 203 int i; 204 205 DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 206 207 for (i = 0; i < DAL_IRQ_SOURCES_NUMBER; i++) { 208 hnd_list_low = &adev->dm.irq_handler_list_low_tab[i]; 209 hnd_list_high = &adev->dm.irq_handler_list_high_tab[i]; 210 211 list_for_each_safe(entry, tmp, hnd_list_low) { 212 213 handler = list_entry(entry, struct amdgpu_dm_irq_handler_data, 214 list); 215 216 if (handler == NULL || handler->handler == NULL) 217 continue; 218 219 list_del(&handler->list); 220 kfree(handler); 221 } 222 223 list_for_each_safe(entry, tmp, hnd_list_high) { 224 225 handler = list_entry(entry, struct amdgpu_dm_irq_handler_data, 226 list); 227 228 if (handler == NULL || handler->handler == NULL) 229 continue; 230 231 list_del(&handler->list); 232 kfree(handler); 233 } 234 } 235 236 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); 237 } 238 239 static bool 240 validate_irq_registration_params(struct dc_interrupt_params *int_params, 241 void (*ih)(void *)) 242 { 243 if (NULL == int_params || NULL == ih) { 244 DRM_ERROR("DM_IRQ: invalid input!\n"); 245 return false; 246 } 247 248 if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) { 249 DRM_ERROR("DM_IRQ: invalid context: %d!\n", 250 int_params->int_context); 251 return false; 252 } 253 254 if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) { 255 DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n", 256 int_params->irq_source); 257 return false; 258 } 259 260 return true; 261 } 262 263 static bool validate_irq_unregistration_params(enum dc_irq_source irq_source, 264 irq_handler_idx handler_idx) 265 { 266 if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) { 267 DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n"); 268 return false; 269 } 270 271 if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) { 272 DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source); 273 return false; 274 } 275 276 return true; 277 } 278 /****************************************************************************** 279 * Public functions. 280 * 281 * Note: caller is responsible for input validation. 282 *****************************************************************************/ 283 284 /** 285 * amdgpu_dm_irq_register_interrupt() - Register a handler within DM. 286 * @adev: The base driver device containing the DM device. 287 * @int_params: Interrupt parameters containing the source, and handler context 288 * @ih: Function pointer to the interrupt handler to register 289 * @handler_args: Arguments passed to the handler when the interrupt occurs 290 * 291 * Register an interrupt handler for the given IRQ source, under the given 292 * context. The context can either be high or low. High context handlers are 293 * executed directly within ISR context, while low context is executed within a 294 * workqueue, thereby allowing operations that sleep. 295 * 296 * Registered handlers are called in a FIFO manner, i.e. the most recently 297 * registered handler will be called first. 298 * 299 * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ 300 * source, handler function, and args 301 */ 302 void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, 303 struct dc_interrupt_params *int_params, 304 void (*ih)(void *), 305 void *handler_args) 306 { 307 struct list_head *hnd_list; 308 struct amdgpu_dm_irq_handler_data *handler_data; 309 unsigned long irq_table_flags; 310 enum dc_irq_source irq_source; 311 312 if (false == validate_irq_registration_params(int_params, ih)) 313 return DAL_INVALID_IRQ_HANDLER_IDX; 314 315 handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL); 316 if (!handler_data) { 317 DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n"); 318 return DAL_INVALID_IRQ_HANDLER_IDX; 319 } 320 321 init_handler_common_data(handler_data, ih, handler_args, &adev->dm); 322 323 irq_source = int_params->irq_source; 324 325 handler_data->irq_source = irq_source; 326 327 /* Lock the list, add the handler. */ 328 DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 329 330 switch (int_params->int_context) { 331 case INTERRUPT_HIGH_IRQ_CONTEXT: 332 hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source]; 333 break; 334 case INTERRUPT_LOW_IRQ_CONTEXT: 335 default: 336 hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source]; 337 INIT_WORK(&handler_data->work, dm_irq_work_func); 338 break; 339 } 340 341 list_add_tail(&handler_data->list, hnd_list); 342 343 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); 344 345 /* This pointer will be stored by code which requested interrupt 346 * registration. 347 * The same pointer will be needed in order to unregister the 348 * interrupt. 349 */ 350 351 DRM_DEBUG_KMS( 352 "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n", 353 handler_data, 354 irq_source, 355 int_params->int_context); 356 357 return handler_data; 358 } 359 360 /** 361 * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table 362 * @adev: The base driver device containing the DM device 363 * @irq_source: IRQ source to remove the given handler from 364 * @ih: Function pointer to the interrupt handler to unregister 365 * 366 * Go through both low and high context IRQ tables, and find the given handler 367 * for the given irq source. If found, remove it. Otherwise, do nothing. 368 */ 369 void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev, 370 enum dc_irq_source irq_source, 371 void *ih) 372 { 373 struct list_head *handler_list; 374 struct dc_interrupt_params int_params; 375 int i; 376 377 if (false == validate_irq_unregistration_params(irq_source, ih)) 378 return; 379 380 memset(&int_params, 0, sizeof(int_params)); 381 382 int_params.irq_source = irq_source; 383 384 for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) { 385 386 int_params.int_context = i; 387 388 handler_list = remove_irq_handler(adev, ih, &int_params); 389 390 if (handler_list != NULL) 391 break; 392 } 393 394 if (handler_list == NULL) { 395 /* If we got here, it means we searched all irq contexts 396 * for this irq source, but the handler was not found. 397 */ 398 DRM_ERROR( 399 "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n", 400 ih, irq_source); 401 } 402 } 403 404 /** 405 * amdgpu_dm_irq_init() - Initialize DM IRQ management 406 * @adev: The base driver device containing the DM device 407 * 408 * Initialize DM's high and low context IRQ tables. 409 * 410 * The N by M table contains N IRQ sources, with M 411 * &struct amdgpu_dm_irq_handler_data hooked together in a linked list. The 412 * list_heads are initialized here. When an interrupt n is triggered, all m 413 * handlers are called in sequence, FIFO according to registration order. 414 * 415 * The low context table requires special steps to initialize, since handlers 416 * will be deferred to a workqueue. See &struct irq_list_head. 417 */ 418 int amdgpu_dm_irq_init(struct amdgpu_device *adev) 419 { 420 int src; 421 struct list_head *lh; 422 423 DRM_DEBUG_KMS("DM_IRQ\n"); 424 425 spin_lock_init(&adev->dm.irq_handler_list_table_lock); 426 427 for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { 428 /* low context handler list init */ 429 lh = &adev->dm.irq_handler_list_low_tab[src]; 430 INIT_LIST_HEAD(lh); 431 /* high context handler init */ 432 INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]); 433 } 434 435 return 0; 436 } 437 438 /** 439 * amdgpu_dm_irq_fini() - Tear down DM IRQ management 440 * @adev: The base driver device containing the DM device 441 * 442 * Flush all work within the low context IRQ table. 443 */ 444 void amdgpu_dm_irq_fini(struct amdgpu_device *adev) 445 { 446 int src; 447 struct list_head *lh; 448 struct list_head *entry, *tmp; 449 struct amdgpu_dm_irq_handler_data *handler; 450 unsigned long irq_table_flags; 451 452 DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n"); 453 for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { 454 DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 455 /* The handler was removed from the table, 456 * it means it is safe to flush all the 'work' 457 * (because no code can schedule a new one). 458 */ 459 lh = &adev->dm.irq_handler_list_low_tab[src]; 460 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); 461 462 if (!list_empty(lh)) { 463 list_for_each_safe(entry, tmp, lh) { 464 handler = list_entry( 465 entry, 466 struct amdgpu_dm_irq_handler_data, 467 list); 468 flush_work(&handler->work); 469 } 470 } 471 } 472 /* Deallocate handlers from the table. */ 473 unregister_all_irq_handlers(adev); 474 } 475 476 void amdgpu_dm_irq_suspend(struct amdgpu_device *adev) 477 { 478 int src; 479 struct list_head *hnd_list_h; 480 struct list_head *hnd_list_l; 481 unsigned long irq_table_flags; 482 struct list_head *entry, *tmp; 483 struct amdgpu_dm_irq_handler_data *handler; 484 485 DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 486 487 DRM_DEBUG_KMS("DM_IRQ: suspend\n"); 488 489 /** 490 * Disable HW interrupt for HPD and HPDRX only since FLIP and VBLANK 491 * will be disabled from manage_dm_interrupts on disable CRTC. 492 */ 493 for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) { 494 hnd_list_l = &adev->dm.irq_handler_list_low_tab[src]; 495 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; 496 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) 497 dc_interrupt_set(adev->dm.dc, src, false); 498 499 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); 500 501 if (!list_empty(hnd_list_l)) { 502 list_for_each_safe(entry, tmp, hnd_list_l) { 503 handler = list_entry( 504 entry, 505 struct amdgpu_dm_irq_handler_data, 506 list); 507 flush_work(&handler->work); 508 } 509 } 510 DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 511 } 512 513 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); 514 } 515 516 int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev) 517 { 518 int src; 519 struct list_head *hnd_list_h, *hnd_list_l; 520 unsigned long irq_table_flags; 521 522 DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 523 524 drm_dbg(adev_to_drm(adev), "DM_IRQ: early resume\n"); 525 526 /* re-enable short pulse interrupts HW interrupt */ 527 for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) { 528 hnd_list_l = &adev->dm.irq_handler_list_low_tab[src]; 529 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; 530 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) 531 dc_interrupt_set(adev->dm.dc, src, true); 532 } 533 534 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); 535 536 return 0; 537 } 538 539 int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) 540 { 541 int src; 542 struct list_head *hnd_list_h, *hnd_list_l; 543 unsigned long irq_table_flags; 544 545 DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 546 547 drm_dbg(adev_to_drm(adev), "DM_IRQ: resume\n"); 548 549 /** 550 * Renable HW interrupt for HPD and only since FLIP and VBLANK 551 * will be enabled from manage_dm_interrupts on enable CRTC. 552 */ 553 for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) { 554 hnd_list_l = &adev->dm.irq_handler_list_low_tab[src]; 555 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; 556 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) 557 dc_interrupt_set(adev->dm.dc, src, true); 558 } 559 560 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); 561 return 0; 562 } 563 564 /* 565 * amdgpu_dm_irq_schedule_work - schedule all work items registered for the 566 * "irq_source". 567 */ 568 static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev, 569 enum dc_irq_source irq_source) 570 { 571 struct list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source]; 572 struct amdgpu_dm_irq_handler_data *handler_data; 573 bool work_queued = false; 574 575 if (list_empty(handler_list)) 576 return; 577 578 list_for_each_entry(handler_data, handler_list, list) { 579 if (queue_work(system_highpri_wq, &handler_data->work)) { 580 work_queued = true; 581 break; 582 } 583 } 584 585 if (!work_queued) { 586 struct amdgpu_dm_irq_handler_data *handler_data_add; 587 /*get the amdgpu_dm_irq_handler_data of first item pointed by handler_list*/ 588 handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list); 589 590 /*allocate a new amdgpu_dm_irq_handler_data*/ 591 handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC); 592 if (!handler_data_add) { 593 DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n"); 594 return; 595 } 596 597 /*copy new amdgpu_dm_irq_handler_data members from handler_data*/ 598 handler_data_add->handler = handler_data->handler; 599 handler_data_add->handler_arg = handler_data->handler_arg; 600 handler_data_add->dm = handler_data->dm; 601 handler_data_add->irq_source = irq_source; 602 603 list_add_tail(&handler_data_add->list, handler_list); 604 605 INIT_WORK(&handler_data_add->work, dm_irq_work_func); 606 607 if (queue_work(system_highpri_wq, &handler_data_add->work)) 608 DRM_DEBUG("Queued work for handling interrupt from " 609 "display for IRQ source %d\n", 610 irq_source); 611 else 612 DRM_ERROR("Failed to queue work for handling interrupt " 613 "from display for IRQ source %d\n", 614 irq_source); 615 } 616 } 617 618 /* 619 * amdgpu_dm_irq_immediate_work 620 * Callback high irq work immediately, don't send to work queue 621 */ 622 static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev, 623 enum dc_irq_source irq_source) 624 { 625 struct amdgpu_dm_irq_handler_data *handler_data; 626 unsigned long irq_table_flags; 627 628 DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 629 630 list_for_each_entry(handler_data, 631 &adev->dm.irq_handler_list_high_tab[irq_source], 632 list) { 633 /* Call a subcomponent which registered for immediate 634 * interrupt notification 635 */ 636 handler_data->handler(handler_data->handler_arg); 637 } 638 639 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); 640 } 641 642 /** 643 * amdgpu_dm_irq_handler - Generic DM IRQ handler 644 * @adev: amdgpu base driver device containing the DM device 645 * @source: Unused 646 * @entry: Data about the triggered interrupt 647 * 648 * Calls all registered high irq work immediately, and schedules work for low 649 * irq. The DM IRQ table is used to find the corresponding handlers. 650 */ 651 static int amdgpu_dm_irq_handler(struct amdgpu_device *adev, 652 struct amdgpu_irq_src *source, 653 struct amdgpu_iv_entry *entry) 654 { 655 656 enum dc_irq_source src = 657 dc_interrupt_to_irq_source( 658 adev->dm.dc, 659 entry->src_id, 660 entry->src_data[0]); 661 662 dc_interrupt_ack(adev->dm.dc, src); 663 664 /* Call high irq work immediately */ 665 amdgpu_dm_irq_immediate_work(adev, src); 666 /*Schedule low_irq work */ 667 amdgpu_dm_irq_schedule_work(adev, src); 668 669 return 0; 670 } 671 672 static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned int type) 673 { 674 switch (type) { 675 case AMDGPU_HPD_1: 676 return DC_IRQ_SOURCE_HPD1; 677 case AMDGPU_HPD_2: 678 return DC_IRQ_SOURCE_HPD2; 679 case AMDGPU_HPD_3: 680 return DC_IRQ_SOURCE_HPD3; 681 case AMDGPU_HPD_4: 682 return DC_IRQ_SOURCE_HPD4; 683 case AMDGPU_HPD_5: 684 return DC_IRQ_SOURCE_HPD5; 685 case AMDGPU_HPD_6: 686 return DC_IRQ_SOURCE_HPD6; 687 default: 688 return DC_IRQ_SOURCE_INVALID; 689 } 690 } 691 692 static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev, 693 struct amdgpu_irq_src *source, 694 unsigned int type, 695 enum amdgpu_interrupt_state state) 696 { 697 enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type); 698 bool st = (state == AMDGPU_IRQ_STATE_ENABLE); 699 700 dc_interrupt_set(adev->dm.dc, src, st); 701 return 0; 702 } 703 704 static inline int dm_irq_state(struct amdgpu_device *adev, 705 struct amdgpu_irq_src *source, 706 unsigned int crtc_id, 707 enum amdgpu_interrupt_state state, 708 const enum irq_type dal_irq_type, 709 const char *func) 710 { 711 bool st; 712 enum dc_irq_source irq_source; 713 struct dc *dc = adev->dm.dc; 714 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id]; 715 716 if (!acrtc) { 717 DRM_ERROR( 718 "%s: crtc is NULL at id :%d\n", 719 func, 720 crtc_id); 721 return 0; 722 } 723 724 if (acrtc->otg_inst == -1) 725 return 0; 726 727 irq_source = dal_irq_type + acrtc->otg_inst; 728 729 st = (state == AMDGPU_IRQ_STATE_ENABLE); 730 731 if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed) 732 dc_allow_idle_optimizations(dc, false); 733 734 dc_interrupt_set(adev->dm.dc, irq_source, st); 735 return 0; 736 } 737 738 static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev, 739 struct amdgpu_irq_src *source, 740 unsigned int crtc_id, 741 enum amdgpu_interrupt_state state) 742 { 743 return dm_irq_state( 744 adev, 745 source, 746 crtc_id, 747 state, 748 IRQ_TYPE_PFLIP, 749 __func__); 750 } 751 752 static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev, 753 struct amdgpu_irq_src *source, 754 unsigned int crtc_id, 755 enum amdgpu_interrupt_state state) 756 { 757 return dm_irq_state( 758 adev, 759 source, 760 crtc_id, 761 state, 762 IRQ_TYPE_VBLANK, 763 __func__); 764 } 765 766 static int amdgpu_dm_set_vline0_irq_state(struct amdgpu_device *adev, 767 struct amdgpu_irq_src *source, 768 unsigned int crtc_id, 769 enum amdgpu_interrupt_state state) 770 { 771 return dm_irq_state( 772 adev, 773 source, 774 crtc_id, 775 state, 776 IRQ_TYPE_VLINE0, 777 __func__); 778 } 779 780 static int amdgpu_dm_set_dmub_outbox_irq_state(struct amdgpu_device *adev, 781 struct amdgpu_irq_src *source, 782 unsigned int crtc_id, 783 enum amdgpu_interrupt_state state) 784 { 785 enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX; 786 bool st = (state == AMDGPU_IRQ_STATE_ENABLE); 787 788 dc_interrupt_set(adev->dm.dc, irq_source, st); 789 return 0; 790 } 791 792 static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev, 793 struct amdgpu_irq_src *source, 794 unsigned int crtc_id, 795 enum amdgpu_interrupt_state state) 796 { 797 return dm_irq_state( 798 adev, 799 source, 800 crtc_id, 801 state, 802 IRQ_TYPE_VUPDATE, 803 __func__); 804 } 805 806 static int amdgpu_dm_set_dmub_trace_irq_state(struct amdgpu_device *adev, 807 struct amdgpu_irq_src *source, 808 unsigned int type, 809 enum amdgpu_interrupt_state state) 810 { 811 enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX0; 812 bool st = (state == AMDGPU_IRQ_STATE_ENABLE); 813 814 dc_interrupt_set(adev->dm.dc, irq_source, st); 815 return 0; 816 } 817 818 static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = { 819 .set = amdgpu_dm_set_crtc_irq_state, 820 .process = amdgpu_dm_irq_handler, 821 }; 822 823 static const struct amdgpu_irq_src_funcs dm_vline0_irq_funcs = { 824 .set = amdgpu_dm_set_vline0_irq_state, 825 .process = amdgpu_dm_irq_handler, 826 }; 827 828 static const struct amdgpu_irq_src_funcs dm_dmub_outbox_irq_funcs = { 829 .set = amdgpu_dm_set_dmub_outbox_irq_state, 830 .process = amdgpu_dm_irq_handler, 831 }; 832 833 static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = { 834 .set = amdgpu_dm_set_vupdate_irq_state, 835 .process = amdgpu_dm_irq_handler, 836 }; 837 838 static const struct amdgpu_irq_src_funcs dm_dmub_trace_irq_funcs = { 839 .set = amdgpu_dm_set_dmub_trace_irq_state, 840 .process = amdgpu_dm_irq_handler, 841 }; 842 843 static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = { 844 .set = amdgpu_dm_set_pflip_irq_state, 845 .process = amdgpu_dm_irq_handler, 846 }; 847 848 static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = { 849 .set = amdgpu_dm_set_hpd_irq_state, 850 .process = amdgpu_dm_irq_handler, 851 }; 852 853 void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev) 854 { 855 adev->crtc_irq.num_types = adev->mode_info.num_crtc; 856 adev->crtc_irq.funcs = &dm_crtc_irq_funcs; 857 858 adev->vline0_irq.num_types = adev->mode_info.num_crtc; 859 adev->vline0_irq.funcs = &dm_vline0_irq_funcs; 860 861 adev->dmub_outbox_irq.num_types = 1; 862 adev->dmub_outbox_irq.funcs = &dm_dmub_outbox_irq_funcs; 863 864 adev->vupdate_irq.num_types = adev->mode_info.num_crtc; 865 adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs; 866 867 adev->dmub_trace_irq.num_types = 1; 868 adev->dmub_trace_irq.funcs = &dm_dmub_trace_irq_funcs; 869 870 adev->pageflip_irq.num_types = adev->mode_info.num_crtc; 871 adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs; 872 873 adev->hpd_irq.num_types = adev->mode_info.num_hpd; 874 adev->hpd_irq.funcs = &dm_hpd_irq_funcs; 875 } 876 void amdgpu_dm_outbox_init(struct amdgpu_device *adev) 877 { 878 dc_interrupt_set(adev->dm.dc, 879 DC_IRQ_SOURCE_DMCUB_OUTBOX, 880 true); 881 } 882 883 /** 884 * amdgpu_dm_hpd_init - hpd setup callback. 885 * 886 * @adev: amdgpu_device pointer 887 * 888 * Setup the hpd pins used by the card (evergreen+). 889 * Enable the pin, set the polarity, and enable the hpd interrupts. 890 */ 891 void amdgpu_dm_hpd_init(struct amdgpu_device *adev) 892 { 893 struct drm_device *dev = adev_to_drm(adev); 894 struct drm_connector *connector; 895 struct drm_connector_list_iter iter; 896 int i; 897 898 drm_connector_list_iter_begin(dev, &iter); 899 drm_for_each_connector_iter(connector, &iter) { 900 struct amdgpu_dm_connector *amdgpu_dm_connector; 901 const struct dc_link *dc_link; 902 903 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 904 continue; 905 906 amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 907 908 dc_link = amdgpu_dm_connector->dc_link; 909 910 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { 911 dc_interrupt_set(adev->dm.dc, 912 dc_link->irq_source_hpd, 913 true); 914 } 915 916 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { 917 dc_interrupt_set(adev->dm.dc, 918 dc_link->irq_source_hpd_rx, 919 true); 920 } 921 } 922 drm_connector_list_iter_end(&iter); 923 924 /* Update reference counts for HPDs */ 925 for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) { 926 if (amdgpu_irq_get(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1)) 927 drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", i); 928 } 929 } 930 931 /** 932 * amdgpu_dm_hpd_fini - hpd tear down callback. 933 * 934 * @adev: amdgpu_device pointer 935 * 936 * Tear down the hpd pins used by the card (evergreen+). 937 * Disable the hpd interrupts. 938 */ 939 void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) 940 { 941 struct drm_device *dev = adev_to_drm(adev); 942 struct drm_connector *connector; 943 struct drm_connector_list_iter iter; 944 int i; 945 946 drm_connector_list_iter_begin(dev, &iter); 947 drm_for_each_connector_iter(connector, &iter) { 948 struct amdgpu_dm_connector *amdgpu_dm_connector; 949 const struct dc_link *dc_link; 950 951 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 952 continue; 953 954 amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 955 dc_link = amdgpu_dm_connector->dc_link; 956 957 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { 958 dc_interrupt_set(adev->dm.dc, 959 dc_link->irq_source_hpd, 960 false); 961 } 962 963 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { 964 dc_interrupt_set(adev->dm.dc, 965 dc_link->irq_source_hpd_rx, 966 false); 967 } 968 } 969 drm_connector_list_iter_end(&iter); 970 971 /* Update reference counts for HPDs */ 972 for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) { 973 if (amdgpu_irq_put(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1)) 974 drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", i); 975 } 976 } 977