Lines matching refs: con
(Cross-reference listing: each entry gives the source line number, the matching source line, and the enclosing function.)

139 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
671 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_create_obj() local
674 if (!adev->ras_enabled || !con) in amdgpu_ras_create_obj()
684 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; in amdgpu_ras_create_obj()
686 obj = &con->objs[head->block]; in amdgpu_ras_create_obj()
697 list_add(&obj->node, &con->head); in amdgpu_ras_create_obj()
707 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_find_obj() local
711 if (!adev->ras_enabled || !con) in amdgpu_ras_find_obj()
722 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; in amdgpu_ras_find_obj()
724 obj = &con->objs[head->block]; in amdgpu_ras_find_obj()
730 obj = &con->objs[i]; in amdgpu_ras_find_obj()
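The create/find references above (lines 671-730) trace one flat object table: per-block ras_manager slots indexed by head->block, with sub-block entries stored past AMDGPU_RAS_BLOCK__LAST. A minimal sketch of that lookup, with hypothetical types and a placeholder block count, not the driver's actual definitions:

    #include <linux/list.h>

    /* Hypothetical mirror of the indexing above: per-block managers in
     * objs[0..RAS_BLOCK_LAST-1], sub-block managers appended after them. */
    enum { RAS_BLOCK_LAST = 16 };   /* placeholder count */

    struct ras_head { unsigned int block, sub_block_index; };
    struct ras_obj  { struct list_head node; };
    struct ras_ctx  { struct ras_obj *objs; };

    static struct ras_obj *ras_lookup(struct ras_ctx *ctx,
                                      const struct ras_head *head)
    {
            if (head->sub_block_index > 0)
                    return &ctx->objs[RAS_BLOCK_LAST + head->sub_block_index];
            return &ctx->objs[head->block];
    }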
750 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_feature_enabled() local
752 return con->features & BIT(head->block); in amdgpu_ras_is_feature_enabled()
762 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in __amdgpu_ras_feature_enable() local
783 con->features |= BIT(head->block); in __amdgpu_ras_feature_enable()
786 con->features &= ~BIT(head->block); in __amdgpu_ras_feature_enable()
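amdgpu_ras_is_feature_enabled and __amdgpu_ras_feature_enable treat con->features as a bitmask with one bit per RAS block, keyed by BIT(head->block). A sketch of that idiom with illustrative names:

    #include <linux/bits.h>
    #include <linux/types.h>

    /* features: one bit per RAS block, as BIT(head->block) above implies. */
    static inline bool feature_enabled(u32 features, unsigned int block)
    {
            return features & BIT(block);
    }

    static inline u32 feature_set(u32 features, unsigned int block, bool on)
    {
            return on ? (features | BIT(block)) : (features & ~BIT(block));
    }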
798 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_feature_enable() local
802 if (!con) in amdgpu_ras_feature_enable()
855 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_feature_enable_on_boot() local
858 if (!con) in amdgpu_ras_feature_enable_on_boot()
861 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { in amdgpu_ras_feature_enable_on_boot()
889 con->features |= BIT(head->block); in amdgpu_ras_feature_enable_on_boot()
895 con->features &= ~BIT(head->block); in amdgpu_ras_feature_enable_on_boot()
906 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_disable_all_features() local
909 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_disable_all_features()
922 return con->features; in amdgpu_ras_disable_all_features()
928 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_enable_all_features() local
975 return con->features; in amdgpu_ras_enable_all_features()
1645 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_error_count() local
1650 if (!adev->ras_enabled || !con) in amdgpu_ras_query_error_count()
1662 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_query_error_count()
1740 struct amdgpu_ras *con = in amdgpu_ras_sysfs_badpages_read() local
1742 struct amdgpu_device *adev = con->adev; in amdgpu_ras_sysfs_badpages_read()
1771 struct amdgpu_ras *con = in amdgpu_ras_sysfs_features_read() local
1774 return sysfs_emit(buf, "feature mask: 0x%x\n", con->features); in amdgpu_ras_sysfs_features_read()
1780 struct amdgpu_ras *con = in amdgpu_ras_sysfs_version_show() local
1782 return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version); in amdgpu_ras_sysfs_version_show()
1788 struct amdgpu_ras *con = in amdgpu_ras_sysfs_schema_show() local
1790 return sysfs_emit(buf, "schema: 0x%x\n", con->schema); in amdgpu_ras_sysfs_schema_show()
1805 struct amdgpu_ras *con = in amdgpu_ras_sysfs_event_state_show() local
1807 struct ras_event_manager *event_mgr = con->event_mgr; in amdgpu_ras_sysfs_event_state_show()
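The sysfs show callbacks above (features, version, schema, event_state) all share one shape: recover the containing context from the attribute with container_of, then format a single value with sysfs_emit. A generic sketch of that pattern, not the driver's exact code:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    struct my_ctx {
            u32 features;
            struct device_attribute features_attr;
    };

    static ssize_t features_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
    {
            /* The attribute is embedded in the context, so container_of
             * recovers it without any device drvdata lookup. */
            struct my_ctx *ctx = container_of(attr, struct my_ctx,
                                              features_attr);

            return sysfs_emit(buf, "feature mask: 0x%x\n", ctx->features);
    }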
1828 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_bad_page_node() local
1832 &con->badpages_attr.attr, in amdgpu_ras_sysfs_remove_bad_page_node()
1838 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_dev_attr_node() local
1840 &con->features_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
1841 &con->version_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
1842 &con->schema_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
1843 &con->event_state_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
1920 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_all() local
1923 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_sysfs_remove_all()
1957 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_debugfs_create_ctrl_node() local
1958 struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control; in amdgpu_ras_debugfs_create_ctrl_node()
1968 &con->bad_page_cnt_threshold); in amdgpu_ras_debugfs_create_ctrl_node()
1974 con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table", in amdgpu_ras_debugfs_create_ctrl_node()
1977 amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control); in amdgpu_ras_debugfs_create_ctrl_node()
1987 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot); in amdgpu_ras_debugfs_create_ctrl_node()
1994 &con->disable_ras_err_cnt_harvest); in amdgpu_ras_debugfs_create_ctrl_node()
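The ctrl-node references show plain debugfs primitives: a u32 for the bad-page threshold, a file for the EEPROM table, bools for auto_reboot and error-count harvesting. A minimal sketch with invented names:

    #include <linux/debugfs.h>

    static struct dentry *dbg_dir;
    static u32  bad_page_threshold;
    static bool auto_reboot;

    static void create_ctrl_nodes(void)
    {
            dbg_dir = debugfs_create_dir("ras_sketch", NULL);

            /* debugfs_create_u32/bool return void; errors are deliberately
             * ignored by the debugfs API. */
            debugfs_create_u32("bad_page_cnt_threshold", 0444, dbg_dir,
                               &bad_page_threshold);
            debugfs_create_bool("auto_reboot", 0644, dbg_dir, &auto_reboot);
    }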
2037 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_debugfs_create_all() local
2046 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con) in amdgpu_ras_debugfs_create_all()
2051 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_debugfs_create_all()
2084 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fs_init() local
2089 &con->features_attr.attr, in amdgpu_ras_fs_init()
2090 &con->version_attr.attr, in amdgpu_ras_fs_init()
2091 &con->schema_attr.attr, in amdgpu_ras_fs_init()
2092 &con->event_state_attr.attr, in amdgpu_ras_fs_init()
2104 con->features_attr = dev_attr_features; in amdgpu_ras_fs_init()
2108 con->version_attr = dev_attr_version; in amdgpu_ras_fs_init()
2112 con->schema_attr = dev_attr_schema; in amdgpu_ras_fs_init()
2116 con->event_state_attr = dev_attr_event_state; in amdgpu_ras_fs_init()
2121 con->badpages_attr = bin_attr_gpu_vram_bad_pages; in amdgpu_ras_fs_init()
2122 sysfs_bin_attr_init(&con->badpages_attr); in amdgpu_ras_fs_init()
2123 bin_attrs[0] = &con->badpages_attr; in amdgpu_ras_fs_init()
2136 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fs_fini() local
2140 list_for_each_entry_safe(con_obj, tmp, &con->head, node) { in amdgpu_ras_fs_fini()
2191 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_interrupt_poison_consumption_handler() local
2196 if (!block_obj || !con) in amdgpu_ras_interrupt_poison_consumption_handler()
2255 struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev); in amdgpu_ras_interrupt_poison_creation_handler() local
2257 atomic_inc(&con->page_retirement_req_cnt); in amdgpu_ras_interrupt_poison_creation_handler()
2258 atomic_inc(&con->poison_creation_count); in amdgpu_ras_interrupt_poison_creation_handler()
2260 wake_up(&con->page_retirement_wq); in amdgpu_ras_interrupt_poison_creation_handler()
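The poison-creation handler is the producer half of a counter-plus-waitqueue handshake: bump two atomic counters, then wake the retirement thread. Sketch with hypothetical names:

    #include <linux/atomic.h>
    #include <linux/wait.h>

    static atomic_t req_cnt = ATOMIC_INIT(0);
    static atomic_t creation_cnt = ATOMIC_INIT(0);
    static DECLARE_WAIT_QUEUE_HEAD(retire_wq);

    static void poison_created(void)
    {
            /* The consumer re-checks the counters inside wait_event, so a
             * wakeup racing with its sleep cannot strand it. */
            atomic_inc(&req_cnt);
            atomic_inc(&creation_cnt);
            wake_up(&retire_wq);
    }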
2435 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_interrupt_remove_all() local
2438 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_interrupt_remove_all()
2449 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_log_on_err_counter() local
2452 if (!adev->ras_enabled || !con) in amdgpu_ras_log_on_err_counter()
2455 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_log_on_err_counter()
2524 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_err_status() local
2527 if (!adev->ras_enabled || !con) in amdgpu_ras_query_err_status()
2530 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_query_err_status()
2547 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_badpages_read() local
2552 if (!con || !con->eh_data || !bps || !count) in amdgpu_ras_badpages_read()
2555 mutex_lock(&con->recovery_lock); in amdgpu_ras_badpages_read()
2556 data = con->eh_data; in amdgpu_ras_badpages_read()
2585 mutex_unlock(&con->recovery_lock); in amdgpu_ras_badpages_read()
2806 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in __amdgpu_ras_restore_bad_pages() local
2807 struct ras_err_handler_data *data = con->eh_data; in __amdgpu_ras_restore_bad_pages()
2810 if (amdgpu_ras_check_bad_page_unlock(con, in __amdgpu_ras_restore_bad_pages()
2897 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_add_bad_pages() local
2905 if (!con || !con->eh_data || !bps || pages <= 0) in amdgpu_ras_add_bad_pages()
2921 mutex_lock(&con->recovery_lock); in amdgpu_ras_add_bad_pages()
2954 mutex_unlock(&con->recovery_lock); in amdgpu_ras_add_bad_pages()
2967 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_save_bad_pages() local
2972 if (!con || !con->eh_data) { in amdgpu_ras_save_bad_pages()
2979 mutex_lock(&con->recovery_lock); in amdgpu_ras_save_bad_pages()
2980 control = &con->eeprom_control; in amdgpu_ras_save_bad_pages()
2981 data = con->eh_data; in amdgpu_ras_save_bad_pages()
2984 mutex_unlock(&con->recovery_lock); in amdgpu_ras_save_bad_pages()
3076 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, in amdgpu_ras_check_bad_page_unlock() argument
3079 struct ras_err_handler_data *data = con->eh_data; in amdgpu_ras_check_bad_page_unlock()
3098 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_check_bad_page() local
3101 if (!con || !con->eh_data) in amdgpu_ras_check_bad_page()
3104 mutex_lock(&con->recovery_lock); in amdgpu_ras_check_bad_page()
3105 ret = amdgpu_ras_check_bad_page_unlock(con, addr); in amdgpu_ras_check_bad_page()
3106 mutex_unlock(&con->recovery_lock); in amdgpu_ras_check_bad_page()
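amdgpu_ras_check_bad_page is a thin locked wrapper over the _unlock variant, a common kernel convention: the suffix marks the helper that expects the caller to already hold the lock. Sketch:

    #include <linux/mutex.h>
    #include <linux/types.h>

    static DEFINE_MUTEX(recovery_lock);

    /* _unlocked: caller must hold recovery_lock. */
    static bool check_bad_page_unlocked(u64 addr)
    {
            /* ... walk the bad-page table ... */
            return false;
    }

    static bool check_bad_page(u64 addr)
    {
            bool ret;

            mutex_lock(&recovery_lock);
            ret = check_bad_page_unlocked(addr);
            mutex_unlock(&recovery_lock);
            return ret;
    }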
3113 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_validate_threshold() local
3135 con->bad_page_cnt_threshold = min(lower_32_bits(val), in amdgpu_ras_validate_threshold()
3138 con->bad_page_cnt_threshold = ((con->reserved_pages_in_bytes) >> 21) << 4; in amdgpu_ras_validate_threshold()
3140 con->bad_page_cnt_threshold = min_t(int, max_count, in amdgpu_ras_validate_threshold()
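One reading of the threshold fallback at line 3138: `(reserved_pages_in_bytes >> 21) << 4` budgets 16 bad-page records per 2 MiB of reserved VRAM, since >> 21 counts 2 MiB chunks and << 4 multiplies by 16. For example, 64 MiB reserved gives 32 chunks and thus a threshold of 512. This is inferred from the shift constants alone, not stated in the listing.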
3151 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_put_poison_req() local
3160 ret = kfifo_put(&con->poison_fifo, poison_msg); in amdgpu_ras_put_poison_req()
3172 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_get_poison_req() local
3174 return kfifo_get(&con->poison_fifo, poison_msg); in amdgpu_ras_get_poison_req()
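The poison request queue is a plain kfifo: kfifo_put returns nonzero on success (0 means the fifo was full) and kfifo_get returns nonzero when an element was popped. A self-contained sketch with hypothetical element type and capacity:

    #include <linux/kfifo.h>
    #include <linux/types.h>

    struct poison_msg { u32 block; u64 addr; };

    /* Capacity must be a power of two. */
    static DEFINE_KFIFO(poison_fifo, struct poison_msg, 128);

    static bool queue_poison(struct poison_msg msg)
    {
            /* kfifo_put copies the element in; 0 means the fifo is full. */
            return kfifo_put(&poison_fifo, msg);
    }

    static bool dequeue_poison(struct poison_msg *msg)
    {
            return kfifo_get(&poison_fifo, msg);
    }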
3206 static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con, in amdgpu_ras_schedule_retirement_dwork() argument
3211 mutex_lock(&con->umc_ecc_log.lock); in amdgpu_ras_schedule_retirement_dwork()
3212 ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree, in amdgpu_ras_schedule_retirement_dwork()
3214 mutex_unlock(&con->umc_ecc_log.lock); in amdgpu_ras_schedule_retirement_dwork()
3217 schedule_delayed_work(&con->page_retirement_dwork, in amdgpu_ras_schedule_retirement_dwork()
3225 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, in amdgpu_ras_do_page_retirement() local
3227 struct amdgpu_device *adev = con->adev; in amdgpu_ras_do_page_retirement()
3233 amdgpu_ras_schedule_retirement_dwork(con, in amdgpu_ras_do_page_retirement()
3248 amdgpu_ras_schedule_retirement_dwork(con, in amdgpu_ras_do_page_retirement()
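amdgpu_ras_do_page_retirement is self-rearming delayed work: the scheduling helper checks, under a lock, whether tagged entries remain in a radix tree and reschedules the work if so. A generic sketch of the re-arm idiom (the radix-tree check is reduced to a hypothetical predicate):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static void retire_work_fn(struct work_struct *work);
    static DECLARE_DELAYED_WORK(retire_dwork, retire_work_fn);

    static bool more_pending(void)
    {
            return false;   /* hypothetical; real code tests a radix-tree
                               tag under a lock */
    }

    static void retire_work_fn(struct work_struct *work)
    {
            /* ... retire one batch ... */

            /* Re-arm only while work remains; otherwise the producer side
             * schedules us again with delay 0. */
            if (more_pending())
                    schedule_delayed_work(&retire_dwork,
                                          msecs_to_jiffies(100));
    }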
3315 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_clear_poison_fifo() local
3320 ret = kfifo_get(&con->poison_fifo, &msg); in amdgpu_ras_clear_poison_fifo()
3327 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_poison_consumption_handler() local
3354 flush_delayed_work(&con->page_retirement_dwork); in amdgpu_ras_poison_consumption_handler()
3356 con->gpu_reset_flags |= reset; in amdgpu_ras_poison_consumption_handler()
3362 flush_work(&con->recovery_work); in amdgpu_ras_poison_consumption_handler()
3371 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_page_retirement_thread() local
3378 wait_event_interruptible(con->page_retirement_wq, in amdgpu_ras_page_retirement_thread()
3380 atomic_read(&con->page_retirement_req_cnt)); in amdgpu_ras_page_retirement_thread()
3388 poison_creation_count = atomic_read(&con->poison_creation_count); in amdgpu_ras_page_retirement_thread()
3394 atomic_sub(poison_creation_count, &con->poison_creation_count); in amdgpu_ras_page_retirement_thread()
3395 atomic_sub(poison_creation_count, &con->page_retirement_req_cnt); in amdgpu_ras_page_retirement_thread()
3397 } while (atomic_read(&con->poison_creation_count)); in amdgpu_ras_page_retirement_thread()
3400 msg_count = kfifo_len(&con->poison_fifo); in amdgpu_ras_page_retirement_thread()
3406 atomic_sub(msg_count, &con->page_retirement_req_cnt); in amdgpu_ras_page_retirement_thread()
3413 atomic_set(&con->poison_creation_count, 0); in amdgpu_ras_page_retirement_thread()
3419 atomic_set(&con->page_retirement_req_cnt, 0); in amdgpu_ras_page_retirement_thread()
3428 schedule_delayed_work(&con->page_retirement_dwork, 0); in amdgpu_ras_page_retirement_thread()
3432 msg_count = kfifo_len(&con->poison_fifo); in amdgpu_ras_page_retirement_thread()
3435 atomic_sub(msg_count, &con->page_retirement_req_cnt); in amdgpu_ras_page_retirement_thread()
3439 schedule_delayed_work(&con->page_retirement_dwork, 0); in amdgpu_ras_page_retirement_thread()
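The retirement thread is a classic kthread consumer: sleep in wait_event_interruptible until kthread_should_stop() or the request counter is nonzero, drain poison-creation events in batches, and subtract exactly what was consumed. A compact sketch of that loop, reusing the counters and waitqueue names from the producer sketch above:

    #include <linux/kthread.h>
    #include <linux/wait.h>
    #include <linux/atomic.h>

    static atomic_t req_cnt = ATOMIC_INIT(0);
    static atomic_t creation_cnt = ATOMIC_INIT(0);
    static DECLARE_WAIT_QUEUE_HEAD(retire_wq);

    static void handle_creations(int n) { /* hypothetical batch handler */ }

    static int retirement_thread(void *arg)
    {
            int n;

            while (!kthread_should_stop()) {
                    wait_event_interruptible(retire_wq,
                                             kthread_should_stop() ||
                                             atomic_read(&req_cnt));
                    if (kthread_should_stop())
                            break;

                    /* Subtract exactly what was consumed so increments from
                     * a racing producer are never lost. */
                    n = atomic_read(&creation_cnt);
                    if (n) {
                            handle_creations(n);
                            atomic_sub(n, &creation_cnt);
                            atomic_sub(n, &req_cnt);
                    }
            }
            return 0;
    }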
3448 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_init_badpage_info() local
3452 if (!con || amdgpu_sriov_vf(adev)) in amdgpu_ras_init_badpage_info()
3455 control = &con->eeprom_control; in amdgpu_ras_init_badpage_info()
3471 if (con->update_channel_flag == true) { in amdgpu_ras_init_badpage_info()
3474 con->update_channel_flag = false; in amdgpu_ras_init_badpage_info()
3490 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_recovery_init() local
3495 if (!con || amdgpu_sriov_vf(adev)) in amdgpu_ras_recovery_init()
3503 con->adev = adev; in amdgpu_ras_recovery_init()
3508 data = &con->eh_data; in amdgpu_ras_recovery_init()
3515 mutex_init(&con->recovery_lock); in amdgpu_ras_recovery_init()
3516 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); in amdgpu_ras_recovery_init()
3517 atomic_set(&con->in_recovery, 0); in amdgpu_ras_recovery_init()
3518 con->eeprom_control.bad_channel_bitmap = 0; in amdgpu_ras_recovery_init()
3520 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control); in amdgpu_ras_recovery_init()
3529 mutex_init(&con->page_rsv_lock); in amdgpu_ras_recovery_init()
3530 INIT_KFIFO(con->poison_fifo); in amdgpu_ras_recovery_init()
3531 mutex_init(&con->page_retirement_lock); in amdgpu_ras_recovery_init()
3532 init_waitqueue_head(&con->page_retirement_wq); in amdgpu_ras_recovery_init()
3533 atomic_set(&con->page_retirement_req_cnt, 0); in amdgpu_ras_recovery_init()
3534 atomic_set(&con->poison_creation_count, 0); in amdgpu_ras_recovery_init()
3535 con->page_retirement_thread = in amdgpu_ras_recovery_init()
3537 if (IS_ERR(con->page_retirement_thread)) { in amdgpu_ras_recovery_init()
3538 con->page_retirement_thread = NULL; in amdgpu_ras_recovery_init()
3542 INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement); in amdgpu_ras_recovery_init()
3543 amdgpu_ras_ecc_log_init(&con->umc_ecc_log); in amdgpu_ras_recovery_init()
3554 con->eh_data = NULL; in amdgpu_ras_recovery_init()
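recovery_init sets up in dependency order: locks and the kfifo first, then the waitqueue and counters, then kthread_run, whose ERR_PTR return is converted to a NULL sentinel so teardown can simply test the pointer. A sketch of that error-handling convention (function names are hypothetical):

    #include <linux/kthread.h>
    #include <linux/err.h>

    static struct task_struct *retire_thread;

    static int start_retirement_thread(void)
    {
            retire_thread = kthread_run(retirement_thread, NULL, "ras_retire");
            if (IS_ERR(retire_thread)) {
                    int err = PTR_ERR(retire_thread);

                    /* NULL sentinel lets fini do
                     * "if (thread) kthread_stop()" with no extra flag. */
                    retire_thread = NULL;
                    return err;
            }
            return 0;
    }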
3572 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_recovery_fini() local
3573 struct ras_err_handler_data *data = con->eh_data; in amdgpu_ras_recovery_fini()
3583 flush_delayed_work(&con->page_retirement_dwork); in amdgpu_ras_recovery_fini()
3584 ret = amdgpu_ras_schedule_retirement_dwork(con, 0); in amdgpu_ras_recovery_fini()
3587 if (con->page_retirement_thread) in amdgpu_ras_recovery_fini()
3588 kthread_stop(con->page_retirement_thread); in amdgpu_ras_recovery_fini()
3590 atomic_set(&con->page_retirement_req_cnt, 0); in amdgpu_ras_recovery_fini()
3591 atomic_set(&con->poison_creation_count, 0); in amdgpu_ras_recovery_fini()
3593 mutex_destroy(&con->page_rsv_lock); in amdgpu_ras_recovery_fini()
3595 cancel_work_sync(&con->recovery_work); in amdgpu_ras_recovery_fini()
3597 cancel_delayed_work_sync(&con->page_retirement_dwork); in amdgpu_ras_recovery_fini()
3599 amdgpu_ras_ecc_log_fini(&con->umc_ecc_log); in amdgpu_ras_recovery_fini()
3601 mutex_lock(&con->recovery_lock); in amdgpu_ras_recovery_fini()
3602 con->eh_data = NULL; in amdgpu_ras_recovery_fini()
3605 mutex_unlock(&con->recovery_lock); in amdgpu_ras_recovery_fini()
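recovery_fini tears down roughly in reverse dependency order: flush pending retirement work, stop the consumer thread, reset the counters, cancel the recovery and delayed work items, and only then clear eh_data under recovery_lock so concurrent lookups see the NULL atomically. A generic teardown sketch reusing names from the sketches above, not a line-by-line mirror of the driver:

    struct err_data;
    static struct err_data *eh_data;   /* hypothetical shared state */

    static void recovery_fini_sketch(void)
    {
            /* 1. Let already-queued retirement work run to completion. */
            flush_delayed_work(&retire_dwork);

            /* 2. Stop the consumer before invalidating what it reads. */
            if (retire_thread)
                    kthread_stop(retire_thread);

            /* 3. No producers remain; cancel any straggling work. */
            cancel_delayed_work_sync(&retire_dwork);

            /* 4. Clear shared state under the lock so readers that test
             *    eh_data never see a half-torn-down structure. */
            mutex_lock(&recovery_lock);
            eh_data = NULL;
            mutex_unlock(&recovery_lock);
    }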
3715 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_poison_mode() local
3719 if (amdgpu_sriov_vf(adev) || !con) in amdgpu_ras_query_poison_mode()
3726 con->poison_supported = true; in amdgpu_ras_query_poison_mode()
3738 con->poison_supported = true; in amdgpu_ras_query_poison_mode()
3809 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, in amdgpu_ras_counte_dw() local
3811 struct amdgpu_device *adev = con->adev; in amdgpu_ras_counte_dw()
3823 atomic_set(&con->ras_ce_count, ce_count); in amdgpu_ras_counte_dw()
3824 atomic_set(&con->ras_ue_count, ue_count); in amdgpu_ras_counte_dw()
3878 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_init_reserved_vram_size() local
3880 if (!con || (adev->flags & AMD_IS_APU)) in amdgpu_ras_init_reserved_vram_size()
3887 con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT; in amdgpu_ras_init_reserved_vram_size()
3890 con->reserved_pages_in_bytes = (AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT << 1); in amdgpu_ras_init_reserved_vram_size()
3899 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_init() local
3902 if (con) in amdgpu_ras_init()
3905 con = kzalloc(sizeof(*con) + in amdgpu_ras_init()
3909 if (!con) in amdgpu_ras_init()
3912 con->adev = adev; in amdgpu_ras_init()
3913 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw); in amdgpu_ras_init()
3914 atomic_set(&con->ras_ce_count, 0); in amdgpu_ras_init()
3915 atomic_set(&con->ras_ue_count, 0); in amdgpu_ras_init()
3917 con->objs = (struct ras_manager *)(con + 1); in amdgpu_ras_init()
3919 amdgpu_ras_set_context(adev, con); in amdgpu_ras_init()
3928 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX); in amdgpu_ras_init()
3937 con->update_channel_flag = false; in amdgpu_ras_init()
3938 con->features = 0; in amdgpu_ras_init()
3939 con->schema = 0; in amdgpu_ras_init()
3940 INIT_LIST_HEAD(&con->head); in amdgpu_ras_init()
3942 con->flags = RAS_DEFAULT_FLAGS; in amdgpu_ras_init()
4008 con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << in amdgpu_ras_init()
4012 con->schema = amdgpu_get_ras_schema(adev); in amdgpu_ras_init()
4037 kfree(con); in amdgpu_ras_init()
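amdgpu_ras_init allocates the context and its object table in a single kzalloc: `con->objs = (struct ras_manager *)(con + 1)` points just past the struct, so the one kfree(con) on the error path (and in fini) releases both. Sketch of the trailing-allocation idiom with hypothetical types:

    #include <linux/slab.h>

    struct obj { int dummy; };

    struct ctx {
            struct obj *objs;
    };

    static struct ctx *ctx_alloc(size_t nobjs)
    {
            struct ctx *c = kzalloc(sizeof(*c) + nobjs * sizeof(struct obj),
                                    GFP_KERNEL);
            if (!c)
                    return NULL;

            /* Object table lives immediately after the struct itself,
             * so a single kfree(c) frees everything. */
            c->objs = (struct obj *)(c + 1);
            return c;
    }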
4071 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_poison_mode_supported() local
4073 if (!con) in amdgpu_ras_is_poison_mode_supported()
4076 return con->poison_supported; in amdgpu_ras_is_poison_mode_supported()
4084 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_block_late_init() local
4136 atomic_set(&con->ras_ce_count, ce_count); in amdgpu_ras_block_late_init()
4137 atomic_set(&con->ras_ue_count, ue_count); in amdgpu_ras_block_late_init()
4185 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_resume() local
4188 if (!adev->ras_enabled || !con) { in amdgpu_ras_resume()
4195 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { in amdgpu_ras_resume()
4207 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_resume()
4219 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_suspend() local
4221 if (!adev->ras_enabled || !con) in amdgpu_ras_suspend()
4226 if (AMDGPU_RAS_GET_FEATURES(con->features)) in amdgpu_ras_suspend()
4287 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_pre_fini() local
4289 if (!adev->ras_enabled || !con) in amdgpu_ras_pre_fini()
4294 if (AMDGPU_RAS_GET_FEATURES(con->features)) in amdgpu_ras_pre_fini()
4304 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fini() local
4306 if (!adev->ras_enabled || !con) in amdgpu_ras_fini()
4334 WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared"); in amdgpu_ras_fini()
4336 if (AMDGPU_RAS_GET_FEATURES(con->features)) in amdgpu_ras_fini()
4339 cancel_delayed_work_sync(&con->ras_counte_delay_work); in amdgpu_ras_fini()
4342 kfree(con); in amdgpu_ras_fini()
4509 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_release_ras_context() local
4511 if (!con) in amdgpu_release_ras_context()
4514 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) { in amdgpu_release_ras_context()
4515 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX); in amdgpu_release_ras_context()
4517 kfree(con); in amdgpu_release_ras_context()
4707 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_set_mca_debug_mode() local
4710 if (con) { in amdgpu_ras_set_mca_debug_mode()
4713 con->is_aca_debug_mode = enable; in amdgpu_ras_set_mca_debug_mode()
4721 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_set_aca_debug_mode() local
4724 if (con) { in amdgpu_ras_set_aca_debug_mode()
4730 con->is_aca_debug_mode = enable; in amdgpu_ras_set_aca_debug_mode()
4738 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_get_aca_debug_mode() local
4742 if (!con) in amdgpu_ras_get_aca_debug_mode()
4747 return con->is_aca_debug_mode; in amdgpu_ras_get_aca_debug_mode()
4755 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_get_error_query_mode() local
4759 if (!con) { in amdgpu_ras_get_error_query_mode()
4768 (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY; in amdgpu_ras_get_error_query_mode()
5207 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_reserve_page() local
5212 mutex_lock(&con->page_rsv_lock); in amdgpu_ras_reserve_page()
5216 mutex_unlock(&con->page_rsv_lock); in amdgpu_ras_reserve_page()
5241 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_rma() local
5243 if (!con) in amdgpu_ras_is_rma()
5246 return con->is_rma; in amdgpu_ras_is_rma()