| /linux-6.15/tools/tracing/rtla/src/ |
| osnoise.c |
|    97  if (!strcmp(context->orig_cpus, context->curr_cpus))  in osnoise_restore_cpus()
|   479  if (context->orig_stop_us == context->stop_us)  in osnoise_restore_stop_us()
|   558  if (context->orig_stop_total_us == context->stop_total_us)  in osnoise_restore_stop_total_us()
|   638  if (context->orig_print_stack == context->print_stack)  in osnoise_restore_print_stack()
|   717  if (context->orig_tracing_thresh == context->tracing_thresh)  in osnoise_restore_tracing_thresh()
|   827  if (context->orig_opt_irq_disable == context->opt_irq_disable)  in osnoise_restore_irq_disable()
|   888  if (context->orig_opt_workload == context->opt_workload)  in osnoise_restore_workload()
|   957  context = calloc(1, sizeof(*context));  in osnoise_context_alloc()
|   958  if (!context)  in osnoise_context_alloc()
|   981  return context;  in osnoise_context_alloc()
|   [all …]
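The osnoise.c hits all follow one save/compare/restore pattern: each osnoise_restore_*() helper writes a tracefs knob back only when the value saved at set time differs from the one the tool wrote. A minimal sketch of that pattern; the struct is cut down to two fields and the write helper is a hypothetical stand-in for the real tracefs write, not rtla's actual API:

    #include <stdlib.h>

    struct osnoise_ctx_sketch {
    	long long orig_stop_us;	/* knob value saved before we touched it */
    	long long stop_us;	/* value the tool wrote */
    };

    /* hypothetical stand-in for the tracefs write the real tool performs */
    static int osnoise_write_stop_us(long long us)
    {
    	(void)us;
    	return 0;
    }

    /* restore only when the knob was actually changed */
    static void restore_stop_us_sketch(struct osnoise_ctx_sketch *context)
    {
    	if (context->orig_stop_us == context->stop_us)
    		return;			/* nothing to undo */
    	osnoise_write_stop_us(context->orig_stop_us);
    	context->stop_us = context->orig_stop_us;
    }

    /* zero-initialized context, matching the calloc() at hit 957 */
    static struct osnoise_ctx_sketch *context_alloc_sketch(void)
    {
    	return calloc(1, sizeof(struct osnoise_ctx_sketch));
    }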
|
| osnoise.h |
|   108  int osnoise_get_context(struct osnoise_context *context);
|   109  void osnoise_put_context(struct osnoise_context *context);
|   111  int osnoise_set_cpus(struct osnoise_context *context, char *cpus);
|   112  void osnoise_restore_cpus(struct osnoise_context *context);
|   114  int osnoise_set_runtime_period(struct osnoise_context *context,
|   119  int osnoise_set_stop_us(struct osnoise_context *context,
|   121  void osnoise_restore_stop_us(struct osnoise_context *context);
|   123  int osnoise_set_stop_total_us(struct osnoise_context *context,
|   131  int osnoise_set_tracing_thresh(struct osnoise_context *context,
|   136  int osnoise_set_print_stack(struct osnoise_context *context,
|   [all …]
|
| /linux-6.15/drivers/accel/qaic/ |
| sahara.c |
|   272  ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],  in sahara_send_reset()
|   314  context->tx[0]->hello_resp.mode = context->rx->hello_resp.mode;  in sahara_hello()
|   316  ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],  in sahara_hello()
|   490  context->rx_size_requested = context->dump_table_length;  in sahara_memory_debug64()
|   528  ret = mhi_queue_buf(context->mhi_dev, DMA_FROM_DEVICE, context->rx,  in sahara_processing()
|   643  memcpy(context->mem_dump_freespace, context->rx, context->rx_size);  in sahara_parse_dump_image()
|   644  context->mem_dump_freespace += context->rx_size;  in sahara_parse_dump_image()
|   646  if (context->dump_image_offset >= context->dump_image->length) {  in sahara_parse_dump_image()
|   696  if (context->rx_size != context->rx_size_requested &&  in sahara_dump_processing()
|   752  if (!context)  in sahara_mhi_probe()
|   [all …]
|
| /linux-6.15/drivers/misc/vmw_vmci/ |
| vmci_context.c |
|   107  context = kzalloc(sizeof(*context), GFP_KERNEL);  in vmci_ctx_create()
|   108  if (!context) {  in vmci_ctx_create()
|   173  return context;  in vmci_ctx_create()
|   180  kfree(context);  in vmci_ctx_create()
|   311  if (!context) {  in vmci_ctx_enqueue_datagram()
|   409  context = c;  in vmci_ctx_get()
|   416  return context;  in vmci_ctx_get()
|   435  ctx_fire_notification(context->cid, context->priv_flags);  in ctx_free_ctx()
|   606  if (!context)  in vmci_ctx_add_notification()
|   672  if (!context)  in vmci_ctx_remove_notification()
|   [all …]
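The vmci_ctx_create() hits (107, 108, 173, 180) trace the canonical kernel allocation shape: zero-allocate, bail out on failure, return the object on success, and free it on the error unwind. A generic sketch of that shape; the struct layout and the setup step are placeholders, not the real VMCI code:

    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct ctx_sketch {
    	u32 cid;	/* placeholder field */
    };

    static struct ctx_sketch *ctx_create_sketch(u32 cid)
    {
    	struct ctx_sketch *context;
    	int err;

    	context = kzalloc(sizeof(*context), GFP_KERNEL);
    	if (!context)
    		return ERR_PTR(-ENOMEM);	/* hit 108's failure path */

    	context->cid = cid;
    	err = 0;			/* stand-in for the real setup steps */
    	if (err)
    		goto err_free;

    	return context;			/* hit 173: success */

    err_free:
    	kfree(context);			/* hit 180: unwind */
    	return ERR_PTR(err);
    }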
|
| vmci_route.c |
|    42  if (VMCI_INVALID_ID == dst->context)  in vmci_route()
|    75  if (VMCI_INVALID_ID == src->context &&  in vmci_route()
|    77  src->context = vmci_get_context_id();  in vmci_route()
|   113  if (VMCI_INVALID_ID == src->context)  in vmci_route()
|   130  if (VMCI_INVALID_ID == src->context) {  in vmci_route()
|   139  src->context = VMCI_HOST_CONTEXT_ID;  in vmci_route()
|   153  if (vmci_ctx_exists(dst->context)) {  in vmci_route()
|   165  src->context = VMCI_HOST_CONTEXT_ID;  in vmci_route()
|   167  src->context != dst->context) {  in vmci_route()
|   209  if (VMCI_INVALID_ID == src->context)  in vmci_route()
|   [all …]
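vmci_route() keeps patching up half-specified handles: an unset source context (VMCI_INVALID_ID) is filled in with the caller's own id, via vmci_get_context_id() on a guest or VMCI_HOST_CONTEXT_ID on the host, while an unset destination is simply rejected. A reduced sketch of that defaulting step; the constants and the guest-id helper are stand-ins, not the real VMCI values:

    #include <linux/errno.h>
    #include <linux/types.h>

    #define SK_INVALID_ID	(~0U)	/* stand-in for VMCI_INVALID_ID */
    #define SK_HOST_ID	2U	/* stand-in for VMCI_HOST_CONTEXT_ID */

    struct handle_sketch {
    	u32 context;
    	u32 resource;
    };

    /* stand-in for vmci_get_context_id(); the real one asks the device */
    static u32 guest_context_id_sketch(void)
    {
    	return 3;
    }

    static int route_sketch(struct handle_sketch *src,
    			const struct handle_sketch *dst, bool is_host)
    {
    	/* a destination must always be fully specified (hit 42) */
    	if (dst->context == SK_INVALID_ID)
    		return -EINVAL;

    	/* default an unset source to "whoever we are" (hits 77, 139, 165) */
    	if (src->context == SK_INVALID_ID)
    		src->context = is_host ? SK_HOST_ID : guest_context_id_sketch();

    	return 0;
    }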
|
| /linux-6.15/security/selinux/ss/ |
| mls.h |
|    27  int mls_compute_context_len(struct policydb *p, struct context *context);
|    28  void mls_sid_to_context(struct policydb *p, struct context *context,
|    35  struct context *context, struct sidtab *s, u32 def_sid);
|    40  int mls_range_set(struct context *context, struct mls_range *range);
|    43  struct context *oldc, struct context *newc);
|    57  int mls_export_netlbl_cat(struct policydb *p, struct context *context,
|    59  int mls_import_netlbl_cat(struct policydb *p, struct context *context,
|    63  struct context *context,  in mls_export_netlbl_lvl() argument
|    69  struct context *context,  in mls_import_netlbl_lvl() argument
|    75  struct context *context,  in mls_export_netlbl_cat() argument
|   [all …]
|
| mls.c |
|    32  int mls_compute_context_len(struct policydb *p, struct context *context)  in mls_compute_context_len() argument
|    86  void mls_sid_to_context(struct policydb *p, struct context *context,  in mls_sid_to_context() argument
|   231  struct context *context, struct sidtab *s, u32 def_sid)  in mls_context_to_sid() argument
|   339  context->range.level[1].sens = context->range.level[0].sens;  in mls_context_to_sid()
|   379  int mls_range_set(struct context *context, struct mls_range *range)  in mls_range_set() argument
|   440  struct context *oldc, struct context *newc)  in mls_convert_context()
|   553  void mls_export_netlbl_lvl(struct policydb *p, struct context *context,  in mls_export_netlbl_lvl() argument
|   581  context->range.level[1].sens = context->range.level[0].sens;  in mls_import_netlbl_lvl()
|   595  int mls_export_netlbl_cat(struct policydb *p, struct context *context,  in mls_export_netlbl_cat() argument
|   624  int mls_import_netlbl_cat(struct policydb *p, struct context *context,  in mls_import_netlbl_cat() argument
|   [all …]
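mls.h and mls.c pair a sizing pass with a writing pass: mls_compute_context_len() reports how many bytes the MLS part of a context string will need, and mls_sid_to_context() then renders into a buffer the caller sized from that answer (hits 339 and 581 also show the low sensitivity level being copied to the high one when a range has a single level). The two-pass shape, reduced to a self-contained userspace sketch with a made-up "sLOW-sHIGH" rendering:

    #include <stdio.h>
    #include <stdlib.h>

    /* pass 1: how many bytes will "sLOW" or "sLOW-sHIGH" need? */
    static int compute_range_len(int low, int high)
    {
    	if (low == high)
    		return snprintf(NULL, 0, "s%d", low);
    	return snprintf(NULL, 0, "s%d-s%d", low, high);
    }

    /* pass 2: render into a buffer sized by pass 1 */
    static void range_to_string(char *p, size_t n, int low, int high)
    {
    	if (low == high)
    		snprintf(p, n, "s%d", low);
    	else
    		snprintf(p, n, "s%d-s%d", low, high);
    }

    int main(void)
    {
    	int low = 0, high = 15;
    	int len = compute_range_len(low, high);
    	char *buf = malloc(len + 1);

    	if (!buf)
    		return 1;
    	range_to_string(buf, len + 1, low, high);
    	puts(buf);	/* prints "s0-s15" */
    	free(buf);
    	return 0;
    }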
|
| /linux-6.15/drivers/gpu/drm/etnaviv/ |
| etnaviv_mmu.c |
|    23  unmapped_page = context->global->ops->unmap(context, iova,  in etnaviv_context_unmap()
|    43  ret = context->global->ops->map(context, iova, paddr, pgsize,  in etnaviv_context_map()
|    69  if (!context || !sgt)  in etnaviv_iommu_map()
|   126  struct etnaviv_iommu_context *context = mapping->context;  in etnaviv_iommu_reap_mapping() local
|   284  mapping->context = etnaviv_iommu_context_get(context);  in etnaviv_iommu_map_gem()
|   309  mapping->context = etnaviv_iommu_context_get(context);  in etnaviv_iommu_map_gem()
|   344  etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);  in etnaviv_iommu_context_free()
|   346  context->global->ops->free(context);  in etnaviv_iommu_context_free()
|   392  context->global->ops->restore(gpu, context);  in etnaviv_iommu_restore()
|   466  return context->global->ops->dump_size(context);  in etnaviv_iommu_dump_size()
|   [all …]
|
| etnaviv_iommu.c |
|    37  drm_mm_takedown(&context->mm);  in etnaviv_iommuv1_free()
|   140  context = global->v1.shared_context;  in etnaviv_iommuv1_context_alloc()
|   141  etnaviv_iommu_context_get(context);  in etnaviv_iommuv1_context_alloc()
|   143  return context;  in etnaviv_iommuv1_context_alloc()
|   160  context = &v1_context->base;  in etnaviv_iommuv1_context_alloc()
|   161  context->global = global;  in etnaviv_iommuv1_context_alloc()
|   162  kref_init(&context->refcount);  in etnaviv_iommuv1_context_alloc()
|   163  mutex_init(&context->lock);  in etnaviv_iommuv1_context_alloc()
|   164  INIT_LIST_HEAD(&context->mappings);  in etnaviv_iommuv1_context_alloc()
|   166  context->global->v1.shared_context = context;  in etnaviv_iommuv1_context_alloc()
|   [all …]
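etnaviv_iommuv1_context_alloc() shows the lifetime scheme behind all the etnaviv context hits: contexts are kref-counted, and the v1 MMU keeps a single shared context that later callers merely take a reference on (hits 140-141) instead of allocating a new one. A reduced get-or-create sketch of that pattern, assuming the caller serializes access the way the real driver's global lock does:

    #include <linux/kref.h>
    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct iommu_ctx_sketch {
    	struct kref refcount;
    	struct mutex lock;
    	struct list_head mappings;
    };

    /* v1-style single shared context; assume the caller holds a lock */
    static struct iommu_ctx_sketch *shared_ctx;

    static struct iommu_ctx_sketch *ctx_get_or_alloc_sketch(void)
    {
    	struct iommu_ctx_sketch *context;

    	if (shared_ctx) {
    		/* hits 140-141: reuse the shared context, bump the count */
    		kref_get(&shared_ctx->refcount);
    		return shared_ctx;
    	}

    	context = kzalloc(sizeof(*context), GFP_KERNEL);
    	if (!context)
    		return NULL;

    	/* hits 162-164: one reference, a lock, an empty mapping list */
    	kref_init(&context->refcount);
    	mutex_init(&context->lock);
    	INIT_LIST_HEAD(&context->mappings);

    	shared_ctx = context;	/* hit 166: cache it for the next caller */
    	return context;
    }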
|
| etnaviv_iommu_v2.c |
|    52  drm_mm_takedown(&context->mm);  in etnaviv_iommuv2_free()
|   166  struct etnaviv_iommu_context *context)  in etnaviv_iommuv2_restore_nonsec() argument
|   181  (u32)context->global->bad_page_dma);  in etnaviv_iommuv2_restore_nonsec()
|   190  struct etnaviv_iommu_context *context)  in etnaviv_iommuv2_restore_sec() argument
|   273  struct etnaviv_iommu_context *context;  in etnaviv_iommuv2_context_alloc() local
|   300  context = &v2_context->base;  in etnaviv_iommuv2_context_alloc()
|   301  context->global = global;  in etnaviv_iommuv2_context_alloc()
|   302  kref_init(&context->refcount);  in etnaviv_iommuv2_context_alloc()
|   303  mutex_init(&context->lock);  in etnaviv_iommuv2_context_alloc()
|   304  INIT_LIST_HEAD(&context->mappings);  in etnaviv_iommuv2_context_alloc()
|   [all …]
|
| /linux-6.15/kernel/ |
| auditsc.c |
|  1038  context->context = AUDIT_CTX_UNUSED;  in audit_alloc_context()
|  1684  switch (context->context) {  in audit_log_exit()
|  1690  context->arch, context->major);  in audit_log_exit()
|  1756  context->fds[0], context->fds[1]);  in audit_log_exit()
|  1806  if (context->context == AUDIT_CTX_SYSCALL)  in audit_log_exit()
|  2018  if (context->context != AUDIT_CTX_UNUSED || context->name_count) {  in __audit_syscall_entry()
|  2040  context->context = AUDIT_CTX_SYSCALL;  in __audit_syscall_entry()
|  2060  if (!context || context->dummy ||  in __audit_syscall_exit()
|  2228  if (context->context == AUDIT_CTX_UNUSED)  in __audit_getname()
|  2300  if (context->context == AUDIT_CTX_UNUSED)  in __audit_inode()
|   [all …]
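In auditsc.c the doubled name context->context is not a typo: the outer context is the per-task audit state, and the inner field records what kind of event it currently holds. audit_alloc_context() starts it at AUDIT_CTX_UNUSED, __audit_syscall_entry() promotes it to AUDIT_CTX_SYSCALL, and collection hooks such as __audit_getname() bail out while it is still unused. A state-field sketch along those lines; the real enum and structs have many more members:

    enum audit_ctx_state { AUDIT_CTX_UNUSED, AUDIT_CTX_SYSCALL };

    struct audit_ctx_sketch {
    	enum audit_ctx_state context;	/* the inner "context" field */
    	unsigned long major;		/* syscall number */
    };

    /* hit 2040: entering a syscall promotes the state */
    static void syscall_entry_sketch(struct audit_ctx_sketch *ctx,
    				 unsigned long major)
    {
    	if (ctx->context != AUDIT_CTX_UNUSED)
    		return;		/* already in use, as the hit-2018 check guards */
    	ctx->major = major;
    	ctx->context = AUDIT_CTX_SYSCALL;
    }

    /* hits 2228/2300: collection hooks ignore unused contexts */
    static void getname_sketch(struct audit_ctx_sketch *ctx)
    {
    	if (ctx->context == AUDIT_CTX_UNUSED)
    		return;
    	/* ... record the filename against the pending event ... */
    }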
|
| /linux-6.15/fs/xfs/ |
| xfs_attr_list.c |
|    91  context->put_listent(context,  in xfs_attr_shortform_list()
|   101  if (context->seen_enough)  in xfs_attr_shortform_list()
|   110  if (context->bufsize == 0)  in xfs_attr_shortform_list()
|   194  context->put_listent(context,  in xfs_attr_shortform_list()
|   456  if (context->resynch) {  in xfs_attr3_leaf_list_int()
|   461  context->dupcnt = 0;  in xfs_attr3_leaf_list_int()
|   464  context->dupcnt++;  in xfs_attr3_leaf_list_int()
|   467  context->dupcnt = 0;  in xfs_attr3_leaf_list_int()
|   479  context->resynch = 0;  in xfs_attr3_leaf_list_int()
|   522  context->put_listent(context, entry->flags,  in xfs_attr3_leaf_list_int()
|   [all …]
|
| xfs_xattr.c |
|   231  if (context->count < 0 || context->seen_enough)  in __xfs_xattr_put_listent()
|   234  if (!context->buffer)  in __xfs_xattr_put_listent()
|   240  context->seen_enough = 1;  in __xfs_xattr_put_listent()
|   243  offset = context->buffer + context->count;  in __xfs_xattr_put_listent()
|   332  memset(&context, 0, sizeof(context));  in xfs_vn_listxattr()
|   333  context.dp = XFS_I(inode);  in xfs_vn_listxattr()
|   334  context.resynch = 1;  in xfs_vn_listxattr()
|   336  context.bufsize = size;  in xfs_vn_listxattr()
|   337  context.firstu = context.bufsize;  in xfs_vn_listxattr()
|   343  if (context.count < 0)  in xfs_vn_listxattr()
|   [all …]
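The xfs_xattr.c hits show the two-mode listxattr contract: __xfs_xattr_put_listent() only accumulates the required size when context->buffer is NULL (the size-probe call with a zero-length buffer), copies names at buffer + count otherwise, and flags seen_enough once the next name would overflow; xfs_vn_listxattr() zero-fills the context and seeds resynch and bufsize before the walk. A reduced userspace sketch of the callback half, with made-up names:

    #include <string.h>
    #include <sys/types.h>

    struct listent_ctx {
    	char *buffer;		/* NULL means "just measure" */
    	size_t bufsize;
    	ssize_t count;		/* bytes used so far, or < 0 on error */
    	int seen_enough;
    };

    static void put_listent_sketch(struct listent_ctx *ctx,
    			       const char *name, size_t namelen)
    {
    	if (ctx->count < 0 || ctx->seen_enough)
    		return;		/* hit 231: already failed or full */

    	if (!ctx->buffer) {
    		ctx->count += namelen + 1;	/* size probe only */
    		return;
    	}

    	if ((size_t)ctx->count + namelen + 1 > ctx->bufsize) {
    		ctx->seen_enough = 1;		/* caller turns this into -ERANGE */
    		return;
    	}

    	memcpy(ctx->buffer + ctx->count, name, namelen);
    	ctx->buffer[ctx->count + namelen] = '\0';
    	ctx->count += namelen + 1;
    }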
|
| /linux-6.15/drivers/gpu/drm/tegra/ |
| uapi.c |
|    46  kfree(context);  in tegra_drm_channel_context_close()
|    89  context = kzalloc(sizeof(*context), GFP_KERNEL);  in tegra_drm_ioctl_channel_open()
|    90  if (!context)  in tegra_drm_ioctl_channel_open()
|   135  err = xa_alloc(&fpriv->contexts, &args->context, context, XA_LIMIT(1, U32_MAX),  in tegra_drm_ioctl_channel_open()
|   157  kfree(context);  in tegra_drm_ioctl_channel_open()
|   170  context = xa_load(&fpriv->contexts, args->context);  in tegra_drm_ioctl_channel_close()
|   171  if (!context) {  in tegra_drm_ioctl_channel_close()
|   200  context = xa_load(&fpriv->contexts, args->context);  in tegra_drm_ioctl_channel_map()
|   201  if (!context) {  in tegra_drm_ioctl_channel_map()
|   281  context = xa_load(&fpriv->contexts, args->context);  in tegra_drm_ioctl_channel_unmap()
|   [all …]
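The tegra uapi hits are an XArray handle table: tegra_drm_ioctl_channel_open() stores the new context with xa_alloc(), which also picks the u32 id handed back to userspace in args->context, and every later ioctl turns that id back into a pointer with xa_load(). A sketch of that id/object mapping; the struct contents are placeholders and the XArray here is a file-scope stand-in for the per-file fpriv->contexts:

    #include <linux/errno.h>
    #include <linux/limits.h>
    #include <linux/slab.h>
    #include <linux/xarray.h>

    struct chan_ctx_sketch {
    	int placeholder;
    };

    /* ids start at 1, matching XA_LIMIT(1, U32_MAX) in the hit above */
    static DEFINE_XARRAY_ALLOC1(contexts);

    static int channel_open_sketch(u32 *id_out)
    {
    	struct chan_ctx_sketch *context;
    	int err;

    	context = kzalloc(sizeof(*context), GFP_KERNEL);
    	if (!context)
    		return -ENOMEM;

    	err = xa_alloc(&contexts, id_out, context,
    		       XA_LIMIT(1, U32_MAX), GFP_KERNEL);
    	if (err) {
    		kfree(context);		/* hit 157: unwind on failure */
    		return err;
    	}
    	return 0;
    }

    static struct chan_ctx_sketch *channel_lookup_sketch(u32 id)
    {
    	return xa_load(&contexts, id);	/* NULL when the id was never opened */
    }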
|
| submit.c |
|    26  #define SUBMIT_ERR(context, fmt, ...) \  argument
|   150  xa_lock(&context->mappings);  in tegra_drm_mapping_get()
|   156  xa_unlock(&context->mappings);  in tegra_drm_mapping_get()
|   243  SUBMIT_ERR(context,  in submit_write_reloc()
|   403  class = context->client->base.class;  in submit_create_job()
|   516  struct tegra_drm_context *context;  in tegra_drm_ioctl_channel_submit() local
|   524  context = xa_load(&fpriv->contexts, args->context);  in tegra_drm_ioctl_channel_submit()
|   525  if (!context) {  in tegra_drm_ioctl_channel_submit()
|   528  current->comm, args->context);  in tegra_drm_ioctl_channel_submit()
|   598  if (context->memory_context && context->client->ops->can_use_memory_ctx) {  in tegra_drm_ioctl_channel_submit()
|   [all …]
|
| /linux-6.15/drivers/net/ethernet/mellanox/mlx4/ |
| en_resources.c |
|    47  memset(context, 0, sizeof(*context));  in mlx4_en_fill_qp_context()
|    49  context->pd = cpu_to_be32(mdev->priv_pdn);  in mlx4_en_fill_qp_context()
|    50  context->mtu_msgmax = 0xff;  in mlx4_en_fill_qp_context()
|    59  context->sq_size_stride = ilog2(TXBB_SIZE) - 4;  in mlx4_en_fill_qp_context()
|    63  context->local_qpn = cpu_to_be32(qpn);  in mlx4_en_fill_qp_context()
|    64  context->pri_path.ackto = 1 & 0x07;  in mlx4_en_fill_qp_context()
|    68  context->pri_path.sched_queue |= user_prio << 3;  in mlx4_en_fill_qp_context()
|    72  context->cqn_send = cpu_to_be32(cqn);  in mlx4_en_fill_qp_context()
|    73  context->cqn_recv = cpu_to_be32(cqn);  in mlx4_en_fill_qp_context()
|    76  context->pri_path.counter_index !=  in mlx4_en_fill_qp_context()
|   [all …]
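mlx4_en_fill_qp_context() fills a device-visible structure, so every multi-byte field goes through cpu_to_be32(): the ConnectX firmware expects big-endian QP contexts regardless of host endianness, while single-byte fields like mtu_msgmax need no swap. A cut-down sketch of that shape; this struct is a stand-in, not the real struct mlx4_qp_context layout:

    #include <asm/byteorder.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct qp_ctx_sketch {
    	__be32 pd;
    	__be32 local_qpn;
    	__be32 cqn_send;
    	__be32 cqn_recv;
    	u8 mtu_msgmax;
    };

    static void fill_qp_context_sketch(struct qp_ctx_sketch *context,
    				   u32 pdn, u32 qpn, u32 cqn)
    {
    	memset(context, 0, sizeof(*context));	/* unset fields must be zero */
    	context->pd = cpu_to_be32(pdn);
    	context->local_qpn = cpu_to_be32(qpn);
    	context->cqn_send = cpu_to_be32(cqn);	/* same CQ for send... */
    	context->cqn_recv = cpu_to_be32(cqn);	/* ...and receive, as above */
    	context->mtu_msgmax = 0xff;		/* single bytes need no swap */
    }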
|
| /linux-6.15/drivers/platform/x86/intel/int1092/ |
| intel_sar.c |
|    41  &context->config_data[context->reg_value];  in update_sar_data()
|   135  out = acpi_evaluate_dsm_typed(context->handle, &context->guid, rev,  in sar_get_device_mode()
|   143  update_sar_data(context);  in sar_get_device_mode()
|   189  context->reg_value = value;  in intc_reg_store()
|   190  update_sar_data(context);  in intc_reg_store()
|   223  out = acpi_evaluate_dsm_typed(context->handle, &context->guid, rev,  in sar_get_data()
|   251  context = kzalloc(sizeof(*context), GFP_KERNEL);  in sar_probe()
|   252  if (!context)  in sar_probe()
|   266  sar_get_data(reg, context);  in sar_probe()
|   291  kfree(context);  in sar_probe()
|   [all …]
|
| /linux-6.15/drivers/gpu/drm/amd/display/dc/dml/dcn32/ |
| dcn32_fpu.c |
|  1487  …!dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) && !is_test_pattern_enabled…  in dcn32_full_validate_bw_helper()
|  1656  context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;  in dcn32_calculate_dlg_params()
|  1657  context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;  in dcn32_calculate_dlg_params()
|  1658  context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;  in dcn32_calculate_dlg_params()
|  1672  context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);  in dcn32_calculate_dlg_params()
|  1796  context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg_v2(&context->bw_ctx.dml,  in dcn32_calculate_dlg_params()
|  2356  stream_status = dc_state_get_stream_status(context, context->streams[i]);  in dcn32_calculate_wm_and_dlg_fpu()
|  2407  stream_status = dc_state_get_stream_status(context, context->streams[i]);  in dcn32_calculate_wm_and_dlg_fpu()
|  2556  context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;  in dcn32_calculate_wm_and_dlg_fpu()
|  2588  context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;  in dcn32_calculate_wm_and_dlg_fpu()
|   [all …]
|
| /linux-6.15/drivers/gpu/drm/amd/display/dc/dml/dcn30/ |
| dcn30_fpu.c |
|   321  if (context->streams[i])  in dcn30_fpu_calculate_wm_and_dlg()
|   322  stream_status = dc_state_get_stream_status(context, context->streams[i]);  in dcn30_fpu_calculate_wm_and_dlg()
|   334  context, pipes, pipe_cnt, vlevel);  in dcn30_fpu_calculate_wm_and_dlg()
|   344  dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];  in dcn30_fpu_calculate_wm_and_dlg()
|   369  …context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cn…  in dcn30_fpu_calculate_wm_and_dlg()
|   439  …context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cn…  in dcn30_fpu_calculate_wm_and_dlg()
|   452  context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;  in dcn30_fpu_calculate_wm_and_dlg()
|   462  …context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cn…  in dcn30_fpu_calculate_wm_and_dlg()
|   472  context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;  in dcn30_fpu_calculate_wm_and_dlg()
|   475  context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;  in dcn30_fpu_calculate_wm_and_dlg()
|   [all …]
|
| /linux-6.15/drivers/net/ethernet/qlogic/qed/ |
| qed_nvmetcp_fw_funcs.c |
|   145  struct e5_nvmetcp_task_context *context = task_params->context;  in init_default_nvmetcp_task() local
|   149  memset(context, 0, sizeof(*context));  in init_default_nvmetcp_task()
|   150  init_nvmetcp_task_params(context, task_params,  in init_default_nvmetcp_task()
|   210  SET_FIELD(context->ustorm_st_context.flags,  in set_local_completion_context()
|   221  struct e5_nvmetcp_task_context *context = task_params->context;  in init_rw_nvmetcp_task() local
|   247  &context->mstorm_st_context.data_desc,  in init_rw_nvmetcp_task()
|   260  &context->ustorm_ag_context,  in init_rw_nvmetcp_task()
|   279  context->ustorm_ag_context.exp_cont_len = 0;  in init_rw_nvmetcp_task()
|   327  struct e5_nvmetcp_task_context *context = task_params->context;  in init_common_login_request_task() local
|   334  &context->ustorm_ag_context,  in init_common_login_request_task()
|   [all …]
|
| /linux-6.15/drivers/gpu/drm/amd/display/dc/dml2/dml21/ |
| dml21_utils.c |
|    85  struct dc_state *context,  in dml21_find_dc_pipes_for_plane() argument
|   146  struct dc_state *context,  in dml21_pipe_populate_global_sync() argument
|   243  struct dc_state *context,  in dml21_add_phantom_stream() argument
|   280  struct dc_state *context,  in dml21_add_phantom_plane() argument
|   345  context,  in dml21_handle_phantom_streams_planes()
|   362  context,  in dml21_handle_phantom_streams_planes()
|   378  struct dc_state *context,  in dml21_build_fams2_programming() argument
|   390  for (i = 0; i < context->stream_count; i++) {  in dml21_build_fams2_programming()
|   433  if (context->res_ctx.pipe_ctx[k].stream &&  in dml21_build_fams2_programming()
|   435  context->res_ctx.pipe_ctx[k].plane_state == context->stream_status[i].plane_states[j]) {  in dml21_build_fams2_programming()
|   [all …]
|
| /linux-6.15/arch/riscv/kernel/ |
| suspend.c |
|    18  context->envcfg = csr_read(CSR_ENVCFG);  in suspend_save_csrs()
|    19  context->tvec = csr_read(CSR_TVEC);  in suspend_save_csrs()
|    20  context->ie = csr_read(CSR_IE);  in suspend_save_csrs()
|    40  context->satp = csr_read(CSR_SATP);  in suspend_save_csrs()
|    49  csr_write(CSR_TVEC, context->tvec);  in suspend_restore_csrs()
|    50  csr_write(CSR_IE, context->ie);  in suspend_restore_csrs()
|    60  csr_write(CSR_SATP, context->satp);  in suspend_restore_csrs()
|    67  unsigned long context))  in cpu_suspend() argument
|    77  suspend_save_csrs(&context);  in cpu_suspend()
|    90  (ulong)&context);  in cpu_suspend()
|   [all …]
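suspend_save_csrs() and suspend_restore_csrs() are a mirrored pair: every CSR captured into the context before the platform suspend call is written back after resume, because the hardware may have lost it while powered down. A reduced sketch of the pairing, assuming the riscv csr_read/csr_write macros and a cut-down context struct:

    #include <asm/csr.h>

    struct suspend_ctx_sketch {
    	unsigned long tvec;
    	unsigned long ie;
    	unsigned long satp;
    };

    /* capture the CSRs that will not survive the power-down */
    static void save_csrs_sketch(struct suspend_ctx_sketch *context)
    {
    	context->tvec = csr_read(CSR_TVEC);
    	context->ie = csr_read(CSR_IE);
    	context->satp = csr_read(CSR_SATP);	/* MMU config must survive too */
    }

    /* mirror image of the save path, run after resume */
    static void restore_csrs_sketch(struct suspend_ctx_sketch *context)
    {
    	csr_write(CSR_TVEC, context->tvec);
    	csr_write(CSR_IE, context->ie);
    	csr_write(CSR_SATP, context->satp);
    }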
|
| /linux-6.15/arch/s390/include/asm/ |
| mmu_context.h |
|    24  spin_lock_init(&mm->context.lock);  in init_new_context()
|    25  INIT_LIST_HEAD(&mm->context.gmap_list);  in init_new_context()
|    29  mm->context.gmap_asce = 0;  in init_new_context()
|    30  mm->context.flush_mm = 0;  in init_new_context()
|    32  mm->context.has_pgste = 0;  in init_new_context()
|    33  mm->context.uses_skeys = 0;  in init_new_context()
|    34  mm->context.uses_cmm = 0;  in init_new_context()
|    35  mm->context.allow_cow_sharing = 1;  in init_new_context()
|    36  mm->context.allow_gmap_hpage_1m = 0;  in init_new_context()
|    38  switch (mm->context.asce_limit) {  in init_new_context()
|   [all …]
|
| /linux-6.15/arch/sparc/mm/ |
| tsb.c |
|   545  spin_lock_init(&mm->context.lock);  in init_new_context()
|   547  mm->context.sparc64_ctx_val = 0UL;  in init_new_context()
|   549  mm->context.tag_store = NULL;  in init_new_context()
|   559  mm->context.hugetlb_pte_count = 0;  in init_new_context()
|   560  mm->context.thp_pte_count = 0;  in init_new_context()
|   570  mm->context.tsb_block[i].tsb = NULL;  in init_new_context()
|   611  if (CTX_VALID(mm->context)) {  in destroy_context()
|   619  if (mm->context.tag_store) {  in destroy_context()
|   624  tag_desc = mm->context.tag_store;  in destroy_context()
|   632  kfree(mm->context.tag_store);  in destroy_context()
|   [all …]
|
| /linux-6.15/drivers/gpu/drm/amd/display/dc/resource/dcn32/ |
| dcn32_resource_helpers.c |
|    92  struct dc_state *context)  in dcn32_helper_calculate_num_ways_for_subvp() argument
|   108  struct dc_state *context)  in dcn32_merge_pipes_for_subvp() argument
|   154  struct dc_state *context)  in dcn32_all_pipes_have_stream_and_plane() argument
|   171  struct dc_state *context)  in dcn32_subvp_in_use() argument
|   255  if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM)  in override_det_for_subvp()
|   312  struct dc_state *context,  in dcn32_determine_det_override() argument
|   324  if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM)  in dcn32_determine_det_override()
|   340  if (context->res_ctx.pipe_ctx[j].stream == context->streams[i] &&  in dcn32_determine_det_override()
|   350  if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&  in dcn32_determine_det_override()
|   359  if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&  in dcn32_determine_det_override()
|   [all …]
|