/linux-6.15/drivers/cpufreq/

intel_pstate.c
    451   cpu = all_cpu_data[policy->cpu];  in intel_pstate_init_acpi_perf_limits()
    497   cpu = all_cpu_data[policy->cpu];  in intel_pstate_exit_perf_limits()
    953   arch_set_cpu_capacity(cpu->cpu, cpu->capacity_perf,  in hybrid_set_cpu_capacity()
    2243  trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);  in intel_pstate_set_pstate()
    2264  cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu);  in intel_pstate_get_cpu_pstates()
    2272  cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);  in intel_pstate_get_cpu_pstates()
    2287  cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu);  in intel_pstate_get_cpu_pstates()
    2288  cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(cpu->cpu);  in intel_pstate_get_cpu_pstates()
    2697  cpu->cpu = cpunum;  in intel_pstate_init_cpu()
    2836  cpu = all_cpu_data[policy->cpu];  in intel_pstate_set_policy()
    [all …]
|
/linux-6.15/arch/arm/boot/dts/intel/axm/

axm5516-cpus.dtsi
    13  cpu-map {
    16  cpu = <&CPU0>;
    19  cpu = <&CPU1>;
    22  cpu = <&CPU2>;
    25  cpu = <&CPU3>;
    30  cpu = <&CPU4>;
    33  cpu = <&CPU5>;
    72  CPU0: cpu@0 {
    80  CPU1: cpu@1 {
    88  CPU2: cpu@2 {
    [all …]
|
/linux-6.15/arch/powerpc/kernel/

smp.c
    502   BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);  in __smp_send_nmi_ipi()
    595   int cpu;  in crash_send_ipi()  local
    1064  int cpu;  in init_big_cores()  local
    1125  set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);  in smp_prepare_cpus()
    1267  task_thread_info(idle)->cpu = cpu;  in cpu_idle_thread_init()
    1425  cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));  in update_mask_by_l2()
    1516  cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));  in add_cpu_to_smallcore_masks()
    1573  map_cpu_to_node(cpu, cpu_to_node(cpu));  in add_cpu_to_masks()
    1574  cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));  in add_cpu_to_masks()
    1575  cpumask_set_cpu(cpu, cpu_core_mask(cpu));  in add_cpu_to_masks()
    [all …]
|
tau_6xx.c
    81   tau[cpu].grew = 1;  in TAUupdate()
    92   tau[cpu].grew = 1;  in TAUupdate()
    109  TAUupdate(cpu);  in DEFINE_INTERRUPT_HANDLER_ASYNC()
    115  int cpu;  in tau_timeout()  local
    122  TAUupdate(cpu);  in tau_timeout()
    127  size = tau[cpu].high - tau[cpu].low;  in tau_timeout()
    137  if ((tau[cpu].high - tau[cpu].low) != min_window){  in tau_timeout()
    144  tau[cpu].grew = 0;  in tau_timeout()
    183  tau[cpu].low = 5;  in TAU_init_smp()
    226  return ((tau[cpu].high << 16) | tau[cpu].low);  in cpu_temp_both()
    [all …]
|
watchdog.c
    153  cpu, tb, per_cpu(wd_timer_tb, cpu),  in wd_lockup_ipi()
    220  if (c == cpu)  in watchdog_smp_panic()
    271  cpu, get_tb());  in wd_smp_clear_cpu_pending()
    355  watchdog_smp_panic(cpu);  in watchdog_timer_interrupt()
    403  set_cpu_stuck(cpu);  in DEFINE_INTERRUPT_HANDLER_NMI()
    408  cpu, (void *)regs->nip);  in DEFINE_INTERRUPT_HANDLER_NMI()
    410  cpu, tb, per_cpu(wd_timer_tb, cpu),  in DEFINE_INTERRUPT_HANDLER_NMI()
    550  int cpu;  in watchdog_hardlockup_stop()  local
    553  stop_watchdog_on_cpu(cpu);  in watchdog_hardlockup_stop()
    558  int cpu;  in watchdog_hardlockup_start()  local
    [all …]
|
/linux-6.15/drivers/base/

arch_topology.c
    67   int cpu;  in topology_set_scale_freq_source()  local
    98   int cpu;  in topology_clear_scale_freq_source()  local
    186  int cpu;  in topology_update_hw_pressure()  local
    214  struct cpu *cpu = container_of(dev, struct cpu, dev);  in cpu_capacity_show()  local
    290  int cpu;  in topology_normalize_cpu_scale()  local
    310  cpu, topology_get_cpu_scale(cpu));  in topology_normalize_cpu_scale()
    376  int cpu;  in topology_init_cpu_capacity_cppc()  local
    396  cpu, raw_capacity[cpu]);  in topology_init_cpu_capacity_cppc()
    414  cpu, topology_get_cpu_scale(cpu));  in topology_init_cpu_capacity_cppc()
    440  int cpu;  in init_cpu_capacity_callback()  local
    [all …]
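The cpu_capacity_show() match above is the sysfs side of this driver: on architectures that use the generic arch_topology code (arm, arm64, riscv), the per-CPU capacity computed by topology_normalize_cpu_scale() is exposed as /sys/devices/system/cpu/cpuN/cpu_capacity, scaled so the largest CPU reads 1024. A minimal userspace sketch that reads it (assumes the attribute is present; it is absent on e.g. x86):

```c
/* Sketch: read the per-CPU capacity that cpu_capacity_show() exports.
 * Assumes /sys/devices/system/cpu/cpuN/cpu_capacity exists, which is
 * only the case on architectures using drivers/base/arch_topology.c. */
#include <stdio.h>

int main(void)
{
	char path[64];
	unsigned long cap;
	FILE *f;

	for (int cpu = 0; ; cpu++) {
		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu%d/cpu_capacity", cpu);
		f = fopen(path, "r");
		if (!f)
			break;	/* no such CPU, or capacity not provided */
		if (fscanf(f, "%lu", &cap) == 1)
			printf("cpu%d: capacity %lu\n", cpu, cap);
		fclose(f);
	}
	return 0;
}
```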
|
cacheinfo.c
    25   #define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu))  argument
    26   #define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves)  argument
    27   #define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list)  argument
    61   if (!cache_leaves(cpu) || !per_cpu_cacheinfo(cpu))  in last_level_cache_is_valid()
    64   llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);  in last_level_cache_is_valid()
    525  if (init_cache_level(cpu) || !cache_leaves(cpu))  in init_level_allocate_ci()
    588  #define per_cpu_cache_dev(cpu) (per_cpu(ci_cache_dev, cpu))  argument
    594  #define per_cpu_index_dev(cpu) (per_cpu(ci_index_dev, cpu))  argument
    835  per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),  in cpu_cache_sysfs_init()
    892  llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);  in cpu_map_shared_cache()
    [all …]
|
/linux-6.15/tools/testing/selftests/cpu-hotplug/

cpu-on-off-test.sh
    27   if ! ls $SYSFS/devices/system/cpu/cpu* > /dev/null 2>&1; then
    63   for cpu in $SYSFS/devices/system/cpu/cpu*; do
    64   if [ -f $cpu/online ] && grep -q $state $cpu/online; then
    82   grep -q 1 $SYSFS/devices/system/cpu/cpu$1/online
    87   grep -q 0 $SYSFS/devices/system/cpu/cpu$1/online
    92   echo 1 > $SYSFS/devices/system/cpu/cpu$1/online
    97   echo 0 > $SYSFS/devices/system/cpu/cpu$1/online
    102  local cpu=$1
    115  local cpu=$1
    128  local cpu=$1
    [all …]
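The selftest drives CPU hotplug entirely through sysfs: reading cpuN/online tells whether a CPU is up, and writing 0 or 1 takes it down or brings it back. A minimal C sketch of the same interface (needs root and a kernel with CONFIG_HOTPLUG_CPU; cpu_set_online() is just an illustrative helper name, not a kernel or libc API):

```c
/* Sketch: toggle a CPU on/off through the sysfs hotplug interface,
 * mirroring what cpu-on-off-test.sh does with echo/grep. */
#include <stdio.h>
#include <stdlib.h>

static int cpu_set_online(int cpu, int online)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/online", cpu);
	f = fopen(path, "w");
	if (!f)
		return -1;	/* e.g. cpu0 often has no 'online' file */
	fprintf(f, "%d\n", online);
	return fclose(f) ? -1 : 0;
}

int main(int argc, char **argv)
{
	int cpu = argc > 1 ? atoi(argv[1]) : 1;

	if (cpu_set_online(cpu, 0))
		perror("offline");
	if (cpu_set_online(cpu, 1))
		perror("online");
	return 0;
}
```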
|
/linux-6.15/include/linux/

topology.h
    200  #define topology_die_id(cpu) ((void)(cpu), -1)  argument
    203  #define topology_cluster_id(cpu) ((void)(cpu), -1)  argument
    206  #define topology_core_id(cpu) ((void)(cpu), 0)  argument
    209  #define topology_book_id(cpu) ((void)(cpu), -1)  argument
    212  #define topology_drawer_id(cpu) ((void)(cpu), -1)  argument
    215  #define topology_ppin(cpu) ((void)(cpu), 0ull)  argument
    218  #define topology_sibling_cpumask(cpu) cpumask_of(cpu)  argument
    221  #define topology_core_cpumask(cpu) cpumask_of(cpu)  argument
    227  #define topology_die_cpumask(cpu) cpumask_of(cpu)  argument
    230  #define topology_book_cpumask(cpu) cpumask_of(cpu)  argument
    [all …]
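These matches are the generic fallbacks: when an architecture does not provide its own topology accessors, the IDs collapse to a constant (-1 or 0) and the cpumask helpers degrade to cpumask_of(cpu), so every CPU appears as its own single-threaded core. A hedged kernel-module sketch of how such accessors are typically consumed (the module itself is illustrative, not from the tree):

```c
/* Illustrative module: dump package/core IDs and the SMT sibling mask
 * for each online CPU using the topology_*() accessors above. */
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

static int __init topo_dump_init(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu)
		pr_info("cpu%u: pkg %d core %d siblings %*pbl\n",
			cpu,
			topology_physical_package_id(cpu),
			topology_core_id(cpu),
			cpumask_pr_args(topology_sibling_cpumask(cpu)));
	return 0;
}

static void __exit topo_dump_exit(void) { }

module_init(topo_dump_init);
module_exit(topo_dump_exit);
MODULE_LICENSE("GPL");
```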
|
cpumask.h
    143   return cpu;  in cpumask_check()
    425   if (i != cpu)  in cpumask_any_but()
    447   if (i != cpu)  in cpumask_any_and_but()
    880   #define cpumask_of(cpu) (get_cpu_mask(cpu))  argument
    1050  #define for_each_possible_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)  argument
    1051  #define for_each_online_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)  argument
    1052  #define for_each_present_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)  argument
    1055  for ((void)(start), (cpu) = 0; (cpu) < 1; (cpu)++)
    1057  for ((void)(start), (cpu) = 0; (cpu) < 1; (cpu)++)
    1181  return cpu == 0;  in cpu_online()
    [all …]
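The for_each_possible_cpu()/for_each_online_cpu() and cpu_online() matches above are the !CONFIG_SMP stubs: every iterator degenerates to a single pass over CPU 0 and cpu_online() reduces to cpu == 0, so code written against the cpumask API still works on uniprocessor builds. A small illustrative kernel-side helper (hypothetical, not from the tree) built on the same primitives; with the UP stubs it trivially returns CPU 0:

```c
#include <linux/cpumask.h>
#include <linux/smp.h>

/* Illustrative helper: pick an online CPU other than the current one. */
static unsigned int pick_other_online_cpu(void)
{
	unsigned int self = raw_smp_processor_id();
	unsigned int cpu;

	/* Any online CPU except self; result is >= nr_cpu_ids if none. */
	cpu = cpumask_any_but(cpu_online_mask, self);
	if (cpu < nr_cpu_ids)
		return cpu;

	/* Nothing else online: fall back to scanning all possible CPUs. */
	for_each_possible_cpu(cpu)
		if (cpu_online(cpu))
			return cpu;

	return self;
}
```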
|
/linux-6.15/Documentation/translations/zh_CN/scheduler/

sched-bwc.rst
    78   cpu.cfs_period_us=100ms
    79   cpu.cfs_quota_us=-1
    80   cpu.cfs_burst_us=0
    92   …changed. Writing any (valid) positive value no larger than cpu.cfs_quota_us to cpu.cfs_burst_us allots the unused…
    110  A group's bandwidth statistics are exported via five fields in cpu.stat.
    112  cpu.stat:
    143  Once a slice is assigned to a cpu it does not expire. However, if all threads on that cpu become unrunnable, then apart from…
    154  i.e. the amount of unused slice on each cpu on which the task group is running (typically at most 1ms per cpu, or as set by min_cfs_rq_runtime…
    160  …the possibility of wasting quota in cpu-local silos of time.
    162  The interaction between cpu-bound and non-cpu-bound interactive applications should also be considered, especially when single-core usage reaches 100%. If you…
    [all …]
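The matched lines describe the CFS bandwidth-control interface: the defaults are a 100ms period, unlimited quota (-1) and burst disabled (0), and cpu.cfs_burst_us only accepts positive values no larger than the quota. A hedged sketch of setting those knobs from C: the cgroup-v1 mount point /sys/fs/cgroup/cpu and the group name "demo" are assumptions, and cgroup v2 exposes the same limit through a single cpu.max file instead:

```c
/* Sketch: cap an existing cgroup at half a CPU using the knobs above. */
#include <stdio.h>

static int write_knob(const char *group, const char *knob, long val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/fs/cgroup/cpu/%s/%s", group, knob);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%ld\n", val);
	return fclose(f) ? -1 : 0;
}

int main(void)
{
	/* 50ms of runtime every 100ms period => at most 0.5 CPU on average. */
	write_knob("demo", "cpu.cfs_period_us", 100000);
	write_knob("demo", "cpu.cfs_quota_us", 50000);
	/* 0 disables burst; any positive value up to the quota enables it. */
	write_knob("demo", "cpu.cfs_burst_us", 0);
	return 0;
}
```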
|
/linux-6.15/kernel/sched/

ext_idle.c
    111   int cpu;  in pick_idle_cpu_in_node()  local
    129   return cpu;  in pick_idle_cpu_in_node()
    180   return cpu;  in pick_idle_cpu_from_online_nodes()
    188   s32 cpu;  in scx_pick_idle_cpu()  local
    287   int cpu;  in llc_numa_mismatch()  local
    313   if (llc_weight(cpu) != numa_weight(cpu))  in llc_numa_mismatch()
    352   cpumask_pr_args(llc_span(cpu)), llc_weight(cpu));  in scx_idle_update_selcpu_topology()
    432   s32 cpu;  in scx_select_cpu_dfl()  local
    820   s32 cpu;  in scx_bpf_select_cpu_dfl()  local
    1069  s32 cpu;  in scx_bpf_pick_any_cpu_node()  local
    [all …]
|
/linux-6.15/tools/perf/tests/

topology.c
    117  cpu.cpu = i;  in check_cpu_topology()
    129  cpu.cpu == id.cpu.cpu);  in check_cpu_topology()
    132  session->header.env.cpu[cpu.cpu].core_id == id.core);  in check_cpu_topology()
    134  session->header.env.cpu[cpu.cpu].socket_id ==  in check_cpu_topology()
    138  session->header.env.cpu[cpu.cpu].die_id == id.die);  in check_cpu_topology()
    147  session->header.env.cpu[cpu.cpu].core_id == id.core);  in check_cpu_topology()
    150  session->header.env.cpu[cpu.cpu].socket_id ==  in check_cpu_topology()
    154  session->header.env.cpu[cpu.cpu].die_id == id.die);  in check_cpu_topology()
    163  session->header.env.cpu[cpu.cpu].socket_id ==  in check_cpu_topology()
    167  session->header.env.cpu[cpu.cpu].die_id == id.die);  in check_cpu_topology()
    [all …]
|
/linux-6.15/arch/microblaze/kernel/cpu/

cpuinfo-static.c
    51   ci->use_fpu = fcpu(cpu, "xlnx,use-fpu");  in set_cpuinfo_static()
    63   (fcpu(cpu, "xlnx,iopb-bus-exception") ?  in set_cpuinfo_static()
    65   (fcpu(cpu, "xlnx,dopb-bus-exception") ?  in set_cpuinfo_static()
    67   (fcpu(cpu, "xlnx,div-zero-exception") ?  in set_cpuinfo_static()
    77   if (fcpu(cpu, "xlnx,icache-use-fsl"))  in set_cpuinfo_static()
    91   if (fcpu(cpu, "xlnx,dcache-use-fsl"))  in set_cpuinfo_static()
    101  ci->use_dopb = fcpu(cpu, "xlnx,d-opb");  in set_cpuinfo_static()
    102  ci->use_iopb = fcpu(cpu, "xlnx,i-opb");  in set_cpuinfo_static()
    103  ci->use_dlmb = fcpu(cpu, "xlnx,d-lmb");  in set_cpuinfo_static()
    104  ci->use_ilmb = fcpu(cpu, "xlnx,i-lmb");  in set_cpuinfo_static()
    [all …]
|
/linux-6.15/arch/powerpc/include/asm/

smp.h
    35   extern int cpu_to_chip_id(int cpu);
    47   void (*cause_ipi)(int cpu);
    49   int (*cause_nmi_ipi)(int cpu);
    83   int is_cpu_dead(unsigned int cpu);
    100  return smp_hw_index[cpu];  in get_hard_smp_processor_id()
    105  smp_hw_index[cpu] = phys;  in set_hard_smp_processor_id()
    121  return per_cpu(cpu_core_map, cpu);  in cpu_core_mask()
    196  return cpumask_of(cpu);  in cpu_sibling_mask()
    201  return cpumask_of(cpu);  in cpu_smallcore_mask()
    206  return cpumask_of(cpu);  in cpu_l2_cache_mask()
    [all …]
|
/linux-6.15/arch/arm/mach-tegra/

platsmp.c
    44   cpu = cpu_logical_map(cpu);  in tegra20_boot_secondary()
    54   tegra_put_cpu_in_reset(cpu);  in tegra20_boot_secondary()
    62   flowctrl_write_cpu_halt(cpu, 0);  in tegra20_boot_secondary()
    64   tegra_enable_cpu_clock(cpu);  in tegra20_boot_secondary()
    66   tegra_cpu_out_of_reset(cpu);  in tegra20_boot_secondary()
    75   cpu = cpu_logical_map(cpu);  in tegra30_boot_secondary()
    76   tegra_put_cpu_in_reset(cpu);  in tegra30_boot_secondary()
    114  tegra_enable_cpu_clock(cpu);  in tegra30_boot_secondary()
    125  tegra_cpu_out_of_reset(cpu);  in tegra30_boot_secondary()
    133  cpu = cpu_logical_map(cpu);  in tegra114_boot_secondary()
    [all …]
|
/linux-6.15/arch/arm/mach-meson/

platsmp.c
    50   val |= BIT(cpu);  in meson_smp_set_cpu_ctrl()
    52   val &= ~BIT(cpu);  in meson_smp_set_cpu_ctrl()
    131  scu_cpu_power_enable(scu_base, cpu);  in meson_smp_begin_secondary_boot()
    142  cpu);  in meson_smp_finalize_secondary_boot()
    150  meson_smp_set_cpu_ctrl(cpu, true);  in meson_smp_finalize_secondary_boot()
    161  rstc = meson_smp_get_core_reset(cpu);  in meson8_smp_boot_secondary()
    167  meson_smp_begin_secondary_boot(cpu);  in meson8_smp_boot_secondary()
    224  meson_smp_begin_secondary_boot(cpu);  in meson8b_smp_boot_secondary()
    297  meson_smp_set_cpu_ctrl(cpu, false);  in meson8_smp_cpu_die()
    327  cpu);  in meson8_smp_cpu_kill()
    [all …]
|
/linux-6.15/arch/arm64/kernel/

smp.c
    145  if (cpu_online(cpu))  in __cpu_up()
    251  ipi_setup(cpu);  in secondary_start_kernel()
    253  numa_add_cpu(cpu);  in secondary_start_kernel()
    324  ipi_teardown(cpu);  in __cpu_disable()
    391  ops->cpu_die(cpu);  in cpu_die()
    403  ops->cpu_die(cpu);  in __cpu_try_die()
    518  struct cpu *c = &per_cpu(cpu_devices, cpu);  in arch_register_cpu()
    526  if (invalid_logical_cpuid(cpu) || !cpu_present(cpu)) {  in arch_register_cpu()
    545  struct cpu *c = &per_cpu(cpu_devices, cpu);  in arch_unregister_cpu()
    784  unsigned int cpu;  in smp_prepare_cpus()  local
    [all …]
|
/linux-6.15/arch/arm/mach-bcm/

platsmp-brcmstb.c
    82   static u32 pwr_ctrl_rd(u32 cpu)  in pwr_ctrl_rd()  argument
    107  tmp = pwr_ctrl_rd(cpu) & mask;  in pwr_ctrl_wait_tmout()
    112  tmp = pwr_ctrl_rd(cpu) & mask;  in pwr_ctrl_wait_tmout()
    140  per_cpu_sw_state_wr(cpu, 1);  in brcmstb_cpu_boot()
    149  cpu_rst_cfg_set(cpu, 0);  in brcmstb_cpu_boot()
    178  int tmp = pwr_ctrl_rd(cpu);  in brcmstb_cpu_get_power_state()
    188  per_cpu_sw_state_wr(cpu, 0);  in brcmstb_cpu_die()
    206  if (cpu == 0) {  in brcmstb_cpu_kill()
    232  cpu_rst_cfg_set(cpu, 1);  in brcmstb_cpu_kill()
    347  brcmstb_cpu_power_on(cpu);  in brcmstb_boot_secondary()
    [all …]
|
/linux-6.15/arch/x86/xen/

smp.c
    71   cpu,  in xen_smp_intr_init()
    85   cpu,  in xen_smp_intr_init()
    115  cpu,  in xen_smp_intr_init()
    129  xen_smp_intr_free(cpu);  in xen_smp_intr_init()
    147  unsigned cpu;  in __xen_send_IPI_mask()  local
    150  xen_send_IPI_one(cpu, vector);  in __xen_send_IPI_mask()
    155  int cpu;  in xen_smp_send_call_function_ipi()  local
    160  for_each_cpu(cpu, mask) {  in xen_smp_send_call_function_ipi()
    161  if (xen_vcpu_stolen(cpu)) {  in xen_smp_send_call_function_ipi()
    234  unsigned cpu;  in xen_send_IPI_mask_allbutself()  local
    [all …]
|
smp_pv.c
    59   int cpu;  in cpu_bringup()  local
    72   cpu = smp_processor_id();  in cpu_bringup()
    74   set_cpu_sibling_map(cpu);  in cpu_bringup()
    80   notify_cpu_starting(cpu);  in cpu_bringup()
    121  cpu,  in xen_smp_intr_init_pv()
    186  unsigned cpu;  in xen_pv_smp_prepare_cpus()  local
    215  for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)  in xen_pv_smp_prepare_cpus()
    307  xen_pmu_init(cpu);  in xen_pv_kick_ap()
    327  if (cpu == 0)  in xen_pv_cpu_disable()
    346  xen_smp_intr_free(cpu);  in xen_pv_cleanup_dead_cpu()
    [all …]
|
/linux-6.15/arch/s390/include/asm/

topology.h
    9    struct cpu;
    29   #define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)  argument
    30   #define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)  argument
    32   #define topology_core_id(cpu) (cpu_topology[cpu].core_id)  argument
    33   #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)  argument
    34   #define topology_book_id(cpu) (cpu_topology[cpu].book_id)  argument
    35   #define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)  argument
    36   #define topology_drawer_id(cpu) (cpu_topology[cpu].drawer_id)  argument
    38   #define topology_cpu_dedicated(cpu) (cpu_topology[cpu].dedicated)  argument
    56   static inline int topology_cpu_init(struct cpu *cpu) { return 0; }  in topology_cpu_init()  argument
    [all …]
|
/linux-6.15/kernel/

smpboot.c
    55   tsk = fork_idle(cpu);  in idle_init()
    73   if (cpu != boot_cpu)  in idle_threads_init()
    74   idle_init(cpu);  in idle_threads_init()
    85   unsigned int cpu;  member
    119  ht->cleanup(td->cpu, cpu_online(td->cpu));  in smpboot_thread_fn()
    129  ht->park(td->cpu);  in smpboot_thread_fn()
    145  ht->setup(td->cpu);  in smpboot_thread_fn()
    181  td->cpu = cpu;  in __smpboot_create_thread()
    208  ht->create(cpu);  in __smpboot_create_thread()
    268  unsigned int cpu;  in smpboot_destroy_threads()  local
    [all …]
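The ht->setup()/ht->park()/ht->cleanup() calls matched above are smpboot invoking client-supplied hooks: a subsystem describes its per-CPU kthread in a struct smp_hotplug_thread, registers it once, and smpboot creates, parks and unparks one thread per CPU across hotplug. A hedged sketch of such a client, with field names as in include/linux/smpboot.h and all "demo" names hypothetical:

```c
/* Illustrative smpboot client: one "demo/N" kthread per CPU, created,
 * parked and unparked automatically across CPU hotplug. */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, demo_task);
static DEFINE_PER_CPU(bool, demo_should_run);

static int demo_should_run_fn(unsigned int cpu)
{
	return per_cpu(demo_should_run, cpu);
}

static void demo_thread_fn(unsigned int cpu)
{
	per_cpu(demo_should_run, cpu) = false;
	pr_info("demo thread ran on cpu%u\n", cpu);
}

static void demo_park(unsigned int cpu)
{
	pr_info("demo/%u parked (cpu going offline)\n", cpu);
}

static struct smp_hotplug_thread demo_threads = {
	.store			= &demo_task,
	.thread_should_run	= demo_should_run_fn,
	.thread_fn		= demo_thread_fn,
	.park			= demo_park,
	.thread_comm		= "demo/%u",
};

static int __init demo_smpboot_init(void)
{
	return smpboot_register_percpu_thread(&demo_threads);
}

static void __exit demo_smpboot_exit(void)
{
	smpboot_unregister_percpu_thread(&demo_threads);
}

module_init(demo_smpboot_init);
module_exit(demo_smpboot_exit);
MODULE_LICENSE("GPL");
```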
|
/linux-6.15/tools/power/cpupower/utils/

cpufreq-info.c
    58   unsigned int cpu, nr_cpus;  in proc_cpufreq_output()  local
    67   for (cpu = 0; cpu < nr_cpus; cpu++) {  in proc_cpufreq_output()
    480  get_driver(cpu);  in debug_output_one()
    481  get_related_cpus(cpu);  in debug_output_one()
    483  get_latency(cpu, 1);  in debug_output_one()
    484  get_epp(cpu, true);  in debug_output_one()
    501  get_policy(cpu);  in debug_output_one()
    504  get_boost_mode(cpu);  in debug_output_one()
    505  get_perf_cap(cpu);  in debug_output_one()
    534  unsigned int cpu = 0;  in cmd_freq_info()  local
    [all …]
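cpufreq-info's internal helpers (get_driver(), get_policy(), get_latency(), ...) ultimately summarize the per-policy cpufreq attributes exported under sysfs. A small sketch reading a few of the same attributes directly; which attributes exist depends on the active cpufreq driver:

```c
/* Sketch: print the cpufreq driver, governor and current frequency
 * for cpu0, straight from the sysfs files cpufreq-info also reads. */
#include <stdio.h>
#include <string.h>

static void show(int cpu, const char *attr)
{
	char path[128], buf[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/cpufreq/%s", cpu, attr);
	f = fopen(path, "r");
	if (!f || !fgets(buf, sizeof(buf), f)) {
		printf("  %s: n/a\n", attr);
		if (f)
			fclose(f);
		return;
	}
	buf[strcspn(buf, "\n")] = '\0';
	printf("  %s: %s\n", attr, buf);
	fclose(f);
}

int main(void)
{
	int cpu = 0;

	printf("cpu%d:\n", cpu);
	show(cpu, "scaling_driver");
	show(cpu, "scaling_governor");
	show(cpu, "scaling_cur_freq");	/* kHz */
	return 0;
}
```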
|
/linux-6.15/tools/perf/util/

cpumap.c
    143  RC_CHK_ACCESS(map)->map[i].cpu = cpu;  in cpu_map__from_range()
    388  id.cpu = cpu;  in aggr_cpu_id__cpu()
    406  cpu.cpu = 0;  in aggr_cpu_id__global()
    407  id.cpu = cpu;  in aggr_cpu_id__global()
    551  return cpunode_map[cpu.cpu];  in cpu__get_node()
    697  bitmap[c.cpu / 8] |= 1 << (c.cpu % 8);  in cpu_map__snprint_mask()
    699  for (int cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) {  in cpu_map__snprint_mask()  local
    708  if ((cpu % 32) == 0 && cpu > 0)  in cpu_map__snprint_mask()
    738  a->cpu.cpu == b->cpu.cpu;  in aggr_cpu_id__equal()
    751  a->cpu.cpu == -1;  in aggr_cpu_id__is_empty()
    [all …]
|