Searched refs:cpu_of (Results 1 – 11 of 11) sorted by relevance
| /linux-6.15/kernel/sched/ |
| pelt.c |
|   439 | running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq))); | in update_irq_load_avg() |
|   440 | running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq))); | in update_irq_load_avg() |
|   480 | unsigned long hw_pressure = arch_scale_hw_pressure(cpu_of(rq)); | in update_other_load_avgs() |
|
| pelt.h |
|   119 | delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq))); | in update_rq_clock_pelt() |
|   120 | delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq))); | in update_rq_clock_pelt() |
|
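The pelt.c and pelt.h hits above all follow the same pattern: a running-time delta is scaled by the CPU's current frequency capacity and by its microarchitectural capacity (both looked up via cpu_of(rq)) so that PELT sums stay comparable across CPUs. The sketch below is a self-contained illustration of that pattern, not kernel code: cap_scale() and the SCHED_CAPACITY_* constants mirror their kernel definitions as I understand them, while scale_running_time() and its parameters are hypothetical stand-ins for the arch_scale_freq_capacity(cpu_of(rq)) / arch_scale_cpu_capacity(cpu_of(rq)) calls.

```c
/*
 * Self-contained sketch of the frequency/capacity-invariance scaling used
 * by the pelt.c and pelt.h hits above. cap_scale() and the SCHED_CAPACITY_*
 * constants mirror their kernel definitions as I understand them;
 * scale_running_time() and its *_cap parameters are hypothetical stand-ins
 * for the arch_scale_*_capacity(cpu_of(rq)) lookups.
 */
#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)	/* 1024 == full capacity */
#define cap_scale(v, s)		(((v) * (s)) >> SCHED_CAPACITY_SHIFT)

static unsigned long scale_running_time(unsigned long running,
					unsigned long freq_cap,	/* current freq vs. max, in 1024ths */
					unsigned long cpu_cap)	/* this CPU's capacity vs. the biggest CPU */
{
	running = cap_scale(running, freq_cap);	/* frequency invariance */
	running = cap_scale(running, cpu_cap);	/* CPU (microarchitecture) invariance */
	return running;
}
```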
| ext.c |
|  2163 | if (sticky_cpu == cpu_of(rq)) | in do_enqueue_task() |
|  2293 | sticky_cpu = cpu_of(rq); | in enqueue_task_scx() |
|  2467 | set_task_cpu(p, cpu_of(dst_rq)); | in move_remote_task_to_local_dsq() |
|  2468 | p->scx.sticky_cpu = cpu_of(dst_rq); | in move_remote_task_to_local_dsq() |
|  2507 | int cpu = cpu_of(rq); | in task_can_run_on_remote_rq() |
|  2763 | int node = cpu_to_node(cpu_of(rq)); | in consume_global_dsq() |
|  3051 | scx_bpf_kick_cpu(cpu_of(rq), 0); | in balance_one() |
|  3338 | scx_bpf_kick_cpu(cpu_of(rq), SCX_KICK_IDLE); | in pick_task_scx() |
|  3472 | int cpu = cpu_of(rq); | in handle_hotplug() |
|  5966 | if (cpu != cpu_of(this_rq)) { | in kick_cpus_irq_workfn() |
| [all …] |
|
| fair.c |
|   315 | int cpu = cpu_of(rq); | in list_add_leaf_cfs_rq() |
|  4118 | if (!cpu_active(cpu_of(rq_of(cfs_rq)))) | in update_tg_load_avg() |
|  5118 | int cpu = cpu_of(rq); | in update_misfit_status() |
|  5910 | se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; | in throttle_cfs_rq() |
|  5995 | se = cfs_rq->tg->se[cpu_of(rq)]; | in unthrottle_cfs_rq() |
|  6206 | if (cpu_of(rq) != this_cpu) { | in distribute_cfs_runtime() |
|  6711 | int cpu = cpu_of(rq); | in sched_fair_update_stop_tick() |
|  7590 | int core = cpu_of(rq); | in __update_idle_core() |
|  9790 | int cpu = cpu_of(rq); | in __update_blocked_fair() |
| 11945 | stop_one_cpu_nowait(cpu_of(busiest), | in sched_balance_rq() |
| [all …] |
|
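Several of the fair.c hits (throttle_cfs_rq() at 5910, unthrottle_cfs_rq() at 5995) use cpu_of() as an index into a task group's per-CPU arrays. A simplified sketch of that indexing pattern follows; the toy_* types are illustrative stand-ins, not the kernel's task_group/sched_entity layout.

```c
/*
 * Simplified sketch of the per-CPU indexing behind
 * "se = cfs_rq->tg->se[cpu_of(rq)]": a task group keeps one scheduling
 * entity per CPU, and cpu_of(rq) picks the slot for this runqueue's CPU.
 * The toy_* types are stand-ins, not the kernel's layout.
 */
struct toy_sched_entity {
	int on_rq;			/* queued on its cfs_rq? */
};

struct toy_rq {
	int cpu;			/* CPU this runqueue belongs to */
};

struct toy_task_group {
	struct toy_sched_entity **se;	/* per-CPU array, indexed by CPU number */
};

static inline int toy_cpu_of(struct toy_rq *rq)
{
	return rq->cpu;
}

/* The group's representative entity on the CPU that owns @rq. */
static struct toy_sched_entity *toy_tg_se(struct toy_task_group *tg, struct toy_rq *rq)
{
	return tg->se[toy_cpu_of(rq)];
}
```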
| core_sched.c |
|   242 | const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq)); | in __sched_core_account_forceidle() |
|
| ext_idle.c |
|   665 | int cpu = cpu_of(rq); | in __scx_update_idle() |
|   677 | SCX_CALL_OP(SCX_KF_REST, update_idle, rq, cpu_of(rq), idle); | in __scx_update_idle() |
|
| sched.h |
|  1322 | static inline int cpu_of(struct rq *rq) | in cpu_of() function |
|  1419 | for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) { | in sched_core_cookie_match() |
|  2702 | int cpu = cpu_of(rq); | in sched_update_tick_dependency() |
|  2824 | if (!cpu_active(cpu_of(rq))) | in hrtick_enabled() |
|  3271 | cpu_of(rq))); | in cpufreq_update_util() |
|  3391 | rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq); | in uclamp_rq_is_capped() |
|  3720 | struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq)); | in mm_cid_snapshot_time() |
|
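The sched.h hit at 1322 is the definition site itself. As a reference point, here is a minimal sketch of the helper's conventional shape: on SMP kernels struct rq caches its CPU number, while UP builds always answer CPU 0. This is a sketch of the shape only; the real struct rq has many more fields and its exact layout is not reproduced here.

```c
/*
 * Sketch of cpu_of() as conventionally defined in kernel/sched/sched.h.
 * Shape only: the real struct rq carries many more fields.
 */
struct rq {
	unsigned int nr_running;	/* stand-in for the many other fields */
#ifdef CONFIG_SMP
	int cpu;			/* which CPU this runqueue belongs to */
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;	/* SMP: every runqueue is pinned to one CPU */
#else
	return 0;	/* UP: there is only CPU 0 */
#endif
}
```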
| core.c |
|   780 | steal = prev_steal = paravirt_steal_clock(cpu_of(rq)); | in update_rq_clock_task() |
|   814 | clock = sched_clock_cpu(cpu_of(rq)); | in update_rq_clock() |
|   845 | WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); | in hrtick() |
|  1110 | cpu = cpu_of(rq); | in __resched_curr() |
|  1290 | int cpu = cpu_of(rq); | in nohz_csd_func() |
|  3831 | if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq))) | in sched_ttwu_pending() |
|  3832 | set_task_cpu(p, cpu_of(rq)); | in sched_ttwu_pending() |
|  5655 | hw_pressure = arch_scale_hw_pressure(cpu_of(rq)); | in sched_tick() |
|  6119 | cpu = cpu_of(rq); | in pick_next_task() |
|  6407 | int cpu = cpu_of(rq); | in sched_core_balance() |
| [all …] |
|
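Among the core.c hits, update_rq_clock() at 814 shows the central use: the runqueue clock is advanced from the per-CPU scheduler clock of the rq's own CPU. Below is a stripped-down, self-contained sketch of that accumulation step; lock assertions, clock-update flags, and the irq/steal-time adjustments done by update_rq_clock_task() are omitted, and the toy_* names are stand-ins rather than kernel symbols.

```c
/*
 * Stripped-down sketch of the rq-clock update pattern from core.c:
 * read the per-CPU scheduler clock of the rq's own CPU and accumulate
 * the delta. Locking, clock-update flags, and the irq/steal-time
 * handling of update_rq_clock_task() are omitted.
 */
#include <stdint.h>

struct toy_rq {
	int cpu;		/* CPU this runqueue belongs to */
	uint64_t clock;		/* rq->clock: raw runqueue clock, in ns */
	uint64_t clock_task;	/* rq->clock_task: clock minus irq/steal time */
};

/* Stand-in for sched_clock_cpu(): monotonic nanoseconds as seen by @cpu. */
static uint64_t toy_sched_clock_cpu(int cpu)
{
	static uint64_t fake_ns;
	(void)cpu;
	return fake_ns += 1000;	/* dummy clock advancing 1us per call */
}

static void toy_update_rq_clock(struct toy_rq *rq)
{
	int64_t delta = toy_sched_clock_cpu(rq->cpu) - rq->clock;

	if (delta < 0)		/* per-CPU clocks can be slightly out of sync */
		return;

	rq->clock += delta;
	rq->clock_task += delta;	/* the real code subtracts irq and steal time here */
}
```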
| deadline.c |
|  1477 | int cpu = cpu_of(rq); | in dl_scaled_delta_exec() |
|  1699 | int cpu = cpu_of(rq); | in __dl_server_attach_root() |
|  1702 | dl_b = dl_bw_of(cpu_of(rq)); | in __dl_server_attach_root() |
|  1716 | int cpu = cpu_of(rq); | in dl_server_apply_params() |
|  2906 | src_dl_b = dl_bw_of(cpu_of(rq)); | in set_cpus_allowed_dl() |
|
| rt.c |
|   514 | (rt_rq = iter->rt_rq[cpu_of(rq)]);) | |
|   533 | int cpu = cpu_of(rq); | in sched_rt_rq_enqueue() |
|   551 | int cpu = cpu_of(rq_of_rt_rq(rt_rq)); | in sched_rt_rq_dequeue() |
|  2473 | if (p->prio < rq->donor->prio && cpu_online(cpu_of(rq))) | in switched_to_rt() |
|
| debug.c |
|   395 | cpu_of(rq)); | in sched_fair_server_write() |
|