// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/syscalls.c
 *
 * Core kernel scheduler syscalls related code
 *
 * Copyright (C) 1991-2002 Linus Torvalds
 * Copyright (C) 1998-2024 Ingo Molnar, Red Hat
 */
#include <linux/sched.h>
#include <linux/cpuset.h>
#include <linux/sched/debug.h>

#include <uapi/linux/sched/types.h>

#include "sched.h"
#include "autogroup.h"

static inline int __normal_prio(int policy, int rt_prio, int nice)
{
	int prio;

	if (dl_policy(policy))
		prio = MAX_DL_PRIO - 1;
	else if (rt_policy(policy))
		prio = MAX_RT_PRIO - 1 - rt_prio;
	else
		prio = NICE_TO_PRIO(nice);

	return prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_or_dl_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

void set_user_nice(struct task_struct *p, long nice)
{
	bool queued, running;
	struct rq *rq;
	int old_prio;

	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
		return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
	CLASS(task_rq_lock, rq_guard)(p);
	rq = rq_guard.rq;

	update_rq_clock(rq);

	/*
	 * The RT priorities are set via sched_setscheduler(), but we still
	 * allow the 'normal' nice value to be set - but as expected
	 * it won't have any effect on scheduling as long as the task's
	 * policy is SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
	 */
	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
		p->static_prio = NICE_TO_PRIO(nice);
		return;
	}

	queued = task_on_rq_queued(p);
	running = task_current_donor(rq, p);
	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	if (running)
		put_prev_task(rq, p);

	p->static_prio = NICE_TO_PRIO(nice);
	set_load_weight(p, true);
	old_prio = p->prio;
	p->prio = effective_prio(p);

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);

	/*
	 * If the task increased its priority or is running and
	 * lowered its priority, then reschedule its CPU:
	 */
	p->sched_class->prio_changed(rq, p, old_prio);
}
EXPORT_SYMBOL(set_user_nice);

/*
 * is_nice_reduction - check if nice value is an actual reduction
 *
 * Similar to can_nice() but does not perform a capability check.
 *
 * @p: task
 * @nice: nice value
 */
static bool is_nice_reduction(const struct task_struct *p, const int nice)
{
	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
	int nice_rlim = nice_to_rlimit(nice);

	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
}

/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
int can_nice(const struct task_struct *p, const int nice)
{
	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
}

#ifdef __ARCH_WANT_SYS_NICE

/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
	long nice, retval;

	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
	nice = task_nice(current) + increment;

	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
	if (increment < 0 && !can_nice(current, nice))
		return -EPERM;

	retval = security_task_setnice(current, nice);
	if (retval)
		return retval;

	set_user_nice(current, nice);
	return 0;
}

#endif

/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * Return: The priority value as seen by users in /proc.
 *
 * sched policy         return value   kernel prio    user prio/nice
 *
 * normal, batch, idle     [0 ... 39]  [100 ... 139]          0/[-20 ... 19]
 * fifo, rr             [-2 ... -100]     [98 ... 0]  [1 ... 99]
 * deadline                     -101             -1           0
 */
int task_prio(const struct task_struct *p)
{
	return p->prio - MAX_RT_PRIO;
}

/**
 * idle_cpu - is a given CPU idle currently?
 * @cpu: the processor in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (rq->curr != rq->idle)
		return 0;

	if (rq->nr_running)
		return 0;

#ifdef CONFIG_SMP
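	/* Wakeups queued remotely (rq->ttwu_pending) are about to land here. */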
	if (rq->ttwu_pending)
		return 0;
#endif

	return 1;
}

/**
 * available_idle_cpu - is a given CPU idle for enqueuing work.
 * @cpu: the CPU in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int available_idle_cpu(int cpu)
{
	if (!idle_cpu(cpu))
		return 0;

	if (vcpu_is_preempted(cpu))
		return 0;

	return 1;
}

/**
 * idle_task - return the idle task for a given CPU.
 * @cpu: the processor in question.
 *
 * Return: The idle task for the CPU @cpu.
 */
struct task_struct *idle_task(int cpu)
{
	return cpu_rq(cpu)->idle;
}

#ifdef CONFIG_SCHED_CORE
int sched_core_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (sched_core_enabled(rq) && rq->curr == rq->idle)
		return 1;

	return idle_cpu(cpu);
}

#endif

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 *
 * The task of @pid, if found. %NULL otherwise.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}

static struct task_struct *find_get_task(pid_t pid)
{
	struct task_struct *p;
	guard(rcu)();

	p = find_process_by_pid(pid);
	if (likely(p))
		get_task_struct(p);

	return p;
}

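/*
 * Scope-based cleanup helper: CLASS(find_get_task, p)(pid) below yields a
 * reference-counted task (or NULL) and drops the reference via
 * put_task_struct() automatically when 'p' goes out of scope.
 */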
DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
	     find_get_task(pid), pid_t pid)

/*
 * sched_setparam() passes in -1 for its policy, to let the functions
 * it calls know not to change it.
 */
#define SETPARAM_POLICY	-1

static void __setscheduler_params(struct task_struct *p,
				  const struct sched_attr *attr)
{
	int policy = attr->sched_policy;

	if (policy == SETPARAM_POLICY)
		policy = p->policy;

	p->policy = policy;

	if (dl_policy(policy))
		__setparam_dl(p, attr);
	else if (fair_policy(policy))
		__setparam_fair(p, attr);

	/* rt-policy tasks do not have a timerslack */
	if (rt_or_dl_task_policy(p)) {
		p->timer_slack_ns = 0;
	} else if (p->timer_slack_ns == 0) {
		/* when switching back to non-rt policy, restore timerslack */
		p->timer_slack_ns = p->default_timer_slack_ns;
	}

	/*
	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
	 * !rt_policy. Always setting this ensures that things like
	 * getparam()/getattr() don't report silly values for !rt tasks.
	 */
	p->rt_priority = attr->sched_priority;
	p->normal_prio = normal_prio(p);
	set_load_weight(p, true);
}

/*
 * Check the target process has a UID that matches the current process's:
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	guard(rcu)();

	pcred = __task_cred(p);
	return (uid_eq(cred->euid, pcred->euid) ||
		uid_eq(cred->euid, pcred->uid));
}

#ifdef CONFIG_UCLAMP_TASK

static int uclamp_validate(struct task_struct *p,
			   const struct sched_attr *attr)
{
	int util_min = p->uclamp_req[UCLAMP_MIN].value;
	int util_max = p->uclamp_req[UCLAMP_MAX].value;

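	/*
	 * A requested sched_util_{min,max} of -1 means "reset to default";
	 * the '+ 1' in the range checks below keeps -1 from tripping -EINVAL
	 * here (uclamp_reset() acts on it later).
	 */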
	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
		util_min = attr->sched_util_min;

		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
		util_max = attr->sched_util_max;

		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (util_min != -1 && util_max != -1 && util_min > util_max)
		return -EINVAL;

	/*
	 * We have valid uclamp attributes; make sure uclamp is enabled.
	 *
	 * We need to do that here, because enabling static branches is a
	 * blocking operation which obviously cannot be done while holding
	 * scheduler locks.
	 */
	sched_uclamp_enable();

	return 0;
}

static bool uclamp_reset(const struct sched_attr *attr,
			 enum uclamp_id clamp_id,
			 struct uclamp_se *uc_se)
{
	/* Reset on sched class change for a non user-defined clamp value. */
	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
	    !uc_se->user_defined)
		return true;

	/* Reset on sched_util_{min,max} == -1. */
	if (clamp_id == UCLAMP_MIN &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min == -1) {
		return true;
	}

	if (clamp_id == UCLAMP_MAX &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max == -1) {
		return true;
	}

	return false;
}

static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr)
{
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id) {
		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
		unsigned int value;

		if (!uclamp_reset(attr, clamp_id, uc_se))
			continue;

		/*
		 * RT by default have a 100% boost value that could be modified
		 * at runtime.
		 */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			value = sysctl_sched_uclamp_util_min_rt_default;
		else
			value = uclamp_none(clamp_id);

		uclamp_se_set(uc_se, value, false);

	}

	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
		return;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
			      attr->sched_util_min, true);
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
			      attr->sched_util_max, true);
	}
}

#else /* !CONFIG_UCLAMP_TASK: */

static inline int uclamp_validate(struct task_struct *p,
				  const struct sched_attr *attr)
{
	return -EOPNOTSUPP;
}
static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr) { }
#endif

/*
 * Allow unprivileged RT tasks to decrease priority.
 * Only issue a capable test if needed and only once to avoid an audit
 * event on permitted non-privileged operations:
 */
static int user_check_sched_setscheduler(struct task_struct *p,
					 const struct sched_attr *attr,
					 int policy, int reset_on_fork)
{
	if (fair_policy(policy)) {
		if (attr->sched_nice < task_nice(p) &&
		    !is_nice_reduction(p, attr->sched_nice))
			goto req_priv;
	}

	if (rt_policy(policy)) {
		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);

		/* Can't set/change the rt policy: */
		if (policy != p->policy && !rlim_rtprio)
			goto req_priv;

		/* Can't increase priority: */
		if (attr->sched_priority > p->rt_priority &&
		    attr->sched_priority > rlim_rtprio)
			goto req_priv;
	}

	/*
	 * Can't set/change SCHED_DEADLINE policy at all for now
	 * (safest behavior); in the future we would like to allow
	 * unprivileged DL tasks to increase their relative deadline
	 * or reduce their runtime (both ways reducing utilization)
	 */
	if (dl_policy(policy))
		goto req_priv;

	/*
	 * Treat SCHED_IDLE as nice 20. Only allow a switch to
	 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
	 */
	if (task_has_idle_policy(p) && !idle_policy(policy)) {
		if (!is_nice_reduction(p, task_nice(p)))
			goto req_priv;
	}

	/* Can't change other user's priorities: */
	if (!check_same_owner(p))
		goto req_priv;

	/* Normal users shall not reset the sched_reset_on_fork flag: */
	if (p->sched_reset_on_fork && !reset_on_fork)
		goto req_priv;

	return 0;

req_priv:
	if (!capable(CAP_SYS_NICE))
		return -EPERM;

	return 0;
}

int __sched_setscheduler(struct task_struct *p,
			 const struct sched_attr *attr,
			 bool user, bool pi)
{
	int oldpolicy = -1, policy = attr->sched_policy;
	int retval, oldprio, newprio, queued, running;
	const struct sched_class *prev_class, *next_class;
	struct balance_callback *head;
	struct rq_flags rf;
	int reset_on_fork;
	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
	struct rq *rq;
	bool cpuset_locked = false;

	/* The pi code expects interrupts enabled */
	BUG_ON(pi && in_interrupt());
recheck:
	/* Double check policy once rq lock held: */
	if (policy < 0) {
		reset_on_fork = p->sched_reset_on_fork;
		policy = oldpolicy = p->policy;
	} else {
		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);

		if (!valid_policy(policy))
			return -EINVAL;
	}

	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
		return -EINVAL;

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
	 * SCHED_BATCH and SCHED_IDLE is 0.
	 */
	if (attr->sched_priority > MAX_RT_PRIO-1)
		return -EINVAL;
	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
	    (rt_policy(policy) != (attr->sched_priority != 0)))
		return -EINVAL;

	if (user) {
		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
		if (retval)
			return retval;

		if (attr->sched_flags & SCHED_FLAG_SUGOV)
			return -EINVAL;

		retval = security_task_setscheduler(p);
		if (retval)
			return retval;
	}

	/* Update task specific "requested" clamps */
	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
		retval = uclamp_validate(p, attr);
		if (retval)
			return retval;
	}

	/*
	 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
	 * information.
	 */
	if (dl_policy(policy) || dl_policy(p->policy)) {
		cpuset_locked = true;
		cpuset_lock();
	}

	/*
	 * Make sure no PI-waiters arrive (or leave) while we are
	 * changing the priority of the task:
	 *
	 * To be able to change p->policy safely, the appropriate
	 * runqueue lock must be held.
	 */
	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);

	/*
	 * Changing the policy of the stop threads is a very bad idea:
	 */
	if (p == rq->stop) {
		retval = -EINVAL;
		goto unlock;
	}

	retval = scx_check_setscheduler(p, policy);
	if (retval)
		goto unlock;

	/*
	 * If not changing anything there's no need to proceed further,
	 * but store a possible modification of reset_on_fork.
	 */
	if (unlikely(policy == p->policy)) {
		if (fair_policy(policy) &&
		    (attr->sched_nice != task_nice(p) ||
		     (attr->sched_runtime != p->se.slice)))
			goto change;
		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
			goto change;
		if (dl_policy(policy) && dl_param_changed(p, attr))
			goto change;
		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
			goto change;

		p->sched_reset_on_fork = reset_on_fork;
		retval = 0;
		goto unlock;
	}
change:

	if (user) {
#ifdef CONFIG_RT_GROUP_SCHED
		/*
		 * Do not allow real-time tasks into groups that have no runtime
		 * assigned.
		 */
		if (rt_bandwidth_enabled() && rt_policy(policy) &&
		    task_group(p)->rt_bandwidth.rt_runtime == 0 &&
		    !task_group_is_autogroup(task_group(p))) {
			retval = -EPERM;
			goto unlock;
		}
#endif
#ifdef CONFIG_SMP
		if (dl_bandwidth_enabled() && dl_policy(policy) &&
		    !(attr->sched_flags & SCHED_FLAG_SUGOV)) {
			cpumask_t *span = rq->rd->span;

			/*
			 * Don't allow tasks with an affinity mask smaller than
			 * the entire root_domain to become SCHED_DEADLINE. We
			 * will also fail if there's no bandwidth available.
			 */
			if (!cpumask_subset(span, p->cpus_ptr) ||
			    rq->rd->dl_bw.bw == 0) {
				retval = -EPERM;
				goto unlock;
			}
		}
#endif
	}

	/* Re-check policy now with rq lock held: */
	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
		policy = oldpolicy = -1;
		task_rq_unlock(rq, p, &rf);
		if (cpuset_locked)
			cpuset_unlock();
		goto recheck;
	}

	/*
	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
	 * is available.
	 */
	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
		retval = -EBUSY;
		goto unlock;
	}

	p->sched_reset_on_fork = reset_on_fork;
	oldprio = p->prio;

	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
	if (pi) {
		/*
		 * Take priority boosted tasks into account. If the new
		 * effective priority is unchanged, we just store the new
		 * normal parameters and do not touch the scheduler class and
		 * the runqueue. This will be done when the task deboosts
		 * itself.
		 */
		newprio = rt_effective_prio(p, newprio);
		if (newprio == oldprio)
			queue_flags &= ~DEQUEUE_MOVE;
	}

	prev_class = p->sched_class;
	next_class = __setscheduler_class(policy, newprio);

	if (prev_class != next_class && p->se.sched_delayed)
		dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);

	queued = task_on_rq_queued(p);
	running = task_current_donor(rq, p);
	if (queued)
		dequeue_task(rq, p, queue_flags);
	if (running)
		put_prev_task(rq, p);

	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
		__setscheduler_params(p, attr);
		p->sched_class = next_class;
		p->prio = newprio;
	}
	__setscheduler_uclamp(p, attr);
	check_class_changing(rq, p, prev_class);

	if (queued) {
		/*
		 * We enqueue to tail when the priority of a task is
		 * increased (user space view).
		 */
		if (oldprio < p->prio)
			queue_flags |= ENQUEUE_HEAD;

		enqueue_task(rq, p, queue_flags);
	}
	if (running)
		set_next_task(rq, p);

	check_class_changed(rq, p, prev_class, oldprio);

	/* Avoid rq from going away on us: */
	preempt_disable();
	head = splice_balance_callbacks(rq);
	task_rq_unlock(rq, p, &rf);

	if (pi) {
		if (cpuset_locked)
			cpuset_unlock();
		rt_mutex_adjust_pi(p);
	}

	/* Run balance callbacks after we've adjusted the PI chain: */
	balance_callbacks(rq, head);
	preempt_enable();

	return 0;

unlock:
	task_rq_unlock(rq, p, &rf);
	if (cpuset_locked)
		cpuset_unlock();
	return retval;
}

static int _sched_setscheduler(struct task_struct *p, int policy,
			       const struct sched_param *param, bool check)
{
	struct sched_attr attr = {
		.sched_policy = policy,
		.sched_priority = param->sched_priority,
		.sched_nice = PRIO_TO_NICE(p->static_prio),
	};

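	/* For fair tasks, sched_attr.sched_runtime carries the (custom) slice. */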
768857b158dSPeter Zijlstra if (p->se.custom_slice)
769857b158dSPeter Zijlstra attr.sched_runtime = p->se.slice;
770857b158dSPeter Zijlstra
77104746ed8SIngo Molnar /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
77204746ed8SIngo Molnar if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
77304746ed8SIngo Molnar attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
77404746ed8SIngo Molnar policy &= ~SCHED_RESET_ON_FORK;
77504746ed8SIngo Molnar attr.sched_policy = policy;
77604746ed8SIngo Molnar }
77704746ed8SIngo Molnar
77804746ed8SIngo Molnar return __sched_setscheduler(p, &attr, check, true);
77904746ed8SIngo Molnar }
78004746ed8SIngo Molnar /**
78104746ed8SIngo Molnar * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
78204746ed8SIngo Molnar * @p: the task in question.
78304746ed8SIngo Molnar * @policy: new policy.
78404746ed8SIngo Molnar * @param: structure containing the new RT priority.
78504746ed8SIngo Molnar *
78604746ed8SIngo Molnar * Use sched_set_fifo(), read its comment.
78704746ed8SIngo Molnar *
78804746ed8SIngo Molnar * Return: 0 on success. An error code otherwise.
78904746ed8SIngo Molnar *
79004746ed8SIngo Molnar * NOTE that the task may be already dead.
79104746ed8SIngo Molnar */
sched_setscheduler(struct task_struct * p,int policy,const struct sched_param * param)79204746ed8SIngo Molnar int sched_setscheduler(struct task_struct *p, int policy,
79304746ed8SIngo Molnar const struct sched_param *param)
79404746ed8SIngo Molnar {
79504746ed8SIngo Molnar return _sched_setscheduler(p, policy, param, true);
79604746ed8SIngo Molnar }
79704746ed8SIngo Molnar
sched_setattr(struct task_struct * p,const struct sched_attr * attr)79804746ed8SIngo Molnar int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
79904746ed8SIngo Molnar {
80004746ed8SIngo Molnar return __sched_setscheduler(p, attr, true, true);
80104746ed8SIngo Molnar }
80204746ed8SIngo Molnar
sched_setattr_nocheck(struct task_struct * p,const struct sched_attr * attr)80304746ed8SIngo Molnar int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
80404746ed8SIngo Molnar {
80504746ed8SIngo Molnar return __sched_setscheduler(p, attr, false, true);
80604746ed8SIngo Molnar }
80704746ed8SIngo Molnar EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
80804746ed8SIngo Molnar
80904746ed8SIngo Molnar /**
810402de7fcSIngo Molnar * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernel-space.
81104746ed8SIngo Molnar * @p: the task in question.
81204746ed8SIngo Molnar * @policy: new policy.
81304746ed8SIngo Molnar * @param: structure containing the new RT priority.
81404746ed8SIngo Molnar *
81504746ed8SIngo Molnar * Just like sched_setscheduler, only don't bother checking if the
81604746ed8SIngo Molnar * current context has permission. For example, this is needed in
81704746ed8SIngo Molnar * stop_machine(): we create temporary high priority worker threads,
81804746ed8SIngo Molnar * but our caller might not have that capability.
81904746ed8SIngo Molnar *
82004746ed8SIngo Molnar * Return: 0 on success. An error code otherwise.
82104746ed8SIngo Molnar */
sched_setscheduler_nocheck(struct task_struct * p,int policy,const struct sched_param * param)82204746ed8SIngo Molnar int sched_setscheduler_nocheck(struct task_struct *p, int policy,
82304746ed8SIngo Molnar const struct sched_param *param)
82404746ed8SIngo Molnar {
82504746ed8SIngo Molnar return _sched_setscheduler(p, policy, param, false);
82604746ed8SIngo Molnar }
82704746ed8SIngo Molnar
82804746ed8SIngo Molnar /*
82904746ed8SIngo Molnar * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
83004746ed8SIngo Molnar * incapable of resource management, which is the one thing an OS really should
83104746ed8SIngo Molnar * be doing.
83204746ed8SIngo Molnar *
83304746ed8SIngo Molnar * This is of course the reason it is limited to privileged users only.
83404746ed8SIngo Molnar *
83504746ed8SIngo Molnar * Worse still; it is fundamentally impossible to compose static priority
83604746ed8SIngo Molnar * workloads. You cannot take two correctly working static prio workloads
83704746ed8SIngo Molnar * and smash them together and still expect them to work.
83804746ed8SIngo Molnar *
83904746ed8SIngo Molnar * For this reason 'all' FIFO tasks the kernel creates are basically at:
84004746ed8SIngo Molnar *
84104746ed8SIngo Molnar * MAX_RT_PRIO / 2
84204746ed8SIngo Molnar *
84304746ed8SIngo Molnar * The administrator _MUST_ configure the system, the kernel simply doesn't
84404746ed8SIngo Molnar * know enough information to make a sensible choice.
84504746ed8SIngo Molnar */
sched_set_fifo(struct task_struct * p)84604746ed8SIngo Molnar void sched_set_fifo(struct task_struct *p)
84704746ed8SIngo Molnar {
84804746ed8SIngo Molnar struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
84904746ed8SIngo Molnar WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
85004746ed8SIngo Molnar }
85104746ed8SIngo Molnar EXPORT_SYMBOL_GPL(sched_set_fifo);
85204746ed8SIngo Molnar
85304746ed8SIngo Molnar /*
85404746ed8SIngo Molnar * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
85504746ed8SIngo Molnar */
sched_set_fifo_low(struct task_struct * p)85604746ed8SIngo Molnar void sched_set_fifo_low(struct task_struct *p)
85704746ed8SIngo Molnar {
85804746ed8SIngo Molnar struct sched_param sp = { .sched_priority = 1 };
85904746ed8SIngo Molnar WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
86004746ed8SIngo Molnar }
86104746ed8SIngo Molnar EXPORT_SYMBOL_GPL(sched_set_fifo_low);
86204746ed8SIngo Molnar
sched_set_normal(struct task_struct * p,int nice)86304746ed8SIngo Molnar void sched_set_normal(struct task_struct *p, int nice)
86404746ed8SIngo Molnar {
86504746ed8SIngo Molnar struct sched_attr attr = {
86604746ed8SIngo Molnar .sched_policy = SCHED_NORMAL,
86704746ed8SIngo Molnar .sched_nice = nice,
86804746ed8SIngo Molnar };
86904746ed8SIngo Molnar WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
87004746ed8SIngo Molnar }
87104746ed8SIngo Molnar EXPORT_SYMBOL_GPL(sched_set_normal);
87204746ed8SIngo Molnar
87304746ed8SIngo Molnar static int
do_sched_setscheduler(pid_t pid,int policy,struct sched_param __user * param)87404746ed8SIngo Molnar do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
87504746ed8SIngo Molnar {
87604746ed8SIngo Molnar struct sched_param lparam;
87704746ed8SIngo Molnar
8781a5d3492SColin Ian King if (unlikely(!param || pid < 0))
87904746ed8SIngo Molnar return -EINVAL;
88004746ed8SIngo Molnar if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
88104746ed8SIngo Molnar return -EFAULT;
88204746ed8SIngo Molnar
88304746ed8SIngo Molnar CLASS(find_get_task, p)(pid);
88404746ed8SIngo Molnar if (!p)
88504746ed8SIngo Molnar return -ESRCH;
88604746ed8SIngo Molnar
88704746ed8SIngo Molnar return sched_setscheduler(p, policy, &lparam);
88804746ed8SIngo Molnar }
88904746ed8SIngo Molnar
89004746ed8SIngo Molnar /*
89104746ed8SIngo Molnar * Mimics kernel/events/core.c perf_copy_attr().
89204746ed8SIngo Molnar */
sched_copy_attr(struct sched_attr __user * uattr,struct sched_attr * attr)89304746ed8SIngo Molnar static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
89404746ed8SIngo Molnar {
89504746ed8SIngo Molnar u32 size;
89604746ed8SIngo Molnar int ret;
89704746ed8SIngo Molnar
89804746ed8SIngo Molnar /* Zero the full structure, so that a short copy will be nice: */
89904746ed8SIngo Molnar memset(attr, 0, sizeof(*attr));
90004746ed8SIngo Molnar
90104746ed8SIngo Molnar ret = get_user(size, &uattr->size);
90204746ed8SIngo Molnar if (ret)
90304746ed8SIngo Molnar return ret;
90404746ed8SIngo Molnar
90504746ed8SIngo Molnar /* ABI compatibility quirk: */
90604746ed8SIngo Molnar if (!size)
90704746ed8SIngo Molnar size = SCHED_ATTR_SIZE_VER0;
90804746ed8SIngo Molnar if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
90904746ed8SIngo Molnar goto err_size;
91004746ed8SIngo Molnar
91104746ed8SIngo Molnar ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
91204746ed8SIngo Molnar if (ret) {
91304746ed8SIngo Molnar if (ret == -E2BIG)
91404746ed8SIngo Molnar goto err_size;
91504746ed8SIngo Molnar return ret;
91604746ed8SIngo Molnar }
91704746ed8SIngo Molnar
91804746ed8SIngo Molnar if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
91904746ed8SIngo Molnar size < SCHED_ATTR_SIZE_VER1)
92004746ed8SIngo Molnar return -EINVAL;
92104746ed8SIngo Molnar
92204746ed8SIngo Molnar /*
92304746ed8SIngo Molnar * XXX: Do we want to be lenient like existing syscalls; or do we want
92404746ed8SIngo Molnar * to be strict and return an error on out-of-bounds values?
92504746ed8SIngo Molnar */
92604746ed8SIngo Molnar attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
92704746ed8SIngo Molnar
92804746ed8SIngo Molnar return 0;
92904746ed8SIngo Molnar
93004746ed8SIngo Molnar err_size:
93104746ed8SIngo Molnar put_user(sizeof(*attr), &uattr->size);
93204746ed8SIngo Molnar return -E2BIG;
93304746ed8SIngo Molnar }
93404746ed8SIngo Molnar
get_params(struct task_struct * p,struct sched_attr * attr)93504746ed8SIngo Molnar static void get_params(struct task_struct *p, struct sched_attr *attr)
93604746ed8SIngo Molnar {
937857b158dSPeter Zijlstra if (task_has_dl_policy(p)) {
93804746ed8SIngo Molnar __getparam_dl(p, attr);
939857b158dSPeter Zijlstra } else if (task_has_rt_policy(p)) {
94004746ed8SIngo Molnar attr->sched_priority = p->rt_priority;
941857b158dSPeter Zijlstra } else {
94204746ed8SIngo Molnar attr->sched_nice = task_nice(p);
943857b158dSPeter Zijlstra attr->sched_runtime = p->se.slice;
944857b158dSPeter Zijlstra }
94504746ed8SIngo Molnar }
94604746ed8SIngo Molnar
94704746ed8SIngo Molnar /**
94804746ed8SIngo Molnar * sys_sched_setscheduler - set/change the scheduler policy and RT priority
94904746ed8SIngo Molnar * @pid: the pid in question.
95004746ed8SIngo Molnar * @policy: new policy.
95104746ed8SIngo Molnar * @param: structure containing the new RT priority.
95204746ed8SIngo Molnar *
95304746ed8SIngo Molnar * Return: 0 on success. An error code otherwise.
95404746ed8SIngo Molnar */
SYSCALL_DEFINE3(sched_setscheduler,pid_t,pid,int,policy,struct sched_param __user *,param)95504746ed8SIngo Molnar SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
95604746ed8SIngo Molnar {
95704746ed8SIngo Molnar if (policy < 0)
95804746ed8SIngo Molnar return -EINVAL;
95904746ed8SIngo Molnar
96004746ed8SIngo Molnar return do_sched_setscheduler(pid, policy, param);
96104746ed8SIngo Molnar }
96204746ed8SIngo Molnar
96304746ed8SIngo Molnar /**
96404746ed8SIngo Molnar * sys_sched_setparam - set/change the RT priority of a thread
96504746ed8SIngo Molnar * @pid: the pid in question.
96604746ed8SIngo Molnar * @param: structure containing the new RT priority.
96704746ed8SIngo Molnar *
96804746ed8SIngo Molnar * Return: 0 on success. An error code otherwise.
96904746ed8SIngo Molnar */
SYSCALL_DEFINE2(sched_setparam,pid_t,pid,struct sched_param __user *,param)97004746ed8SIngo Molnar SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
97104746ed8SIngo Molnar {
97204746ed8SIngo Molnar return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
97304746ed8SIngo Molnar }
97404746ed8SIngo Molnar
97504746ed8SIngo Molnar /**
97604746ed8SIngo Molnar * sys_sched_setattr - same as above, but with extended sched_attr
97704746ed8SIngo Molnar * @pid: the pid in question.
97804746ed8SIngo Molnar * @uattr: structure containing the extended parameters.
97904746ed8SIngo Molnar * @flags: for future extension.
98004746ed8SIngo Molnar */
SYSCALL_DEFINE3(sched_setattr,pid_t,pid,struct sched_attr __user *,uattr,unsigned int,flags)98104746ed8SIngo Molnar SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
98204746ed8SIngo Molnar unsigned int, flags)
98304746ed8SIngo Molnar {
98404746ed8SIngo Molnar struct sched_attr attr;
98504746ed8SIngo Molnar int retval;
98604746ed8SIngo Molnar
9871a5d3492SColin Ian King if (unlikely(!uattr || pid < 0 || flags))
98804746ed8SIngo Molnar return -EINVAL;
98904746ed8SIngo Molnar
99004746ed8SIngo Molnar retval = sched_copy_attr(uattr, &attr);
99104746ed8SIngo Molnar if (retval)
99204746ed8SIngo Molnar return retval;
99304746ed8SIngo Molnar
99404746ed8SIngo Molnar if ((int)attr.sched_policy < 0)
99504746ed8SIngo Molnar return -EINVAL;
99604746ed8SIngo Molnar if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
99704746ed8SIngo Molnar attr.sched_policy = SETPARAM_POLICY;
99804746ed8SIngo Molnar
99904746ed8SIngo Molnar CLASS(find_get_task, p)(pid);
100004746ed8SIngo Molnar if (!p)
100104746ed8SIngo Molnar return -ESRCH;
100204746ed8SIngo Molnar
100304746ed8SIngo Molnar if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
100404746ed8SIngo Molnar get_params(p, &attr);
100504746ed8SIngo Molnar
100604746ed8SIngo Molnar return sched_setattr(p, &attr);
100704746ed8SIngo Molnar }
100804746ed8SIngo Molnar
100904746ed8SIngo Molnar /**
101004746ed8SIngo Molnar * sys_sched_getscheduler - get the policy (scheduling class) of a thread
101104746ed8SIngo Molnar * @pid: the pid in question.
101204746ed8SIngo Molnar *
101304746ed8SIngo Molnar * Return: On success, the policy of the thread. Otherwise, a negative error
101404746ed8SIngo Molnar * code.
101504746ed8SIngo Molnar */
SYSCALL_DEFINE1(sched_getscheduler,pid_t,pid)101604746ed8SIngo Molnar SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
101704746ed8SIngo Molnar {
101804746ed8SIngo Molnar struct task_struct *p;
101904746ed8SIngo Molnar int retval;
102004746ed8SIngo Molnar
102104746ed8SIngo Molnar if (pid < 0)
102204746ed8SIngo Molnar return -EINVAL;
102304746ed8SIngo Molnar
102404746ed8SIngo Molnar guard(rcu)();
102504746ed8SIngo Molnar p = find_process_by_pid(pid);
102604746ed8SIngo Molnar if (!p)
102704746ed8SIngo Molnar return -ESRCH;
102804746ed8SIngo Molnar
102904746ed8SIngo Molnar retval = security_task_getscheduler(p);
103004746ed8SIngo Molnar if (!retval) {
103104746ed8SIngo Molnar retval = p->policy;
103204746ed8SIngo Molnar if (p->sched_reset_on_fork)
103304746ed8SIngo Molnar retval |= SCHED_RESET_ON_FORK;
103404746ed8SIngo Molnar }
103504746ed8SIngo Molnar return retval;
103604746ed8SIngo Molnar }
103704746ed8SIngo Molnar
103804746ed8SIngo Molnar /**
103904746ed8SIngo Molnar * sys_sched_getparam - get the RT priority of a thread
104004746ed8SIngo Molnar * @pid: the pid in question.
104104746ed8SIngo Molnar * @param: structure containing the RT priority.
104204746ed8SIngo Molnar *
104304746ed8SIngo Molnar * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
104404746ed8SIngo Molnar * code.
104504746ed8SIngo Molnar */
SYSCALL_DEFINE2(sched_getparam,pid_t,pid,struct sched_param __user *,param)104604746ed8SIngo Molnar SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
104704746ed8SIngo Molnar {
104804746ed8SIngo Molnar struct sched_param lp = { .sched_priority = 0 };
104904746ed8SIngo Molnar struct task_struct *p;
105004746ed8SIngo Molnar int retval;
105104746ed8SIngo Molnar
10521a5d3492SColin Ian King if (unlikely(!param || pid < 0))
105304746ed8SIngo Molnar return -EINVAL;
105404746ed8SIngo Molnar
105504746ed8SIngo Molnar scoped_guard (rcu) {
105604746ed8SIngo Molnar p = find_process_by_pid(pid);
105704746ed8SIngo Molnar if (!p)
105804746ed8SIngo Molnar return -ESRCH;
105904746ed8SIngo Molnar
106004746ed8SIngo Molnar retval = security_task_getscheduler(p);
106104746ed8SIngo Molnar if (retval)
106204746ed8SIngo Molnar return retval;
106304746ed8SIngo Molnar
106404746ed8SIngo Molnar if (task_has_rt_policy(p))
106504746ed8SIngo Molnar lp.sched_priority = p->rt_priority;
106604746ed8SIngo Molnar }
106704746ed8SIngo Molnar
106804746ed8SIngo Molnar /*
106904746ed8SIngo Molnar * This one might sleep, we cannot do it with a spinlock held ...
107004746ed8SIngo Molnar */
107104746ed8SIngo Molnar return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
107204746ed8SIngo Molnar }
107304746ed8SIngo Molnar
107404746ed8SIngo Molnar /**
107504746ed8SIngo Molnar * sys_sched_getattr - similar to sched_getparam, but with sched_attr
107604746ed8SIngo Molnar * @pid: the pid in question.
107704746ed8SIngo Molnar * @uattr: structure containing the extended parameters.
107804746ed8SIngo Molnar * @usize: sizeof(attr) for fwd/bwd comp.
107904746ed8SIngo Molnar * @flags: for future extension.
108004746ed8SIngo Molnar */
SYSCALL_DEFINE4(sched_getattr,pid_t,pid,struct sched_attr __user *,uattr,unsigned int,usize,unsigned int,flags)108104746ed8SIngo Molnar SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
108204746ed8SIngo Molnar unsigned int, usize, unsigned int, flags)
108304746ed8SIngo Molnar {
108404746ed8SIngo Molnar struct sched_attr kattr = { };
108504746ed8SIngo Molnar struct task_struct *p;
108604746ed8SIngo Molnar int retval;
108704746ed8SIngo Molnar
10881a5d3492SColin Ian King if (unlikely(!uattr || pid < 0 || usize > PAGE_SIZE ||
10891a5d3492SColin Ian King usize < SCHED_ATTR_SIZE_VER0 || flags))
109004746ed8SIngo Molnar return -EINVAL;
109104746ed8SIngo Molnar
109204746ed8SIngo Molnar scoped_guard (rcu) {
109304746ed8SIngo Molnar p = find_process_by_pid(pid);
109404746ed8SIngo Molnar if (!p)
109504746ed8SIngo Molnar return -ESRCH;
109604746ed8SIngo Molnar
109704746ed8SIngo Molnar retval = security_task_getscheduler(p);
109804746ed8SIngo Molnar if (retval)
109904746ed8SIngo Molnar return retval;
110004746ed8SIngo Molnar
110104746ed8SIngo Molnar kattr.sched_policy = p->policy;
110204746ed8SIngo Molnar if (p->sched_reset_on_fork)
110304746ed8SIngo Molnar kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
110404746ed8SIngo Molnar get_params(p, &kattr);
110504746ed8SIngo Molnar kattr.sched_flags &= SCHED_FLAG_ALL;
110604746ed8SIngo Molnar
110704746ed8SIngo Molnar #ifdef CONFIG_UCLAMP_TASK
110804746ed8SIngo Molnar /*
110904746ed8SIngo Molnar * This could race with another potential updater, but this is fine
111004746ed8SIngo Molnar * because it'll correctly read the old or the new value. We don't need
111104746ed8SIngo Molnar * to guarantee who wins the race as long as it doesn't return garbage.
111204746ed8SIngo Molnar */
111304746ed8SIngo Molnar kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
111404746ed8SIngo Molnar kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
111504746ed8SIngo Molnar #endif
111604746ed8SIngo Molnar }
111704746ed8SIngo Molnar
1118112cca09SAleksa Sarai kattr.size = min(usize, sizeof(kattr));
1119112cca09SAleksa Sarai return copy_struct_to_user(uattr, usize, &kattr, sizeof(kattr), NULL);
112004746ed8SIngo Molnar }
112104746ed8SIngo Molnar
112204746ed8SIngo Molnar #ifdef CONFIG_SMP
dl_task_check_affinity(struct task_struct * p,const struct cpumask * mask)112304746ed8SIngo Molnar int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
112404746ed8SIngo Molnar {
112504746ed8SIngo Molnar /*
112604746ed8SIngo Molnar * If the task isn't a deadline task or admission control is
112704746ed8SIngo Molnar * disabled then we don't care about affinity changes.
112804746ed8SIngo Molnar */
112904746ed8SIngo Molnar if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
113004746ed8SIngo Molnar return 0;
113104746ed8SIngo Molnar
113204746ed8SIngo Molnar /*
113393940fbdSChristian Loehle * The special/sugov task isn't part of regular bandwidth/admission
113493940fbdSChristian Loehle * control so let userspace change affinities.
113593940fbdSChristian Loehle */
113693940fbdSChristian Loehle if (dl_entity_is_special(&p->dl))
113793940fbdSChristian Loehle return 0;
113893940fbdSChristian Loehle
113993940fbdSChristian Loehle /*
114004746ed8SIngo Molnar * Since bandwidth control happens on root_domain basis,
114104746ed8SIngo Molnar * if admission test is enabled, we only admit -deadline
114204746ed8SIngo Molnar * tasks allowed to run on all the CPUs in the task's
114304746ed8SIngo Molnar * root_domain.
114404746ed8SIngo Molnar */
114504746ed8SIngo Molnar guard(rcu)();
114604746ed8SIngo Molnar if (!cpumask_subset(task_rq(p)->rd->span, mask))
114704746ed8SIngo Molnar return -EBUSY;
114804746ed8SIngo Molnar
114904746ed8SIngo Molnar return 0;
115004746ed8SIngo Molnar }
115104746ed8SIngo Molnar #endif /* CONFIG_SMP */
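/*
 * Editorial sketch (assumptions noted): with deadline admission control
 * enabled, shrinking a SCHED_DEADLINE task's affinity below its root domain
 * span is expected to fail with EBUSY, which is what the check above
 * enforces.  Assumes the calling thread already runs under SCHED_DEADLINE
 * on a multi-CPU root domain; error handling is elided.
 *
 *	cpu_set_t one_cpu;
 *
 *	CPU_ZERO(&one_cpu);
 *	CPU_SET(0, &one_cpu);
 *	if (sched_setaffinity(0, sizeof(one_cpu), &one_cpu) == -1 &&
 *	    errno == EBUSY)
 *		fprintf(stderr, "mask narrower than root domain rejected\n");
 */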
115204746ed8SIngo Molnar
115304746ed8SIngo Molnar int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
115404746ed8SIngo Molnar {
115504746ed8SIngo Molnar int retval;
115604746ed8SIngo Molnar cpumask_var_t cpus_allowed, new_mask;
115704746ed8SIngo Molnar
115804746ed8SIngo Molnar if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
115904746ed8SIngo Molnar return -ENOMEM;
116004746ed8SIngo Molnar
116104746ed8SIngo Molnar if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
116204746ed8SIngo Molnar retval = -ENOMEM;
116304746ed8SIngo Molnar goto out_free_cpus_allowed;
116404746ed8SIngo Molnar }
116504746ed8SIngo Molnar
116604746ed8SIngo Molnar cpuset_cpus_allowed(p, cpus_allowed);
116704746ed8SIngo Molnar cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
116804746ed8SIngo Molnar
116904746ed8SIngo Molnar ctx->new_mask = new_mask;
117004746ed8SIngo Molnar ctx->flags |= SCA_CHECK;
117104746ed8SIngo Molnar
117204746ed8SIngo Molnar retval = dl_task_check_affinity(p, new_mask);
117304746ed8SIngo Molnar if (retval)
117404746ed8SIngo Molnar goto out_free_new_mask;
117504746ed8SIngo Molnar
117604746ed8SIngo Molnar retval = __set_cpus_allowed_ptr(p, ctx);
117704746ed8SIngo Molnar if (retval)
117804746ed8SIngo Molnar goto out_free_new_mask;
117904746ed8SIngo Molnar
118004746ed8SIngo Molnar cpuset_cpus_allowed(p, cpus_allowed);
118104746ed8SIngo Molnar if (!cpumask_subset(new_mask, cpus_allowed)) {
118204746ed8SIngo Molnar /*
118304746ed8SIngo Molnar * We must have raced with a concurrent cpuset update.
118404746ed8SIngo Molnar * Just reset the cpumask to the cpuset's cpus_allowed.
118504746ed8SIngo Molnar */
118604746ed8SIngo Molnar cpumask_copy(new_mask, cpus_allowed);
118704746ed8SIngo Molnar
118804746ed8SIngo Molnar /*
118904746ed8SIngo Molnar * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
119004746ed8SIngo Molnar * will restore the previous user_cpus_ptr value.
119104746ed8SIngo Molnar *
119204746ed8SIngo Molnar * In the unlikely event a previous user_cpus_ptr exists,
119304746ed8SIngo Molnar * we need to further restrict the mask to what is allowed
119404746ed8SIngo Molnar * by that old user_cpus_ptr.
119504746ed8SIngo Molnar */
119604746ed8SIngo Molnar if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
119704746ed8SIngo Molnar bool empty = !cpumask_and(new_mask, new_mask,
119804746ed8SIngo Molnar ctx->user_mask);
119904746ed8SIngo Molnar
120070ee7947SJosh Don if (empty)
120104746ed8SIngo Molnar cpumask_copy(new_mask, cpus_allowed);
120204746ed8SIngo Molnar }
120304746ed8SIngo Molnar __set_cpus_allowed_ptr(p, ctx);
120404746ed8SIngo Molnar retval = -EINVAL;
120504746ed8SIngo Molnar }
120604746ed8SIngo Molnar
120704746ed8SIngo Molnar out_free_new_mask:
120804746ed8SIngo Molnar free_cpumask_var(new_mask);
120904746ed8SIngo Molnar out_free_cpus_allowed:
121004746ed8SIngo Molnar free_cpumask_var(cpus_allowed);
121104746ed8SIngo Molnar return retval;
121204746ed8SIngo Molnar }
121304746ed8SIngo Molnar
121404746ed8SIngo Molnar long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
121504746ed8SIngo Molnar {
121604746ed8SIngo Molnar struct affinity_context ac;
121704746ed8SIngo Molnar struct cpumask *user_mask;
121804746ed8SIngo Molnar int retval;
121904746ed8SIngo Molnar
122004746ed8SIngo Molnar CLASS(find_get_task, p)(pid);
122104746ed8SIngo Molnar if (!p)
122204746ed8SIngo Molnar return -ESRCH;
122304746ed8SIngo Molnar
122404746ed8SIngo Molnar if (p->flags & PF_NO_SETAFFINITY)
122504746ed8SIngo Molnar return -EINVAL;
122604746ed8SIngo Molnar
122704746ed8SIngo Molnar if (!check_same_owner(p)) {
122804746ed8SIngo Molnar guard(rcu)();
122904746ed8SIngo Molnar if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
123004746ed8SIngo Molnar return -EPERM;
123104746ed8SIngo Molnar }
123204746ed8SIngo Molnar
123304746ed8SIngo Molnar retval = security_task_setscheduler(p);
123404746ed8SIngo Molnar if (retval)
123504746ed8SIngo Molnar return retval;
123604746ed8SIngo Molnar
123704746ed8SIngo Molnar /*
123804746ed8SIngo Molnar * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
123904746ed8SIngo Molnar * alloc_user_cpus_ptr() returns NULL.
124004746ed8SIngo Molnar */
124104746ed8SIngo Molnar user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
124204746ed8SIngo Molnar if (user_mask) {
124304746ed8SIngo Molnar cpumask_copy(user_mask, in_mask);
124404746ed8SIngo Molnar } else if (IS_ENABLED(CONFIG_SMP)) {
124504746ed8SIngo Molnar return -ENOMEM;
124604746ed8SIngo Molnar }
124704746ed8SIngo Molnar
124804746ed8SIngo Molnar ac = (struct affinity_context){
124904746ed8SIngo Molnar .new_mask = in_mask,
125004746ed8SIngo Molnar .user_mask = user_mask,
125104746ed8SIngo Molnar .flags = SCA_USER,
125204746ed8SIngo Molnar };
125304746ed8SIngo Molnar
125404746ed8SIngo Molnar retval = __sched_setaffinity(p, &ac);
125504746ed8SIngo Molnar kfree(ac.user_mask);
125604746ed8SIngo Molnar
125704746ed8SIngo Molnar return retval;
125804746ed8SIngo Molnar }
125904746ed8SIngo Molnar
126004746ed8SIngo Molnar static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
126104746ed8SIngo Molnar struct cpumask *new_mask)
126204746ed8SIngo Molnar {
126304746ed8SIngo Molnar if (len < cpumask_size())
126404746ed8SIngo Molnar cpumask_clear(new_mask);
126504746ed8SIngo Molnar else if (len > cpumask_size())
126604746ed8SIngo Molnar len = cpumask_size();
126704746ed8SIngo Molnar
126804746ed8SIngo Molnar return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
126904746ed8SIngo Molnar }
127004746ed8SIngo Molnar
127104746ed8SIngo Molnar /**
127204746ed8SIngo Molnar * sys_sched_setaffinity - set the CPU affinity of a process
127304746ed8SIngo Molnar * @pid: pid of the process
127404746ed8SIngo Molnar * @len: length in bytes of the bitmask pointed to by user_mask_ptr
127504746ed8SIngo Molnar * @user_mask_ptr: user-space pointer to the new CPU mask
127604746ed8SIngo Molnar *
127704746ed8SIngo Molnar * Return: 0 on success. An error code otherwise.
127804746ed8SIngo Molnar */
127904746ed8SIngo Molnar SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
128004746ed8SIngo Molnar unsigned long __user *, user_mask_ptr)
128104746ed8SIngo Molnar {
128204746ed8SIngo Molnar cpumask_var_t new_mask;
128304746ed8SIngo Molnar int retval;
128404746ed8SIngo Molnar
128504746ed8SIngo Molnar if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
128604746ed8SIngo Molnar return -ENOMEM;
128704746ed8SIngo Molnar
128804746ed8SIngo Molnar retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
128904746ed8SIngo Molnar if (retval == 0)
129004746ed8SIngo Molnar retval = sched_setaffinity(pid, new_mask);
129104746ed8SIngo Molnar free_cpumask_var(new_mask);
129204746ed8SIngo Molnar return retval;
129304746ed8SIngo Molnar }
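/*
 * Editorial example: pinning the calling thread to CPU 0 through the glibc
 * wrapper for this syscall.  Names are illustrative; a real caller should
 * check that CPU 0 is present and permitted by its cpuset.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	if (sched_setaffinity(0, sizeof(set), &set))	// pid 0 == caller
 *		perror("sched_setaffinity");
 */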
129404746ed8SIngo Molnar
129504746ed8SIngo Molnar long sched_getaffinity(pid_t pid, struct cpumask *mask)
129604746ed8SIngo Molnar {
129704746ed8SIngo Molnar struct task_struct *p;
129804746ed8SIngo Molnar int retval;
129904746ed8SIngo Molnar
130004746ed8SIngo Molnar guard(rcu)();
130104746ed8SIngo Molnar p = find_process_by_pid(pid);
130204746ed8SIngo Molnar if (!p)
130304746ed8SIngo Molnar return -ESRCH;
130404746ed8SIngo Molnar
130504746ed8SIngo Molnar retval = security_task_getscheduler(p);
130604746ed8SIngo Molnar if (retval)
130704746ed8SIngo Molnar return retval;
130804746ed8SIngo Molnar
130904746ed8SIngo Molnar guard(raw_spinlock_irqsave)(&p->pi_lock);
131004746ed8SIngo Molnar cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
131104746ed8SIngo Molnar
131204746ed8SIngo Molnar return 0;
131304746ed8SIngo Molnar }
131404746ed8SIngo Molnar
131504746ed8SIngo Molnar /**
131604746ed8SIngo Molnar * sys_sched_getaffinity - get the CPU affinity of a process
131704746ed8SIngo Molnar * @pid: pid of the process
131804746ed8SIngo Molnar * @len: length in bytes of the bitmask pointed to by user_mask_ptr
131904746ed8SIngo Molnar * @user_mask_ptr: user-space pointer to hold the current CPU mask
132004746ed8SIngo Molnar *
132104746ed8SIngo Molnar * Return: size of CPU mask copied to user_mask_ptr on success. An
132204746ed8SIngo Molnar * error code otherwise.
132304746ed8SIngo Molnar */
132404746ed8SIngo Molnar SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
132504746ed8SIngo Molnar unsigned long __user *, user_mask_ptr)
132604746ed8SIngo Molnar {
132704746ed8SIngo Molnar int ret;
132804746ed8SIngo Molnar cpumask_var_t mask;
132904746ed8SIngo Molnar
133004746ed8SIngo Molnar if ((len * BITS_PER_BYTE) < nr_cpu_ids)
133104746ed8SIngo Molnar return -EINVAL;
133204746ed8SIngo Molnar if (len & (sizeof(unsigned long)-1))
133304746ed8SIngo Molnar return -EINVAL;
133404746ed8SIngo Molnar
133504746ed8SIngo Molnar if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
133604746ed8SIngo Molnar return -ENOMEM;
133704746ed8SIngo Molnar
133804746ed8SIngo Molnar ret = sched_getaffinity(pid, mask);
133904746ed8SIngo Molnar if (ret == 0) {
134004746ed8SIngo Molnar unsigned int retlen = min(len, cpumask_size());
134104746ed8SIngo Molnar
134204746ed8SIngo Molnar if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
134304746ed8SIngo Molnar ret = -EFAULT;
134404746ed8SIngo Molnar else
134504746ed8SIngo Molnar ret = retlen;
134604746ed8SIngo Molnar }
134704746ed8SIngo Molnar free_cpumask_var(mask);
134804746ed8SIngo Molnar
134904746ed8SIngo Molnar return ret;
135004746ed8SIngo Molnar }
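/*
 * Editorial example: querying the caller's affinity mask.  Note that the
 * raw syscall returns the number of bytes copied (see above), whereas the
 * glibc wrapper used here returns 0 on success.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	cpu_set_t set;
 *
 *	if (sched_getaffinity(0, sizeof(set), &set) == 0) {
 *		for (int cpu = 0; cpu < CPU_SETSIZE; cpu++)
 *			if (CPU_ISSET(cpu, &set))
 *				printf("allowed on CPU %d\n", cpu);
 *	}
 */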
135104746ed8SIngo Molnar
135204746ed8SIngo Molnar static void do_sched_yield(void)
135304746ed8SIngo Molnar {
135404746ed8SIngo Molnar struct rq_flags rf;
135504746ed8SIngo Molnar struct rq *rq;
135604746ed8SIngo Molnar
135704746ed8SIngo Molnar rq = this_rq_lock_irq(&rf);
135804746ed8SIngo Molnar
135904746ed8SIngo Molnar schedstat_inc(rq->yld_count);
136004746ed8SIngo Molnar current->sched_class->yield_task(rq);
136104746ed8SIngo Molnar
136204746ed8SIngo Molnar preempt_disable();
136304746ed8SIngo Molnar rq_unlock_irq(rq, &rf);
136404746ed8SIngo Molnar sched_preempt_enable_no_resched();
136504746ed8SIngo Molnar
136604746ed8SIngo Molnar schedule();
136704746ed8SIngo Molnar }
136804746ed8SIngo Molnar
136904746ed8SIngo Molnar /**
137004746ed8SIngo Molnar * sys_sched_yield - yield the current processor to other threads.
137104746ed8SIngo Molnar *
137204746ed8SIngo Molnar * This function yields the current CPU to other tasks. If there are no
137304746ed8SIngo Molnar * other threads running on this CPU then this function will return.
137404746ed8SIngo Molnar *
137504746ed8SIngo Molnar * Return: 0.
137604746ed8SIngo Molnar */
137704746ed8SIngo Molnar SYSCALL_DEFINE0(sched_yield)
137804746ed8SIngo Molnar {
137904746ed8SIngo Molnar do_sched_yield();
138004746ed8SIngo Molnar return 0;
138104746ed8SIngo Molnar }
138204746ed8SIngo Molnar
138304746ed8SIngo Molnar /**
138404746ed8SIngo Molnar * yield - yield the current processor to other threads.
138504746ed8SIngo Molnar *
138604746ed8SIngo Molnar * Do not ever use this function, there's a 99% chance you're doing it wrong.
138704746ed8SIngo Molnar *
138804746ed8SIngo Molnar * The scheduler is at all times free to pick the calling task as the most
138904746ed8SIngo Molnar * eligible task to run, if removing the yield() call from your code breaks
139004746ed8SIngo Molnar * it, it's already broken.
139104746ed8SIngo Molnar *
139204746ed8SIngo Molnar * Typical broken usage is:
139304746ed8SIngo Molnar *
139404746ed8SIngo Molnar * while (!event)
139504746ed8SIngo Molnar * yield();
139604746ed8SIngo Molnar *
139704746ed8SIngo Molnar * where one assumes that yield() will let 'the other' process run that will
139804746ed8SIngo Molnar * make event true. If the current task is a SCHED_FIFO task, that will never
139904746ed8SIngo Molnar * happen. Never use yield() as a progress guarantee!!
140004746ed8SIngo Molnar *
140104746ed8SIngo Molnar * If you want to use yield() to wait for something, use wait_event().
140204746ed8SIngo Molnar * If you want to use yield() to be 'nice' for others, use cond_resched().
140304746ed8SIngo Molnar * If you still want to use yield(), do not!
140404746ed8SIngo Molnar */
140504746ed8SIngo Molnar void __sched yield(void)
140604746ed8SIngo Molnar {
140704746ed8SIngo Molnar set_current_state(TASK_RUNNING);
140804746ed8SIngo Molnar do_sched_yield();
140904746ed8SIngo Molnar }
141004746ed8SIngo Molnar EXPORT_SYMBOL(yield);
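/*
 * Editorial sketch of the wait_event() pattern recommended above instead of
 * a yield() polling loop.  'my_wq' and 'event' are illustrative names for a
 * caller-owned wait queue and condition.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool event;
 *
 *	// waiter: sleeps until 'event' becomes true
 *	wait_event(my_wq, event);
 *
 *	// producer: make the condition true, then wake the waiters
 *	event = true;
 *	wake_up(&my_wq);
 */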
141104746ed8SIngo Molnar
141204746ed8SIngo Molnar /**
141304746ed8SIngo Molnar * yield_to - yield the current processor to another thread in
141404746ed8SIngo Molnar * your thread group, or accelerate that thread toward the
141504746ed8SIngo Molnar * processor it's on.
141604746ed8SIngo Molnar * @p: target task
141704746ed8SIngo Molnar * @preempt: whether task preemption is allowed or not
141804746ed8SIngo Molnar *
141904746ed8SIngo Molnar * It's the caller's job to ensure that the target task struct
142004746ed8SIngo Molnar * can't go away on us before we can do any checks.
142104746ed8SIngo Molnar *
142204746ed8SIngo Molnar * Return:
142304746ed8SIngo Molnar * true (>0) if we indeed boosted the target task.
142404746ed8SIngo Molnar * false (0) if we failed to boost the target.
142504746ed8SIngo Molnar * -ESRCH if there's no task to yield to.
142604746ed8SIngo Molnar */
142704746ed8SIngo Molnar int __sched yield_to(struct task_struct *p, bool preempt)
142804746ed8SIngo Molnar {
142904746ed8SIngo Molnar struct task_struct *curr = current;
143004746ed8SIngo Molnar struct rq *rq, *p_rq;
143104746ed8SIngo Molnar int yielded = 0;
143204746ed8SIngo Molnar
14335d808c78STianchen Ding scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
143404746ed8SIngo Molnar rq = this_rq();
143504746ed8SIngo Molnar
143604746ed8SIngo Molnar again:
143704746ed8SIngo Molnar p_rq = task_rq(p);
143804746ed8SIngo Molnar /*
143904746ed8SIngo Molnar * If we're the only runnable task on the rq and target rq also
144004746ed8SIngo Molnar * has only one task, there's absolutely no point in yielding.
144104746ed8SIngo Molnar */
144204746ed8SIngo Molnar if (rq->nr_running == 1 && p_rq->nr_running == 1)
144304746ed8SIngo Molnar return -ESRCH;
144404746ed8SIngo Molnar
144504746ed8SIngo Molnar guard(double_rq_lock)(rq, p_rq);
144604746ed8SIngo Molnar if (task_rq(p) != p_rq)
144704746ed8SIngo Molnar goto again;
144804746ed8SIngo Molnar
144904746ed8SIngo Molnar if (!curr->sched_class->yield_to_task)
145004746ed8SIngo Molnar return 0;
145104746ed8SIngo Molnar
145204746ed8SIngo Molnar if (curr->sched_class != p->sched_class)
145304746ed8SIngo Molnar return 0;
145404746ed8SIngo Molnar
145504746ed8SIngo Molnar if (task_on_cpu(p_rq, p) || !task_is_running(p))
145604746ed8SIngo Molnar return 0;
145704746ed8SIngo Molnar
145804746ed8SIngo Molnar yielded = curr->sched_class->yield_to_task(rq, p);
145904746ed8SIngo Molnar if (yielded) {
146004746ed8SIngo Molnar schedstat_inc(rq->yld_count);
146104746ed8SIngo Molnar /*
146204746ed8SIngo Molnar * Make p's CPU reschedule; pick_next_entity
146304746ed8SIngo Molnar * takes care of fairness.
146404746ed8SIngo Molnar */
146504746ed8SIngo Molnar if (preempt && rq != p_rq)
146604746ed8SIngo Molnar resched_curr(p_rq);
146704746ed8SIngo Molnar }
146804746ed8SIngo Molnar }
146904746ed8SIngo Molnar
147004746ed8SIngo Molnar if (yielded)
147104746ed8SIngo Molnar schedule();
147204746ed8SIngo Molnar
147304746ed8SIngo Molnar return yielded;
147404746ed8SIngo Molnar }
147504746ed8SIngo Molnar EXPORT_SYMBOL_GPL(yield_to);
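/*
 * Editorial sketch of a directed-yield caller (loosely modelled on users
 * such as KVM; names are illustrative).  A reference is taken so the target
 * task cannot go away, as the comment above requires.
 *
 *	struct task_struct *target;
 *
 *	rcu_read_lock();
 *	target = find_task_by_vpid(nr);		// 'nr': a pid in our namespace
 *	if (target)
 *		get_task_struct(target);
 *	rcu_read_unlock();
 *
 *	if (target) {
 *		yield_to(target, false);
 *		put_task_struct(target);
 *	}
 */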
147604746ed8SIngo Molnar
147704746ed8SIngo Molnar /**
147804746ed8SIngo Molnar * sys_sched_get_priority_max - return maximum RT priority.
147904746ed8SIngo Molnar * @policy: scheduling class.
148004746ed8SIngo Molnar *
148104746ed8SIngo Molnar * Return: On success, this syscall returns the maximum
148204746ed8SIngo Molnar * rt_priority that can be used by a given scheduling class.
148304746ed8SIngo Molnar * On failure, a negative error code is returned.
148404746ed8SIngo Molnar */
148504746ed8SIngo Molnar SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
148604746ed8SIngo Molnar {
148704746ed8SIngo Molnar int ret = -EINVAL;
148804746ed8SIngo Molnar
148904746ed8SIngo Molnar switch (policy) {
149004746ed8SIngo Molnar case SCHED_FIFO:
149104746ed8SIngo Molnar case SCHED_RR:
149204746ed8SIngo Molnar ret = MAX_RT_PRIO-1;
149304746ed8SIngo Molnar break;
149404746ed8SIngo Molnar case SCHED_DEADLINE:
149504746ed8SIngo Molnar case SCHED_NORMAL:
149604746ed8SIngo Molnar case SCHED_BATCH:
149704746ed8SIngo Molnar case SCHED_IDLE:
1498f0e1a064STejun Heo case SCHED_EXT:
149904746ed8SIngo Molnar ret = 0;
150004746ed8SIngo Molnar break;
150104746ed8SIngo Molnar }
150204746ed8SIngo Molnar return ret;
150304746ed8SIngo Molnar }
150404746ed8SIngo Molnar
150504746ed8SIngo Molnar /**
150604746ed8SIngo Molnar * sys_sched_get_priority_min - return minimum RT priority.
150704746ed8SIngo Molnar * @policy: scheduling class.
150804746ed8SIngo Molnar *
150904746ed8SIngo Molnar * Return: On success, this syscall returns the minimum
151004746ed8SIngo Molnar * rt_priority that can be used by a given scheduling class.
151104746ed8SIngo Molnar * On failure, a negative error code is returned.
151204746ed8SIngo Molnar */
151304746ed8SIngo Molnar SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
151404746ed8SIngo Molnar {
151504746ed8SIngo Molnar int ret = -EINVAL;
151604746ed8SIngo Molnar
151704746ed8SIngo Molnar switch (policy) {
151804746ed8SIngo Molnar case SCHED_FIFO:
151904746ed8SIngo Molnar case SCHED_RR:
152004746ed8SIngo Molnar ret = 1;
152104746ed8SIngo Molnar break;
152204746ed8SIngo Molnar case SCHED_DEADLINE:
152304746ed8SIngo Molnar case SCHED_NORMAL:
152404746ed8SIngo Molnar case SCHED_BATCH:
152504746ed8SIngo Molnar case SCHED_IDLE:
1526f0e1a064STejun Heo case SCHED_EXT:
152704746ed8SIngo Molnar ret = 0;
152804746ed8SIngo Molnar }
152904746ed8SIngo Molnar return ret;
153004746ed8SIngo Molnar }
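/*
 * Editorial example: querying the valid static priority range for
 * SCHED_FIFO before switching policy.  On Linux this is typically 1..99;
 * names are illustrative and error handling is minimal.
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	struct sched_param sp;
 *	int max = sched_get_priority_max(SCHED_FIFO);
 *	int min = sched_get_priority_min(SCHED_FIFO);
 *
 *	sp.sched_priority = min + (max - min) / 2;
 *	if (sched_setscheduler(0, SCHED_FIFO, &sp))	// needs CAP_SYS_NICE
 *		perror("sched_setscheduler");
 */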
153104746ed8SIngo Molnar
153204746ed8SIngo Molnar static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
153304746ed8SIngo Molnar {
153404746ed8SIngo Molnar unsigned int time_slice = 0;
153504746ed8SIngo Molnar int retval;
153604746ed8SIngo Molnar
153704746ed8SIngo Molnar if (pid < 0)
153804746ed8SIngo Molnar return -EINVAL;
153904746ed8SIngo Molnar
154004746ed8SIngo Molnar scoped_guard (rcu) {
154104746ed8SIngo Molnar struct task_struct *p = find_process_by_pid(pid);
154204746ed8SIngo Molnar if (!p)
154304746ed8SIngo Molnar return -ESRCH;
154404746ed8SIngo Molnar
154504746ed8SIngo Molnar retval = security_task_getscheduler(p);
154604746ed8SIngo Molnar if (retval)
154704746ed8SIngo Molnar return retval;
154804746ed8SIngo Molnar
154904746ed8SIngo Molnar scoped_guard (task_rq_lock, p) {
155004746ed8SIngo Molnar struct rq *rq = scope.rq;
155104746ed8SIngo Molnar if (p->sched_class->get_rr_interval)
155204746ed8SIngo Molnar time_slice = p->sched_class->get_rr_interval(rq, p);
155304746ed8SIngo Molnar }
155404746ed8SIngo Molnar }
155504746ed8SIngo Molnar
155604746ed8SIngo Molnar jiffies_to_timespec64(time_slice, t);
155704746ed8SIngo Molnar return 0;
155804746ed8SIngo Molnar }
155904746ed8SIngo Molnar
156004746ed8SIngo Molnar /**
1561402de7fcSIngo Molnar * sys_sched_rr_get_interval - return the default time-slice of a process.
156204746ed8SIngo Molnar * @pid: pid of the process.
1563402de7fcSIngo Molnar * @interval: userspace pointer to the time-slice value.
156404746ed8SIngo Molnar *
1565402de7fcSIngo Molnar * This syscall writes the default time-slice value of a given process
156604746ed8SIngo Molnar * into the user-space timespec buffer. A value of '0' means infinity.
156704746ed8SIngo Molnar *
1568402de7fcSIngo Molnar * Return: On success, 0 and the time-slice is in @interval. Otherwise,
156904746ed8SIngo Molnar * an error code.
157004746ed8SIngo Molnar */
157104746ed8SIngo Molnar SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
157204746ed8SIngo Molnar struct __kernel_timespec __user *, interval)
157304746ed8SIngo Molnar {
157404746ed8SIngo Molnar struct timespec64 t;
157504746ed8SIngo Molnar int retval = sched_rr_get_interval(pid, &t);
157604746ed8SIngo Molnar
157704746ed8SIngo Molnar if (retval == 0)
157804746ed8SIngo Molnar retval = put_timespec64(&t, interval);
157904746ed8SIngo Molnar
158004746ed8SIngo Molnar return retval;
158104746ed8SIngo Molnar }
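/*
 * Editorial example: reading the caller's round-robin time slice via the
 * glibc wrapper.  As documented above, a result of 0 means an infinite
 * slice (e.g. for SCHED_FIFO tasks).
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("time slice: %ld.%09ld s\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 */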
158204746ed8SIngo Molnar
158304746ed8SIngo Molnar #ifdef CONFIG_COMPAT_32BIT_TIME
158404746ed8SIngo Molnar SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
158504746ed8SIngo Molnar struct old_timespec32 __user *, interval)
158604746ed8SIngo Molnar {
158704746ed8SIngo Molnar struct timespec64 t;
158804746ed8SIngo Molnar int retval = sched_rr_get_interval(pid, &t);
158904746ed8SIngo Molnar
159004746ed8SIngo Molnar if (retval == 0)
159104746ed8SIngo Molnar retval = put_old_timespec32(&t, interval);
159204746ed8SIngo Molnar return retval;
159304746ed8SIngo Molnar }
159404746ed8SIngo Molnar #endif
1595