1d2912cb1SThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds * linux/include/linux/cpufreq.h
41da177e4SLinus Torvalds *
51da177e4SLinus Torvalds * Copyright (C) 2001 Russell King
61da177e4SLinus Torvalds * (C) 2002 - 2003 Dominik Brodowski <[email protected]>
71da177e4SLinus Torvalds */
81da177e4SLinus Torvalds #ifndef _LINUX_CPUFREQ_H
91da177e4SLinus Torvalds #define _LINUX_CPUFREQ_H
101da177e4SLinus Torvalds
11652ed95dSViresh Kumar #include <linux/clk.h>
12c17495b0SViresh Kumar #include <linux/cpu.h>
131da177e4SLinus Torvalds #include <linux/cpumask.h>
145ff0a268SViresh Kumar #include <linux/completion.h>
155ff0a268SViresh Kumar #include <linux/kobject.h>
165ff0a268SViresh Kumar #include <linux/notifier.h>
178486a32dSHector.Yuan #include <linux/of.h>
18c17495b0SViresh Kumar #include <linux/pm_opp.h>
193000ce3cSRafael J. Wysocki #include <linux/pm_qos.h>
2012478cf0SSrivatsa S. Bhat #include <linux/spinlock.h>
215ff0a268SViresh Kumar #include <linux/sysfs.h>
22a436ae94SLiao Chang #include <linux/minmax.h>
231da177e4SLinus Torvalds
241da177e4SLinus Torvalds /*********************************************************************
2574aca95dSViresh Kumar * CPUFREQ INTERFACE *
261da177e4SLinus Torvalds *********************************************************************/
2774aca95dSViresh Kumar /*
2874aca95dSViresh Kumar * Frequency values here are CPU kHz
2974aca95dSViresh Kumar *
30b53cc6eaSDave Jones * Maximum transition latency is in nanoseconds - if it's unknown,
311da177e4SLinus Torvalds * CPUFREQ_ETERNAL shall be used.
321da177e4SLinus Torvalds */
331da177e4SLinus Torvalds
3474aca95dSViresh Kumar #define CPUFREQ_ETERNAL (-1)
3574aca95dSViresh Kumar #define CPUFREQ_NAME_LEN 16
36565ebe80SViresh Kumar /* Print length for names. Extra 1 space for accommodating '\n' in prints */
3774aca95dSViresh Kumar #define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
3874aca95dSViresh Kumar
391da177e4SLinus Torvalds struct cpufreq_governor;
401da177e4SLinus Torvalds
/* Sort order of a driver's frequency table, detected by the core at init. */
enum cpufreq_table_sorting {
	CPUFREQ_TABLE_UNSORTED,
	CPUFREQ_TABLE_SORTED_ASCENDING,
	CPUFREQ_TABLE_SORTED_DESCENDING
};
46da0c6dc0SViresh Kumar
/* Hardware frequency limits and switching latency for a policy's CPUs. */
struct cpufreq_cpuinfo {
	unsigned int		max_freq;	/* in kHz */
	unsigned int		min_freq;	/* in kHz */

	/* in 10^(-9) s = nanoseconds */
	unsigned int		transition_latency;
};
541da177e4SLinus Torvalds
struct cpufreq_policy {
	/* CPUs sharing clock, require sw coordination */
	cpumask_var_t		cpus;	/* Online CPUs only */
	cpumask_var_t		related_cpus; /* Online + Offline CPUs */
	cpumask_var_t		real_cpus; /* Related and present */

	unsigned int		shared_type; /* ACPI: ANY or ALL affected CPUs
						should set cpufreq */
	unsigned int		cpu;    /* cpu managing this policy, must be online */

	struct clk		*clk;
	struct cpufreq_cpuinfo	cpuinfo; /* see above */

	unsigned int		min;    /* in kHz */
	unsigned int		max;    /* in kHz */
	unsigned int		cur;    /* in kHz, only needed if cpufreq
					 * governors are used */
	unsigned int		suspend_freq; /* freq to set during suspend */

	unsigned int		policy; /* see above */
	unsigned int		last_policy; /* policy before unplug */
	struct cpufreq_governor	*governor; /* see below */
	void			*governor_data;
	char			last_governor[CPUFREQ_NAME_LEN]; /* last governor used */

	struct work_struct	update; /* if update_policy() needs to be
					 * called, but you're in IRQ context */

	/*
	 * Frequency QoS constraints for this policy and the requests
	 * backing the min/max limits above.
	 */
	struct freq_constraints	constraints;
	struct freq_qos_request	*min_freq_req;
	struct freq_qos_request	*max_freq_req;

	struct cpufreq_frequency_table	*freq_table;
	enum cpufreq_table_sorting freq_table_sorted;

	struct list_head	policy_list;
	struct kobject		kobj;
	struct completion	kobj_unregister;

	/*
	 * The rules for this semaphore:
	 * - Any routine that wants to read from the policy structure will
	 *   do a down_read on this semaphore.
	 * - Any routine that will write to the policy structure and/or may take away
	 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
	 *   mode before doing so.
	 */
	struct rw_semaphore	rwsem;

	/*
	 * Fast switch flags:
	 * - fast_switch_possible should be set by the driver if it can
	 *   guarantee that frequency can be changed on any CPU sharing the
	 *   policy and that the change will affect all of the policy CPUs then.
	 * - fast_switch_enabled is to be set by governors that support fast
	 *   frequency switching with the help of cpufreq_enable_fast_switch().
	 */
	bool			fast_switch_possible;
	bool			fast_switch_enabled;

	/*
	 * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current
	 * governor.
	 */
	bool			strict_target;

	/*
	 * Set if inefficient frequencies were found in the frequency table.
	 * This indicates if the relation flag CPUFREQ_RELATION_E can be
	 * honored.
	 */
	bool			efficiencies_available;

	/*
	 * Preferred average time interval between consecutive invocations of
	 * the driver to set the frequency for this policy. To be set by the
	 * scaling driver (0, which is the default, means no preference).
	 */
	unsigned int		transition_delay_us;

	/*
	 * Remote DVFS flag (Not added to the driver structure as we don't want
	 * to access another structure from scheduler hotpath).
	 *
	 * Should be set if CPUs can do DVFS on behalf of other CPUs from
	 * different cpufreq policies.
	 */
	bool			dvfs_possible_from_any_cpu;

	/* Per policy boost enabled flag. */
	bool			boost_enabled;

	/* Per policy boost supported flag. */
	bool			boost_supported;

	/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
	unsigned int		cached_target_freq;
	unsigned int		cached_resolved_idx;

	/* Synchronization for frequency transitions */
	bool			transition_ongoing; /* Tracks transition status */
	spinlock_t		transition_lock;
	wait_queue_head_t	transition_wait;
	struct task_struct	*transition_task; /* Task which is doing the transition */

	/* cpufreq-stats */
	struct cpufreq_stats	*stats;

	/* For cpufreq driver's internal use */
	void			*driver_data;

	/* Pointer to the cooling device if used for thermal mitigation */
	struct thermal_cooling_device *cdev;

	/*
	 * Notifier blocks, presumably tied to updates of the min/max
	 * frequency constraints above — confirm against the cpufreq core.
	 */
	struct notifier_block	nb_min;
	struct notifier_block	nb_max;
};
1721da177e4SLinus Torvalds
1731e4f63aeSRafael J. Wysocki /*
1741e4f63aeSRafael J. Wysocki * Used for passing new cpufreq policy data to the cpufreq driver's ->verify()
1751e4f63aeSRafael J. Wysocki * callback for sanitization. That callback is only expected to modify the min
1761e4f63aeSRafael J. Wysocki * and max values, if necessary, and specifically it must not update the
1771e4f63aeSRafael J. Wysocki * frequency table.
1781e4f63aeSRafael J. Wysocki */
/* Subset of policy state handed to the driver's ->verify() callback. */
struct cpufreq_policy_data {
	struct cpufreq_cpuinfo		cpuinfo;
	struct cpufreq_frequency_table	*freq_table;
	unsigned int			cpu;
	unsigned int			min;    /* in kHz */
	unsigned int			max;    /* in kHz */
};
1861e4f63aeSRafael J. Wysocki
/* Data passed with CPUFREQ_PRECHANGE/CPUFREQ_POSTCHANGE transition notifiers. */
struct cpufreq_freqs {
	struct cpufreq_policy *policy;
	unsigned int old;	/* previous frequency, in kHz */
	unsigned int new;	/* new frequency, in kHz */
	u8 flags;		/* flags of cpufreq_driver, see below. */
};
193df24014aSViresh Kumar
19462b36cc1SViresh Kumar /* Only for ACPI */
19546f18e3aSVenkatesh Pallipadi #define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
19646f18e3aSVenkatesh Pallipadi #define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */
19746f18e3aSVenkatesh Pallipadi #define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */
19846f18e3aSVenkatesh Pallipadi #define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
1991da177e4SLinus Torvalds
200c75b505dSDaniel Vetter #ifdef CONFIG_CPU_FREQ
2011f0bd44eSRafael J. Wysocki struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
20274aca95dSViresh Kumar struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
2033a3e9e06SViresh Kumar void cpufreq_cpu_put(struct cpufreq_policy *policy);
204c75b505dSDaniel Vetter #else
/* CONFIG_CPU_FREQ=n stubs: no policies exist, so all lookups return NULL. */
static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	return NULL;
}
static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	return NULL;
}
/* CONFIG_CPU_FREQ=n stub: no reference was taken, nothing to release. */
static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
214c75b505dSDaniel Vetter #endif
21574aca95dSViresh Kumar
21697a705dcSDhananjay Ugwekar /* Scope based cleanup macro for cpufreq_policy kobject reference counting */
DEFINE_FREE(put_cpufreq_policy,struct cpufreq_policy *,if (_T)cpufreq_cpu_put (_T))21797a705dcSDhananjay Ugwekar DEFINE_FREE(put_cpufreq_policy, struct cpufreq_policy *, if (_T) cpufreq_cpu_put(_T))
21897a705dcSDhananjay Ugwekar
2199083e498SRafael J. Wysocki static inline bool policy_is_inactive(struct cpufreq_policy *policy)
2209083e498SRafael J. Wysocki {
2219083e498SRafael J. Wysocki return cpumask_empty(policy->cpus);
2229083e498SRafael J. Wysocki }
2239083e498SRafael J. Wysocki
policy_is_shared(struct cpufreq_policy * policy)2242624f90cSFabio Baltieri static inline bool policy_is_shared(struct cpufreq_policy *policy)
2252624f90cSFabio Baltieri {
2262624f90cSFabio Baltieri return cpumask_weight(policy->cpus) > 1;
2272624f90cSFabio Baltieri }
2282624f90cSFabio Baltieri
22974aca95dSViresh Kumar #ifdef CONFIG_CPU_FREQ
23074aca95dSViresh Kumar unsigned int cpufreq_get(unsigned int cpu);
23174aca95dSViresh Kumar unsigned int cpufreq_quick_get(unsigned int cpu);
23274aca95dSViresh Kumar unsigned int cpufreq_quick_get_max(unsigned int cpu);
233bbce8eaaSIonela Voinescu unsigned int cpufreq_get_hw_max_freq(unsigned int cpu);
23474aca95dSViresh Kumar void disable_cpufreq(void);
2351da177e4SLinus Torvalds
23674aca95dSViresh Kumar u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
2379083e498SRafael J. Wysocki
2389083e498SRafael J. Wysocki struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
2399083e498SRafael J. Wysocki void cpufreq_cpu_release(struct cpufreq_policy *policy);
24074aca95dSViresh Kumar int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
241c57b25bdSViresh Kumar void refresh_frequency_limits(struct cpufreq_policy *policy);
24230248fefSRafael J. Wysocki void cpufreq_update_policy(unsigned int cpu);
2435a25e3f7SRafael J. Wysocki void cpufreq_update_limits(unsigned int cpu);
24474aca95dSViresh Kumar bool have_governor_per_policy(void);
245874f6353SIonela Voinescu bool cpufreq_supports_freq_invariance(void);
24674aca95dSViresh Kumar struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
247b7898fdaSRafael J. Wysocki void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
2486c9d9c81SRafael J. Wysocki void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
249a038895eSViresh Kumar bool has_target_index(void);
25075d65931SVincent Guittot
25175d65931SVincent Guittot DECLARE_PER_CPU(unsigned long, cpufreq_pressure);
cpufreq_get_pressure(int cpu)25275d65931SVincent Guittot static inline unsigned long cpufreq_get_pressure(int cpu)
25375d65931SVincent Guittot {
25475d65931SVincent Guittot return READ_ONCE(per_cpu(cpufreq_pressure, cpu));
25575d65931SVincent Guittot }
25674aca95dSViresh Kumar #else
/* CONFIG_CPU_FREQ=n stubs: frequency queries report 0, controls are no-ops. */
static inline unsigned int cpufreq_get(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_quick_get(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
	return 0;
}
static inline bool cpufreq_supports_freq_invariance(void)
{
	return false;
}
static inline void disable_cpufreq(void) { }
static inline void cpufreq_update_limits(unsigned int cpu) { }
/* Without cpufreq, the per-CPU pressure value is always 0. */
static inline unsigned long cpufreq_get_pressure(int cpu)
{
	return 0;
}
2831da177e4SLinus Torvalds #endif
2841da177e4SLinus Torvalds
2851aefc75bSRafael J. Wysocki #ifdef CONFIG_CPU_FREQ_STAT
2861aefc75bSRafael J. Wysocki void cpufreq_stats_create_table(struct cpufreq_policy *policy);
2871aefc75bSRafael J. Wysocki void cpufreq_stats_free_table(struct cpufreq_policy *policy);
2881aefc75bSRafael J. Wysocki void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
2891aefc75bSRafael J. Wysocki unsigned int new_freq);
2901aefc75bSRafael J. Wysocki #else
/* CONFIG_CPU_FREQ_STAT=n stubs: statistics bookkeeping compiles away. */
static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { }
static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { }
static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
						   unsigned int new_freq) { }
2951aefc75bSRafael J. Wysocki #endif /* CONFIG_CPU_FREQ_STAT */
2961aefc75bSRafael J. Wysocki
2971da177e4SLinus Torvalds /*********************************************************************
2981da177e4SLinus Torvalds * CPUFREQ DRIVER INTERFACE *
2991da177e4SLinus Torvalds *********************************************************************/
3001da177e4SLinus Torvalds
3011da177e4SLinus Torvalds #define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
3021da177e4SLinus Torvalds #define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
3035b0c0b16SStratos Karafotis #define CPUFREQ_RELATION_C 2 /* closest frequency to target */
3041f39fa0dSVincent Donnefort /* relation flags */
3051f39fa0dSVincent Donnefort #define CPUFREQ_RELATION_E BIT(2) /* Get if possible an efficient frequency */
3061da177e4SLinus Torvalds
307b894d20eSVincent Donnefort #define CPUFREQ_RELATION_LE (CPUFREQ_RELATION_L | CPUFREQ_RELATION_E)
308b894d20eSVincent Donnefort #define CPUFREQ_RELATION_HE (CPUFREQ_RELATION_H | CPUFREQ_RELATION_E)
309b894d20eSVincent Donnefort #define CPUFREQ_RELATION_CE (CPUFREQ_RELATION_C | CPUFREQ_RELATION_E)
3101da177e4SLinus Torvalds
/* Per-policy sysfs attribute; instantiated via the cpufreq_freq_attr_* macros below. */
struct freq_attr {
	struct attribute attr;
	ssize_t (*show)(struct cpufreq_policy *, char *);
	ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
};
31674aca95dSViresh Kumar
31774aca95dSViresh Kumar #define cpufreq_freq_attr_ro(_name) \
31874aca95dSViresh Kumar static struct freq_attr _name = \
31974aca95dSViresh Kumar __ATTR(_name, 0444, show_##_name, NULL)
32074aca95dSViresh Kumar
32174aca95dSViresh Kumar #define cpufreq_freq_attr_ro_perm(_name, _perm) \
32274aca95dSViresh Kumar static struct freq_attr _name = \
32374aca95dSViresh Kumar __ATTR(_name, _perm, show_##_name, NULL)
32474aca95dSViresh Kumar
32574aca95dSViresh Kumar #define cpufreq_freq_attr_rw(_name) \
32674aca95dSViresh Kumar static struct freq_attr _name = \
32774aca95dSViresh Kumar __ATTR(_name, 0644, show_##_name, store_##_name)
32874aca95dSViresh Kumar
329ee7930eeSMarkus Mayer #define cpufreq_freq_attr_wo(_name) \
330ee7930eeSMarkus Mayer static struct freq_attr _name = \
331ee7930eeSMarkus Mayer __ATTR(_name, 0200, NULL, store_##_name)
332ee7930eeSMarkus Mayer
33374aca95dSViresh Kumar #define define_one_global_ro(_name) \
334625c85a6SViresh Kumar static struct kobj_attribute _name = \
33574aca95dSViresh Kumar __ATTR(_name, 0444, show_##_name, NULL)
33674aca95dSViresh Kumar
33774aca95dSViresh Kumar #define define_one_global_rw(_name) \
338625c85a6SViresh Kumar static struct kobj_attribute _name = \
33974aca95dSViresh Kumar __ATTR(_name, 0644, show_##_name, store_##_name)
34074aca95dSViresh Kumar
3411da177e4SLinus Torvalds
struct cpufreq_driver {
	char		name[CPUFREQ_NAME_LEN];
	u16		flags;		/* CPUFREQ_* driver flags, see below */
	void		*driver_data;

	/* needed by all drivers */
	int		(*init)(struct cpufreq_policy *policy);
	int		(*verify)(struct cpufreq_policy_data *policy);

	/* define one out of two */
	int		(*setpolicy)(struct cpufreq_policy *policy);

	int		(*target)(struct cpufreq_policy *policy,
				  unsigned int target_freq,
				  unsigned int relation);	/* Deprecated */
	int		(*target_index)(struct cpufreq_policy *policy,
					unsigned int index);
	unsigned int	(*fast_switch)(struct cpufreq_policy *policy,
				       unsigned int target_freq);
	/*
	 * ->fast_switch() replacement for drivers that use an internal
	 * representation of performance levels and can pass hints other than
	 * the target performance level to the hardware. This can only be set
	 * if ->fast_switch is set too, because in those cases (under specific
	 * conditions) scale invariance can be disabled, which causes the
	 * schedutil governor to fall back to the latter.
	 */
	void		(*adjust_perf)(unsigned int cpu,
				       unsigned long min_perf,
				       unsigned long target_perf,
				       unsigned long capacity);

	/*
	 * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
	 * unset.
	 *
	 * get_intermediate should return a stable intermediate frequency
	 * platform wants to switch to and target_intermediate() should set CPU
	 * to that frequency, before jumping to the frequency corresponding
	 * to 'index'. Core will take care of sending notifications and driver
	 * doesn't have to handle them in target_intermediate() or
	 * target_index().
	 *
	 * Drivers can return '0' from get_intermediate() in case they don't
	 * wish to switch to intermediate frequency for some target frequency.
	 * In that case core will directly call ->target_index().
	 */
	unsigned int	(*get_intermediate)(struct cpufreq_policy *policy,
					    unsigned int index);
	int		(*target_intermediate)(struct cpufreq_policy *policy,
					       unsigned int index);

	/* should be defined, if possible, return 0 on error */
	unsigned int	(*get)(unsigned int cpu);

	/* Called to update policy limits on firmware notifications. */
	void		(*update_limits)(unsigned int cpu);

	/* optional */
	int		(*bios_limit)(int cpu, unsigned int *limit);

	int		(*online)(struct cpufreq_policy *policy);
	int		(*offline)(struct cpufreq_policy *policy);
	void		(*exit)(struct cpufreq_policy *policy);
	int		(*suspend)(struct cpufreq_policy *policy);
	int		(*resume)(struct cpufreq_policy *policy);

	/* Will be called after the driver is fully initialized */
	void		(*ready)(struct cpufreq_policy *policy);

	struct freq_attr **attr;

	/* platform specific boost support code */
	bool		boost_enabled;
	int		(*set_boost)(struct cpufreq_policy *policy, int state);

	/*
	 * Set by drivers that want to register with the energy model after the
	 * policy is properly initialized, but before the governor is started.
	 */
	void		(*register_em)(struct cpufreq_policy *policy);
};
4241da177e4SLinus Torvalds
4251da177e4SLinus Torvalds /* flags */
4268321be6aSAmit Kucheria
4275ae4a4b4SViresh Kumar /*
4283598b30bSRafael J. Wysocki * Set by drivers that need to update internal upper and lower boundaries along
4295ae4a4b4SViresh Kumar * with the target frequency and so the core and governors should also invoke
4305ae4a4b4SViresh Kumar * the driver if the target frequency does not change, but the policy min or max
4315ae4a4b4SViresh Kumar * may have changed.
4325ae4a4b4SViresh Kumar */
4335ae4a4b4SViresh Kumar #define CPUFREQ_NEED_UPDATE_LIMITS BIT(0)
4348321be6aSAmit Kucheria
4358321be6aSAmit Kucheria /* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */
4368321be6aSAmit Kucheria #define CPUFREQ_CONST_LOOPS BIT(1)
4378321be6aSAmit Kucheria
4382f053186SViresh Kumar /*
4392f053186SViresh Kumar * Set by drivers that want the core to automatically register the cpufreq
4402f053186SViresh Kumar * driver as a thermal cooling device.
4412f053186SViresh Kumar */
4422f053186SViresh Kumar #define CPUFREQ_IS_COOLING_DEV BIT(2)
4431da177e4SLinus Torvalds
4440b981e70SViresh Kumar /*
4450b981e70SViresh Kumar * This should be set by platforms having multiple clock-domains, i.e.
4460b981e70SViresh Kumar * supporting multiple policies. With this sysfs directories of governor would
4470b981e70SViresh Kumar * be created in cpu/cpu<num>/cpufreq/ directory and so they can use the same
4480b981e70SViresh Kumar * governor with different tunables for different clusters.
4490b981e70SViresh Kumar */
4508321be6aSAmit Kucheria #define CPUFREQ_HAVE_GOVERNOR_PER_POLICY BIT(3)
4510b981e70SViresh Kumar
4527dbf694dSViresh Kumar /*
4537dbf694dSViresh Kumar * Driver will do POSTCHANGE notifications from outside of their ->target()
4547dbf694dSViresh Kumar * routine and so must set cpufreq_driver->flags with this flag, so that core
4557dbf694dSViresh Kumar * can handle them specially.
4567dbf694dSViresh Kumar */
4578321be6aSAmit Kucheria #define CPUFREQ_ASYNC_NOTIFICATION BIT(4)
4587dbf694dSViresh Kumar
459ae6b4271SViresh Kumar /*
460ae6b4271SViresh Kumar * Set by drivers which want cpufreq core to check if CPU is running at a
461ae6b4271SViresh Kumar * frequency present in freq-table exposed by the driver. For these drivers if
462ae6b4271SViresh Kumar * CPU is found running at an out of table freq, we will try to set it to a freq
463ae6b4271SViresh Kumar * from the table. And if that fails, we will stop further boot process by
464ae6b4271SViresh Kumar * issuing a BUG_ON().
465ae6b4271SViresh Kumar */
4668321be6aSAmit Kucheria #define CPUFREQ_NEED_INITIAL_FREQ_CHECK BIT(5)
467ae6b4271SViresh Kumar
468fe829ed8SViresh Kumar /*
469fe829ed8SViresh Kumar * Set by drivers to disallow use of governors with "dynamic_switching" flag
470fe829ed8SViresh Kumar * set.
471fe829ed8SViresh Kumar */
4728321be6aSAmit Kucheria #define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING BIT(6)
473fe829ed8SViresh Kumar
474221dee28SLinus Torvalds int cpufreq_register_driver(struct cpufreq_driver *driver_data);
475dd329e1eSUwe Kleine-König void cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
4761da177e4SLinus Torvalds
477a62f68f5SRafael J. Wysocki bool cpufreq_driver_test_flags(u16 flags);
47874aca95dSViresh Kumar const char *cpufreq_get_current_driver(void);
47951315cdfSThomas Petazzoni void *cpufreq_get_driver_data(void);
4801da177e4SLinus Torvalds
cpufreq_thermal_control_enabled(struct cpufreq_driver * drv)481bcc61569SDaniel Lezcano static inline int cpufreq_thermal_control_enabled(struct cpufreq_driver *drv)
482bcc61569SDaniel Lezcano {
483bcc61569SDaniel Lezcano return IS_ENABLED(CONFIG_CPU_THERMAL) &&
484bcc61569SDaniel Lezcano (drv->flags & CPUFREQ_IS_COOLING_DEV);
485bcc61569SDaniel Lezcano }
486bcc61569SDaniel Lezcano
/*
 * Clamp the policy's min/max frequencies into [min, max].
 * max is constrained first so that min can then be bounded by the
 * (possibly lowered) policy->max, guaranteeing min <= max on return.
 */
static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *policy,
						unsigned int min,
						unsigned int max)
{
	policy->max = clamp(policy->max, min, max);
	policy->min = clamp(policy->min, min, policy->max);
}
4941da177e4SLinus Torvalds
495be49e346SViresh Kumar static inline void
cpufreq_verify_within_cpu_limits(struct cpufreq_policy_data * policy)4961e4f63aeSRafael J. Wysocki cpufreq_verify_within_cpu_limits(struct cpufreq_policy_data *policy)
497be49e346SViresh Kumar {
498be49e346SViresh Kumar cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
499be49e346SViresh Kumar policy->cpuinfo.max_freq);
500be49e346SViresh Kumar }
501be49e346SViresh Kumar
5022f0aea93SViresh Kumar #ifdef CONFIG_CPU_FREQ
5032f0aea93SViresh Kumar void cpufreq_suspend(void);
5042f0aea93SViresh Kumar void cpufreq_resume(void);
505e28867eaSViresh Kumar int cpufreq_generic_suspend(struct cpufreq_policy *policy);
5062f0aea93SViresh Kumar #else
/* CONFIG_CPU_FREQ=n stubs: system suspend/resume hooks compile away. */
static inline void cpufreq_suspend(void) {}
static inline void cpufreq_resume(void) {}
5092f0aea93SViresh Kumar #endif
5102f0aea93SViresh Kumar
5111da177e4SLinus Torvalds /*********************************************************************
51274aca95dSViresh Kumar * CPUFREQ NOTIFIER INTERFACE *
5131da177e4SLinus Torvalds *********************************************************************/
5141da177e4SLinus Torvalds
51574aca95dSViresh Kumar #define CPUFREQ_TRANSITION_NOTIFIER (0)
51674aca95dSViresh Kumar #define CPUFREQ_POLICY_NOTIFIER (1)
51774aca95dSViresh Kumar
51874aca95dSViresh Kumar /* Transition notifiers */
51974aca95dSViresh Kumar #define CPUFREQ_PRECHANGE (0)
52074aca95dSViresh Kumar #define CPUFREQ_POSTCHANGE (1)
52174aca95dSViresh Kumar
52274aca95dSViresh Kumar /* Policy Notifiers */
523df0eea44SViresh Kumar #define CPUFREQ_CREATE_POLICY (0)
524df0eea44SViresh Kumar #define CPUFREQ_REMOVE_POLICY (1)
52574aca95dSViresh Kumar
52674aca95dSViresh Kumar #ifdef CONFIG_CPU_FREQ
52774aca95dSViresh Kumar int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
52874aca95dSViresh Kumar int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
52974aca95dSViresh Kumar
53012478cf0SSrivatsa S. Bhat void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
53112478cf0SSrivatsa S. Bhat struct cpufreq_freqs *freqs);
53212478cf0SSrivatsa S. Bhat void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
53312478cf0SSrivatsa S. Bhat struct cpufreq_freqs *freqs, int transition_failed);
53474aca95dSViresh Kumar
53574aca95dSViresh Kumar #else /* CONFIG_CPU_FREQ */
/* CONFIG_CPU_FREQ unset: registering a notifier is a successful no-op. */
static inline int cpufreq_register_notifier(struct notifier_block *nb,
					unsigned int list)
{
	return 0;
}
/* CONFIG_CPU_FREQ unset: unregistering a notifier is a successful no-op. */
static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
					unsigned int list)
{
	return 0;
}
54674aca95dSViresh Kumar #endif /* !CONFIG_CPU_FREQ */
54774aca95dSViresh Kumar
54874aca95dSViresh Kumar /**
54974aca95dSViresh Kumar * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
55074aca95dSViresh Kumar * safe)
55174aca95dSViresh Kumar * @old: old value
55274aca95dSViresh Kumar * @div: divisor
55374aca95dSViresh Kumar * @mult: multiplier
55474aca95dSViresh Kumar *
55574aca95dSViresh Kumar *
55674aca95dSViresh Kumar * new = old * mult / div
55774aca95dSViresh Kumar */
static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
		u_int mult)
{
#if BITS_PER_LONG == 32
	/* 32-bit: widen to u64 before multiplying to avoid overflow. */
	u64 result = ((u64) old) * ((u64) mult);
	/* do_div() modifies 'result' in place and is the 32-bit safe u64 divide. */
	do_div(result, div);
	return (unsigned long) result;

#elif BITS_PER_LONG == 64
	/* 64-bit: the u64 product fits in unsigned long directly. */
	unsigned long result = old * ((u64) mult);
	result /= div;
	return result;
#endif
}
57274aca95dSViresh Kumar
57374aca95dSViresh Kumar /*********************************************************************
57474aca95dSViresh Kumar * CPUFREQ GOVERNORS *
57574aca95dSViresh Kumar *********************************************************************/
57674aca95dSViresh Kumar
5771e4f63aeSRafael J. Wysocki #define CPUFREQ_POLICY_UNKNOWN (0)
57874aca95dSViresh Kumar /*
57974aca95dSViresh Kumar * If (cpufreq_driver->target) exists, the ->governor decides what frequency
58074aca95dSViresh Kumar * within the limits is used. If (cpufreq_driver->setpolicy> exists, these
58174aca95dSViresh Kumar * two generic policies are available:
58274aca95dSViresh Kumar */
58374aca95dSViresh Kumar #define CPUFREQ_POLICY_POWERSAVE (1)
58474aca95dSViresh Kumar #define CPUFREQ_POLICY_PERFORMANCE (2)
58574aca95dSViresh Kumar
58674aca95dSViresh Kumar struct cpufreq_governor {
58774aca95dSViresh Kumar char name[CPUFREQ_NAME_LEN];
588e788892bSRafael J. Wysocki int (*init)(struct cpufreq_policy *policy);
589e788892bSRafael J. Wysocki void (*exit)(struct cpufreq_policy *policy);
590e788892bSRafael J. Wysocki int (*start)(struct cpufreq_policy *policy);
591e788892bSRafael J. Wysocki void (*stop)(struct cpufreq_policy *policy);
592e788892bSRafael J. Wysocki void (*limits)(struct cpufreq_policy *policy);
59374aca95dSViresh Kumar ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
59474aca95dSViresh Kumar char *buf);
59574aca95dSViresh Kumar int (*store_setspeed) (struct cpufreq_policy *policy,
59674aca95dSViresh Kumar unsigned int freq);
59774aca95dSViresh Kumar struct list_head governor_list;
59874aca95dSViresh Kumar struct module *owner;
5999a2a9ebcSRafael J. Wysocki u8 flags;
60074aca95dSViresh Kumar };
60174aca95dSViresh Kumar
6029a2a9ebcSRafael J. Wysocki /* Governor flags */
6039a2a9ebcSRafael J. Wysocki
6049a2a9ebcSRafael J. Wysocki /* For governors which change frequency dynamically by themselves */
6059a2a9ebcSRafael J. Wysocki #define CPUFREQ_GOV_DYNAMIC_SWITCHING BIT(0)
6069a2a9ebcSRafael J. Wysocki
607218f6687SRafael J. Wysocki /* For governors wanting the target frequency to be set exactly */
608218f6687SRafael J. Wysocki #define CPUFREQ_GOV_STRICT_TARGET BIT(1)
609218f6687SRafael J. Wysocki
6109a2a9ebcSRafael J. Wysocki
61174aca95dSViresh Kumar /* Pass a target to the cpufreq driver */
612b7898fdaSRafael J. Wysocki unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
613b7898fdaSRafael J. Wysocki unsigned int target_freq);
614ee2cc427SRafael J. Wysocki void cpufreq_driver_adjust_perf(unsigned int cpu,
615ee2cc427SRafael J. Wysocki unsigned long min_perf,
616ee2cc427SRafael J. Wysocki unsigned long target_perf,
617ee2cc427SRafael J. Wysocki unsigned long capacity);
618ee2cc427SRafael J. Wysocki bool cpufreq_driver_has_adjust_perf(void);
61974aca95dSViresh Kumar int cpufreq_driver_target(struct cpufreq_policy *policy,
62074aca95dSViresh Kumar unsigned int target_freq,
62174aca95dSViresh Kumar unsigned int relation);
62274aca95dSViresh Kumar int __cpufreq_driver_target(struct cpufreq_policy *policy,
62374aca95dSViresh Kumar unsigned int target_freq,
62474aca95dSViresh Kumar unsigned int relation);
625e3c06236SSteve Muckle unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
626e3c06236SSteve Muckle unsigned int target_freq);
627aa7519afSViresh Kumar unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy);
62874aca95dSViresh Kumar int cpufreq_register_governor(struct cpufreq_governor *governor);
62974aca95dSViresh Kumar void cpufreq_unregister_governor(struct cpufreq_governor *governor);
630f6ebbcf0SRafael J. Wysocki int cpufreq_start_governor(struct cpufreq_policy *policy);
631f6ebbcf0SRafael J. Wysocki void cpufreq_stop_governor(struct cpufreq_policy *policy);
63274aca95dSViresh Kumar
63310dd8573SQuentin Perret #define cpufreq_governor_init(__governor) \
63410dd8573SQuentin Perret static int __init __governor##_init(void) \
63510dd8573SQuentin Perret { \
63610dd8573SQuentin Perret return cpufreq_register_governor(&__governor); \
63710dd8573SQuentin Perret } \
63810dd8573SQuentin Perret core_initcall(__governor##_init)
63910dd8573SQuentin Perret
64010dd8573SQuentin Perret #define cpufreq_governor_exit(__governor) \
64110dd8573SQuentin Perret static void __exit __governor##_exit(void) \
64210dd8573SQuentin Perret { \
64310dd8573SQuentin Perret return cpufreq_unregister_governor(&__governor); \
64410dd8573SQuentin Perret } \
64510dd8573SQuentin Perret module_exit(__governor##_exit)
64610dd8573SQuentin Perret
647de1df26bSRafael J. Wysocki struct cpufreq_governor *cpufreq_default_governor(void);
648de1df26bSRafael J. Wysocki struct cpufreq_governor *cpufreq_fallback_governor(void);
6491da177e4SLinus Torvalds
cpufreq_policy_apply_limits(struct cpufreq_policy * policy)650bf2be2deSViresh Kumar static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
651bf2be2deSViresh Kumar {
652bf2be2deSViresh Kumar if (policy->max < policy->cur)
653b894d20eSVincent Donnefort __cpufreq_driver_target(policy, policy->max,
654b894d20eSVincent Donnefort CPUFREQ_RELATION_HE);
655bf2be2deSViresh Kumar else if (policy->min > policy->cur)
656b894d20eSVincent Donnefort __cpufreq_driver_target(policy, policy->min,
657b894d20eSVincent Donnefort CPUFREQ_RELATION_LE);
658bf2be2deSViresh Kumar }
659bf2be2deSViresh Kumar
66066893b6aSRafael J. Wysocki /* Governor attribute set */
/* Governor attribute set */
struct gov_attr_set {
	struct kobject kobj;		/* sysfs object the attributes hang off */
	struct list_head policy_list;	/* list managed via gov_attr_set_get/put */
	struct mutex update_lock;	/* serializes updates to this set */
	int usage_count;		/* users of the set; freed when it drops to 0 (see gov_attr_set_put) */
};
66766893b6aSRafael J. Wysocki
66866893b6aSRafael J. Wysocki /* sysfs ops for cpufreq governors */
66966893b6aSRafael J. Wysocki extern const struct sysfs_ops governor_sysfs_ops;
67066893b6aSRafael J. Wysocki
/* Map a sysfs kobject back to its enclosing gov_attr_set. */
static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
{
	return container_of(kobj, struct gov_attr_set, kobj);
}
675ae265086SKevin Hao
67666893b6aSRafael J. Wysocki void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
67766893b6aSRafael J. Wysocki void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
67866893b6aSRafael J. Wysocki unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
67966893b6aSRafael J. Wysocki
68066893b6aSRafael J. Wysocki /* Governor sysfs attribute */
68166893b6aSRafael J. Wysocki struct governor_attr {
68266893b6aSRafael J. Wysocki struct attribute attr;
68366893b6aSRafael J. Wysocki ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
68466893b6aSRafael J. Wysocki ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
68566893b6aSRafael J. Wysocki size_t count);
68666893b6aSRafael J. Wysocki };
68766893b6aSRafael J. Wysocki
6881da177e4SLinus Torvalds /*********************************************************************
6891da177e4SLinus Torvalds * FREQUENCY TABLE HELPERS *
6901da177e4SLinus Torvalds *********************************************************************/
6911da177e4SLinus Torvalds
6927f4b0461SViresh Kumar /* Special Values of .frequency field */
6932b1987a9SBrian W Hart #define CPUFREQ_ENTRY_INVALID ~0u
6942b1987a9SBrian W Hart #define CPUFREQ_TABLE_END ~1u
6957f4b0461SViresh Kumar /* Special Values of .flags field */
6967f4b0461SViresh Kumar #define CPUFREQ_BOOST_FREQ (1 << 0)
697442d24a5SVincent Donnefort #define CPUFREQ_INEFFICIENT_FREQ (1 << 1)
6981da177e4SLinus Torvalds
6991da177e4SLinus Torvalds struct cpufreq_frequency_table {
7007f4b0461SViresh Kumar unsigned int flags;
70150701588SViresh Kumar unsigned int driver_data; /* driver specific data, not used by core */
7021da177e4SLinus Torvalds unsigned int frequency; /* kHz - doesn't need to be in ascending
7031da177e4SLinus Torvalds * order */
7041da177e4SLinus Torvalds };
7051da177e4SLinus Torvalds
70627e289dcSStratos Karafotis /*
70727e289dcSStratos Karafotis * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table
70827e289dcSStratos Karafotis * @pos: the cpufreq_frequency_table * to use as a loop cursor.
70927e289dcSStratos Karafotis * @table: the cpufreq_frequency_table * to iterate over.
71027e289dcSStratos Karafotis */
71127e289dcSStratos Karafotis
71227e289dcSStratos Karafotis #define cpufreq_for_each_entry(pos, table) \
71327e289dcSStratos Karafotis for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)
71427e289dcSStratos Karafotis
71527e289dcSStratos Karafotis /*
716ffd81dcfSDominik Brodowski * cpufreq_for_each_entry_idx - iterate over a cpufreq_frequency_table
717ffd81dcfSDominik Brodowski * with index
718ffd81dcfSDominik Brodowski * @pos: the cpufreq_frequency_table * to use as a loop cursor.
719ffd81dcfSDominik Brodowski * @table: the cpufreq_frequency_table * to iterate over.
720ffd81dcfSDominik Brodowski * @idx: the table entry currently being processed
721ffd81dcfSDominik Brodowski */
722ffd81dcfSDominik Brodowski
723ffd81dcfSDominik Brodowski #define cpufreq_for_each_entry_idx(pos, table, idx) \
724ffd81dcfSDominik Brodowski for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \
725ffd81dcfSDominik Brodowski pos++, idx++)
726ffd81dcfSDominik Brodowski
727ffd81dcfSDominik Brodowski /*
72827e289dcSStratos Karafotis * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table
72927e289dcSStratos Karafotis * excluding CPUFREQ_ENTRY_INVALID frequencies.
73027e289dcSStratos Karafotis * @pos: the cpufreq_frequency_table * to use as a loop cursor.
73127e289dcSStratos Karafotis * @table: the cpufreq_frequency_table * to iterate over.
73227e289dcSStratos Karafotis */
73327e289dcSStratos Karafotis
73427e289dcSStratos Karafotis #define cpufreq_for_each_valid_entry(pos, table) \
73534b08705SRafael J. Wysocki for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) \
73634b08705SRafael J. Wysocki if (pos->frequency == CPUFREQ_ENTRY_INVALID) \
73734b08705SRafael J. Wysocki continue; \
73834b08705SRafael J. Wysocki else
73927e289dcSStratos Karafotis
740ffd81dcfSDominik Brodowski /*
741ffd81dcfSDominik Brodowski * cpufreq_for_each_valid_entry_idx - iterate with index over a cpufreq
742ffd81dcfSDominik Brodowski * frequency_table excluding CPUFREQ_ENTRY_INVALID frequencies.
743ffd81dcfSDominik Brodowski * @pos: the cpufreq_frequency_table * to use as a loop cursor.
744ffd81dcfSDominik Brodowski * @table: the cpufreq_frequency_table * to iterate over.
745ffd81dcfSDominik Brodowski * @idx: the table entry currently being processed
746ffd81dcfSDominik Brodowski */
747ffd81dcfSDominik Brodowski
748ffd81dcfSDominik Brodowski #define cpufreq_for_each_valid_entry_idx(pos, table, idx) \
749ffd81dcfSDominik Brodowski cpufreq_for_each_entry_idx(pos, table, idx) \
750ffd81dcfSDominik Brodowski if (pos->frequency == CPUFREQ_ENTRY_INVALID) \
751ffd81dcfSDominik Brodowski continue; \
752ffd81dcfSDominik Brodowski else
753ffd81dcfSDominik Brodowski
7541f39fa0dSVincent Donnefort /**
7551f39fa0dSVincent Donnefort * cpufreq_for_each_efficient_entry_idx - iterate with index over a cpufreq
7561f39fa0dSVincent Donnefort * frequency_table excluding CPUFREQ_ENTRY_INVALID and
7571f39fa0dSVincent Donnefort * CPUFREQ_INEFFICIENT_FREQ frequencies.
7581f39fa0dSVincent Donnefort * @pos: the &struct cpufreq_frequency_table to use as a loop cursor.
7591f39fa0dSVincent Donnefort * @table: the &struct cpufreq_frequency_table to iterate over.
7601f39fa0dSVincent Donnefort * @idx: the table entry currently being processed.
7611f39fa0dSVincent Donnefort * @efficiencies: set to true to only iterate over efficient frequencies.
7621f39fa0dSVincent Donnefort */
7631f39fa0dSVincent Donnefort
7641f39fa0dSVincent Donnefort #define cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) \
7651f39fa0dSVincent Donnefort cpufreq_for_each_valid_entry_idx(pos, table, idx) \
7661f39fa0dSVincent Donnefort if (efficiencies && (pos->flags & CPUFREQ_INEFFICIENT_FREQ)) \
7671f39fa0dSVincent Donnefort continue; \
7681f39fa0dSVincent Donnefort else
7691f39fa0dSVincent Donnefort
770ffd81dcfSDominik Brodowski
7711da177e4SLinus Torvalds int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
7721da177e4SLinus Torvalds struct cpufreq_frequency_table *table);
7731da177e4SLinus Torvalds
7741e4f63aeSRafael J. Wysocki int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
7751da177e4SLinus Torvalds struct cpufreq_frequency_table *table);
7761e4f63aeSRafael J. Wysocki int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);
7771da177e4SLinus Torvalds
778da0c6dc0SViresh Kumar int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
779*b7902803SRafael J. Wysocki unsigned int target_freq, unsigned int min,
780*b7902803SRafael J. Wysocki unsigned int max, unsigned int relation);
781d3916691SViresh Kumar int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
782d3916691SViresh Kumar unsigned int freq);
7831da177e4SLinus Torvalds
78474aca95dSViresh Kumar ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
78574aca95dSViresh Kumar
7866f19efc0SLukasz Majewski #ifdef CONFIG_CPU_FREQ
78743c0226cSDhruva Gole bool cpufreq_boost_enabled(void);
7889a23eb8bSViresh Kumar int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state);
789da0c6dc0SViresh Kumar
790da0c6dc0SViresh Kumar /* Find lowest freq at or above target in a table in ascending order */
/* Find lowest freq at or above target in a table in ascending order */
static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
					      unsigned int target_freq,
					      bool efficiencies)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	int idx, best = -1;

	/*
	 * Ascending order: the first entry reaching @target_freq wins.
	 * Track the last entry seen so the highest available frequency is
	 * returned when nothing reaches the target (-1 on an empty table).
	 */
	cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
		if (pos->frequency >= target_freq)
			return idx;

		best = idx;
	}

	return best;
}
811da0c6dc0SViresh Kumar
812da0c6dc0SViresh Kumar /* Find lowest freq at or above target in a table in descending order */
/* Find lowest freq at or above target in a table in descending order */
static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
					      unsigned int target_freq,
					      bool efficiencies)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq > target_freq) {
			/* Still above target: remember the lowest such entry. */
			best = idx;
			continue;
		}

		/* No freq found above target_freq */
		if (best == -1)
			return idx;

		/* First entry below target: the previous one is the answer. */
		return best;
	}

	return best;
}
842da0c6dc0SViresh Kumar
/*
 * Clamp @target_freq into [min, max] and pick the lowest frequency at or
 * above it, dispatching on the table's sort order.
 */
static inline int find_index_l(struct cpufreq_policy *policy,
			       unsigned int target_freq,
			       unsigned int min, unsigned int max,
			       bool efficiencies)
{
	target_freq = clamp_val(target_freq, min, max);

	return policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING ?
		cpufreq_table_find_index_al(policy, target_freq, efficiencies) :
		cpufreq_table_find_index_dl(policy, target_freq, efficiencies);
}
857da0c6dc0SViresh Kumar
858*b7902803SRafael J. Wysocki /* Works only on sorted freq-tables */
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
					     unsigned int target_freq,
					     bool efficiencies)
{
	unsigned int min = policy->min, max = policy->max;

	/* Bound the lookup by the current policy limits. */
	return find_index_l(policy, target_freq, min, max, efficiencies);
}
865*b7902803SRafael J. Wysocki
866da0c6dc0SViresh Kumar /* Find highest freq at or below target in a table in ascending order */
/* Find highest freq at or below target in a table in ascending order */
static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
					      unsigned int target_freq,
					      bool efficiencies)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq < target_freq) {
			/* Still below target: remember the highest such entry. */
			best = idx;
			continue;
		}

		/* No freq found below target_freq */
		if (best == -1)
			return idx;

		/* First entry above target: the previous one is the answer. */
		return best;
	}

	return best;
}
896da0c6dc0SViresh Kumar
897da0c6dc0SViresh Kumar /* Find highest freq at or below target in a table in descending order */
/* Find highest freq at or below target in a table in descending order */
static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
					      unsigned int target_freq,
					      bool efficiencies)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	int idx, last = -1;

	/*
	 * Descending order: the first entry at or below @target_freq is the
	 * best match. If every entry exceeds the target, fall back to the
	 * last (lowest) one (-1 on an empty table).
	 */
	cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
		if (pos->frequency <= target_freq)
			return idx;

		last = idx;
	}

	return last;
}
918da0c6dc0SViresh Kumar
/*
 * Clamp @target_freq into [min, max] and pick the highest frequency at or
 * below it, dispatching on the table's sort order.
 */
static inline int find_index_h(struct cpufreq_policy *policy,
			       unsigned int target_freq,
			       unsigned int min, unsigned int max,
			       bool efficiencies)
{
	target_freq = clamp_val(target_freq, min, max);

	return policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING ?
		cpufreq_table_find_index_ah(policy, target_freq, efficiencies) :
		cpufreq_table_find_index_dh(policy, target_freq, efficiencies);
}
933da0c6dc0SViresh Kumar
934*b7902803SRafael J. Wysocki /* Works only on sorted freq-tables */
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
					     unsigned int target_freq,
					     bool efficiencies)
{
	unsigned int min = policy->min, max = policy->max;

	/* Bound the lookup by the current policy limits. */
	return find_index_h(policy, target_freq, min, max, efficiencies);
}
941*b7902803SRafael J. Wysocki
942da0c6dc0SViresh Kumar /* Find closest freq to target in a table in ascending order */
/* Find closest freq to target in a table in ascending order */
static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
					      unsigned int target_freq,
					      bool efficiencies)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq < target_freq) {
			/* Still below target: remember the highest such entry. */
			best = idx;
			continue;
		}

		/* No freq found below target_freq */
		if (best == -1)
			return idx;

		/* Choose the closest freq */
		if (target_freq - table[best].frequency > freq - target_freq)
			return idx;

		return best;
	}

	return best;
}
976da0c6dc0SViresh Kumar
977da0c6dc0SViresh Kumar /* Find closest freq to target in a table in descending order */
/* Find closest freq to target in a table in descending order */
static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
					      unsigned int target_freq,
					      bool efficiencies)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq > target_freq) {
			/* Still above target: remember the lowest such entry. */
			best = idx;
			continue;
		}

		/* No freq found above target_freq */
		if (best == -1)
			return idx;

		/* Choose the closest freq */
		if (table[best].frequency - target_freq > target_freq - freq)
			return idx;

		return best;
	}

	return best;
}
1011da0c6dc0SViresh Kumar
/*
 * Clamp @target_freq into [min, max] and pick the closest available
 * frequency, dispatching on the table's sort order.
 */
static inline int find_index_c(struct cpufreq_policy *policy,
			       unsigned int target_freq,
			       unsigned int min, unsigned int max,
			       bool efficiencies)
{
	target_freq = clamp_val(target_freq, min, max);

	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
		return cpufreq_table_find_index_ac(policy, target_freq,
						   efficiencies);
	else
		return cpufreq_table_find_index_dc(policy, target_freq,
						   efficiencies);
}
1026da0c6dc0SViresh Kumar
1027*b7902803SRafael J. Wysocki /* Works only on sorted freq-tables */
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
					     unsigned int target_freq,
					     bool efficiencies)
{
	unsigned int min = policy->min, max = policy->max;

	/* Bound the lookup by the current policy limits. */
	return find_index_c(policy, target_freq, min, max, efficiencies);
}
1034*b7902803SRafael J. Wysocki
cpufreq_is_in_limits(struct cpufreq_policy * policy,unsigned int min,unsigned int max,int idx)1035*b7902803SRafael J. Wysocki static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy,
1036*b7902803SRafael J. Wysocki unsigned int min, unsigned int max,
1037*b7902803SRafael J. Wysocki int idx)
1038d394abcbSShivnandan Kumar {
1039d394abcbSShivnandan Kumar unsigned int freq;
1040d394abcbSShivnandan Kumar
1041d394abcbSShivnandan Kumar if (idx < 0)
1042d394abcbSShivnandan Kumar return false;
1043d394abcbSShivnandan Kumar
1044d394abcbSShivnandan Kumar freq = policy->freq_table[idx].frequency;
1045d394abcbSShivnandan Kumar
1046*b7902803SRafael J. Wysocki return freq == clamp_val(freq, min, max);
1047d394abcbSShivnandan Kumar }
1048d394abcbSShivnandan Kumar
/*
 * cpufreq_frequency_table_target - find the frequency-table index matching
 * @target_freq under the [@min, @max] limits and @relation (L/H/C, possibly
 * OR'ed with CPUFREQ_RELATION_E to prefer efficient frequencies).
 *
 * If the efficiency-filtered lookup lands outside the limits, the lookup is
 * retried over all frequencies. Unsorted tables are delegated to
 * cpufreq_table_index_unsorted().
 */
static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
						 unsigned int target_freq,
						 unsigned int min,
						 unsigned int max,
						 unsigned int relation)
{
	bool efficiencies = policy->efficiencies_available &&
			    (relation & CPUFREQ_RELATION_E);
	int idx;

	/* cpufreq_table_index_unsorted() has no use for this flag anyway */
	relation &= ~CPUFREQ_RELATION_E;

	if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
		return cpufreq_table_index_unsorted(policy, target_freq, min,
						    max, relation);
retry:
	switch (relation) {
	case CPUFREQ_RELATION_L:
		idx = find_index_l(policy, target_freq, min, max, efficiencies);
		break;
	case CPUFREQ_RELATION_H:
		idx = find_index_h(policy, target_freq, min, max, efficiencies);
		break;
	case CPUFREQ_RELATION_C:
		idx = find_index_c(policy, target_freq, min, max, efficiencies);
		break;
	default:
		/* Unknown relation: warn once and fall back to index 0. */
		WARN_ON_ONCE(1);
		return 0;
	}

	/* Limit frequency index to honor min and max */
	if (!cpufreq_is_in_limits(policy, min, max, idx) && efficiencies) {
		efficiencies = false;
		goto retry;
	}

	return idx;
}
108955d85293SViresh Kumar
cpufreq_table_count_valid_entries(const struct cpufreq_policy * policy)109055d85293SViresh Kumar static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy)
109155d85293SViresh Kumar {
109255d85293SViresh Kumar struct cpufreq_frequency_table *pos;
109355d85293SViresh Kumar int count = 0;
109455d85293SViresh Kumar
109555d85293SViresh Kumar if (unlikely(!policy->freq_table))
109655d85293SViresh Kumar return 0;
109755d85293SViresh Kumar
109855d85293SViresh Kumar cpufreq_for_each_valid_entry(pos, policy->freq_table)
109955d85293SViresh Kumar count++;
110055d85293SViresh Kumar
110155d85293SViresh Kumar return count;
110255d85293SViresh Kumar }
11038486a32dSHector.Yuan
1104442d24a5SVincent Donnefort /**
1105442d24a5SVincent Donnefort * cpufreq_table_set_inefficient() - Mark a frequency as inefficient
1106442d24a5SVincent Donnefort * @policy: the &struct cpufreq_policy containing the inefficient frequency
1107442d24a5SVincent Donnefort * @frequency: the inefficient frequency
1108442d24a5SVincent Donnefort *
1109442d24a5SVincent Donnefort * The &struct cpufreq_policy must use a sorted frequency table
1110442d24a5SVincent Donnefort *
1111442d24a5SVincent Donnefort * Return: %0 on success or a negative errno code
1112442d24a5SVincent Donnefort */
1113442d24a5SVincent Donnefort
1114442d24a5SVincent Donnefort static inline int
cpufreq_table_set_inefficient(struct cpufreq_policy * policy,unsigned int frequency)1115442d24a5SVincent Donnefort cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
1116442d24a5SVincent Donnefort unsigned int frequency)
1117442d24a5SVincent Donnefort {
1118442d24a5SVincent Donnefort struct cpufreq_frequency_table *pos;
1119442d24a5SVincent Donnefort
1120442d24a5SVincent Donnefort /* Not supported */
1121442d24a5SVincent Donnefort if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)
1122442d24a5SVincent Donnefort return -EINVAL;
1123442d24a5SVincent Donnefort
1124442d24a5SVincent Donnefort cpufreq_for_each_valid_entry(pos, policy->freq_table) {
1125442d24a5SVincent Donnefort if (pos->frequency == frequency) {
1126442d24a5SVincent Donnefort pos->flags |= CPUFREQ_INEFFICIENT_FREQ;
11271f39fa0dSVincent Donnefort policy->efficiencies_available = true;
1128442d24a5SVincent Donnefort return 0;
1129442d24a5SVincent Donnefort }
1130442d24a5SVincent Donnefort }
1131442d24a5SVincent Donnefort
1132442d24a5SVincent Donnefort return -EINVAL;
1133442d24a5SVincent Donnefort }
1134442d24a5SVincent Donnefort
parse_perf_domain(int cpu,const char * list_name,const char * cell_name,struct of_phandle_args * args)11358486a32dSHector.Yuan static inline int parse_perf_domain(int cpu, const char *list_name,
1136d182dc6dSHector Martin const char *cell_name,
1137d182dc6dSHector Martin struct of_phandle_args *args)
11388486a32dSHector.Yuan {
11398486a32dSHector.Yuan int ret;
11408486a32dSHector.Yuan
1141c0f02536SMiquel Sabaté Solà struct device_node *cpu_np __free(device_node) = of_cpu_device_node_get(cpu);
11428486a32dSHector.Yuan if (!cpu_np)
11438486a32dSHector.Yuan return -ENODEV;
11448486a32dSHector.Yuan
11458486a32dSHector.Yuan ret = of_parse_phandle_with_args(cpu_np, list_name, cell_name, 0,
1146d182dc6dSHector Martin args);
11478486a32dSHector.Yuan if (ret < 0)
11488486a32dSHector.Yuan return ret;
1149d182dc6dSHector Martin return 0;
11508486a32dSHector.Yuan }
11518486a32dSHector.Yuan
/*
 * of_perf_domain_get_sharing_cpumask - find all CPUs sharing @pcpu's
 * performance domain.
 *
 * Parses @pcpu's domain phandle into @pargs, then sets in @cpumask the bit
 * of every possible CPU whose parsed phandle args compare equal to @pargs.
 * CPUs whose parse fails are silently skipped (best effort).
 *
 * On success the reference in pargs->np is handed to the caller, which is
 * responsible for dropping it; the temporary per-CPU references are dropped
 * here.
 */
static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
						     const char *cell_name, struct cpumask *cpumask,
						     struct of_phandle_args *pargs)
{
	int cpu, ret;
	struct of_phandle_args args;

	ret = parse_perf_domain(pcpu, list_name, cell_name, pargs);
	if (ret < 0)
		return ret;

	/* The anchor CPU always belongs to its own domain. */
	cpumask_set_cpu(pcpu, cpumask);

	for_each_possible_cpu(cpu) {
		if (cpu == pcpu)
			continue;

		/* Best effort: a CPU without a parsable domain is just not shared. */
		ret = parse_perf_domain(cpu, list_name, cell_name, &args);
		if (ret < 0)
			continue;

		if (of_phandle_args_equal(pargs, &args))
			cpumask_set_cpu(cpu, cpumask);

		/* Drop the reference taken by the successful parse above. */
		of_node_put(args.np);
	}

	return 0;
}
11816f19efc0SLukasz Majewski #else
/*
 * Stub for when this cpufreq feature set is compiled out (this sits under
 * the #else of the config guard above — presumably !CONFIG_CPU_FREQ, the
 * guard's opening is outside this view): boost is never enabled.
 */
static inline bool cpufreq_boost_enabled(void)
{
	return false;
}
118644139ed4SViresh Kumar
/* Stub for the compiled-out case: software boost control is unsupported. */
static inline int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
	return -EOPNOTSUPP;
}
11918486a32dSHector.Yuan
/* Stub for the compiled-out case: inefficient frequencies cannot be marked. */
static inline int
cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
			      unsigned int frequency)
{
	return -EINVAL;
}
1198442d24a5SVincent Donnefort
/* Stub for the compiled-out case: perf-domain lookup is unsupported. */
static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
						     const char *cell_name, struct cpumask *cpumask,
						     struct of_phandle_args *pargs)
{
	return -EOPNOTSUPP;
}
12056f19efc0SLukasz Majewski #endif
12061da177e4SLinus Torvalds
120738e480d4SBeata Michalska extern int arch_freq_get_on_cpu(int cpu);
1208f8475cefSLen Brown
#ifndef arch_set_freq_scale
/*
 * Weak default: architectures that track a frequency scale factor define
 * their own arch_set_freq_scale (and the macro of the same name); this
 * empty fallback makes the call a no-op everywhere else.
 */
static __always_inline
void arch_set_freq_scale(const struct cpumask *cpus,
			 unsigned long cur_freq,
			 unsigned long max_freq)
{
}
#endif
1217599457baSVincent Guittot
12181da177e4SLinus Torvalds /* the following are really really optional */
12191da177e4SLinus Torvalds extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
122021c36d35SBartlomiej Zolnierkiewicz extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
1221d417e069SViresh Kumar int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);
1222f4fd3797SLan Tianyu
1223652ed95dSViresh Kumar unsigned int cpufreq_generic_get(unsigned int cpu);
1224c4dcc8a1SViresh Kumar void cpufreq_generic_init(struct cpufreq_policy *policy,
122570e9e778SViresh Kumar struct cpufreq_frequency_table *table,
122670e9e778SViresh Kumar unsigned int transition_latency);
1227c17495b0SViresh Kumar
/*
 * Convenience wrapper: register an OPP-based energy model covering all CPUs
 * related to @policy, keyed off policy->cpu's device.
 */
static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
{
	dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
				  policy->related_cpus);
}
12331da177e4SLinus Torvalds #endif /* _LINUX_CPUFREQ_H */
1234