/*
 * cpuidle.h - a generic framework for CPU idle power management
 *
 * (C) 2007 Venkatesh Pallipadi <[email protected]>
 *          Shaohua Li <[email protected]>
 *          Adam Belay <[email protected]>
 *
 * This code is licenced under the GPL.
 */

#ifndef _LINUX_CPUIDLE_H
#define _LINUX_CPUIDLE_H

#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/hrtimer.h>

#define CPUIDLE_STATE_MAX	10
#define CPUIDLE_NAME_LEN	16
#define CPUIDLE_DESC_LEN	32

struct module;

struct cpuidle_device;
struct cpuidle_driver;


/****************************
 * CPUIDLE DEVICE INTERFACE *
 ****************************/

struct cpuidle_state_usage {
	unsigned long long	disable;
	unsigned long long	usage;
	unsigned long long	time; /* in US */
	unsigned long long	above; /* Number of times it's been too deep */
	unsigned long long	below; /* Number of times it's been too shallow */
#ifdef CONFIG_SUSPEND
	unsigned long long	s2idle_usage;
	unsigned long long	s2idle_time; /* in US */
#endif
};

struct cpuidle_state {
	char		name[CPUIDLE_NAME_LEN];
	char		desc[CPUIDLE_DESC_LEN];

	unsigned int	flags;
	unsigned int	exit_latency; /* in US */
	int		power_usage; /* in mW */
	unsigned int	target_residency; /* in US */
	bool		disabled; /* disabled on all CPUs */

	int (*enter)	(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv,
			 int index);

	int (*enter_dead) (struct cpuidle_device *dev, int index);

	/*
	 * CPUs execute ->enter_s2idle with the local tick or entire timekeeping
	 * suspended, so it must not re-enable interrupts at any point (even
	 * temporarily) or attempt to change states of clock event devices.
	 */
	void (*enter_s2idle) (struct cpuidle_device *dev,
			      struct cpuidle_driver *drv,
			      int index);
};

/* Idle State Flags */
#define CPUIDLE_FLAG_NONE	(0x00)
#define CPUIDLE_FLAG_POLLING	(0x01) /* polling state */
#define CPUIDLE_FLAG_COUPLED	(0x02) /* state applies to multiple cpus */
#define CPUIDLE_FLAG_TIMER_STOP	(0x04) /* timer is stopped on this state */

#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)

struct cpuidle_device_kobj;
struct cpuidle_state_kobj;
struct cpuidle_driver_kobj;

struct cpuidle_device {
	unsigned int		registered:1;
	unsigned int		enabled:1;
	unsigned int		use_deepest_state:1;
	unsigned int		poll_time_limit:1;
	unsigned int		cpu;

	int			last_residency;
	struct cpuidle_state_usage	states_usage[CPUIDLE_STATE_MAX];
	struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
	struct cpuidle_driver_kobj *kobj_driver;
	struct cpuidle_device_kobj *kobj_dev;
	struct list_head	device_list;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
	cpumask_t		coupled_cpus;
	struct cpuidle_coupled	*coupled;
#endif
};

DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);

/****************************
 * CPUIDLE DRIVER INTERFACE *
 ****************************/

struct cpuidle_driver {
	const char		*name;
	struct module		*owner;
	int			refcnt;

	/* used by the cpuidle framework to setup the broadcast timer */
	unsigned int		bctimer:1;
	/* states array must be ordered in decreasing power consumption */
	struct cpuidle_state	states[CPUIDLE_STATE_MAX];
	int			state_count;
	int			safe_state_index;

	/* the driver handles the cpus in cpumask */
	struct cpumask		*cpumask;
};
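
/*
 * Example (illustrative sketch, not part of the original header): a platform
 * driver would typically describe its idle states in a statically
 * initialized struct cpuidle_driver and hand it to cpuidle_register(),
 * declared below.  The driver name, the latency/residency values (in
 * microseconds) and the my_soc_enter() callback are hypothetical
 * placeholders.
 *
 *	static struct cpuidle_driver my_soc_idle_driver = {
 *		.name	= "my_soc_idle",
 *		.owner	= THIS_MODULE,
 *		.states = {
 *			[0] = {
 *				.name			= "WFI",
 *				.desc			= "wait for interrupt",
 *				.exit_latency		= 1,
 *				.target_residency	= 1,
 *				.enter			= my_soc_enter,
 *			},
 *			[1] = {
 *				.name			= "RET",
 *				.desc			= "core retention",
 *				.flags			= CPUIDLE_FLAG_TIMER_STOP,
 *				.exit_latency		= 300,
 *				.target_residency	= 1000,
 *				.enter			= my_soc_enter,
 *			},
 *		},
 *		.state_count = 2,
 *	};
 *
 *	ret = cpuidle_register(&my_soc_idle_driver, NULL);
 */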

#ifdef CONFIG_CPU_IDLE
extern void disable_cpuidle(void);
extern bool cpuidle_not_available(struct cpuidle_driver *drv,
				  struct cpuidle_device *dev);

extern int cpuidle_select(struct cpuidle_driver *drv,
			  struct cpuidle_device *dev,
			  bool *stop_tick);
extern int cpuidle_enter(struct cpuidle_driver *drv,
			 struct cpuidle_device *dev, int index);
extern void cpuidle_reflect(struct cpuidle_device *dev, int index);

extern int cpuidle_register_driver(struct cpuidle_driver *drv);
extern struct cpuidle_driver *cpuidle_get_driver(void);
extern struct cpuidle_driver *cpuidle_driver_ref(void);
extern void cpuidle_driver_unref(void);
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
extern int cpuidle_register_device(struct cpuidle_device *dev);
extern void cpuidle_unregister_device(struct cpuidle_device *dev);
extern int cpuidle_register(struct cpuidle_driver *drv,
			    const struct cpumask *const coupled_cpus);
extern void cpuidle_unregister(struct cpuidle_driver *drv);
extern void cpuidle_pause_and_lock(void);
extern void cpuidle_resume_and_unlock(void);
extern void cpuidle_pause(void);
extern void cpuidle_resume(void);
extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_play_dead(void);

extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
static inline struct cpuidle_device *cpuidle_get_device(void)
{return __this_cpu_read(cpuidle_devices); }
#else
static inline void disable_cpuidle(void) { }
static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
					 struct cpuidle_device *dev)
{return true; }
static inline int cpuidle_select(struct cpuidle_driver *drv,
				 struct cpuidle_device *dev, bool *stop_tick)
{return -ENODEV; }
static inline int cpuidle_enter(struct cpuidle_driver *drv,
				struct cpuidle_device *dev, int index)
{return -ENODEV; }
static inline void cpuidle_reflect(struct cpuidle_device *dev, int index) { }
static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
{return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; }
static inline void cpuidle_driver_unref(void) {}
static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
static inline int cpuidle_register_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { }
static inline int cpuidle_register(struct cpuidle_driver *drv,
				   const struct cpumask *const coupled_cpus)
{return -ENODEV; }
static inline void cpuidle_unregister(struct cpuidle_driver *drv) { }
static inline void cpuidle_pause_and_lock(void) { }
static inline void cpuidle_resume_and_unlock(void) { }
static inline void cpuidle_pause(void) { }
static inline void cpuidle_resume(void) { }
static inline int cpuidle_enable_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
static inline int cpuidle_play_dead(void) {return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
	struct cpuidle_device *dev) {return NULL; }
static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#endif
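
/*
 * Example (illustrative sketch, not part of the original header): the idle
 * loop in kernel/sched/idle.c is the expected caller of cpuidle_select(),
 * cpuidle_enter() and cpuidle_reflect().  Roughly, with error handling and
 * tick management omitted:
 *
 *	struct cpuidle_device *dev = cpuidle_get_device();
 *	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 *	bool stop_tick = true;
 *	int index, entered;
 *
 *	index = cpuidle_select(drv, dev, &stop_tick);
 *	entered = cpuidle_enter(drv, dev, index);
 *	cpuidle_reflect(dev, entered);
 *
 * cpuidle_select() asks the current governor for a state index,
 * cpuidle_enter() runs that state's ->enter callback, and cpuidle_reflect()
 * reports the entered state back to the governor so it can refine future
 * decisions.
 */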

#ifdef CONFIG_CPU_IDLE
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
				      struct cpuidle_device *dev);
extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
				struct cpuidle_device *dev);
extern void cpuidle_use_deepest_state(bool enable);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
					     struct cpuidle_device *dev)
{return -ENODEV; }
static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
				       struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_use_deepest_state(bool enable)
{
}
#endif

/* kernel/sched/idle.c */
extern void sched_idle_set_state(struct cpuidle_state *idle_state);
extern void default_idle_call(void);

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
#else
static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
}
#endif

#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX)
void cpuidle_poll_state_init(struct cpuidle_driver *drv);
#else
static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {}
#endif

/******************************
 * CPUIDLE GOVERNOR INTERFACE *
 ******************************/

struct cpuidle_governor {
	char			name[CPUIDLE_NAME_LEN];
	struct list_head	governor_list;
	unsigned int		rating;

	int  (*enable)		(struct cpuidle_driver *drv,
				 struct cpuidle_device *dev);
	void (*disable)		(struct cpuidle_driver *drv,
				 struct cpuidle_device *dev);

	int  (*select)		(struct cpuidle_driver *drv,
				 struct cpuidle_device *dev,
				 bool *stop_tick);
	void (*reflect)		(struct cpuidle_device *dev, int index);
};

#ifdef CONFIG_CPU_IDLE
extern int cpuidle_register_governor(struct cpuidle_governor *gov);
extern int cpuidle_governor_latency_req(unsigned int cpu);
#else
static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;}
#endif

#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \
({									\
	int __ret = 0;							\
									\
	if (!idx) {							\
		cpu_do_idle();						\
		return idx;						\
	}								\
									\
	if (!is_retention)						\
		__ret = cpu_pm_enter();					\
	if (!__ret) {							\
		__ret = low_level_idle_enter(idx);			\
		if (!is_retention)					\
			cpu_pm_exit();					\
	}								\
									\
	__ret ? -1 : idx;						\
})

#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0)

#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1)

#endif /* _LINUX_CPUIDLE_H */
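
/*
 * Example (illustrative sketch, not part of the original header): the
 * CPU_PM_CPU_IDLE_ENTER() helpers above are intended for use in a driver's
 * ->enter callback; my_soc_suspend() is a hypothetical low-level firmware
 * call taking the state index.
 *
 *	static int my_soc_enter(struct cpuidle_device *dev,
 *				struct cpuidle_driver *drv, int index)
 *	{
 *		return CPU_PM_CPU_IDLE_ENTER(my_soc_suspend, index);
 *	}
 *
 * For index 0 the macro simply calls cpu_do_idle(); for deeper states it
 * brackets the low-level call with cpu_pm_enter()/cpu_pm_exit() (skipped by
 * the _RETENTION variant) and evaluates to the entered index, or -1 if
 * cpu_pm_enter() or the low-level call failed.
 */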