/*
 * linux/include/linux/cpufreq.h
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2002 - 2003 Dominik Brodowski <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _LINUX_CPUFREQ_H
#define _LINUX_CPUFREQ_H

#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>

/*********************************************************************
 *                        CPUFREQ INTERFACE                          *
 *********************************************************************/
/*
 * Frequency values here are CPU kHz
 *
 * Maximum transition latency is in nanoseconds - if it's unknown,
 * CPUFREQ_ETERNAL shall be used.
 */

#define CPUFREQ_ETERNAL		(-1)
#define CPUFREQ_NAME_LEN	16
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
#define CPUFREQ_NAME_PLEN	(CPUFREQ_NAME_LEN + 1)

struct cpufreq_governor;

enum cpufreq_table_sorting {
	CPUFREQ_TABLE_UNSORTED,
	CPUFREQ_TABLE_SORTED_ASCENDING,
	CPUFREQ_TABLE_SORTED_DESCENDING
};

struct cpufreq_cpuinfo {
	unsigned int		max_freq;
	unsigned int		min_freq;

	/* in 10^(-9) s = nanoseconds */
	unsigned int		transition_latency;
};

struct cpufreq_user_policy {
	unsigned int		min;	/* in kHz */
	unsigned int		max;	/* in kHz */
};

struct cpufreq_policy {
	/* CPUs sharing clock, require sw coordination */
	cpumask_var_t		cpus;	/* Online CPUs only */
	cpumask_var_t		related_cpus; /* Online + Offline CPUs */
	cpumask_var_t		real_cpus; /* Related and present */

	unsigned int		shared_type; /* ACPI: ANY or ALL affected CPUs
						should set cpufreq */
	unsigned int		cpu;	/* cpu managing this policy, must be online */

	struct clk		*clk;
	struct cpufreq_cpuinfo	cpuinfo; /* see above */

	unsigned int		min;	/* in kHz */
	unsigned int		max;	/* in kHz */
	unsigned int		cur;	/* in kHz, only needed if cpufreq
					 * governors are used */
	unsigned int		restore_freq; /* = policy->cur before transition */
	unsigned int		suspend_freq; /* freq to set during suspend */

	unsigned int		policy; /* see above */
	unsigned int		last_policy; /* policy before unplug */
	struct cpufreq_governor	*governor; /* see below */
	void			*governor_data;
	char			last_governor[CPUFREQ_NAME_LEN]; /* last governor used */

	struct work_struct	update; /* if update_policy() needs to be
					 * called, but you're in IRQ context */

	struct cpufreq_user_policy user_policy;
	struct cpufreq_frequency_table	*freq_table;
	enum cpufreq_table_sorting freq_table_sorted;

	struct list_head	policy_list;
	struct kobject		kobj;
	struct completion	kobj_unregister;

	/*
	 * The rules for this semaphore:
	 * - Any routine that wants to read from the policy structure will
	 *   do a down_read on this semaphore.
	 * - Any routine that will write to the policy structure and/or may take away
	 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
	 *   mode before doing so.
	 */
	struct rw_semaphore	rwsem;

	/*
	 * Fast switch flags:
	 * - fast_switch_possible should be set by the driver if it can
	 *   guarantee that frequency can be changed on any CPU sharing the
	 *   policy and that the change will affect all of the policy CPUs then.
	 * - fast_switch_enabled is to be set by governors that support fast
	 *   frequency switching with the help of cpufreq_enable_fast_switch().
	 */
	bool			fast_switch_possible;
	bool			fast_switch_enabled;

	/*
	 * Preferred average time interval between consecutive invocations of
	 * the driver to set the frequency for this policy. To be set by the
	 * scaling driver (0, which is the default, means no preference).
	 */
	unsigned int		transition_delay_us;

	/*
	 * Remote DVFS flag (Not added to the driver structure as we don't want
	 * to access another structure from scheduler hotpath).
	 *
	 * Should be set if CPUs can do DVFS on behalf of other CPUs from
	 * different cpufreq policies.
	 */
	bool			dvfs_possible_from_any_cpu;

	/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
	unsigned int		cached_target_freq;
	int			cached_resolved_idx;

	/* Synchronization for frequency transitions */
	bool			transition_ongoing; /* Tracks transition status */
	spinlock_t		transition_lock;
	wait_queue_head_t	transition_wait;
	struct task_struct	*transition_task; /* Task which is doing the transition */

	/* cpufreq-stats */
	struct cpufreq_stats	*stats;

	/* For cpufreq driver's internal use */
	void			*driver_data;

	/* Pointer to the cooling device if used for thermal mitigation */
	struct thermal_cooling_device *cdev;
};
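/*
 * Example (illustrative sketch, not part of this header): the rwsem rules
 * documented above in practice. A reader of the policy structure takes the
 * semaphore in read mode:
 *
 *	down_read(&policy->rwsem);
 *	cur = policy->cur;
 *	up_read(&policy->rwsem);
 *
 * while anything that updates or tears down the policy holds it via
 * down_write(&policy->rwsem) / up_write(&policy->rwsem) instead.
 */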
struct cpufreq_freqs {
	struct cpufreq_policy *policy;
	unsigned int old;
	unsigned int new;
	u8 flags;		/* flags of cpufreq_driver, see below. */
};

/* Only for ACPI */
#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
#define CPUFREQ_SHARED_TYPE_HW	 (1) /* HW does needed coordination */
#define CPUFREQ_SHARED_TYPE_ALL	 (2) /* All dependent CPUs should set freq */
#define CPUFREQ_SHARED_TYPE_ANY	 (3) /* Freq can be set from any dependent CPU */

#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
#else
static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	return NULL;
}
static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	return NULL;
}
static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
#endif

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}

static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
	return cpumask_weight(policy->cpus) > 1;
}

/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
extern struct kobject *cpufreq_global_kobject;
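/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * the usual cpufreq_cpu_get()/cpufreq_cpu_put() pattern. A successful get
 * takes a reference on the policy object, so it must always be balanced
 * by a put.
 */
static inline unsigned int cpufreq_example_read_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int max = 0;

	if (policy) {
		max = policy->max;
		cpufreq_cpu_put(policy);	/* balance the get above */
	}

	return max;
}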
#ifdef CONFIG_CPU_FREQ
unsigned int cpufreq_get(unsigned int cpu);
unsigned int cpufreq_quick_get(unsigned int cpu);
unsigned int cpufreq_quick_get_max(unsigned int cpu);
void disable_cpufreq(void);

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);

struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
void cpufreq_cpu_release(struct cpufreq_policy *policy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
int cpufreq_set_policy(struct cpufreq_policy *policy,
		       struct cpufreq_policy *new_policy);
void cpufreq_update_policy(unsigned int cpu);
void cpufreq_update_limits(unsigned int cpu);
bool have_governor_per_policy(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
#else
static inline unsigned int cpufreq_get(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_quick_get(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	return 0;
}
static inline void disable_cpufreq(void) { }
#endif

#ifdef CONFIG_CPU_FREQ_STAT
void cpufreq_stats_create_table(struct cpufreq_policy *policy);
void cpufreq_stats_free_table(struct cpufreq_policy *policy);
void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
				     unsigned int new_freq);
#else
static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { }
static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { }
static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
						   unsigned int new_freq) { }
#endif /* CONFIG_CPU_FREQ_STAT */

/*********************************************************************
 *                      CPUFREQ DRIVER INTERFACE                     *
 *********************************************************************/

#define CPUFREQ_RELATION_L 0  /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H 1  /* highest frequency below or at target */
#define CPUFREQ_RELATION_C 2  /* closest frequency to target */

struct freq_attr {
	struct attribute attr;
	ssize_t (*show)(struct cpufreq_policy *, char *);
	ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
};

#define cpufreq_freq_attr_ro(_name)		\
static struct freq_attr _name =			\
__ATTR(_name, 0444, show_##_name, NULL)

#define cpufreq_freq_attr_ro_perm(_name, _perm)	\
static struct freq_attr _name =			\
__ATTR(_name, _perm, show_##_name, NULL)

#define cpufreq_freq_attr_rw(_name)		\
static struct freq_attr _name =			\
__ATTR(_name, 0644, show_##_name, store_##_name)

#define cpufreq_freq_attr_wo(_name)		\
static struct freq_attr _name =			\
__ATTR(_name, 0200, NULL, store_##_name)

#define define_one_global_ro(_name)		\
static struct kobj_attribute _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_global_rw(_name)		\
static struct kobj_attribute _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)
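/*
 * Example (illustrative sketch, hypothetical attribute): the macros above
 * expect show_<name>()/store_<name>() callbacks and generate the matching
 * struct freq_attr. A read-only "example_freq" sysfs file would look like:
 *
 *	static ssize_t show_example_freq(struct cpufreq_policy *policy,
 *					 char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->cur);
 *	}
 *	cpufreq_freq_attr_ro(example_freq);
 */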
struct cpufreq_driver {
	char		name[CPUFREQ_NAME_LEN];
	u8		flags;
	void		*driver_data;

	/* needed by all drivers */
	int		(*init)(struct cpufreq_policy *policy);
	int		(*verify)(struct cpufreq_policy *policy);

	/* define one out of two */
	int		(*setpolicy)(struct cpufreq_policy *policy);

	/*
	 * On failure, should always restore frequency to policy->restore_freq
	 * (i.e. old freq).
	 */
	int		(*target)(struct cpufreq_policy *policy,
				  unsigned int target_freq,
				  unsigned int relation);	/* Deprecated */
	int		(*target_index)(struct cpufreq_policy *policy,
					unsigned int index);
	unsigned int	(*fast_switch)(struct cpufreq_policy *policy,
				       unsigned int target_freq);

	/*
	 * Caches and returns the lowest driver-supported frequency greater
	 * than or equal to the target frequency, subject to any driver
	 * limitations. Does not set the frequency. Only to be implemented
	 * for drivers with target().
	 */
	unsigned int	(*resolve_freq)(struct cpufreq_policy *policy,
					unsigned int target_freq);

	/*
	 * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
	 * unset.
	 *
	 * get_intermediate should return a stable intermediate frequency
	 * platform wants to switch to and target_intermediate() should set CPU
	 * to that frequency, before jumping to the frequency corresponding
	 * to 'index'. Core will take care of sending notifications and driver
	 * doesn't have to handle them in target_intermediate() or
	 * target_index().
	 *
	 * Drivers can return '0' from get_intermediate() in case they don't
	 * wish to switch to intermediate frequency for some target frequency.
	 * In that case core will directly call ->target_index().
	 */
	unsigned int	(*get_intermediate)(struct cpufreq_policy *policy,
					    unsigned int index);
	int		(*target_intermediate)(struct cpufreq_policy *policy,
					       unsigned int index);

	/* should be defined, if possible */
	unsigned int	(*get)(unsigned int cpu);

	/* Called to update policy limits on firmware notifications. */
	void		(*update_limits)(unsigned int cpu);

	/* optional */
	int		(*bios_limit)(int cpu, unsigned int *limit);

	int		(*online)(struct cpufreq_policy *policy);
	int		(*offline)(struct cpufreq_policy *policy);
	int		(*exit)(struct cpufreq_policy *policy);
	void		(*stop_cpu)(struct cpufreq_policy *policy);
	int		(*suspend)(struct cpufreq_policy *policy);
	int		(*resume)(struct cpufreq_policy *policy);

	/* Will be called after the driver is fully initialized */
	void		(*ready)(struct cpufreq_policy *policy);

	struct freq_attr **attr;

	/* platform specific boost support code */
	bool		boost_enabled;
	int		(*set_boost)(int state);
};
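/*
 * Illustrative sketch (all names hypothetical): the minimal set of
 * callbacks a ->target_index() style driver provides, reusing the generic
 * helpers declared in this header.
 *
 *	static int foo_target_index(struct cpufreq_policy *policy,
 *				    unsigned int index)
 *	{
 *		return foo_write_pll(policy->freq_table[index].frequency);
 *	}
 *
 *	static struct cpufreq_driver foo_driver = {
 *		.name		= "foo-cpufreq",
 *		.init		= foo_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= foo_target_index,
 *		.get		= cpufreq_generic_get,
 *		.attr		= cpufreq_generic_attr,
 *	};
 */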
/* flags */

/* driver isn't removed even if all ->init() calls failed */
#define CPUFREQ_STICKY				BIT(0)

/* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */
#define CPUFREQ_CONST_LOOPS			BIT(1)

/* don't warn on suspend/resume speed mismatches */
#define CPUFREQ_PM_NO_WARN			BIT(2)

/*
 * This should be set by platforms having multiple clock-domains, i.e.
 * supporting multiple policies. With this, the governor's sysfs directories
 * are created in the cpu/cpu<num>/cpufreq/ directory, so different clusters
 * can use the same governor with different tunables.
 */
#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY	BIT(3)

/*
 * Driver will do POSTCHANGE notifications from outside of their ->target()
 * routine and so must set cpufreq_driver->flags with this flag, so that core
 * can handle them specially.
 */
#define CPUFREQ_ASYNC_NOTIFICATION		BIT(4)

/*
 * Set by drivers which want cpufreq core to check if CPU is running at a
 * frequency present in the freq-table exposed by the driver. For these
 * drivers, if the CPU is found running at an out-of-table freq, the core
 * will try to set it to a freq from the table, and if that fails, it will
 * stop further boot by issuing a BUG_ON().
 */
#define CPUFREQ_NEED_INITIAL_FREQ_CHECK	BIT(5)

/*
 * Set by drivers to disallow use of governors with "dynamic_switching" flag
 * set.
 */
#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING	BIT(6)

/*
 * Set by drivers that want the core to automatically register the cpufreq
 * driver as a thermal cooling device.
 */
#define CPUFREQ_IS_COOLING_DEV			BIT(7)

int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);

const char *cpufreq_get_current_driver(void);
void *cpufreq_get_driver_data(void);

static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
		unsigned int min, unsigned int max)
{
	if (policy->min < min)
		policy->min = min;
	if (policy->max < min)
		policy->max = min;
	if (policy->min > max)
		policy->min = max;
	if (policy->max > max)
		policy->max = max;
	if (policy->min > policy->max)
		policy->min = policy->max;
	return;
}

static inline void
cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
			policy->cpuinfo.max_freq);
}

#ifdef CONFIG_CPU_FREQ
void cpufreq_suspend(void);
void cpufreq_resume(void);
int cpufreq_generic_suspend(struct cpufreq_policy *policy);
#else
static inline void cpufreq_suspend(void) {}
static inline void cpufreq_resume(void) {}
#endif

/*********************************************************************
 *                     CPUFREQ NOTIFIER INTERFACE                    *
 *********************************************************************/

#define CPUFREQ_TRANSITION_NOTIFIER	(0)
#define CPUFREQ_POLICY_NOTIFIER		(1)

/* Transition notifiers */
#define CPUFREQ_PRECHANGE		(0)
#define CPUFREQ_POSTCHANGE		(1)

/* Policy Notifiers */
#define CPUFREQ_ADJUST			(0)
#define CPUFREQ_NOTIFY			(1)

#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs);
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed);

#else /* CONFIG_CPU_FREQ */
static inline int cpufreq_register_notifier(struct notifier_block *nb,
						unsigned int list)
{
	return 0;
}
static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
						unsigned int list)
{
	return 0;
}
#endif /* !CONFIG_CPU_FREQ */

/**
 * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
 * safe)
 * @old:  old value
 * @div:  divisor
 * @mult: multiplier
 *
 * new = old * mult / div
 */
static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
		u_int mult)
{
#if BITS_PER_LONG == 32
	u64 result = ((u64) old) * ((u64) mult);
	do_div(result, div);
	return (unsigned long) result;

#elif BITS_PER_LONG == 64
	unsigned long result = old * ((u64) mult);
	result /= div;
	return result;
#endif
}
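/*
 * Example (illustrative sketch, hypothetical names): a
 * CPUFREQ_TRANSITION_NOTIFIER callback that rescales a reference value
 * proportional to the CPU frequency, which is the classic use of
 * cpufreq_scale(). The notifier would be registered with
 * cpufreq_register_notifier(&nb, CPUFREQ_TRANSITION_NOTIFIER).
 *
 *	static int foo_freq_notify(struct notifier_block *nb,
 *				   unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			foo_khz = cpufreq_scale(foo_khz_ref, freqs->old,
 *						freqs->new);
 *		return NOTIFY_OK;
 *	}
 */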
/*********************************************************************
 *                         CPUFREQ GOVERNORS                         *
 *********************************************************************/

/*
 * If (cpufreq_driver->target) exists, the ->governor decides what frequency
 * within the limits is used. If (cpufreq_driver->setpolicy) exists, these
 * two generic policies are available:
 */
#define CPUFREQ_POLICY_POWERSAVE	(1)
#define CPUFREQ_POLICY_PERFORMANCE	(2)

/*
 * The polling frequency depends on the capability of the processor. Default
 * polling frequency is 1000 times the transition latency of the processor. The
 * ondemand governor will work on any processor with transition latency <= 10ms,
 * using appropriate sampling rate.
 */
#define LATENCY_MULTIPLIER		(1000)

struct cpufreq_governor {
	char	name[CPUFREQ_NAME_LEN];
	int	(*init)(struct cpufreq_policy *policy);
	void	(*exit)(struct cpufreq_policy *policy);
	int	(*start)(struct cpufreq_policy *policy);
	void	(*stop)(struct cpufreq_policy *policy);
	void	(*limits)(struct cpufreq_policy *policy);
	ssize_t	(*show_setspeed)	(struct cpufreq_policy *policy,
					 char *buf);
	int	(*store_setspeed)	(struct cpufreq_policy *policy,
					 unsigned int freq);
	/* For governors which change frequency dynamically by themselves */
	bool	dynamic_switching;
	struct list_head	governor_list;
	struct module		*owner;
};

/* Pass a target to the cpufreq driver */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation);
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation);
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq);
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);

struct cpufreq_governor *cpufreq_default_governor(void);
struct cpufreq_governor *cpufreq_fallback_governor(void);

static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
{
	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
}

/* Governor attribute set */
struct gov_attr_set {
	struct kobject kobj;
	struct list_head policy_list;
	struct mutex update_lock;
	int usage_count;
};

/* sysfs ops for cpufreq governors */
extern const struct sysfs_ops governor_sysfs_ops;

void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);

/* Governor sysfs attribute */
struct governor_attr {
	struct attribute attr;
	ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
	ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
			 size_t count);
};

static inline bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
{
	/*
	 * Allow remote callbacks if:
	 * - dvfs_possible_from_any_cpu flag is set
	 * - the local and remote CPUs share cpufreq policy
	 */
	return policy->dvfs_possible_from_any_cpu ||
		cpumask_test_cpu(smp_processor_id(), policy->cpus);
}
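/*
 * Illustrative sketch (hypothetical names): the minimal shape of a governor
 * built on the callbacks above. A real governor would also schedule periodic
 * re-evaluation; here ->limits() merely clamps the current frequency.
 *
 *	static void foo_gov_limits(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_policy_apply_limits(policy);
 *	}
 *
 *	static struct cpufreq_governor foo_governor = {
 *		.name	= "foo",
 *		.limits	= foo_gov_limits,
 *		.owner	= THIS_MODULE,
 *	};
 *
 * It would be registered from module init with
 * cpufreq_register_governor(&foo_governor).
 */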
/*********************************************************************
 *                     FREQUENCY TABLE HELPERS                       *
 *********************************************************************/

/* Special Values of .frequency field */
#define CPUFREQ_ENTRY_INVALID	~0u
#define CPUFREQ_TABLE_END	~1u
/* Special Values of .flags field */
#define CPUFREQ_BOOST_FREQ	(1 << 0)

struct cpufreq_frequency_table {
	unsigned int	flags;
	unsigned int	driver_data; /* driver specific data, not used by core */
	unsigned int	frequency; /* kHz - doesn't need to be in ascending
				    * order */
};

#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
int dev_pm_opp_init_cpufreq_table(struct device *dev,
				  struct cpufreq_frequency_table **table);
void dev_pm_opp_free_cpufreq_table(struct device *dev,
				   struct cpufreq_frequency_table **table);
#else
static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
						struct cpufreq_frequency_table
						**table)
{
	return -EINVAL;
}

static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
						 struct cpufreq_frequency_table
						 **table)
{
}
#endif

/*
 * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 */

#define cpufreq_for_each_entry(pos, table)	\
	for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)

/*
 * cpufreq_for_each_entry_idx - iterate over a cpufreq_frequency_table
 *	with index
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 * @idx:	the table entry currently being processed
 */

#define cpufreq_for_each_entry_idx(pos, table, idx)	\
	for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \
		pos++, idx++)

/*
 * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table
 *	excluding CPUFREQ_ENTRY_INVALID frequencies.
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 */

#define cpufreq_for_each_valid_entry(pos, table)			\
	for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)	\
		if (pos->frequency == CPUFREQ_ENTRY_INVALID)		\
			continue;					\
		else

/*
 * cpufreq_for_each_valid_entry_idx - iterate with index over a cpufreq
 *	frequency_table excluding CPUFREQ_ENTRY_INVALID frequencies.
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 * @idx:	the table entry currently being processed
 */

#define cpufreq_for_each_valid_entry_idx(pos, table, idx)		\
	cpufreq_for_each_entry_idx(pos, table, idx)			\
		if (pos->frequency == CPUFREQ_ENTRY_INVALID)		\
			continue;					\
		else
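/*
 * Illustrative sketch (hypothetical helper, not part of the API): using the
 * iterators above to find the highest valid non-boost frequency in a
 * driver-provided table.
 */
static inline unsigned int
cpufreq_example_max_normal_freq(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	unsigned int max = 0;

	cpufreq_for_each_valid_entry(pos, table)
		if (!(pos->flags & CPUFREQ_BOOST_FREQ) && pos->frequency > max)
			max = pos->frequency;

	return max;
}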
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table);

int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
				   struct cpufreq_frequency_table *table);
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);

int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
				 unsigned int target_freq,
				 unsigned int relation);
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
				      unsigned int freq);

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);

#ifdef CONFIG_CPU_FREQ
int cpufreq_boost_trigger_state(int state);
int cpufreq_boost_enabled(void);
int cpufreq_enable_boost_support(void);
bool policy_has_boost_freq(struct cpufreq_policy *policy);

/* Find lowest freq at or above target in a table in ascending order */
static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_valid_entry_idx(pos, table, idx) {
		freq = pos->frequency;

		if (freq >= target_freq)
			return idx;

		best = idx;
	}

	return best;
}

/* Find lowest freq at or above target in a table in descending order */
static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_valid_entry_idx(pos, table, idx) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq > target_freq) {
			best = idx;
			continue;
		}

		/* No freq found above target_freq */
		if (best == -1)
			return idx;

		return best;
	}

	return best;
}

/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
		return cpufreq_table_find_index_al(policy, target_freq);
	else
		return cpufreq_table_find_index_dl(policy, target_freq);
}

/* Find highest freq at or below target in a table in ascending order */
static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_valid_entry_idx(pos, table, idx) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq < target_freq) {
			best = idx;
			continue;
		}

		/* No freq found below target_freq */
		if (best == -1)
			return idx;

		return best;
	}

	return best;
}

/* Find highest freq at or below target in a table in descending order */
static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_valid_entry_idx(pos, table, idx) {
		freq = pos->frequency;

		if (freq <= target_freq)
			return idx;

		best = idx;
	}

	return best;
}

/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
		return cpufreq_table_find_index_ah(policy, target_freq);
	else
		return cpufreq_table_find_index_dh(policy, target_freq);
}
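/*
 * Worked example: for a valid ascending table { 500000, 1000000, 1500000 }
 * (kHz) and target_freq = 800000 (assuming policy->min/max do not clamp the
 * target), cpufreq_table_find_index_l() returns the index of 1000000 (the
 * lowest frequency at or above the target), while
 * cpufreq_table_find_index_h() returns the index of 500000 (the highest
 * frequency at or below the target).
 */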
/* Find closest freq to target in a table in ascending order */
static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_valid_entry_idx(pos, table, idx) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq < target_freq) {
			best = idx;
			continue;
		}

		/* No freq found below target_freq */
		if (best == -1)
			return idx;

		/* Choose the closest freq */
		if (target_freq - table[best].frequency > freq - target_freq)
			return idx;

		return best;
	}

	return best;
}

/* Find closest freq to target in a table in descending order */
static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_valid_entry_idx(pos, table, idx) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq > target_freq) {
			best = idx;
			continue;
		}

		/* No freq found above target_freq */
		if (best == -1)
			return idx;

		/* Choose the closest freq */
		if (table[best].frequency - target_freq > target_freq - freq)
			return idx;

		return best;
	}

	return best;
}

/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
		return cpufreq_table_find_index_ac(policy, target_freq);
	else
		return cpufreq_table_find_index_dc(policy, target_freq);
}

static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
						 unsigned int target_freq,
						 unsigned int relation)
{
	if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
		return cpufreq_table_index_unsorted(policy, target_freq,
						    relation);

	switch (relation) {
	case CPUFREQ_RELATION_L:
		return cpufreq_table_find_index_l(policy, target_freq);
	case CPUFREQ_RELATION_H:
		return cpufreq_table_find_index_h(policy, target_freq);
	case CPUFREQ_RELATION_C:
		return cpufreq_table_find_index_c(policy, target_freq);
	default:
		pr_err("%s: Invalid relation: %d\n", __func__, relation);
		return -EINVAL;
	}
}
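/*
 * Illustrative sketch (hypothetical driver functions): how an old-style
 * ->target() implementation typically maps a (target_freq, relation) pair
 * to a table index with the helper above before programming the hardware.
 *
 *	static int foo_target(struct cpufreq_policy *policy,
 *			      unsigned int target_freq, unsigned int relation)
 *	{
 *		int idx = cpufreq_frequency_table_target(policy, target_freq,
 *							 relation);
 *		if (idx < 0)
 *			return idx;
 *		return foo_set_freq(policy, policy->freq_table[idx].frequency);
 *	}
 */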
static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *pos;
	int count = 0;

	if (unlikely(!policy->freq_table))
		return 0;

	cpufreq_for_each_valid_entry(pos, policy->freq_table)
		count++;

	return count;
}
#else
static inline int cpufreq_boost_trigger_state(int state)
{
	return 0;
}
static inline int cpufreq_boost_enabled(void)
{
	return 0;
}

static inline int cpufreq_enable_boost_support(void)
{
	return -EINVAL;
}

static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
	return false;
}
#endif

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
				   struct cpufreq_governor *old_gov);
#else
static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
						 struct cpufreq_governor *old_gov) { }
#endif

extern void arch_freq_prepare_all(void);
extern unsigned int arch_freq_get_on_cpu(int cpu);

extern void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
				unsigned long max_freq);

/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
extern struct freq_attr *cpufreq_generic_attr[];
int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);

unsigned int cpufreq_generic_get(unsigned int cpu);
int cpufreq_generic_init(struct cpufreq_policy *policy,
			 struct cpufreq_frequency_table *table,
			 unsigned int transition_latency);
#endif /* _LINUX_CPUFREQ_H */