/* linux/include/linux/clocksource.h
 *
 * This file contains the structure definitions for clocksources.
 *
 * If you are not a clocksource, or timekeeping code, you should
 * not be including this file!
 */
#ifndef _LINUX_CLOCKSOURCE_H
#define _LINUX_CLOCKSOURCE_H

#include <linux/types.h>
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/list.h>
#include <linux/cache.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <asm/div64.h>
#include <asm/io.h>

/* clocksource cycle base type */
typedef u64 cycle_t;
struct clocksource;

/**
 * struct cyclecounter - hardware abstraction for a free running counter
 *	Provides completely state-free accessors to the underlying hardware.
 *	Depending on which hardware it reads, the cycle counter may wrap
 *	around quickly. Locking rules (if necessary) have to be defined
 *	by the implementor and user of specific instances of this API.
 *
 * @read:		returns the current cycle value
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters,
 *			see CLOCKSOURCE_MASK() helper macro
 * @mult:		cycle to nanosecond multiplier
 * @shift:		cycle to nanosecond divisor (power of two)
 */
struct cyclecounter {
	cycle_t (*read)(const struct cyclecounter *cc);
	cycle_t mask;
	u32 mult;
	u32 shift;
};

/**
 * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
 *	Contains the state needed by timecounter_read() to detect
 *	cycle counter wrap around. Initialize with
 *	timecounter_init(). Also used to convert cycle counts into the
 *	corresponding nanosecond counts with timecounter_cyc2time(). Users
 *	of this code are responsible for initializing the underlying
 *	cycle counter hardware, locking issues and reading the time
 *	more often than the cycle counter wraps around. The nanosecond
 *	counter will only wrap around after ~585 years.
 *
 * @cc:			the cycle counter used by this instance
 * @cycle_last:		most recent cycle counter value seen by
 *			timecounter_read()
 * @nsec:		continuously increasing count
 */
struct timecounter {
	const struct cyclecounter *cc;
	cycle_t cycle_last;
	u64 nsec;
};

/**
 * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
 * @cc:		Pointer to cycle counter.
 * @cycles:	Cycles
 *
 * XXX - This could use some mult_lxl_ll() asm optimization. Same code
 * as in cyc2ns, but with unsigned result.
 */
static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
				      cycle_t cycles)
{
	u64 ret = (u64)cycles;
	ret = (ret * cc->mult) >> cc->shift;
	return ret;
}

/**
 * timecounter_init - initialize a time counter
 * @tc:			Pointer to time counter which is to be initialized/reset
 * @cc:			A cycle counter, ready to be used.
 * @start_tstamp:	Arbitrary initial time stamp.
 *
 * After this call the current cycle register (roughly) corresponds to
 * the initial time stamp. Every call to timecounter_read() increments
 * the time stamp counter by the number of elapsed nanoseconds.
 */
extern void timecounter_init(struct timecounter *tc,
			     const struct cyclecounter *cc,
			     u64 start_tstamp);

/**
 * timecounter_read - return nanoseconds elapsed since timecounter_init()
 *                    plus the initial time stamp
 * @tc:		Pointer to time counter.
 *
 * In other words, keeps track of time since the same epoch as
 * the function which generated the initial time stamp.
 */
extern u64 timecounter_read(struct timecounter *tc);

/**
 * timecounter_cyc2time - convert a cycle counter to same
 *                        time base as values returned by
 *                        timecounter_read()
 * @tc:			Pointer to time counter.
 * @cycle_tstamp:	a value returned by tc->cc->read()
 *
 * Cycle counts are converted correctly as long as they
 * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
 * with "max cycle count" == tc->cc->mask+1.
 *
 * This allows conversion of cycle counter values which were generated
 * in the past.
 */
extern u64 timecounter_cyc2time(struct timecounter *tc,
				cycle_t cycle_tstamp);
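
/*
 * Illustrative usage sketch (not part of this header): a driver with a
 * free running 32 bit hardware counter could wire up the timecounter
 * layer roughly as follows.  The my_hw_*, my_* and MY_HW_KHZ names are
 * hypothetical driver symbols, and the shift value is an example only.
 *
 *	static cycle_t my_cc_read(const struct cyclecounter *cc)
 *	{
 *		return my_hw_read_counter();
 *	}
 *
 *	static struct cyclecounter my_cc = {
 *		.read	= my_cc_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.shift	= 21,
 *	};
 *	static struct timecounter my_tc;
 *
 *	static void my_time_init(u64 start_ns)
 *	{
 *		my_cc.mult = clocksource_khz2mult(MY_HW_KHZ, my_cc.shift);
 *		timecounter_init(&my_tc, &my_cc, start_ns);
 *	}
 *
 * After that, timecounter_read(&my_tc) yields monotonically increasing
 * nanoseconds and timecounter_cyc2time(&my_tc, hw_stamp) converts a raw
 * hardware timestamp into the same time base, provided the counter is
 * read more often than it wraps.
 */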

/**
 * struct clocksource - hardware abstraction for a free running counter
 *	Provides mostly state-free accessors to the underlying hardware.
 *	This is the structure used for system time.
 *
 * @name:		ptr to clocksource name
 * @list:		list head for registration
 * @rating:		rating value for selection (higher is better)
 *			To avoid rating inflation the following
 *			list should give you a guide as to how
 *			to assign your clocksource a rating
 *			1-99: Unfit for real use
 *				Only available for bootup and testing purposes.
 *			100-199: Base level usability.
 *				Functional for real use, but not desired.
 *			200-299: Good.
 *				A correct and usable clocksource.
 *			300-399: Desired.
 *				A reasonably fast and accurate clocksource.
 *			400-499: Perfect.
 *				The ideal clocksource. A must-use where
 *				available.
 * @read:		returns a cycle value, passes clocksource as argument
 * @enable:		optional function to enable the clocksource
 * @disable:		optional function to disable the clocksource
 * @cycle_last:		most recently read cycle value, updated by the
 *			timekeeping core
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters
 * @mult:		cycle to nanosecond multiplier
 * @shift:		cycle to nanosecond divisor (power of two)
 * @max_idle_ns:	max idle time permitted by the clocksource (nsecs)
 * @flags:		flags describing special properties
 * @vread:		vsyscall based read
 * @suspend:		suspend function for the clocksource, if necessary
 * @resume:		resume function for the clocksource, if necessary
 */
struct clocksource {
	/*
	 * Hotpath data, fits in a single cache line when the
	 * clocksource itself is cacheline aligned.
	 */
	cycle_t (*read)(struct clocksource *cs);
	cycle_t cycle_last;
	cycle_t mask;
	u32 mult;
	u32 shift;
	u64 max_idle_ns;

#ifdef CONFIG_IA64
	void *fsys_mmio;	/* used by fsyscall asm code */
#define CLKSRC_FSYS_MMIO_SET(mmio, addr)	((mmio) = (addr))
#else
#define CLKSRC_FSYS_MMIO_SET(mmio, addr)	do { } while (0)
#endif
	const char *name;
	struct list_head list;
	int rating;
	cycle_t (*vread)(void);
	int (*enable)(struct clocksource *cs);
	void (*disable)(struct clocksource *cs);
	unsigned long flags;
	void (*suspend)(struct clocksource *cs);
	void (*resume)(struct clocksource *cs);

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
	/* Watchdog related data, used by the framework */
	struct list_head wd_list;
	cycle_t wd_last;
#endif
} ____cacheline_aligned;

/*
 * Clock source flags bits:
 */
#define CLOCK_SOURCE_IS_CONTINUOUS		0x01
#define CLOCK_SOURCE_MUST_VERIFY		0x02

#define CLOCK_SOURCE_WATCHDOG			0x10
#define CLOCK_SOURCE_VALID_FOR_HRES		0x20
#define CLOCK_SOURCE_UNSTABLE			0x40

/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
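
/*
 * Illustrative sketch (not part of this header): a minimal clocksource
 * for a memory mapped, continuously running 32 bit counter could look
 * roughly like this.  The my_timer_* names and the MY_TIMER_COUNT
 * register offset are hypothetical driver symbols.
 *
 *	static void __iomem *my_timer_base;
 *
 *	static cycle_t my_timer_read(struct clocksource *cs)
 *	{
 *		return readl(my_timer_base + MY_TIMER_COUNT);
 *	}
 *
 *	static struct clocksource my_timer_cs = {
 *		.name	= "my_timer",
 *		.rating	= 300,
 *		.read	= my_timer_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 * mult and shift are deliberately left zero here; registering with
 * clocksource_register_hz()/clocksource_register_khz() (declared below)
 * lets the core compute them from the counter frequency.  For simple
 * counters like this one, clocksource_mmio_init() at the end of this
 * file can set up and register the clocksource in one call.
 */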

/**
 * clocksource_khz2mult - calculates mult from khz and shift
 * @khz:		Clocksource frequency in KHz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a khz counter frequency to a
 * clocksource multiplier, given the clocksource shift value
 */
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
	/*  khz = cyc/(Million ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Million/khz * 2^shift
	 *  mult = 1000000 * 2^shift / khz
	 *  mult = (1000000<<shift) / khz
	 */
	u64 tmp = ((u64)1000000) << shift_constant;

	tmp += khz/2; /* round for do_div */
	do_div(tmp, khz);

	return (u32)tmp;
}

/**
 * clocksource_hz2mult - calculates mult from hz and shift
 * @hz:			Clocksource frequency in Hz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a hz counter
 * frequency to a clocksource multiplier, given the
 * clocksource shift value
 */
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
	/*  hz = cyc/(Billion ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Billion/hz * 2^shift
	 *  mult = 1000000000 * 2^shift / hz
	 *  mult = (1000000000<<shift) / hz
	 */
	u64 tmp = ((u64)1000000000) << shift_constant;

	tmp += hz/2; /* round for do_div */
	do_div(tmp, hz);

	return (u32)tmp;
}

/**
 * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
 * @cycles:	cycles
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 *
 * Converts cycles to nanoseconds, using the given mult and shift.
 *
 * XXX - This could use some mult_lxl_ll() asm optimization
 */
static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
{
	return ((u64) cycles * mult) >> shift;
}


extern int clocksource_register(struct clocksource*);
extern void clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_suspend(void);
extern void clocksource_resume(void);
extern struct clocksource * __init __weak clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);

extern void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);

/*
 * Don't call __clocksource_register_scale directly, use
 * clocksource_register_hz/khz
 */
extern int
__clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq);
extern void
__clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq);

static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
{
	return __clocksource_register_scale(cs, 1, hz);
}

static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
{
	return __clocksource_register_scale(cs, 1000, khz);
}
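
/*
 * Illustrative example (the numbers are examples only): for a
 * hypothetical 19.2 MHz counter and a shift of 23, clocksource_hz2mult()
 * gives
 *
 *	mult = (1000000000 << 23) / 19200000 ~= 436906667
 *
 * and converting one second worth of cycles back with
 * clocksource_cyc2ns(19200000, 436906667, 23) yields ~1000000000 ns,
 * i.e. the mult/shift pair round-trips cycles to nanoseconds as
 * expected.  Drivers normally do not pick these values by hand;
 * clocksource_register_hz()/clocksource_register_khz() above have the
 * core derive a suitable mult/shift pair from the frequency.
 */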

static inline void __clocksource_updatefreq_hz(struct clocksource *cs, u32 hz)
{
	__clocksource_updatefreq_scale(cs, 1, hz);
}

static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz)
{
	__clocksource_updatefreq_scale(cs, 1000, khz);
}

static inline void
clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
{
	return clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				      NSEC_PER_SEC, minsec);
}

#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void
update_vsyscall(struct timespec *ts, struct timespec *wtm,
			struct clocksource *c, u32 mult);
extern void update_vsyscall_tz(void);
#else
static inline void
update_vsyscall(struct timespec *ts, struct timespec *wtm,
			struct clocksource *c, u32 mult)
{
}

static inline void update_vsyscall_tz(void)
{
}
#endif

extern void timekeeping_notify(struct clocksource *clock);

extern cycle_t clocksource_mmio_readl_up(struct clocksource *);
extern cycle_t clocksource_mmio_readl_down(struct clocksource *);
extern cycle_t clocksource_mmio_readw_up(struct clocksource *);
extern cycle_t clocksource_mmio_readw_down(struct clocksource *);

extern int clocksource_mmio_init(void __iomem *, const char *,
	unsigned long, int, unsigned, cycle_t (*)(struct clocksource *));

extern int clocksource_i8253_init(void);

#endif /* _LINUX_CLOCKSOURCE_H */