/* linux/include/linux/clocksource.h
 *
 * This file contains the structure definitions for clocksources.
 *
 * If you are not a clocksource, or timekeeping code, you should
 * not be including this file!
 */
#ifndef _LINUX_CLOCKSOURCE_H
#define _LINUX_CLOCKSOURCE_H

#include <linux/types.h>
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/list.h>
#include <linux/cache.h>
#include <linux/timer.h>
#include <asm/div64.h>
#include <asm/io.h>

/* clocksource cycle base type */
typedef u64 cycle_t;
struct clocksource;

/**
 * struct cyclecounter - hardware abstraction for a free running counter
 *	Provides completely state-free accessors to the underlying hardware.
 *	Depending on which hardware it reads, the cycle counter may wrap
 *	around quickly. Locking rules (if necessary) have to be defined
 *	by the implementor and user of specific instances of this API.
 *
 * @read:	returns the current cycle value
 * @mask:	bitmask for two's complement
 *		subtraction of non 64 bit counters,
 *		see CLOCKSOURCE_MASK() helper macro
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 */
struct cyclecounter {
	cycle_t (*read)(const struct cyclecounter *cc);
	cycle_t mask;
	u32 mult;
	u32 shift;
};

/**
 * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
 *	Contains the state needed by timecounter_read() to detect
 *	cycle counter wrap around. Initialize with
 *	timecounter_init(). Also used to convert cycle counts into the
 *	corresponding nanosecond counts with timecounter_cyc2time(). Users
 *	of this code are responsible for initializing the underlying
 *	cycle counter hardware, locking issues and reading the time
 *	more often than the cycle counter wraps around. The nanosecond
 *	counter will only wrap around after ~585 years.
 *
 * @cc:		the cycle counter used by this instance
 * @cycle_last:	most recent cycle counter value seen by
 *		timecounter_read()
 * @nsec:	continuously increasing count
 */
struct timecounter {
	const struct cyclecounter *cc;
	cycle_t cycle_last;
	u64 nsec;
};

/**
 * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
 * @cc:		Pointer to cycle counter.
 * @cycles:	Cycles
 *
 * XXX - This could use some mult_lxl_ll() asm optimization. Same code
 * as in cyc2ns, but with unsigned result.
 */
static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
				      cycle_t cycles)
{
	u64 ret = (u64)cycles;
	ret = (ret * cc->mult) >> cc->shift;
	return ret;
}

/**
 * timecounter_init - initialize a time counter
 * @tc:			Pointer to time counter which is to be initialized/reset
 * @cc:			A cycle counter, ready to be used.
 * @start_tstamp:	Arbitrary initial time stamp.
 *
 * After this call the current cycle register (roughly) corresponds to
 * the initial time stamp. Every call to timecounter_read() increments
 * the time stamp counter by the number of elapsed nanoseconds.
 */
extern void timecounter_init(struct timecounter *tc,
			     const struct cyclecounter *cc,
			     u64 start_tstamp);
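
/*
 * Example (illustrative sketch only, not part of this header): a driver
 * with a free running 32 bit hardware counter might wire it up roughly
 * like this. foo_read_counter(), foo_counter_reg, foo_khz, foo_cc and
 * foo_tc are hypothetical names; CLOCKSOURCE_MASK() and
 * clocksource_khz2mult() are the helpers defined further down (the same
 * cycles-to-nanoseconds math applies here), and the shift value of 22 is
 * only a placeholder.
 *
 *	static cycle_t foo_read_counter(const struct cyclecounter *cc)
 *	{
 *		return (cycle_t)readl(foo_counter_reg);
 *	}
 *
 *	static struct cyclecounter foo_cc = {
 *		.read	= foo_read_counter,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.shift	= 22,
 *	};
 *
 *	static struct timecounter foo_tc;
 *
 *	foo_cc.mult = clocksource_khz2mult(foo_khz, foo_cc.shift);
 *	timecounter_init(&foo_tc, &foo_cc, ktime_to_ns(ktime_get_real()));
 */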

/**
 * timecounter_read - return nanoseconds elapsed since timecounter_init()
 *                    plus the initial time stamp
 * @tc:		Pointer to time counter.
 *
 * In other words, keeps track of time since the same epoch as
 * the function which generated the initial time stamp.
 */
extern u64 timecounter_read(struct timecounter *tc);

/**
 * timecounter_cyc2time - convert a cycle counter to the same
 *                        time base as values returned by
 *                        timecounter_read()
 * @tc:			Pointer to time counter.
 * @cycle_tstamp:	a value returned by tc->cc->read()
 *
 * Cycle counts are converted correctly as long as they
 * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
 * with "max cycle count" == tc->cc->mask+1.
 *
 * This allows conversion of cycle counter values which were generated
 * in the past.
 */
extern u64 timecounter_cyc2time(struct timecounter *tc,
				cycle_t cycle_tstamp);
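
/*
 * Example (illustrative sketch only): continuing the hypothetical foo_tc
 * sketch above, a raw cycle value captured by the hardware (for instance
 * a packet receive time stamp) can be converted into the timecounter's
 * nanosecond time base. This only works as long as timecounter_read() is
 * called often enough that the cycle counter never advances by more than
 * half of its range between calls. foo_hw_timestamp() is a made-up helper.
 *
 *	u64 now_ns, stamp_ns;
 *	cycle_t raw = foo_hw_timestamp();
 *
 *	now_ns = timecounter_read(&foo_tc);
 *	stamp_ns = timecounter_cyc2time(&foo_tc, raw);
 */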

/**
 * struct clocksource - hardware abstraction for a free running counter
 *	Provides mostly state-free accessors to the underlying hardware.
 *	This is the structure used for system time.
 *
 * @name:		ptr to clocksource name
 * @list:		list head for registration
 * @rating:		rating value for selection (higher is better)
 *			To avoid rating inflation the following
 *			list should give you a guide as to how
 *			to assign your clocksource a rating
 *			1-99: Unfit for real use
 *				Only available for bootup and testing purposes.
 *			100-199: Base level usability.
 *				Functional for real use, but not desired.
 *			200-299: Good.
 *				A correct and usable clocksource.
 *			300-399: Desired.
 *				A reasonably fast and accurate clocksource.
 *			400-499: Perfect
 *				The ideal clocksource. A must-use where
 *				available.
 * @read:		returns a cycle value
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters
 * @mult:		cycle to nanosecond multiplier (adjusted by NTP)
 * @mult_orig:		cycle to nanosecond multiplier (unadjusted by NTP)
 * @shift:		cycle to nanosecond divisor (power of two)
 * @flags:		flags describing special properties
 * @vread:		vsyscall based read
 * @resume:		resume function for the clocksource, if necessary
 * @cycle_interval:	Used internally by timekeeping core, please ignore.
 * @xtime_interval:	Used internally by timekeeping core, please ignore.
 */
struct clocksource {
	/*
	 * First part of structure is read mostly
	 */
	char *name;
	struct list_head list;
	int rating;
	cycle_t (*read)(void);
	cycle_t mask;
	u32 mult;
	u32 mult_orig;
	u32 shift;
	unsigned long flags;
	cycle_t (*vread)(void);
	void (*resume)(void);
#ifdef CONFIG_IA64
	void *fsys_mmio;	/* used by fsyscall asm code */
#define CLKSRC_FSYS_MMIO_SET(mmio, addr)	((mmio) = (addr))
#else
#define CLKSRC_FSYS_MMIO_SET(mmio, addr)	do { } while (0)
#endif

	/* timekeeping specific data, ignore */
	cycle_t cycle_interval;
	u64 xtime_interval;
	u32 raw_interval;
	/*
	 * Second part is written at each timer interrupt.
	 * Keep it in a different cache line to dirty no
	 * more than one cache line.
	 */
	cycle_t cycle_last ____cacheline_aligned_in_smp;
	u64 xtime_nsec;
	s64 error;
	struct timespec raw_time;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
	/* Watchdog related data, used by the framework */
	struct list_head wd_list;
	cycle_t wd_last;
#endif
};

extern struct clocksource *clock;	/* current clocksource */

/*
 * Clock source flags bits:
 */
#define CLOCK_SOURCE_IS_CONTINUOUS		0x01
#define CLOCK_SOURCE_MUST_VERIFY		0x02

#define CLOCK_SOURCE_WATCHDOG			0x10
#define CLOCK_SOURCE_VALID_FOR_HRES		0x20

/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)

/**
 * clocksource_khz2mult - calculates mult from khz and shift
 * @khz:		Clocksource frequency in KHz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a khz counter frequency to a clocksource
 * multiplier, given the clocksource shift value.
 */
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
	/*  khz = cyc/(Million ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Million/khz * 2^shift
	 *  mult = 1000000 * 2^shift / khz
	 *  mult = (1000000<<shift) / khz
	 */
	u64 tmp = ((u64)1000000) << shift_constant;

	tmp += khz/2; /* round for do_div */
	do_div(tmp, khz);

	return (u32)tmp;
}

/**
 * clocksource_hz2mult - calculates mult from hz and shift
 * @hz:			Clocksource frequency in Hz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a hz counter
 * frequency to a clocksource multiplier, given the
 * clocksource shift value.
 */
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
	/*  hz = cyc/(Billion ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Billion/hz * 2^shift
	 *  mult = 1000000000 * 2^shift / hz
	 *  mult = (1000000000<<shift) / hz
	 */
	u64 tmp = ((u64)1000000000) << shift_constant;

	tmp += hz/2; /* round for do_div */
	do_div(tmp, hz);

	return (u32)tmp;
}

/**
 * clocksource_read - Access the clocksource's current cycle value
 * @cs:		pointer to clocksource being read
 *
 * Uses the clocksource to return the current cycle_t value
 */
static inline cycle_t clocksource_read(struct clocksource *cs)
{
	return cs->read();
}

/**
 * cyc2ns - converts clocksource cycles to nanoseconds
 * @cs:		Pointer to clocksource
 * @cycles:	Cycles
 *
 * Uses the clocksource and ntp adjustment to convert cycle_ts to nanoseconds.
 *
 * XXX - This could use some mult_lxl_ll() asm optimization
 */
static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles)
{
	u64 ret = (u64)cycles;
	ret = (ret * cs->mult) >> cs->shift;
	return ret;
}

/**
 * clocksource_calculate_interval - Calculates a clocksource interval struct
 *
 * @c:		Pointer to clocksource.
 * @length_nsec: Desired interval length in nanoseconds.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static inline void clocksource_calculate_interval(struct clocksource *c,
						  unsigned long length_nsec)
{
	u64 tmp;

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = length_nsec;
	tmp <<= c->shift;
	tmp += c->mult_orig/2;
	do_div(tmp, c->mult_orig);

	c->cycle_interval = (cycle_t)tmp;
	if (c->cycle_interval == 0)
		c->cycle_interval = 1;

	/* Go back from cycles -> shifted ns, this time use ntp adjusted mult */
	c->xtime_interval = (u64)c->cycle_interval * c->mult;
	c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift;
}


/* used to install a new clocksource */
extern int clocksource_register(struct clocksource*);
extern void clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_resume(void);
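
/*
 * Example (illustrative sketch only): a minimal clocksource definition
 * for a hypothetical free running 32 bit counter. bar_read(),
 * bar_counter_reg and bar_hz are made-up names, and the shift value of
 * 20 is only a placeholder; a real driver would pick mult/shift to suit
 * its counter frequency before handing the structure to
 * clocksource_register(), declared above.
 *
 *	static cycle_t bar_read(void)
 *	{
 *		return (cycle_t)readl(bar_counter_reg);
 *	}
 *
 *	static struct clocksource bar_clocksource = {
 *		.name	= "bar",
 *		.rating	= 200,
 *		.read	= bar_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.shift	= 20,
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	bar_clocksource.mult = clocksource_hz2mult(bar_hz, bar_clocksource.shift);
 *	clocksource_register(&bar_clocksource);
 */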

#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
extern void update_vsyscall_tz(void);
#else
static inline void update_vsyscall(struct timespec *ts, struct clocksource *c)
{
}

static inline void update_vsyscall_tz(void)
{
}
#endif

#endif /* _LINUX_CLOCKSOURCE_H */