xref: /linux-6.15/kernel/sched/clock.c (revision 3f07c014)
/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
 *
 *  Updates and enhancements:
 *    Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <[email protected]>
 *
 * Based on code by:
 *   Ingo Molnar <[email protected]>
 *   Guillaume Chazarain <[email protected]>
 *
 *
 * What:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)       -- can be used from any context, including NMI.
 * local_clock()      -- is cpu_clock() on the current cpu.
 * sched_clock_cpu(i) -- the per-CPU function the two above are built on.
 *
 * How:
 *
 * When !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK the implementation simply uses
 * sched_clock(); in that case sched_clock() is assumed to provide these
 * properties (mostly it means the architecture provides a globally
 * synchronized highres time source).
 *
 * Otherwise it tries to create a semi stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
 * deltas are filtered to provide monotonicity and to keep the result within an
 * expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 */
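
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	u64 t0 = local_clock();
 *	do_something();			// hypothetical work being timed
 *	u64 dt = local_clock() - t0;	// elapsed ns, monotonic on this CPU
 *
 * Timestamps taken via cpu_clock(i) and cpu_clock(j) for i != j may compare
 * "backwards" per the warning above, so treat cross-CPU differences as
 * approximate only.
 */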
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/static_key.h>
#include <linux/workqueue.h>
#include <linux/compiler.h>
#include <linux/tick.h>

/*
 * Scheduler clock - returns current time in nanosecond units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __weak sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);
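
/*
 * Worked example of this fallback's resolution, assuming HZ == 250 (a
 * common config value; the number is only illustrative):
 *
 *	NSEC_PER_SEC / HZ = 1,000,000,000 / 250 = 4,000,000
 *
 * i.e. the weak sched_clock() advances in 4 ms steps, which is why
 * architectures with a fine-grained counter are expected to override it.
 */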

__read_mostly int sched_clock_running;

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
 * We must start with !__sched_clock_stable because the unstable -> stable
 * transition is accurate, while the stable -> unstable transition is not.
 *
 * Similarly we start with __sched_clock_stable_early = 1, thereby assuming we
 * will become stable, such that there's only a single 1 -> 0 transition.
 */
static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
static int __sched_clock_stable_early = 1;

/*
 * We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset
 */
static __read_mostly u64 raw_offset;
static __read_mostly u64 gtod_offset;
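
/*
 * That identity pins down raw_offset at the moment the clock is declared
 * stable (see __set_sched_clock_stable() below). A purely illustrative
 * example with made-up numbers:
 *
 *	tick_gtod = 10,000,000	gtod_offset = 0	tick_raw = 9,250,000
 *	raw_offset = (tick_gtod + gtod_offset) - tick_raw = 750,000
 *
 * so that from then on sched_clock() + raw_offset continues seamlessly from
 * what ktime_get_ns() + gtod_offset read at the transition point.
 */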

struct sched_clock_data {
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return this_cpu_ptr(&sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

int sched_clock_stable(void)
{
	return static_branch_likely(&__sched_clock_stable);
}

static void __set_sched_clock_stable(void)
{
	struct sched_clock_data *scd = this_scd();

	/*
	 * Attempt to make the (initial) unstable->stable transition continuous.
	 */
	raw_offset = (scd->tick_gtod + gtod_offset) - (scd->tick_raw);

	printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
			scd->tick_gtod, gtod_offset,
			scd->tick_raw,  raw_offset);

	static_branch_enable(&__sched_clock_stable);
	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
}

static void __clear_sched_clock_stable(struct work_struct *work)
{
	struct sched_clock_data *scd = this_scd();

	/*
	 * Attempt to make the stable->unstable transition continuous.
	 *
	 * Trouble is, this is typically called from the TSC watchdog
	 * timer, which is late by definition. This means the tick
	 * values can already be screwy.
	 *
	 * Still do what we can.
	 */
	gtod_offset = (scd->tick_raw + raw_offset) - (scd->tick_gtod);

	printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
			scd->tick_gtod, gtod_offset,
			scd->tick_raw,  raw_offset);

	static_branch_disable(&__sched_clock_stable);
	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
}

static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);

void clear_sched_clock_stable(void)
{
	__sched_clock_stable_early = 0;

	smp_mb(); /* matches sched_clock_init_late() */

	if (sched_clock_running == 2)
		schedule_work(&sched_clock_work);
}

void sched_clock_init_late(void)
{
	sched_clock_running = 2;
	/*
	 * Ensure that it is impossible to not do a static_key update.
	 *
	 * Either {set,clear}_sched_clock_stable() must see sched_clock_running
	 * and do the update, or we must see their __sched_clock_stable_early
	 * and do the update, or both.
	 */
	smp_mb(); /* matches {set,clear}_sched_clock_stable() */

	if (__sched_clock_stable_early)
		__set_sched_clock_stable();
}
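
/*
 * Put differently, the paired barriers above and in clear_sched_clock_stable()
 * forbid the one bad interleaving (a sketch of the race, not new behaviour):
 *
 *	clear_sched_clock_stable()	sched_clock_init_late()
 *	--------------------------	-----------------------
 *	__sched_clock_stable_early = 0	sched_clock_running = 2
 *	smp_mb()			smp_mb()
 *	load sched_clock_running	load __sched_clock_stable_early
 *
 * With both smp_mb()s in place at least one side must observe the other's
 * store, so we cannot end up with the static key enabled after somebody has
 * already asked for it to be cleared.
 */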

/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
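
/*
 * Illustrative check of the wrap handling (values made up):
 *
 *	x = 0xffffffffffffff00	(just before a u64 wrap)
 *	y = 0x0000000000000100	(just after the wrap)
 *
 * x - y = 0xfffffffffffffe00, so (s64)(x - y) < 0: wrap_min(x, y) returns x
 * and wrap_max(x, y) returns y, i.e. y is treated as the later time even
 * though it is numerically the smaller value.
 */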

/*
 * Update the per-CPU scd from the current raw sched_clock() value:
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + gtod_offset + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      max(scd->clock, scd->tick_gtod + TICK_NSEC));
	 */

	clock = scd->tick_gtod + gtod_offset + delta;
	min_clock = wrap_max(scd->tick_gtod, old_clock);
	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}
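
/*
 * A purely illustrative trace through the filtering above, assuming a
 * 1000 Hz tick (TICK_NSEC == 1,000,000), gtod_offset == 0 and an old clock
 * value inside the window (all numbers made up):
 *
 *	at the tick:	tick_raw = 7,000,000	tick_gtod = 5,000,000
 *	later read:	sched_clock() = 7,400,000  ->  delta = 400,000
 *			candidate clock = 5,000,000 + 400,000 = 5,400,000
 *			window = [5,000,000, 6,000,000]  ->  clock = 5,400,000
 *
 * A sched_clock() that jumps wildly can therefore move the result at most
 * one tick past the last GTOD timestamp, and never backwards.
 */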

static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

#if BITS_PER_LONG != 64
again:
	/*
	 * Careful here: The local and the remote clock values need to
	 * be read out atomically as we need to compare the values and
	 * then update either the local or the remote side. So the
	 * cmpxchg64 below only protects one readout.
	 *
	 * We must reread via sched_clock_local() in the retry case on
	 * 32-bit as an NMI could use sched_clock_local() via the
	 * tracer and hit between the readout of
	 * the low 32-bit and the high 32-bit portion.
	 */
	this_clock = sched_clock_local(my_scd);
	/*
	 * We must enforce atomic readout on 32-bit, otherwise the
	 * update on the remote CPU can hit in between the readout of
	 * the low 32-bit and the high 32-bit portion.
	 */
	remote_clock = cmpxchg64(&scd->clock, 0, 0);
#else
	/*
	 * On 64-bit the read of [my]scd->clock is atomic versus the
	 * update, so we can avoid the above 32-bit dance.
	 */
	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;
#endif

	/*
	 * Use the opportunity that we have both clock values read out to
	 * couple the two clocks: we take the larger time as the latest time
	 * for both runqueues. (this creates monotonic movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}
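
/*
 * Illustrative outcome of the coupling above (made-up numbers):
 *
 *	this_clock = 1,000,000		remote_clock = 900,000
 *
 * remote_clock - this_clock is negative, so the remote CPU's clock is
 * cmpxchg()'d forward to 1,000,000 and both CPUs report the larger value;
 * in the rarer opposite case it is our own clock that gets pulled forward.
 */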

/*
 * Similar to cpu_clock(), but requires local IRQs to be disabled.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	if (sched_clock_stable())
		return sched_clock() + raw_offset;

	if (unlikely(!sched_clock_running))
		return 0ull;

	preempt_disable_notrace();
	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(sched_clock_cpu);
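
/*
 * A condensed view of the dispatch above (restating the code, not adding
 * to it):
 *
 *	sched_clock_stable():	sched_clock() + raw_offset, identical on every
 *				CPU, so cross-CPU comparisons are meaningful
 *	!sched_clock_running:	0
 *	otherwise:		per-CPU filtered value, monotonic per CPU only;
 *				remote reads are coupled in sched_clock_remote()
 */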

void sched_clock_tick(void)
{
	struct sched_clock_data *scd;

	WARN_ON_ONCE(!irqs_disabled());

	/*
	 * Update these values even if sched_clock_stable(), because it can
	 * become unstable at any point in time at which point we need some
	 * values to fall back on.
	 *
	 * XXX arguably we can skip this if we expose tsc_clocksource_reliable
	 */
	scd = this_scd();
	scd->tick_raw  = sched_clock();
	scd->tick_gtod = ktime_get_ns();

	if (!sched_clock_stable() && likely(sched_clock_running))
		sched_clock_local(scd);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled @delta_ns nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog_sched();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

/*
 * Running clock - returns the time that has elapsed while a guest has been
 * running.
 * On a guest this value should be local_clock() minus the time the guest was
 * suspended by the hypervisor (for any reason).
 * On bare metal this function should return the same as local_clock().
 * Architectures and sub-architectures can override this.
 */
u64 __weak running_clock(void)
{
	return local_clock();
}