/*-
 * Copyright (c) 2009 Adrian Chadd
 * Copyright (c) 2012 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/**
 * \file dev/xen/timer/timer.c
 * \brief A timer driver for the Xen hypervisor's PV clock.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/timeet.h>
#include <sys/smp.h>
#include <sys/limits.h>
#include <sys/clock.h>

#include <xen/xen-os.h>
#include <xen/features.h>
#include <xen/xen_intr.h>
#include <xen/hypervisor.h>
#include <xen/interface/io/xenbus.h>
#include <xen/interface/vcpu.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/clock.h>
#include <machine/_inttypes.h>
#include <machine/smp.h>

#include "clock_if.h"

static devclass_t xentimer_devclass;

#define	NSEC_IN_SEC	1000000000ULL
#define	NSEC_IN_USEC	1000ULL
/* 18446744073 = int(2^64 / NSEC_IN_SEC) = 1 ns in 64-bit fractions */
#define	FRAC_IN_NSEC	18446744073LL

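/*
 * Illustrative arithmetic (not part of the driver): FRAC_IN_NSEC is one
 * nanosecond expressed in 2^-64 fractions of a second, since
 * 2^64 / 10^9 = 18446744073 (truncated).  For example, 500 ns corresponds
 * to 500 * FRAC_IN_NSEC = 9223372036500 such fractions.
 */
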
/* Xen timers may fire up to 100us off */
#define	XENTIMER_MIN_PERIOD_IN_NSEC	100*NSEC_IN_USEC
#define	XENCLOCK_RESOLUTION		10000000

#define	ETIME	62	/* Xen "bad time" error */

#define	XENTIMER_QUALITY	950

struct xentimer_pcpu_data {
	uint64_t timer;
	uint64_t last_processed;
	void *irq_handle;
};

DPCPU_DEFINE(struct xentimer_pcpu_data, xentimer_pcpu);

DPCPU_DECLARE(struct vcpu_info *, vcpu_info);

struct xentimer_softc {
	device_t dev;
	struct timecounter tc;
	struct eventtimer et;
};

/* Last time; this guarantees a monotonically increasing clock. */
volatile uint64_t xen_timer_last_time = 0;

static void
xentimer_identify(driver_t *driver, device_t parent)
{
	if (!xen_domain())
		return;

	/* Handle all Xen PV timers in one device instance. */
	if (devclass_get_device(xentimer_devclass, 0))
		return;

	BUS_ADD_CHILD(parent, 0, "xen_et", 0);
}

static int
xentimer_probe(device_t dev)
{
	KASSERT((xen_domain()), ("Trying to use Xen timer on bare metal"));
	/*
	 * In order to attach, this driver requires the following:
	 * - Vector callback support by the hypervisor, in order to deliver
	 *   timer interrupts to the correct CPU for CPUs other than 0.
	 * - Access to the hypervisor shared info page, in order to look up
	 *   each VCPU's timer information and the Xen wallclock time.
	 * - The hypervisor must say its PV clock is "safe" to use.
	 * - The hypervisor must support VCPUOP hypercalls.
	 * - The maximum number of CPUs supported by FreeBSD must not exceed
	 *   the number of VCPUs supported by the hypervisor.
	 */
#define	XTREQUIRES(condition, reason...)	\
	if (!(condition)) {			\
		device_printf(dev, ## reason);	\
		device_detach(dev);		\
		return (ENXIO);			\
	}

	if (xen_hvm_domain()) {
		XTREQUIRES(xen_vector_callback_enabled,
		           "vector callbacks unavailable\n");
		XTREQUIRES(xen_feature(XENFEAT_hvm_safe_pvclock),
		           "HVM safe pvclock unavailable\n");
	}
	XTREQUIRES(HYPERVISOR_shared_info != NULL,
	           "shared info page unavailable\n");
	XTREQUIRES(HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, 0, NULL) == 0,
	           "VCPUOPs interface unavailable\n");
#undef XTREQUIRES
	device_set_desc(dev, "Xen PV Clock");
	return (BUS_PROBE_NOWILDCARD);
}

/*
 * Scale a 64-bit delta by shifting and multiplying by a 32-bit fraction,
 * yielding a 64-bit result.
 */
static inline uint64_t
scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
	uint64_t product;

	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

#if defined(__i386__)
	{
		uint32_t tmp1, tmp2;

		/**
		 * For i386, the formula looks like:
		 *
		 *   lower = (mul_frac * (delta & UINT_MAX)) >> 32
		 *   upper = mul_frac * (delta >> 32)
		 *   product = lower + upper
		 */
		__asm__ (
			"mul  %5       ; "
			"mov  %4,%%eax ; "
			"mov  %%edx,%4 ; "
			"mul  %5       ; "
			"xor  %5,%5    ; "
			"add  %4,%%eax ; "
			"adc  %5,%%edx ; "
			: "=A" (product), "=r" (tmp1), "=r" (tmp2)
			: "a" ((uint32_t)delta), "1" ((uint32_t)(delta >> 32)),
			  "2" (mul_frac) );
	}
#elif defined(__amd64__)
	{
		unsigned long tmp;

		__asm__ (
			"mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
			: [lo]"=a" (product), [hi]"=d" (tmp)
			: "0" (delta), [mul_frac]"rm"((uint64_t)mul_frac));
	}
#else
#error "xentimer: unsupported architecture"
#endif

	return (product);
}

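/*
 * For reference, an equivalent portable form of the computation above.  This
 * is an illustrative sketch only and is not used by the driver; the helper
 * name is hypothetical and it assumes the compiler provides a 128-bit
 * integer type.
 */
#if 0
static inline uint64_t
scale_delta_portable(uint64_t delta, uint32_t mul_frac, int shift)
{

	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

	/* 64x32 -> 96-bit product; keeping bits 32..95 divides by 2^32. */
	return ((uint64_t)(((unsigned __int128)delta * mul_frac) >> 32));
}
#endif
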
static uint64_t
get_nsec_offset(struct vcpu_time_info *tinfo)
{

	return (scale_delta(rdtsc() - tinfo->tsc_timestamp,
	    tinfo->tsc_to_system_mul, tinfo->tsc_shift));
}

/*
 * Read the current hypervisor system uptime value from Xen.
 * See <xen/interface/xen.h> for a description of how this works.
 */
static uint32_t
xen_fetch_vcpu_tinfo(struct vcpu_time_info *dst, struct vcpu_time_info *src)
{

	do {
		dst->version = src->version;
		rmb();
		dst->tsc_timestamp = src->tsc_timestamp;
		dst->system_time = src->system_time;
		dst->tsc_to_system_mul = src->tsc_to_system_mul;
		dst->tsc_shift = src->tsc_shift;
		rmb();
	} while ((src->version & 1) | (dst->version ^ src->version));

	return (dst->version);
}

/**
 * \brief Get the current time, in nanoseconds, since the hypervisor booted.
 *
 * \note This function returns the current CPU's idea of this value, unless
 *       it happens to be less than another CPU's previously determined value.
 */
static uint64_t
xen_fetch_vcpu_time(void)
{
	struct vcpu_time_info dst;
	struct vcpu_time_info *src;
	uint32_t pre_version;
	uint64_t now;
	volatile uint64_t last;
	struct vcpu_info *vcpu = DPCPU_GET(vcpu_info);

	src = &vcpu->time;

	critical_enter();
	do {
		pre_version = xen_fetch_vcpu_tinfo(&dst, src);
		barrier();
		now = dst.system_time + get_nsec_offset(&dst);
		barrier();
	} while (pre_version != src->version);

	/*
	 * Enforce a monotonically increasing clock time across all
	 * VCPUs.  If our time is too old, use the last time and return.
	 * Otherwise, try to update the last time.
	 */
	do {
		last = xen_timer_last_time;
		if (last > now) {
			now = last;
			break;
		}
	} while (!atomic_cmpset_64(&xen_timer_last_time, last, now));

	critical_exit();

	return (now);
}

static uint32_t
xentimer_get_timecount(struct timecounter *tc)
{

	return ((uint32_t)xen_fetch_vcpu_time() & UINT_MAX);
}

/**
 * \brief Fetch the hypervisor boot time, known as the "Xen wallclock".
 *
 * \param ts	Timespec to store the current stable value.
 *
 * \note This value is updated when Domain-0 shifts its clock to follow
 *       clock drift, e.g. as detected by NTP.
 */
static void
xen_fetch_wallclock(struct timespec *ts)
{
	shared_info_t *src = HYPERVISOR_shared_info;
	uint32_t version = 0;

	do {
		version = src->wc_version;
		rmb();
		ts->tv_sec = src->wc_sec;
		ts->tv_nsec = src->wc_nsec;
		rmb();
	} while ((src->wc_version & 1) | (version ^ src->wc_version));
}

static void
xen_fetch_uptime(struct timespec *ts)
{
	uint64_t uptime = xen_fetch_vcpu_time();
	ts->tv_sec = uptime / NSEC_IN_SEC;
	ts->tv_nsec = uptime % NSEC_IN_SEC;
}

static int
xentimer_settime(device_t dev __unused, struct timespec *ts)
{
	/*
	 * Don't return EINVAL here; just silently fail if the domain isn't
	 * privileged enough to set the TOD.
	 */
	return (0);
}

/**
 * \brief Return current time according to the Xen Hypervisor wallclock.
 *
 * \param dev	Xentimer device.
 * \param ts	Pointer to store the wallclock time.
 *
 * \note  The Xen time structures document the hypervisor start time and the
 *        uptime-since-hypervisor-start (in nsec.) They need to be combined
 *        in order to calculate a TOD clock.
 */
static int
xentimer_gettime(device_t dev, struct timespec *ts)
{
	struct timespec u_ts;

	timespecclear(ts);
	xen_fetch_wallclock(ts);
	xen_fetch_uptime(&u_ts);
	timespecadd(ts, &u_ts, ts);

	return (0);
}

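/*
 * Worked example with illustrative figures (not taken from the source): if
 * the wallclock reports wc_sec = 1500000000 (the TOD at hypervisor boot)
 * and the PV clock reports an uptime of 86400000500000 ns (one day plus
 * 500us), the combined TOD is 1500086400 seconds and 500000 nanoseconds.
 */
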
/**
 * \brief Handle a timer interrupt for the Xen PV timer driver.
 *
 * \param arg	Xen timer driver softc that is expecting the interrupt.
 */
static int
xentimer_intr(void *arg)
{
	struct xentimer_softc *sc = (struct xentimer_softc *)arg;
	struct xentimer_pcpu_data *pcpu = DPCPU_PTR(xentimer_pcpu);

	pcpu->last_processed = xen_fetch_vcpu_time();
	if (pcpu->timer != 0 && sc->et.et_active)
		sc->et.et_event_cb(&sc->et, sc->et.et_arg);

	return (FILTER_HANDLED);
}

static int
xentimer_vcpu_start_timer(int vcpu, uint64_t next_time)
{
	struct vcpu_set_singleshot_timer single;

	single.timeout_abs_ns = next_time;
	single.flags          = VCPU_SSHOTTMR_future;
	return (HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, vcpu, &single));
}

static int
xentimer_vcpu_stop_timer(int vcpu)
{

	return (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, vcpu, NULL));
}

/**
 * \brief Set the next oneshot time for the current CPU.
 *
 * \param et	Xen timer driver event timer to schedule on.
 * \param first	Delta to the next time to schedule the interrupt for.
 * \param period Not used.
 *
 * \note See eventtimers(9) for more information.
 *
 * \returns 0
 */
static int
xentimer_et_start(struct eventtimer *et,
    sbintime_t first, sbintime_t period)
{
	int error = 0, i = 0;
	struct xentimer_softc *sc = et->et_priv;
	int cpu = PCPU_GET(vcpu_id);
	struct xentimer_pcpu_data *pcpu = DPCPU_PTR(xentimer_pcpu);
	uint64_t first_in_ns, next_time;

	/* See sbttots() for this formula. */
	first_in_ns = (((first >> 32) * NSEC_IN_SEC) +
	               (((uint64_t)NSEC_IN_SEC * (uint32_t)first) >> 32));

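	/*
	 * Worked example: sbintime_t is 32.32 fixed-point seconds, so
	 * first = 0x80000000 (0.5 s) yields
	 * first_in_ns = 0 * NSEC_IN_SEC + ((NSEC_IN_SEC * 0x80000000) >> 32)
	 *             = 500000000 ns.
	 */
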
	/*
	 * Retry any timer scheduling failures, where the hypervisor
	 * returns -ETIME.  Sometimes even a 100us timer period isn't large
	 * enough, but larger period instances are relatively uncommon.
	 *
	 * XXX Remove the panics once et_start() and its consumers are
	 *     equipped to deal with start failures.
	 */
	do {
		if (++i == 60)
			panic("can't schedule timer");
		next_time = xen_fetch_vcpu_time() + first_in_ns;
		error = xentimer_vcpu_start_timer(cpu, next_time);
	} while (error == -ETIME);

	if (error)
		panic("%s: Error %d setting singleshot timer to %"PRIu64"\n",
		    device_get_nameunit(sc->dev), error, next_time);

	pcpu->timer = next_time;
	return (error);
}

/**
 * \brief Cancel the event timer's currently running timer, if any.
 */
static int
xentimer_et_stop(struct eventtimer *et)
{
	int cpu = PCPU_GET(vcpu_id);
	struct xentimer_pcpu_data *pcpu = DPCPU_PTR(xentimer_pcpu);

	pcpu->timer = 0;
	return (xentimer_vcpu_stop_timer(cpu));
}

/**
 * \brief Attach a Xen PV timer driver instance.
 *
 * \param dev	Bus device object to attach.
 *
 * \returns 0 on success, otherwise an errno value on failure.
 */
static int
xentimer_attach(device_t dev)
{
	struct xentimer_softc *sc = device_get_softc(dev);
	int error, i;

	sc->dev = dev;

	/* Bind an event channel to a VIRQ on each VCPU. */
	CPU_FOREACH(i) {
		struct xentimer_pcpu_data *pcpu;

		pcpu = DPCPU_ID_PTR(i, xentimer_pcpu);
		error = HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, i, NULL);
		if (error) {
			device_printf(dev, "Error disabling Xen periodic timer "
			                   "on CPU %d\n", i);
			return (error);
		}

		error = xen_intr_bind_virq(dev, VIRQ_TIMER, i, xentimer_intr,
		    NULL, sc, INTR_TYPE_CLK, &pcpu->irq_handle);
		if (error) {
			device_printf(dev, "Error %d binding VIRQ_TIMER "
			    "to VCPU %d\n", error, i);
			return (error);
		}
		xen_intr_describe(pcpu->irq_handle, "c%d", i);
	}

	/* Register the event timer. */
	sc->et.et_name = "XENTIMER";
	sc->et.et_quality = XENTIMER_QUALITY;
	sc->et.et_flags = ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU;
	sc->et.et_frequency = NSEC_IN_SEC;
	/* See tstosbt() for this formula */
	sc->et.et_min_period = (XENTIMER_MIN_PERIOD_IN_NSEC *
	                        (((uint64_t)1 << 63) / 500000000) >> 32);
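	/*
	 * Worked example: ((uint64_t)1 << 63) / 500000000 is 2^64 / 10^9
	 * (about 18446744073), so the line above evaluates to
	 * 100000 * 18446744073 >> 32 = 429496, i.e. roughly 100us expressed
	 * in 32.32 fixed-point sbintime units.
	 */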
	sc->et.et_max_period = ((sbintime_t)4 << 32);
	sc->et.et_start = xentimer_et_start;
	sc->et.et_stop = xentimer_et_stop;
	sc->et.et_priv = sc;
	et_register(&sc->et);

	/* Register the timecounter. */
	sc->tc.tc_name = "XENTIMER";
	sc->tc.tc_quality = XENTIMER_QUALITY;
	sc->tc.tc_flags = TC_FLAGS_SUSPEND_SAFE;
	/*
	 * The underlying resolution is in nanoseconds, since the timer info
	 * scales TSC frequencies using a fraction that represents time in
	 * terms of nanoseconds.
	 */
	sc->tc.tc_frequency = NSEC_IN_SEC;
	sc->tc.tc_counter_mask = ~0u;
	sc->tc.tc_get_timecount = xentimer_get_timecount;
	sc->tc.tc_priv = sc;
	tc_init(&sc->tc);

	/* Register the Hypervisor wall clock */
	clock_register(dev, XENCLOCK_RESOLUTION);

	return (0);
}

static int
xentimer_detach(device_t dev)
{

	/* Implement Xen PV clock teardown - XXX see hpet_detach ? */
	/* If possible:
	 * 1. need to deregister timecounter
	 * 2. need to deregister event timer
	 * 3. need to deregister virtual IRQ event channels
	 */
	return (EBUSY);
}

static void
xentimer_percpu_resume(void *arg)
{
	device_t dev = (device_t) arg;
	struct xentimer_softc *sc = device_get_softc(dev);

	xentimer_et_start(&sc->et, sc->et.et_min_period, 0);
}

static int
xentimer_resume(device_t dev)
{
	int error;
	int i;

	/* Disable the periodic timer */
	CPU_FOREACH(i) {
		error = HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, i, NULL);
		if (error != 0) {
			device_printf(dev,
			    "Error disabling Xen periodic timer on CPU %d\n",
			    i);
			return (error);
		}
	}

	/* Reset the last uptime value */
	xen_timer_last_time = 0;

	/* Reset the RTC clock */
	inittodr(time_second);

	/* Kick the timers on all CPUs */
	smp_rendezvous(NULL, xentimer_percpu_resume, NULL, dev);

	if (bootverbose)
		device_printf(dev, "resumed operation after suspension\n");

	return (0);
}

static int
xentimer_suspend(device_t dev)
{
	return (0);
}

static device_method_t xentimer_methods[] = {
	DEVMETHOD(device_identify, xentimer_identify),
	DEVMETHOD(device_probe, xentimer_probe),
	DEVMETHOD(device_attach, xentimer_attach),
	DEVMETHOD(device_detach, xentimer_detach),
	DEVMETHOD(device_suspend, xentimer_suspend),
	DEVMETHOD(device_resume, xentimer_resume),
	/* clock interface */
	DEVMETHOD(clock_gettime, xentimer_gettime),
	DEVMETHOD(clock_settime, xentimer_settime),
	DEVMETHOD_END
};

static driver_t xentimer_driver = {
	"xen_et",
	xentimer_methods,
	sizeof(struct xentimer_softc),
};

DRIVER_MODULE(xentimer, nexus, xentimer_driver, xentimer_devclass, 0, 0);
MODULE_DEPEND(xentimer, nexus, 1, 1, 1);