xref: /freebsd-13.1/sys/dev/xen/timer/timer.c (revision 040f9b1e)
1 /**
2  * Copyright (c) 2009 Adrian Chadd
3  * Copyright (c) 2012 Spectra Logic Corporation
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  */
28 
29 /**
30  * \file dev/xen/timer/timer.c
31  * \brief A timer driver for the Xen hypervisor's PV clock.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/bus.h>
40 #include <sys/kernel.h>
41 #include <sys/module.h>
42 #include <sys/time.h>
43 #include <sys/timetc.h>
44 #include <sys/timeet.h>
45 #include <sys/smp.h>
46 #include <sys/limits.h>
47 #include <sys/clock.h>
48 
49 #include <xen/xen-os.h>
50 #include <xen/features.h>
51 #include <xen/xen_intr.h>
52 #include <xen/hypervisor.h>
53 #include <xen/interface/io/xenbus.h>
54 #include <xen/interface/vcpu.h>
55 
56 #include <machine/cpu.h>
57 #include <machine/cpufunc.h>
58 #include <machine/clock.h>
59 #include <machine/_inttypes.h>
60 
61 #include "clock_if.h"
62 
static devclass_t xentimer_devclass;

/* Time unit conversion factors. */
#define	NSEC_IN_SEC	1000000000ULL
#define	NSEC_IN_USEC	1000ULL
/* 18446744073 = int(2^64 / NSEC_IN_SEC) = 1 ns in 64-bit fractions */
#define	FRAC_IN_NSEC	18446744073LL

/* Xen timers may fire up to 100us off */
#define	XENTIMER_MIN_PERIOD_IN_NSEC	100*NSEC_IN_USEC
/* Resolution value passed to clock_register() for the Xen wallclock. */
#define	XENCLOCK_RESOLUTION		10000000

#define	ETIME	62	/* Xen "bad time" error */

/* Quality advertised for both the timecounter and the event timer. */
#define	XENTIMER_QUALITY	950
77 
/* Per-CPU state for the Xen PV timer. */
struct xentimer_pcpu_data {
	/* Absolute expiry (vCPU system time, ns) of the armed oneshot; 0 if idle. */
	uint64_t timer;
	/* vCPU system time recorded when the last timer interrupt was handled. */
	uint64_t last_processed;
	/* Cookie returned by xen_intr_bind_virq() for this CPU's VIRQ_TIMER. */
	void *irq_handle;
};

DPCPU_DEFINE(struct xentimer_pcpu_data, xentimer_pcpu);

/* Per-CPU pointer to Xen's vcpu_info structure, populated elsewhere. */
DPCPU_DECLARE(struct vcpu_info *, vcpu_info);
87 
/* Software state for the single xentimer device instance. */
struct xentimer_softc {
	device_t dev;
	struct timecounter tc;	/* Registered with tc_init() at attach. */
	struct eventtimer et;	/* Registered with et_register() at attach. */
};

/* Last time; this guarantees a monotonically increasing clock. */
volatile uint64_t xen_timer_last_time = 0;
96 
97 static void
98 xentimer_identify(driver_t *driver, device_t parent)
99 {
100 	if (!xen_domain())
101 		return;
102 
103 	/* Handle all Xen PV timers in one device instance. */
104 	if (devclass_get_device(xentimer_devclass, 0))
105 		return;
106 
107 	BUS_ADD_CHILD(parent, 0, "xen_et", 0);
108 }
109 
static int
xentimer_probe(device_t dev)
{
	KASSERT((xen_domain()), ("Trying to use Xen timer on bare metal"));
	/*
	 * In order to attach, this driver requires the following:
	 * - Vector callback support by the hypervisor, in order to deliver
	 *   timer interrupts to the correct CPU for CPUs other than 0.
	 * - Access to the hypervisor shared info page, in order to look up
	 *   each VCPU's timer information and the Xen wallclock time.
	 * - The hypervisor must say its PV clock is "safe" to use.
	 * - The hypervisor must support VCPUOP hypercalls.
	 * - The maximum number of CPUs supported by FreeBSD must not exceed
	 *   the number of VCPUs supported by the hypervisor.
	 */
	/* Fail the probe with ENXIO (detaching first) unless "condition" holds. */
#define	XTREQUIRES(condition, reason...)	\
	if (!(condition)) {			\
		device_printf(dev, ## reason);	\
		device_detach(dev);		\
		return (ENXIO);			\
	}

	if (xen_hvm_domain()) {
		XTREQUIRES(xen_vector_callback_enabled,
		           "vector callbacks unavailable\n");
		XTREQUIRES(xen_feature(XENFEAT_hvm_safe_pvclock),
		           "HVM safe pvclock unavailable\n");
	}
	XTREQUIRES(HYPERVISOR_shared_info != NULL,
	           "shared info page unavailable\n");
	/* A successful no-op hypercall proves the VCPUOP interface works. */
	XTREQUIRES(HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, 0, NULL) == 0,
	           "VCPUOPs interface unavailable\n");
#undef XTREQUIRES
	device_set_desc(dev, "Xen PV Clock");
	return (0);
}
146 
147 /*
148  * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
149  * yielding a 64-bit result.
150  */
static inline uint64_t
scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
	uint64_t product;

	/* Pre-scale delta by 2^shift; shift may be negative. */
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

#if defined(__i386__)
	{
		uint32_t tmp1, tmp2;

		/**
		 * For i386, the formula looks like:
		 *
		 *   lower = (mul_frac * (delta & UINT_MAX)) >> 32
		 *   upper = mul_frac * (delta >> 32)
		 *   product = lower + upper
		 */
		__asm__ (
			"mul  %5       ; "
			"mov  %4,%%eax ; "
			"mov  %%edx,%4 ; "
			"mul  %5       ; "
			"xor  %5,%5    ; "
			"add  %4,%%eax ; "
			"adc  %5,%%edx ; "
			: "=A" (product), "=r" (tmp1), "=r" (tmp2)
			: "a" ((uint32_t)delta), "1" ((uint32_t)(delta >> 32)),
			  "2" (mul_frac) );
	}
#elif defined(__amd64__)
	{
		unsigned long tmp;

		/*
		 * 64x64->128-bit multiply in %rdx:%rax, then shrd extracts
		 * bits 32..95, i.e. (delta * mul_frac) >> 32.
		 */
		__asm__ (
			"mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
			: [lo]"=a" (product), [hi]"=d" (tmp)
			: "0" (delta), [mul_frac]"rm"((uint64_t)mul_frac));
	}
#else
#error "xentimer: unsupported architecture"
#endif

	return (product);
}
199 
200 static uint64_t
201 get_nsec_offset(struct vcpu_time_info *tinfo)
202 {
203 
204 	return (scale_delta(rdtsc() - tinfo->tsc_timestamp,
205 	    tinfo->tsc_to_system_mul, tinfo->tsc_shift));
206 }
207 
208 /*
209  * Read the current hypervisor system uptime value from Xen.
210  * See <xen/interface/xen.h> for a description of how this works.
211  */
static uint32_t
xen_fetch_vcpu_tinfo(struct vcpu_time_info *dst, struct vcpu_time_info *src)
{

	/*
	 * Seqlock-style snapshot: an odd src->version means the hypervisor
	 * is mid-update, and a version that changed across the copy means
	 * we raced one.  Retry in either case so *dst is self-consistent.
	 */
	do {
		dst->version = src->version;
		rmb();
		dst->tsc_timestamp = src->tsc_timestamp;
		dst->system_time = src->system_time;
		dst->tsc_to_system_mul = src->tsc_to_system_mul;
		dst->tsc_shift = src->tsc_shift;
		rmb();
	} while ((src->version & 1) | (dst->version ^ src->version));

	return (dst->version);
}
228 
229 /**
230  * \brief Get the current time, in nanoseconds, since the hypervisor booted.
231  *
232  * \note This function returns the current CPU's idea of this value, unless
233  *       it happens to be less than another CPU's previously determined value.
234  */
static uint64_t
xen_fetch_vcpu_time(void)
{
	struct vcpu_time_info dst;
	struct vcpu_time_info *src;
	uint32_t pre_version;
	uint64_t now;
	volatile uint64_t last;
	struct vcpu_info *vcpu = DPCPU_GET(vcpu_info);

	src = &vcpu->time;

	/* Stay pinned to this CPU so we keep reading our own vcpu_info. */
	critical_enter();
	do {
		/* Snapshot the time record, then derive "now" from it. */
		pre_version = xen_fetch_vcpu_tinfo(&dst, src);
		barrier();
		now = dst.system_time + get_nsec_offset(&dst);
		barrier();
	} while (pre_version != src->version);

	/*
	 * Enforce a monotonically increasing clock time across all
	 * VCPUs.  If our time is too old, use the last time and return.
	 * Otherwise, try to update the last time.
	 */
	do {
		last = xen_timer_last_time;
		if (last > now) {
			now = last;
			break;
		}
	} while (!atomic_cmpset_64(&xen_timer_last_time, last, now));

	critical_exit();

	return (now);
}
272 
273 static uint32_t
274 xentimer_get_timecount(struct timecounter *tc)
275 {
276 
277 	return ((uint32_t)xen_fetch_vcpu_time() & UINT_MAX);
278 }
279 
280 /**
281  * \brief Fetch the hypervisor boot time, known as the "Xen wallclock".
282  *
 * \param ts		Timespec to store the current stable value.
285  *
286  * \note This value is updated when Domain-0 shifts its clock to follow
287  *       clock drift, e.g. as detected by NTP.
288  */
static void
xen_fetch_wallclock(struct timespec *ts)
{
	shared_info_t *src = HYPERVISOR_shared_info;
	uint32_t version = 0;

	/*
	 * Seqlock-style read, like the vCPU time records: wc_version is odd
	 * while the wallclock fields are being updated, so retry until a
	 * stable, unchanged version brackets the copy.
	 */
	do {
		version = src->wc_version;
		rmb();
		ts->tv_sec = src->wc_sec;
		ts->tv_nsec = src->wc_nsec;
		rmb();
	} while ((src->wc_version & 1) | (version ^ src->wc_version));
}
303 
304 static void
305 xen_fetch_uptime(struct timespec *ts)
306 {
307 	uint64_t uptime = xen_fetch_vcpu_time();
308 	ts->tv_sec = uptime / NSEC_IN_SEC;
309 	ts->tv_nsec = uptime % NSEC_IN_SEC;
310 }
311 
312 static int
313 xentimer_settime(device_t dev __unused, struct timespec *ts)
314 {
315 	/*
316 	 * Don't return EINVAL here; just silently fail if the domain isn't
317 	 * privileged enough to set the TOD.
318 	 */
319 	return(0);
320 }
321 
322 /**
323  * \brief Return current time according to the Xen Hypervisor wallclock.
324  *
325  * \param dev	Xentimer device.
326  * \param ts	Pointer to store the wallclock time.
327  *
328  * \note  The Xen time structures document the hypervisor start time and the
329  *        uptime-since-hypervisor-start (in nsec.) They need to be combined
330  *        in order to calculate a TOD clock.
331  */
332 static int
333 xentimer_gettime(device_t dev, struct timespec *ts)
334 {
335 	struct timespec u_ts;
336 
337 	timespecclear(ts);
338 	xen_fetch_wallclock(ts);
339 	xen_fetch_uptime(&u_ts);
340 	timespecadd(ts, &u_ts);
341 
342 	return(0);
343 }
344 
345 /**
346  * \brief Handle a timer interrupt for the Xen PV timer driver.
347  *
348  * \param arg	Xen timer driver softc that is expecting the interrupt.
349  */
350 static int
351 xentimer_intr(void *arg)
352 {
353 	struct xentimer_softc *sc = (struct xentimer_softc *)arg;
354 	struct xentimer_pcpu_data *pcpu = DPCPU_PTR(xentimer_pcpu);
355 
356 	pcpu->last_processed = xen_fetch_vcpu_time();
357 	if (pcpu->timer != 0 && sc->et.et_active)
358 		sc->et.et_event_cb(&sc->et, sc->et.et_arg);
359 
360 	return (FILTER_HANDLED);
361 }
362 
363 static int
364 xentimer_vcpu_start_timer(int vcpu, uint64_t next_time)
365 {
366 	struct vcpu_set_singleshot_timer single;
367 
368 	single.timeout_abs_ns = next_time;
369 	single.flags          = VCPU_SSHOTTMR_future;
370 	return (HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, vcpu, &single));
371 }
372 
/* Cancel any pending singleshot timer on the given vCPU. */
static int
xentimer_vcpu_stop_timer(int vcpu)
{

	return (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, vcpu, NULL));
}
379 
380 /**
381  * \brief Set the next oneshot time for the current CPU.
382  *
383  * \param et	Xen timer driver event timer to schedule on.
384  * \param first	Delta to the next time to schedule the interrupt for.
385  * \param period Not used.
386  *
387  * \note See eventtimers(9) for more information.
 *
390  * \returns 0
391  */
static int
xentimer_et_start(struct eventtimer *et,
    sbintime_t first, sbintime_t period)
{
	int error = 0, i = 0;
	struct xentimer_softc *sc = et->et_priv;
	int cpu = PCPU_GET(acpi_id);
	struct xentimer_pcpu_data *pcpu = DPCPU_PTR(xentimer_pcpu);
	uint64_t first_in_ns, next_time;

	/* See sbttots() for this formula. */
	first_in_ns = (((first >> 32) * NSEC_IN_SEC) +
	               (((uint64_t)NSEC_IN_SEC * (uint32_t)first) >> 32));

	/*
	 * Retry any timer scheduling failures, where the hypervisor
	 * returns -ETIME.  Sometimes even a 100us timer period isn't large
	 * enough, but larger period instances are relatively uncommon.
	 *
	 * XXX Remove the panics once et_start() and its consumers are
	 *     equipped to deal with start failures.
	 */
	do {
		if (++i == 60)
			panic("can't schedule timer");
		/* Re-read the clock each attempt so the deadline stays in the future. */
		next_time = xen_fetch_vcpu_time() + first_in_ns;
		error = xentimer_vcpu_start_timer(cpu, next_time);
	} while (error == -ETIME);

	if (error)
		panic("%s: Error %d setting singleshot timer to %"PRIu64"\n",
		    device_get_nameunit(sc->dev), error, next_time);

	/* Record the armed deadline; the interrupt filter checks for non-zero. */
	pcpu->timer = next_time;
	return (error);
}
428 
429 /**
430  * \brief Cancel the event timer's currently running timer, if any.
431  */
432 static int
433 xentimer_et_stop(struct eventtimer *et)
434 {
435 	int cpu = PCPU_GET(acpi_id);
436 	struct xentimer_pcpu_data *pcpu = DPCPU_PTR(xentimer_pcpu);
437 
438 	pcpu->timer = 0;
439 	return (xentimer_vcpu_stop_timer(cpu));
440 }
441 
442 /**
443  * \brief Attach a Xen PV timer driver instance.
444  *
445  * \param dev	Bus device object to attach.
446  *
 * \returns 0 on success, or an errno value if disabling the periodic
 *          timer or binding VIRQ_TIMER fails on any CPU.
449  */
static int
xentimer_attach(device_t dev)
{
	struct xentimer_softc *sc = device_get_softc(dev);
	int error, i;

	sc->dev = dev;

	/* Bind an event channel to a VIRQ on each VCPU. */
	CPU_FOREACH(i) {
		struct xentimer_pcpu_data *pcpu = DPCPU_ID_PTR(i, xentimer_pcpu);

		/* The periodic timer is unused; singleshot timers do the work. */
		error = HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, i, NULL);
		if (error) {
			device_printf(dev, "Error disabling Xen periodic timer "
			                   "on CPU %d\n", i);
			return (error);
		}

		error = xen_intr_bind_virq(dev, VIRQ_TIMER, i, xentimer_intr,
		    NULL, sc, INTR_TYPE_CLK, &pcpu->irq_handle);
		if (error) {
			device_printf(dev, "Error %d binding VIRQ_TIMER "
			    "to VCPU %d\n", error, i);
			return (error);
		}
		/* Label the interrupt with the CPU it serves. */
		xen_intr_describe(pcpu->irq_handle, "c%d", i);
	}

	/* Register the event timer. */
	sc->et.et_name = "XENTIMER";
	sc->et.et_quality = XENTIMER_QUALITY;
	sc->et.et_flags = ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU;
	sc->et.et_frequency = NSEC_IN_SEC;
	/* See tstosbt() for this formula */
	sc->et.et_min_period = (XENTIMER_MIN_PERIOD_IN_NSEC *
	                        (((uint64_t)1 << 63) / 500000000) >> 32);
	sc->et.et_max_period = ((sbintime_t)4 << 32);
	sc->et.et_start = xentimer_et_start;
	sc->et.et_stop = xentimer_et_stop;
	sc->et.et_priv = sc;
	et_register(&sc->et);

	/* Register the timecounter. */
	sc->tc.tc_name = "XENTIMER";
	sc->tc.tc_quality = XENTIMER_QUALITY;
	/*
	 * The underlying resolution is in nanoseconds, since the timer info
	 * scales TSC frequencies using a fraction that represents time in
	 * terms of nanoseconds.
	 */
	sc->tc.tc_frequency = NSEC_IN_SEC;
	sc->tc.tc_counter_mask = ~0u;
	sc->tc.tc_get_timecount = xentimer_get_timecount;
	sc->tc.tc_priv = sc;
	tc_init(&sc->tc);

	/* Register the Hypervisor wall clock */
	clock_register(dev, XENCLOCK_RESOLUTION);

	return (0);
}
512 
static int
xentimer_detach(device_t dev)
{

	/* Implement Xen PV clock teardown - XXX see hpet_detach ? */
	/* If possible:
	 * 1. need to deregister timecounter
	 * 2. need to deregister event timer
	 * 3. need to deregister virtual IRQ event channels
	 */
	/* Teardown is unimplemented, so refuse to detach. */
	return (EBUSY);
}
525 
526 /**
527  * The following device methods are disabled because they wouldn't work
528  * properly.
529  */
530 #ifdef NOTYET
static int
xentimer_resume(device_t dev)
{
	struct xentimer_softc *sc = device_get_softc(dev);
	int error = 0;
	int i;

	device_printf(sc->dev, "%s", __func__);
	/* Re-arm every oneshot that was pending when the system suspended. */
	CPU_FOREACH(i) {
		struct xentimer_pcpu_data *pcpu = DPCPU_ID_PTR(i, xentimer_pcpu);

		/* Skip inactive timers. */
		if (pcpu->timer == 0)
			continue;

		/*
		 * XXX This won't actually work, because Xen requires that
		 *     singleshot timers be set while running on the given CPU.
		 */
		error = xentimer_vcpu_start_timer(i, pcpu->timer);
		if (error == -ETIME) {
			/* Event time has already passed; process. */
			xentimer_intr(sc);
		} else if (error != 0) {
			panic("%s: error %d restarting vcpu %d\n",
			    __func__, error, i);
		}
	}

	return (error);
}
562 
static int
xentimer_suspend(device_t dev)
{
	struct xentimer_softc *sc = device_get_softc(dev);
	int error = 0;
	int i;

	device_printf(sc->dev, "%s", __func__);
	/* Stop every armed oneshot; pcpu->timer keeps the deadline for resume. */
	CPU_FOREACH(i) {
		struct xentimer_pcpu_data *pcpu = DPCPU_ID_PTR(i, xentimer_pcpu);

		/* Skip inactive timers. */
		if (pcpu->timer == 0)
			continue;
		error = xentimer_vcpu_stop_timer(i);
		if (error)
			panic("Error %d stopping VCPU %d timer\n", error, i);
	}

	return (error);
}
584 #endif
585 
/* Device interface plus clock(9) interface method table. */
static device_method_t xentimer_methods[] = {
	DEVMETHOD(device_identify, xentimer_identify),
	DEVMETHOD(device_probe, xentimer_probe),
	DEVMETHOD(device_attach, xentimer_attach),
	DEVMETHOD(device_detach, xentimer_detach),
#ifdef NOTYET
	DEVMETHOD(device_suspend, xentimer_suspend),
	DEVMETHOD(device_resume, xentimer_resume),
#endif
	/* clock interface */
	DEVMETHOD(clock_gettime, xentimer_gettime),
	DEVMETHOD(clock_settime, xentimer_settime),
	DEVMETHOD_END
};
600 
static driver_t xentimer_driver = {
	"xen_et",		/* Matches the name passed to BUS_ADD_CHILD(). */
	xentimer_methods,
	sizeof(struct xentimer_softc),
};
606 
/* Attach under nexus; depend on nexus so module ordering is correct. */
DRIVER_MODULE(xentimer, nexus, xentimer_driver, xentimer_devclass, 0, 0);
MODULE_DEPEND(xentimer, nexus, 1, 1, 1);
609