/*-
 * Copyright (c) 2009 Adrian Chadd
 * Copyright (c) 2012 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/**
 * \file dev/xen/timer/timer.c
 * \brief A timer driver for the Xen hypervisor's PV clock.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/timeet.h>
#include <sys/smp.h>
#include <sys/limits.h>
#include <sys/clock.h>
#include <sys/proc.h>

#include <xen/xen-os.h>
#include <xen/features.h>
#include <xen/xen_intr.h>
#include <xen/hypervisor.h>
#include <xen/interface/io/xenbus.h>
#include <xen/interface/vcpu.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/clock.h>
#include <machine/_inttypes.h>
#include <machine/smp.h>

#include <dev/xen/timer/timer.h>

#include "clock_if.h"

static devclass_t xentimer_devclass;

#define	NSEC_IN_SEC	1000000000ULL
#define	NSEC_IN_USEC	1000ULL
/* 18446744073 = int(2^64 / NSEC_IN_SEC) = 1 ns in 64-bit fractions */
#define	FRAC_IN_NSEC	18446744073LL

/* Xen timers may fire up to 100us off */
#define	XENTIMER_MIN_PERIOD_IN_NSEC	100*NSEC_IN_USEC
#define	XENCLOCK_RESOLUTION		10000000

#define	ETIME	62	/* Xen "bad time" error */

#define	XENTIMER_QUALITY	950

struct xentimer_pcpu_data {
	uint64_t timer;
	uint64_t last_processed;
	void *irq_handle;
};

DPCPU_DEFINE(struct xentimer_pcpu_data, xentimer_pcpu);

DPCPU_DECLARE(struct vcpu_info *, vcpu_info);

struct xentimer_softc {
	device_t dev;
	struct timecounter tc;
	struct eventtimer et;
};

/* Last time; this guarantees a monotonically increasing clock. */
volatile uint64_t xen_timer_last_time = 0;

static void
xentimer_identify(driver_t *driver, device_t parent)
{
	if (!xen_domain())
		return;

	/* Handle all Xen PV timers in one device instance. */
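	/*
	 * The timer sources are per-VCPU, but a single device instance
	 * registers the per-CPU event timer, the timecounter and the
	 * wallclock for all of them.
	 */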
	if (devclass_get_device(xentimer_devclass, 0))
		return;

	BUS_ADD_CHILD(parent, 0, "xen_et", 0);
}

static int
xentimer_probe(device_t dev)
{
	KASSERT((xen_domain()), ("Trying to use Xen timer on bare metal"));
	/*
	 * In order to attach, this driver requires the following:
	 * - Vector callback support by the hypervisor, in order to deliver
	 *   timer interrupts to the correct CPU for CPUs other than 0.
	 * - Access to the hypervisor shared info page, in order to look up
	 *   each VCPU's timer information and the Xen wallclock time.
	 * - The hypervisor must say its PV clock is "safe" to use.
	 * - The hypervisor must support VCPUOP hypercalls.
	 * - The maximum number of CPUs supported by FreeBSD must not exceed
	 *   the number of VCPUs supported by the hypervisor.
	 */
#define	XTREQUIRES(condition, reason...)	\
	if (!(condition)) {			\
		device_printf(dev, ## reason);	\
		device_detach(dev);		\
		return (ENXIO);			\
	}

	if (xen_hvm_domain()) {
		XTREQUIRES(xen_vector_callback_enabled,
		    "vector callbacks unavailable\n");
		XTREQUIRES(xen_feature(XENFEAT_hvm_safe_pvclock),
		    "HVM safe pvclock unavailable\n");
	}
	XTREQUIRES(HYPERVISOR_shared_info != NULL,
	    "shared info page unavailable\n");
	XTREQUIRES(HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, 0, NULL) == 0,
	    "VCPUOPs interface unavailable\n");
#undef XTREQUIRES
	device_set_desc(dev, "Xen PV Clock");
	return (BUS_PROBE_NOWILDCARD);
}

/*
 * Scale a 64-bit delta by shifting and then multiplying by a 32-bit fraction,
 * yielding a 64-bit result.
 */
static inline uint64_t
scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
	uint64_t product;

	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

#if defined(__i386__)
	{
		uint32_t tmp1, tmp2;

		/**
		 * For i386, the formula looks like:
		 *
		 *   lower = (mul_frac * (delta & UINT_MAX)) >> 32
		 *   upper = mul_frac * (delta >> 32)
		 *   product = lower + upper
		 */
		__asm__ (
			"mul %5 ; "
			"mov %4,%%eax ; "
			"mov %%edx,%4 ; "
			"mul %5 ; "
			"xor %5,%5 ; "
			"add %4,%%eax ; "
			"adc %5,%%edx ; "
			: "=A" (product), "=r" (tmp1), "=r" (tmp2)
			: "a" ((uint32_t)delta), "1" ((uint32_t)(delta >> 32)),
			  "2" (mul_frac) );
	}
#elif defined(__amd64__)
	{
		unsigned long tmp;

		__asm__ (
			"mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
			: [lo]"=a" (product), [hi]"=d" (tmp)
			: "0" (delta), [mul_frac]"rm"((uint64_t)mul_frac));
	}
#else
#error "xentimer: unsupported architecture"
#endif

	return (product);
}

static uint64_t
get_nsec_offset(struct vcpu_time_info *tinfo)
{

	return (scale_delta(rdtsc() - tinfo->tsc_timestamp,
	    tinfo->tsc_to_system_mul, tinfo->tsc_shift));
}

/*
 * Read the current hypervisor system uptime value from Xen.
 * See <xen/interface/xen.h> for a description of how this works.
 */
static uint32_t
xen_fetch_vcpu_tinfo(struct vcpu_time_info *dst, struct vcpu_time_info *src)
{

	do {
		dst->version = src->version;
		rmb();
		dst->tsc_timestamp = src->tsc_timestamp;
		dst->system_time = src->system_time;
		dst->tsc_to_system_mul = src->tsc_to_system_mul;
		dst->tsc_shift = src->tsc_shift;
		rmb();
	} while ((src->version & 1) | (dst->version ^ src->version));

	return (dst->version);
}

/**
 * \brief Get the current time, in nanoseconds, since the hypervisor booted.
 *
 * \param vcpu vcpu_info structure to fetch the time from.
 *
 * \note This function returns the current CPU's idea of this value, unless
 *       it happens to be less than another CPU's previously determined value.
 */
static uint64_t
xen_fetch_vcpu_time(struct vcpu_info *vcpu)
{
	struct vcpu_time_info dst;
	struct vcpu_time_info *src;
	uint32_t pre_version;
	uint64_t now;
	volatile uint64_t last;

	src = &vcpu->time;

	do {
		pre_version = xen_fetch_vcpu_tinfo(&dst, src);
		barrier();
		now = dst.system_time + get_nsec_offset(&dst);
		barrier();
	} while (pre_version != src->version);

	/*
	 * Enforce a monotonically increasing clock time across all
	 * VCPUs. If our time is too old, use the last time and return.
	 * Otherwise, try to update the last time.
	 */
	do {
		last = xen_timer_last_time;
		if (last > now) {
			now = last;
			break;
		}
	} while (!atomic_cmpset_64(&xen_timer_last_time, last, now));

	return (now);
}

static uint32_t
xentimer_get_timecount(struct timecounter *tc)
{
	uint64_t vcpu_time;

	/*
	 * We don't disable preemption here because the worst that can
	 * happen is reading the vcpu_info area of a different CPU than
	 * the one we are currently running on, but that would also
	 * return a valid tc (and we avoid the overhead of
	 * critical_{enter/exit} calls).
	 */
	vcpu_time = xen_fetch_vcpu_time(DPCPU_GET(vcpu_info));

	return (vcpu_time & UINT32_MAX);
}

/**
 * \brief Fetch the hypervisor boot time, known as the "Xen wallclock".
 *
 * \param ts Timespec to store the current stable value.
 *
 * \note This value is updated when Domain-0 shifts its clock to follow
 *       clock drift, e.g. as detected by NTP.
 */
static void
xen_fetch_wallclock(struct timespec *ts)
{
	shared_info_t *src = HYPERVISOR_shared_info;
	uint32_t version = 0;

	do {
		version = src->wc_version;
		rmb();
		ts->tv_sec = src->wc_sec;
		ts->tv_nsec = src->wc_nsec;
		rmb();
	} while ((src->wc_version & 1) | (version ^ src->wc_version));
}

static void
xen_fetch_uptime(struct timespec *ts)
{
	uint64_t uptime;

	uptime = xen_fetch_vcpu_time(DPCPU_GET(vcpu_info));

	ts->tv_sec = uptime / NSEC_IN_SEC;
	ts->tv_nsec = uptime % NSEC_IN_SEC;
}

static int
xentimer_settime(device_t dev __unused, struct timespec *ts)
{
	/*
	 * Don't return EINVAL here; just silently fail if the domain isn't
	 * privileged enough to set the TOD.
	 */
	return (0);
}

/**
 * \brief Return current time according to the Xen Hypervisor wallclock.
 *
 * \param dev Xentimer device.
 * \param ts Pointer to store the wallclock time.
 *
 * \note The Xen time structures document the hypervisor start time and the
 *       uptime-since-hypervisor-start (in nsec.) They need to be combined
 *       in order to calculate a TOD clock.
 */
static int
xentimer_gettime(device_t dev, struct timespec *ts)
{
	struct timespec u_ts;

	timespecclear(ts);
	xen_fetch_wallclock(ts);
	xen_fetch_uptime(&u_ts);
	timespecadd(ts, &u_ts);

	return (0);
}

/**
 * \brief Handle a timer interrupt for the Xen PV timer driver.
 *
 * \param arg Xen timer driver softc that is expecting the interrupt.
 */
static int
xentimer_intr(void *arg)
{
	struct xentimer_softc *sc = (struct xentimer_softc *)arg;
	struct xentimer_pcpu_data *pcpu = DPCPU_PTR(xentimer_pcpu);

	pcpu->last_processed = xen_fetch_vcpu_time(DPCPU_GET(vcpu_info));
	if (pcpu->timer != 0 && sc->et.et_active)
		sc->et.et_event_cb(&sc->et, sc->et.et_arg);

	return (FILTER_HANDLED);
}

static int
xentimer_vcpu_start_timer(int vcpu, uint64_t next_time)
{
	struct vcpu_set_singleshot_timer single;

	single.timeout_abs_ns = next_time;
	single.flags = VCPU_SSHOTTMR_future;
	return (HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, vcpu, &single));
}

static int
xentimer_vcpu_stop_timer(int vcpu)
{

	return (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, vcpu, NULL));
}

/**
 * \brief Set the next oneshot time for the current CPU.
 *
 * \param et Xen timer driver event timer to schedule on.
 * \param first Delta to the next time to schedule the interrupt for.
 * \param period Not used.
 *
 * \note See eventtimers(9) for more information.
 *
 * \returns 0
 */
static int
xentimer_et_start(struct eventtimer *et,
    sbintime_t first, sbintime_t period)
{
	int error = 0, i = 0;
	struct xentimer_softc *sc = et->et_priv;
	int cpu = PCPU_GET(vcpu_id);
	struct xentimer_pcpu_data *pcpu = DPCPU_PTR(xentimer_pcpu);
	struct vcpu_info *vcpu = DPCPU_GET(vcpu_info);
	uint64_t first_in_ns, next_time;
#ifdef INVARIANTS
	struct thread *td = curthread;
#endif

	KASSERT(td->td_critnest != 0,
	    ("xentimer_et_start called without preemption disabled"));

	/* See sbttots() for this formula. */
	first_in_ns = (((first >> 32) * NSEC_IN_SEC) +
	    (((uint64_t)NSEC_IN_SEC * (uint32_t)first) >> 32));

	/*
	 * Retry any timer scheduling failures, where the hypervisor
	 * returns -ETIME. Sometimes even a 100us timer period isn't large
	 * enough, but larger period instances are relatively uncommon.
	 *
	 * XXX Remove the panics once et_start() and its consumers are
	 * equipped to deal with start failures.
	 */
	do {
		if (++i == 60)
			panic("can't schedule timer");
		next_time = xen_fetch_vcpu_time(vcpu) + first_in_ns;
		error = xentimer_vcpu_start_timer(cpu, next_time);
	} while (error == -ETIME);

	if (error)
		panic("%s: Error %d setting singleshot timer to %"PRIu64"\n",
		    device_get_nameunit(sc->dev), error, next_time);

	pcpu->timer = next_time;
	return (error);
}

/**
 * \brief Cancel the event timer's currently running timer, if any.
 */
static int
xentimer_et_stop(struct eventtimer *et)
{
	int cpu = PCPU_GET(vcpu_id);
	struct xentimer_pcpu_data *pcpu = DPCPU_PTR(xentimer_pcpu);

	pcpu->timer = 0;
	return (xentimer_vcpu_stop_timer(cpu));
}

/**
 * \brief Attach a Xen PV timer driver instance.
 *
 * \param dev Bus device object to attach.
 *
 * \returns 0 on success, or an errno value on failure.
 */
static int
xentimer_attach(device_t dev)
{
	struct xentimer_softc *sc = device_get_softc(dev);
	int error, i;

	sc->dev = dev;

	/* Bind an event channel to a VIRQ on each VCPU. */
	CPU_FOREACH(i) {
		struct xentimer_pcpu_data *pcpu;

		pcpu = DPCPU_ID_PTR(i, xentimer_pcpu);
		error = HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, i, NULL);
		if (error) {
			device_printf(dev, "Error disabling Xen periodic timer "
			    "on CPU %d\n", i);
			return (error);
		}

		error = xen_intr_bind_virq(dev, VIRQ_TIMER, i, xentimer_intr,
		    NULL, sc, INTR_TYPE_CLK, &pcpu->irq_handle);
		if (error) {
			device_printf(dev, "Error %d binding VIRQ_TIMER "
			    "to VCPU %d\n", error, i);
			return (error);
		}
		xen_intr_describe(pcpu->irq_handle, "c%d", i);
	}

	/* Register the event timer. */
	sc->et.et_name = "XENTIMER";
	sc->et.et_quality = XENTIMER_QUALITY;
	sc->et.et_flags = ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU;
	sc->et.et_frequency = NSEC_IN_SEC;
	/* See tstosbt() for this formula */
	sc->et.et_min_period = (XENTIMER_MIN_PERIOD_IN_NSEC *
	    (((uint64_t)1 << 63) / 500000000) >> 32);
	sc->et.et_max_period = ((sbintime_t)4 << 32);
	sc->et.et_start = xentimer_et_start;
	sc->et.et_stop = xentimer_et_stop;
	sc->et.et_priv = sc;
	et_register(&sc->et);

	/* Register the timecounter. */
	sc->tc.tc_name = "XENTIMER";
	sc->tc.tc_quality = XENTIMER_QUALITY;
	sc->tc.tc_flags = TC_FLAGS_SUSPEND_SAFE;
	/*
	 * The underlying resolution is in nanoseconds, since the timer info
	 * scales TSC frequencies using a fraction that represents time in
	 * terms of nanoseconds.
	 */
	sc->tc.tc_frequency = NSEC_IN_SEC;
	sc->tc.tc_counter_mask = ~0u;
	sc->tc.tc_get_timecount = xentimer_get_timecount;
	sc->tc.tc_priv = sc;
	tc_init(&sc->tc);

	/* Register the Hypervisor wall clock */
	clock_register(dev, XENCLOCK_RESOLUTION);

	return (0);
}

static int
xentimer_detach(device_t dev)
{

	/* Implement Xen PV clock teardown - XXX see hpet_detach ? */
	/* If possible:
	 * 1. need to deregister timecounter
	 * 2. need to deregister event timer
	 * 3. need to deregister virtual IRQ event channels
	 */
	return (EBUSY);
}

static void
xentimer_percpu_resume(void *arg)
{
	device_t dev = (device_t) arg;
	struct xentimer_softc *sc = device_get_softc(dev);

	xentimer_et_start(&sc->et, sc->et.et_min_period, 0);
}

static int
xentimer_resume(device_t dev)
{
	int error;
	int i;

	/* Disable the periodic timer */
	CPU_FOREACH(i) {
		error = HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, i, NULL);
		if (error != 0) {
			device_printf(dev,
			    "Error disabling Xen periodic timer on CPU %d\n",
			    i);
			return (error);
		}
	}

	/* Reset the last uptime value */
	xen_timer_last_time = 0;

	/* Reset the RTC clock */
	inittodr(time_second);

	/* Kick the timers on all CPUs */
	smp_rendezvous(NULL, xentimer_percpu_resume, NULL, dev);

	if (bootverbose)
		device_printf(dev, "resumed operation after suspension\n");

	return (0);
}

static int
xentimer_suspend(device_t dev)
{
	return (0);
}

/*
 * Xen early clock init
 */
void
xen_clock_init(void)
{
}

/*
 * Xen PV DELAY function
 *
 * When running in PVH mode we don't have an emulated i8254, so
 * make use of the Xen time info in order to implement a simple DELAY()
 * function that can be used during early boot.
 */
void
xen_delay(int n)
{
	struct vcpu_info *vcpu = &HYPERVISOR_shared_info->vcpu_info[0];
	uint64_t end_ns;
	uint64_t current;

	end_ns = xen_fetch_vcpu_time(vcpu);
	end_ns += n * NSEC_IN_USEC;

	for (;;) {
		current = xen_fetch_vcpu_time(vcpu);
		if (current >= end_ns)
			break;
	}
}

static device_method_t xentimer_methods[] = {
	DEVMETHOD(device_identify, xentimer_identify),
	DEVMETHOD(device_probe, xentimer_probe),
	DEVMETHOD(device_attach, xentimer_attach),
	DEVMETHOD(device_detach, xentimer_detach),
	DEVMETHOD(device_suspend, xentimer_suspend),
	DEVMETHOD(device_resume, xentimer_resume),
	/* clock interface */
	DEVMETHOD(clock_gettime, xentimer_gettime),
	DEVMETHOD(clock_settime, xentimer_settime),
	DEVMETHOD_END
};

static driver_t xentimer_driver = {
	"xen_et",
	xentimer_methods,
	sizeof(struct xentimer_softc),
};

DRIVER_MODULE(xentimer, xenpv, xentimer_driver, xentimer_devclass, 0, 0);
MODULE_DEPEND(xentimer, xenpv, 1, 1, 1);