1 /*- 2 * Copyright (c) 2009 Adrian Chadd 3 * Copyright (c) 2012 Spectra Logic Corporation 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 */ 28 29 /** 30 * \file dev/xen/timer/timer.c 31 * \brief A timer driver for the Xen hypervisor's PV clock. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/timeet.h>
#include <sys/smp.h>
#include <sys/limits.h>
#include <sys/clock.h>
#include <sys/proc.h>

#include <xen/xen-os.h>
#include <xen/features.h>
#include <xen/xen_intr.h>
#include <xen/hypervisor.h>
#include <xen/interface/io/xenbus.h>
#include <xen/interface/vcpu.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/clock.h>
#include <machine/_inttypes.h>
#include <machine/smp.h>
#include <machine/pvclock.h>

#include <dev/xen/timer/timer.h>

#include "clock_if.h"

static devclass_t xentimer_devclass;

#define	NSEC_IN_SEC	1000000000ULL
#define	NSEC_IN_USEC	1000ULL
/* 18446744073 = int(2^64 / NSEC_IN_SEC) = 1 ns in 64-bit fractions */
#define	FRAC_IN_NSEC	18446744073LL

/* Xen timers may fire up to 100us off */
#define	XENTIMER_MIN_PERIOD_IN_NSEC	100*NSEC_IN_USEC
#define	XENCLOCK_RESOLUTION		10000000

#define	ETIME	62	/* Xen "bad time" error */

#define	XENTIMER_QUALITY	950

/* Per-CPU one-shot timer state. */
struct xentimer_pcpu_data {
	/* Absolute ns deadline of the pending one-shot; 0 when stopped. */
	uint64_t timer;
	/* VCPU time observed at the last timer interrupt on this CPU. */
	uint64_t last_processed;
	/* Interrupt binding returned by xen_intr_bind_virq(). */
	void *irq_handle;
};

DPCPU_DEFINE(struct xentimer_pcpu_data, xentimer_pcpu);

DPCPU_DECLARE(struct vcpu_info *, vcpu_info);

struct xentimer_softc {
	device_t dev;
	struct timecounter tc;	/* timecounter backed by the PV clock */
	struct eventtimer et;	/* per-CPU one-shot event timer */
};

/**
 * \brief Identify routine: add a single "xen_et" child on Xen domains.
 *
 * \param driver This driver (unused).
 * \param parent Bus device to add the child under.
 */
static void
xentimer_identify(driver_t *driver, device_t parent)
{
	if (!xen_domain())
		return;

	/* Handle all Xen PV timers in one device instance.
	 */
	if (devclass_get_device(xentimer_devclass, 0))
		return;

	BUS_ADD_CHILD(parent, 0, "xen_et", 0);
}

/**
 * \brief Probe: verify the hypervisor provides what this driver needs.
 *
 * \param dev Device being probed.
 *
 * \returns BUS_PROBE_NOWILDCARD on success, ENXIO if a requirement is
 *          missing.
 */
static int
xentimer_probe(device_t dev)
{
	KASSERT((xen_domain()), ("Trying to use Xen timer on bare metal"));
	/*
	 * In order to attach, this driver requires the following:
	 * - Vector callback support by the hypervisor, in order to deliver
	 *   timer interrupts to the correct CPU for CPUs other than 0.
	 * - Access to the hypervisor shared info page, in order to look up
	 *   each VCPU's timer information and the Xen wallclock time.
	 * - The hypervisor must say its PV clock is "safe" to use.
	 * - The hypervisor must support VCPUOP hypercalls.
	 * - The maximum number of CPUs supported by FreeBSD must not exceed
	 *   the number of VCPUs supported by the hypervisor.
	 */
#define	XTREQUIRES(condition, reason...)	\
	if (!(condition)) {			\
		device_printf(dev, ## reason);	\
		device_detach(dev);		\
		return (ENXIO);			\
	}

	if (xen_hvm_domain()) {
		XTREQUIRES(xen_vector_callback_enabled,
		    "vector callbacks unavailable\n");
		XTREQUIRES(xen_feature(XENFEAT_hvm_safe_pvclock),
		    "HVM safe pvclock unavailable\n");
	}
	XTREQUIRES(HYPERVISOR_shared_info != NULL,
	    "shared info page unavailable\n");
	/* A successful no-op VCPUOP call proves the interface exists. */
	XTREQUIRES(HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, 0, NULL) == 0,
	    "VCPUOPs interface unavailable\n");
#undef XTREQUIRES
	device_set_desc(dev, "Xen PV Clock");
	return (BUS_PROBE_NOWILDCARD);
}

/**
 * \brief Get the current time, in nanoseconds, since the hypervisor booted.
 *
 * \param vcpu vcpu_info structure to fetch the time from.
 *
 * \returns The VCPU's system time, scaled to nanoseconds by pvclock.
 */
static uint64_t
xen_fetch_vcpu_time(struct vcpu_info *vcpu)
{
	struct pvclock_vcpu_time_info *time;

	time = (struct pvclock_vcpu_time_info *) &vcpu->time;

	return (pvclock_get_timecount(time));
}

/* Timecounter read method: low 32 bits of the current vcpu time in ns. */
static uint32_t
xentimer_get_timecount(struct timecounter *tc)
{
	uint64_t vcpu_time;

	/*
	 * We don't disable preemption here because the worst that can
	 * happen is reading the vcpu_info area of a different CPU than
	 * the one we are currently running on, but that would also
	 * return a valid tc (and we avoid the overhead of
	 * critical_{enter/exit} calls).
	 */
	vcpu_time = xen_fetch_vcpu_time(DPCPU_GET(vcpu_info));

	/* Truncate to the 32-bit counter width advertised in tc_counter_mask. */
	return (vcpu_time & UINT32_MAX);
}

/**
 * \brief Fetch the hypervisor boot time, known as the "Xen wallclock".
 *
 * \param ts Timespec to store the current stable value.
 *
 * \note This value is updated when Domain-0 shifts its clock to follow
 *       clock drift, e.g. as detected by NTP.
 */
static void
xen_fetch_wallclock(struct timespec *ts)
{
	shared_info_t *src = HYPERVISOR_shared_info;
	struct pvclock_wall_clock *wc;

	/* The wallclock fields start at wc_version in the shared info page. */
	wc = (struct pvclock_wall_clock *) &src->wc_version;

	pvclock_get_wallclock(wc, ts);
}

/* Store the current vcpu uptime (ns since hypervisor boot) into *ts. */
static void
xen_fetch_uptime(struct timespec *ts)
{
	uint64_t uptime;

	uptime = xen_fetch_vcpu_time(DPCPU_GET(vcpu_info));

	ts->tv_sec = uptime / NSEC_IN_SEC;
	ts->tv_nsec = uptime % NSEC_IN_SEC;
}

/* clock_settime method: deliberately a no-op (see comment below). */
static int
xentimer_settime(device_t dev __unused, struct timespec *ts)
{
	/*
	 * Don't return EINVAL here; just silently fail if the domain isn't
	 * privileged enough to set the TOD.
	 */
	return (0);
}

/**
 * \brief Return current time according to the Xen Hypervisor wallclock.
 *
 * \param dev Xentimer device.
 * \param ts Pointer to store the wallclock time.
 *
 * \note The Xen time structures document the hypervisor start time and the
 *       uptime-since-hypervisor-start (in nsec.)  They need to be combined
 *       in order to calculate a TOD clock.
 *
 * \returns 0 (always succeeds).
 */
static int
xentimer_gettime(device_t dev, struct timespec *ts)
{
	struct timespec u_ts;

	timespecclear(ts);
	/* TOD = hypervisor boot time + uptime since hypervisor boot. */
	xen_fetch_wallclock(ts);
	xen_fetch_uptime(&u_ts);
	timespecadd(ts, &u_ts);

	return (0);
}

/**
 * \brief Handle a timer interrupt for the Xen PV timer driver.
 *
 * \param arg Xen timer driver softc that is expecting the interrupt.
 *
 * \returns FILTER_HANDLED (the interrupt is always consumed here).
 */
static int
xentimer_intr(void *arg)
{
	struct xentimer_softc *sc = (struct xentimer_softc *)arg;
	struct xentimer_pcpu_data *pcpu = DPCPU_PTR(xentimer_pcpu);

	/* Record the vcpu time at which this CPU last saw the interrupt. */
	pcpu->last_processed = xen_fetch_vcpu_time(DPCPU_GET(vcpu_info));
	/* Deliver the event only if a one-shot is armed and et is active. */
	if (pcpu->timer != 0 && sc->et.et_active)
		sc->et.et_event_cb(&sc->et, sc->et.et_arg);

	return (FILTER_HANDLED);
}

/*
 * Arm a one-shot timer on the given vcpu for the absolute deadline
 * next_time (in ns of vcpu system time).  Returns the hypercall status.
 */
static int
xentimer_vcpu_start_timer(int vcpu, uint64_t next_time)
{
	struct vcpu_set_singleshot_timer single;

	single.timeout_abs_ns = next_time;
	/* VCPU_SSHOTTMR_future: the hypervisor rejects deadlines in the past. */
	single.flags = VCPU_SSHOTTMR_future;
	return (HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, vcpu, &single));
}

/* Cancel any pending one-shot timer on the given vcpu. */
static int
xentimer_vcpu_stop_timer(int vcpu)
{

	return (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, vcpu, NULL));
}

/**
 * \brief Set the next oneshot time for the current CPU.
 *
 * \param et Xen timer driver event timer to schedule on.
 * \param first Delta to the next time to schedule the interrupt for.
 * \param period Not used.
 *
 * \note See eventtimers(9) for more information.
 *
 * \returns 0 on success; panics if the timer cannot be scheduled after
 *          repeated retries (see XXX note in the body).
 */
static int
xentimer_et_start(struct eventtimer *et,
    sbintime_t first, sbintime_t period)
{
	int error = 0, i = 0;
	struct xentimer_softc *sc = et->et_priv;
	int cpu = PCPU_GET(vcpu_id);
	struct xentimer_pcpu_data *pcpu = DPCPU_PTR(xentimer_pcpu);
	struct vcpu_info *vcpu = DPCPU_GET(vcpu_info);
	uint64_t first_in_ns, next_time;
#ifdef INVARIANTS
	struct thread *td = curthread;
#endif

	KASSERT(td->td_critnest != 0,
	    ("xentimer_et_start called without preemption disabled"));

	/* Convert sbintime_t delta to ns; see sbttots() for this formula. */
	first_in_ns = (((first >> 32) * NSEC_IN_SEC) +
	    (((uint64_t)NSEC_IN_SEC * (uint32_t)first) >> 32));

	/*
	 * Retry any timer scheduling failures, where the hypervisor
	 * returns -ETIME.  Sometimes even a 100us timer period isn't large
	 * enough, but larger period instances are relatively uncommon.
	 *
	 * XXX Remove the panics once et_start() and its consumers are
	 * equipped to deal with start failures.
	 */
	do {
		if (++i == 60)
			panic("can't schedule timer");
		/* Re-read the clock each attempt so the deadline stays ahead. */
		next_time = xen_fetch_vcpu_time(vcpu) + first_in_ns;
		error = xentimer_vcpu_start_timer(cpu, next_time);
	} while (error == -ETIME);

	if (error)
		panic("%s: Error %d setting singleshot timer to %"PRIu64"\n",
		    device_get_nameunit(sc->dev), error, next_time);

	pcpu->timer = next_time;
	return (error);
}

/**
 * \brief Cancel the event timer's currently running timer, if any.
 *
 * \returns The status of the stop hypercall for the current CPU.
 */
static int
xentimer_et_stop(struct eventtimer *et)
{
	int cpu = PCPU_GET(vcpu_id);
	struct xentimer_pcpu_data *pcpu = DPCPU_PTR(xentimer_pcpu);

	pcpu->timer = 0;
	return (xentimer_vcpu_stop_timer(cpu));
}

/**
 * \brief Attach a Xen PV timer driver instance.
 *
 * \param dev Bus device object to attach.
 *
 * \returns 0 on success, otherwise an errno value on failure (e.g. if a
 *          hypercall or VIRQ binding fails).
 */
static int
xentimer_attach(device_t dev)
{
	struct xentimer_softc *sc = device_get_softc(dev);
	int error, i;

	sc->dev = dev;

	/* Bind an event channel to a VIRQ on each VCPU. */
	CPU_FOREACH(i) {
		struct xentimer_pcpu_data *pcpu;

		pcpu = DPCPU_ID_PTR(i, xentimer_pcpu);
		/* The one-shot interface requires the periodic timer off. */
		error = HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, i, NULL);
		if (error) {
			device_printf(dev, "Error disabling Xen periodic timer "
			    "on CPU %d\n", i);
			return (error);
		}

		error = xen_intr_bind_virq(dev, VIRQ_TIMER, i, xentimer_intr,
		    NULL, sc, INTR_TYPE_CLK, &pcpu->irq_handle);
		if (error) {
			device_printf(dev, "Error %d binding VIRQ_TIMER "
			    "to VCPU %d\n", error, i);
			return (error);
		}
		xen_intr_describe(pcpu->irq_handle, "c%d", i);
	}

	/* Register the event timer. */
	sc->et.et_name = "XENTIMER";
	sc->et.et_quality = XENTIMER_QUALITY;
	sc->et.et_flags = ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU;
	sc->et.et_frequency = NSEC_IN_SEC;
	/* See tstosbt() for this formula */
	sc->et.et_min_period = (XENTIMER_MIN_PERIOD_IN_NSEC *
	    (((uint64_t)1 << 63) / 500000000) >> 32);
	sc->et.et_max_period = ((sbintime_t)4 << 32);
	sc->et.et_start = xentimer_et_start;
	sc->et.et_stop = xentimer_et_stop;
	sc->et.et_priv = sc;
	et_register(&sc->et);

	/* Register the timecounter. */
	sc->tc.tc_name = "XENTIMER";
	sc->tc.tc_quality = XENTIMER_QUALITY;
	sc->tc.tc_flags = TC_FLAGS_SUSPEND_SAFE;
	/*
	 * The underlying resolution is in nanoseconds, since the timer info
	 * scales TSC frequencies using a fraction that represents time in
	 * terms of nanoseconds.
	 */
	sc->tc.tc_frequency = NSEC_IN_SEC;
	sc->tc.tc_counter_mask = ~0u;
	sc->tc.tc_get_timecount = xentimer_get_timecount;
	sc->tc.tc_priv = sc;
	tc_init(&sc->tc);

	/* Register the Hypervisor wall clock */
	clock_register(dev, XENCLOCK_RESOLUTION);

	return (0);
}

/* Detach is unsupported; always fail with EBUSY (see TODO list below). */
static int
xentimer_detach(device_t dev)
{

	/* Implement Xen PV clock teardown - XXX see hpet_detach ? */
	/* If possible:
	 * 1. need to deregister timecounter
	 * 2. need to deregister event timer
	 * 3. need to deregister virtual IRQ event channels
	 */
	return (EBUSY);
}

/* smp_rendezvous callback: re-arm the one-shot timer on the current CPU. */
static void
xentimer_percpu_resume(void *arg)
{
	device_t dev = (device_t) arg;
	struct xentimer_softc *sc = device_get_softc(dev);

	xentimer_et_start(&sc->et, sc->et.et_min_period, 0);
}

/* Resume after suspension: re-disable periodic timers and restart clocks. */
static int
xentimer_resume(device_t dev)
{
	int error;
	int i;

	/* Disable the periodic timer */
	CPU_FOREACH(i) {
		error = HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, i, NULL);
		if (error != 0) {
			device_printf(dev,
			    "Error disabling Xen periodic timer on CPU %d\n",
			    i);
			return (error);
		}
	}

	/* Reset the last uptime value */
	pvclock_resume();

	/* Reset the RTC clock */
	inittodr(time_second);

	/* Kick the timers on all CPUs */
	smp_rendezvous(NULL, xentimer_percpu_resume, NULL, dev);

	if (bootverbose)
		device_printf(dev, "resumed operation after suspension\n");

	return (0);
}

/* Nothing to do on suspend; timers are re-armed in xentimer_resume(). */
static int
xentimer_suspend(device_t dev)
{
	return (0);
}

/*
 * Xen early clock init
 */
void
xen_clock_init(void)
{
}

/*
 * Xen PV DELAY function
 *
 * When running on PVH mode we don't have an emulated i8254, so
 * make use of the Xen time info in order to code a simple DELAY
 * function that can be used during early boot.
497 */ 498 void 499 xen_delay(int n) 500 { 501 struct vcpu_info *vcpu = &HYPERVISOR_shared_info->vcpu_info[0]; 502 uint64_t end_ns; 503 uint64_t current; 504 505 end_ns = xen_fetch_vcpu_time(vcpu); 506 end_ns += n * NSEC_IN_USEC; 507 508 for (;;) { 509 current = xen_fetch_vcpu_time(vcpu); 510 if (current >= end_ns) 511 break; 512 } 513 } 514 515 static device_method_t xentimer_methods[] = { 516 DEVMETHOD(device_identify, xentimer_identify), 517 DEVMETHOD(device_probe, xentimer_probe), 518 DEVMETHOD(device_attach, xentimer_attach), 519 DEVMETHOD(device_detach, xentimer_detach), 520 DEVMETHOD(device_suspend, xentimer_suspend), 521 DEVMETHOD(device_resume, xentimer_resume), 522 /* clock interface */ 523 DEVMETHOD(clock_gettime, xentimer_gettime), 524 DEVMETHOD(clock_settime, xentimer_settime), 525 DEVMETHOD_END 526 }; 527 528 static driver_t xentimer_driver = { 529 "xen_et", 530 xentimer_methods, 531 sizeof(struct xentimer_softc), 532 }; 533 534 DRIVER_MODULE(xentimer, xenpv, xentimer_driver, xentimer_devclass, 0, 0); 535 MODULE_DEPEND(xentimer, xenpv, 1, 1, 1); 536