1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2011 The FreeBSD Foundation
5 * Copyright (c) 2013 Ruslan Bukin <[email protected]>
6 * All rights reserved.
7 *
8 * Based on mpcore_timer.c developed by Ben Gray <[email protected]>
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The name of the company nor the name of the author may be used to
19 * endorse or promote products derived from this software without specific
20 * prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 /**
36 * Cortex-A7, Cortex-A15, ARMv8 and later Generic Timer
37 */
38
39 #include "opt_acpi.h"
40 #include "opt_platform.h"
41
42 #include <sys/cdefs.h>
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/bus.h>
46 #include <sys/kernel.h>
47 #include <sys/module.h>
48 #include <sys/malloc.h>
49 #include <sys/rman.h>
50 #include <sys/timeet.h>
51 #include <sys/timetc.h>
52 #include <sys/smp.h>
53 #include <sys/vdso.h>
54 #include <sys/watchdog.h>
55
56 #include <machine/bus.h>
57 #include <machine/cpu.h>
58 #include <machine/intr.h>
59 #include <machine/machdep.h>
60 #include <machine/md_var.h>
61
62 #if defined(__aarch64__)
63 #include <machine/undefined.h>
64 #endif
65
66 #ifdef FDT
67 #include <dev/ofw/openfirm.h>
68 #include <dev/ofw/ofw_bus.h>
69 #include <dev/ofw/ofw_bus_subr.h>
70 #endif
71
72 #ifdef DEV_ACPI
73 #include <contrib/dev/acpica/include/acpi.h>
74 #include <dev/acpica/acpivar.h>
75 #endif
76
77 #define GT_PHYS_SECURE 0
78 #define GT_PHYS_NONSECURE 1
79 #define GT_VIRT 2
80 #define GT_HYP_PHYS 3
81 #define GT_HYP_VIRT 4
82 #define GT_IRQ_COUNT 5
83
84 #define GT_CTRL_ENABLE (1 << 0)
85 #define GT_CTRL_INT_MASK (1 << 1)
86 #define GT_CTRL_INT_STAT (1 << 2)
87 #define GT_REG_CTRL 0
88 #define GT_REG_TVAL 1
89
90 #define GT_CNTKCTL_PL0PTEN (1 << 9) /* PL0 Physical timer reg access */
91 #define GT_CNTKCTL_PL0VTEN (1 << 8) /* PL0 Virtual timer reg access */
92 #define GT_CNTKCTL_EVNTI (0xf << 4) /* Virtual counter event bits */
93 #define GT_CNTKCTL_EVNTDIR (1 << 3) /* Virtual counter event transition */
94 #define GT_CNTKCTL_EVNTEN (1 << 2) /* Enables virtual counter events */
95 #define GT_CNTKCTL_PL0VCTEN (1 << 1) /* PL0 CNTVCT and CNTFRQ access */
96 #define GT_CNTKCTL_PL0PCTEN (1 << 0) /* PL0 CNTPCT and CNTFRQ access */
97
98 struct arm_tmr_softc;
99
/* Book-keeping for one allocated timer interrupt. */
struct arm_tmr_irq {
	struct resource *res;	/* IRQ resource from bus_alloc_resource_any() */
	void *ihl;		/* interrupt cookie from bus_setup_intr() */
	int rid;		/* resource id the IRQ was allocated with */
	int idx;		/* GT_* timer index this IRQ belongs to */
};
106
/* Per-device state; only one instance may exist (see arm_tmr_sc). */
struct arm_tmr_softc {
	struct arm_tmr_irq irqs[GT_IRQ_COUNT];	/* allocated IRQs, packed */
	uint64_t (*get_cntxct)(bool);	/* counter read method (workaround hook) */
	uint32_t clkfreq;		/* counter frequency in Hz */
	int irq_count;			/* number of valid entries in irqs[] */
	struct eventtimer et;		/* eventtimer(9) registration */
	bool physical;			/* true: physical timer, false: virtual */
};
115
116 static struct arm_tmr_softc *arm_tmr_sc = NULL;
117
/*
 * Description of every timer interrupt this driver knows about: the
 * GT_* index it maps to, the FDT "interrupt-names" string used to look
 * it up, and whether it is required (RF_OPTIONAL absent) for attach.
 */
static const struct arm_tmr_irq_defs {
	int idx;		/* GT_* timer index */
	const char *name;	/* FDT interrupt-names entry */
	int flags;		/* resource flags; RF_OPTIONAL if not required */
} arm_tmr_irq_defs[] = {
	{
		.idx = GT_PHYS_SECURE,
		.name = "sec-phys",
		.flags = RF_ACTIVE | RF_OPTIONAL,
	},
	{
		.idx = GT_PHYS_NONSECURE,
		.name = "phys",
		.flags = RF_ACTIVE,
	},
	{
		.idx = GT_VIRT,
		.name = "virt",
		.flags = RF_ACTIVE,
	},
	{
		.idx = GT_HYP_PHYS,
		.name = "hyp-phys",
		.flags = RF_ACTIVE | RF_OPTIONAL,
	},
	{
		.idx = GT_HYP_VIRT,
		.name = "hyp-virt",
		.flags = RF_ACTIVE | RF_OPTIONAL,
	},
};
149
150 static int arm_tmr_attach(device_t);
151
152 static uint32_t arm_tmr_fill_vdso_timehands(struct vdso_timehands *vdso_th,
153 struct timecounter *tc);
154 static void arm_tmr_do_delay(int usec, void *);
155
156 static timecounter_get_t arm_tmr_get_timecount;
157
/*
 * timecounter(9) descriptor.  tc_frequency is filled in during attach
 * once the counter frequency is known.
 */
static struct timecounter arm_tmr_timecount = {
	.tc_name = "ARM MPCore Timecounter",
	.tc_get_timecount = arm_tmr_get_timecount,
	.tc_poll_pps = NULL,
	.tc_counter_mask = ~0u,
	.tc_frequency = 0,
	.tc_quality = 1000,
	.tc_fill_vdso_timehands = arm_tmr_fill_vdso_timehands,
};
167
168 #ifdef __arm__
169 #define get_el0(x) cp15_## x ##_get()
170 #define get_el1(x) cp15_## x ##_get()
171 #define set_el0(x, val) cp15_## x ##_set(val)
172 #define set_el1(x, val) cp15_## x ##_set(val)
173 #define HAS_PHYS true
174 #else /* __aarch64__ */
175 #define get_el0(x) READ_SPECIALREG(x ##_el0)
176 #define get_el1(x) READ_SPECIALREG(x ##_el1)
177 #define set_el0(x, val) WRITE_SPECIALREG(x ##_el0, val)
178 #define set_el1(x, val) WRITE_SPECIALREG(x ##_el1, val)
179 #define HAS_PHYS has_hyp()
180 #endif
181
182 static int
get_freq(void)183 get_freq(void)
184 {
185 return (get_el0(cntfrq));
186 }
187
188 static uint64_t
get_cntxct_a64_unstable(bool physical)189 get_cntxct_a64_unstable(bool physical)
190 {
191 uint64_t val
192 ;
193 isb();
194 if (physical) {
195 do {
196 val = get_el0(cntpct);
197 }
198 while (((val + 1) & 0x7FF) <= 1);
199 }
200 else {
201 do {
202 val = get_el0(cntvct);
203 }
204 while (((val + 1) & 0x7FF) <= 1);
205 }
206
207 return (val);
208 }
209
210 static uint64_t
get_cntxct(bool physical)211 get_cntxct(bool physical)
212 {
213 uint64_t val;
214
215 isb();
216 if (physical)
217 val = get_el0(cntpct);
218 else
219 val = get_el0(cntvct);
220
221 return (val);
222 }
223
224 static int
set_ctrl(uint32_t val,bool physical)225 set_ctrl(uint32_t val, bool physical)
226 {
227
228 if (physical)
229 set_el0(cntp_ctl, val);
230 else
231 set_el0(cntv_ctl, val);
232 isb();
233
234 return (0);
235 }
236
237 static int
set_tval(uint32_t val,bool physical)238 set_tval(uint32_t val, bool physical)
239 {
240
241 if (physical)
242 set_el0(cntp_tval, val);
243 else
244 set_el0(cntv_tval, val);
245 isb();
246
247 return (0);
248 }
249
250 static int
get_ctrl(bool physical)251 get_ctrl(bool physical)
252 {
253 uint32_t val;
254
255 if (physical)
256 val = get_el0(cntp_ctl);
257 else
258 val = get_el0(cntv_ctl);
259
260 return (val);
261 }
262
/*
 * Per-CPU rendezvous callback: program CNTKCTL (EL1) to control which
 * timer and counter registers user mode (EL0/PL0) may access.
 */
static void
setup_user_access(void *arg __unused)
{
	uint32_t cntkctl;

	cntkctl = get_el1(cntkctl);
	/* Revoke EL0 access to the timer registers and counter events. */
	cntkctl &= ~(GT_CNTKCTL_PL0PTEN | GT_CNTKCTL_PL0VTEN |
	    GT_CNTKCTL_EVNTEN | GT_CNTKCTL_PL0PCTEN);
	/* Always allow EL0 reads of the virtual counter (CNTVCT). */
	cntkctl |= GT_CNTKCTL_PL0VCTEN;
	/* Allow EL0 reads of the physical counter too when we use it. */
	if (arm_tmr_sc->physical) {
		cntkctl |= GT_CNTKCTL_PL0PCTEN;
	}
	set_el1(cntkctl, cntkctl);
	isb();
}
280
281 #ifdef __aarch64__
/*
 * Undefined-instruction handler that emulates EL0 reads of CNTPCT_EL0
 * by returning the virtual counter instead.  Installed only when the
 * hw.emulate_phys_counter tunable is set (see tmr_setup_user_access()).
 * Returns 1 when the instruction was handled, 0 to let other handlers
 * look at it.
 */
static int
cntpct_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
    uint32_t esr)
{
	uint64_t val;
	int reg;

	/* Only handle MRS instructions... */
	if ((insn & MRS_MASK) != MRS_VALUE)
		return (0);

	/* ...that read CNTPCT_EL0. */
	if (MRS_SPECIAL(insn) != MRS_SPECIAL(CNTPCT_EL0))
		return (0);

	/* Substitute the virtual counter into the destination register. */
	reg = MRS_REGISTER(insn);
	val = READ_SPECIALREG(cntvct_el0);
	if (reg < nitems(frame->tf_x)) {
		frame->tf_x[reg] = val;
	} else if (reg == 30) {
		/* x30 (lr) is stored separately in the trapframe. */
		frame->tf_lr = val;
	}

	/*
	 * We will handle this instruction, move to the next so we
	 * don't trap here again.
	 */
	frame->tf_elr += INSN_SIZE;

	return (1);
}
311 #endif
312
/*
 * SYSINIT hook, run once all CPUs are online: configure CNTKCTL on
 * every CPU via smp_rendezvous(), and on arm64 optionally install the
 * CNTPCT emulation trap handler when the hw.emulate_phys_counter
 * tunable is set.  Does nothing if the timer never attached.
 */
static void
tmr_setup_user_access(void *arg __unused)
{
#ifdef __aarch64__
	int emulate;
#endif

	if (arm_tmr_sc != NULL) {
		smp_rendezvous(NULL, setup_user_access, NULL, NULL);
#ifdef __aarch64__
		if (TUNABLE_INT_FETCH("hw.emulate_phys_counter", &emulate) &&
		    emulate != 0) {
			install_undef_handler(true, cntpct_handler);
		}
#endif
	}
}
330 SYSINIT(tmr_ua, SI_SUB_SMP, SI_ORDER_ANY, tmr_setup_user_access, NULL);
331
332 static unsigned
arm_tmr_get_timecount(struct timecounter * tc)333 arm_tmr_get_timecount(struct timecounter *tc)
334 {
335
336 return (arm_tmr_sc->get_cntxct(arm_tmr_sc->physical));
337 }
338
339 static int
arm_tmr_start(struct eventtimer * et,sbintime_t first,sbintime_t period __unused)340 arm_tmr_start(struct eventtimer *et, sbintime_t first,
341 sbintime_t period __unused)
342 {
343 struct arm_tmr_softc *sc;
344 int counts, ctrl;
345
346 sc = (struct arm_tmr_softc *)et->et_priv;
347
348 if (first != 0) {
349 counts = ((uint32_t)et->et_frequency * first) >> 32;
350 ctrl = get_ctrl(sc->physical);
351 ctrl &= ~GT_CTRL_INT_MASK;
352 ctrl |= GT_CTRL_ENABLE;
353 set_tval(counts, sc->physical);
354 set_ctrl(ctrl, sc->physical);
355 return (0);
356 }
357
358 return (EINVAL);
359
360 }
361
362 static void
arm_tmr_disable(bool physical)363 arm_tmr_disable(bool physical)
364 {
365 int ctrl;
366
367 ctrl = get_ctrl(physical);
368 ctrl &= ~GT_CTRL_ENABLE;
369 set_ctrl(ctrl, physical);
370 }
371
372 static int
arm_tmr_stop(struct eventtimer * et)373 arm_tmr_stop(struct eventtimer *et)
374 {
375 struct arm_tmr_softc *sc;
376
377 sc = (struct arm_tmr_softc *)et->et_priv;
378 arm_tmr_disable(sc->physical);
379
380 return (0);
381 }
382
383 static int
arm_tmr_intr(void * arg)384 arm_tmr_intr(void *arg)
385 {
386 struct arm_tmr_softc *sc;
387 int ctrl;
388
389 sc = (struct arm_tmr_softc *)arg;
390 ctrl = get_ctrl(sc->physical);
391 if (ctrl & GT_CTRL_INT_STAT) {
392 ctrl |= GT_CTRL_INT_MASK;
393 set_ctrl(ctrl, sc->physical);
394 }
395
396 if (sc->et.et_active)
397 sc->et.et_event_cb(&sc->et, sc->et.et_arg);
398
399 return (FILTER_HANDLED);
400 }
401
402 static int
arm_tmr_attach_irq(device_t dev,struct arm_tmr_softc * sc,const struct arm_tmr_irq_defs * irq_def,int rid,int flags)403 arm_tmr_attach_irq(device_t dev, struct arm_tmr_softc *sc,
404 const struct arm_tmr_irq_defs *irq_def, int rid, int flags)
405 {
406 struct arm_tmr_irq *irq;
407
408 irq = &sc->irqs[sc->irq_count];
409 irq->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
410 &rid, flags);
411 if (irq->res == NULL) {
412 if (bootverbose || (flags & RF_OPTIONAL) == 0) {
413 device_printf(dev,
414 "could not allocate irq for %s interrupt '%s'\n",
415 (flags & RF_OPTIONAL) != 0 ? "optional" :
416 "required", irq_def->name);
417 }
418
419 if ((flags & RF_OPTIONAL) == 0)
420 return (ENXIO);
421 } else {
422 if (bootverbose)
423 device_printf(dev, "allocated irq for '%s'\n",
424 irq_def->name);
425 irq->rid = rid;
426 irq->idx = irq_def->idx;
427 sc->irq_count++;
428 }
429
430 return (0);
431 }
432
433 #ifdef FDT
434 static int
arm_tmr_fdt_probe(device_t dev)435 arm_tmr_fdt_probe(device_t dev)
436 {
437
438 if (!ofw_bus_status_okay(dev))
439 return (ENXIO);
440
441 if (ofw_bus_is_compatible(dev, "arm,armv8-timer")) {
442 device_set_desc(dev, "ARMv8 Generic Timer");
443 return (BUS_PROBE_DEFAULT);
444 } else if (ofw_bus_is_compatible(dev, "arm,armv7-timer")) {
445 device_set_desc(dev, "ARMv7 Generic Timer");
446 return (BUS_PROBE_DEFAULT);
447 }
448
449 return (ENXIO);
450 }
451
/*
 * FDT attach: allocate the timer interrupts described by the device
 * tree, then hand off to the common arm_tmr_attach().  When the node
 * has an "interrupt-names" property, interrupts are looked up by name;
 * otherwise they are assumed to be in the conventional order (sec-phys
 * first) and allocated by index.  On any failure every IRQ resource
 * allocated so far is released.
 */
static int
arm_tmr_fdt_attach(device_t dev)
{
	struct arm_tmr_softc *sc;
	const struct arm_tmr_irq_defs *irq_def;
	size_t i;
	phandle_t node;
	int error, rid;
	bool has_names;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);

	has_names = OF_hasprop(node, "interrupt-names");
	for (i = 0; i < nitems(arm_tmr_irq_defs); i++) {
		int flags;

		/*
		 * If we don't have names to go off of, we assume that they're
		 * in the "usual" order with sec-phys first and allocate by idx.
		 */
		irq_def = &arm_tmr_irq_defs[i];
		rid = irq_def->idx;
		flags = irq_def->flags;
		if (has_names) {
			error = ofw_bus_find_string_index(node,
			    "interrupt-names", irq_def->name, &rid);

			/*
			 * If we have names, missing a name means we don't
			 * have it.
			 */
			if (error != 0) {
				/*
				 * Could be noisy on a lot of platforms for no
				 * good cause.
				 */
				if (bootverbose || (flags & RF_OPTIONAL) == 0) {
					device_printf(dev,
					    "could not find irq for %s interrupt '%s'\n",
					    (flags & RF_OPTIONAL) != 0 ?
					    "optional" : "required",
					    irq_def->name);
				}

				if ((flags & RF_OPTIONAL) == 0)
					goto out;

				continue;
			}

			/*
			 * Warn about failing to activate if we did actually
			 * have the name present.
			 */
			flags &= ~RF_OPTIONAL;
		}

		error = arm_tmr_attach_irq(dev, sc, irq_def, rid, flags);
		if (error != 0)
			goto out;
	}

	error = arm_tmr_attach(dev);
out:
	/* Unwind: release every IRQ resource we managed to allocate. */
	if (error != 0) {
		for (i = 0; i < sc->irq_count; i++) {
			bus_release_resource(dev, SYS_RES_IRQ, sc->irqs[i].rid,
			    sc->irqs[i].res);
		}
	}

	return (error);

}
527 #endif
528
529 #ifdef DEV_ACPI
530 static void
arm_tmr_acpi_add_irq(device_t parent,device_t dev,int rid,u_int irq)531 arm_tmr_acpi_add_irq(device_t parent, device_t dev, int rid, u_int irq)
532 {
533
534 BUS_SET_RESOURCE(parent, dev, SYS_RES_IRQ, rid, irq, 1);
535 }
536
/*
 * device_identify hook: locate the ACPI GTDT (Generic Timer Description
 * Table) and, if present, add a "generic_timer" child on the timer bus
 * pass carrying the interrupt numbers listed in the table.
 */
static void
arm_tmr_acpi_identify(driver_t *driver, device_t parent)
{
	ACPI_TABLE_GTDT *gtdt;
	vm_paddr_t physaddr;
	device_t dev;

	physaddr = acpi_find_table(ACPI_SIG_GTDT);
	if (physaddr == 0)
		return;

	gtdt = acpi_map_table(physaddr, ACPI_SIG_GTDT);
	if (gtdt == NULL) {
		device_printf(parent, "gic: Unable to map the GTDT\n");
		return;
	}

	dev = BUS_ADD_CHILD(parent, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE,
	    "generic_timer", -1);
	if (dev == NULL) {
		device_printf(parent, "add gic child failed\n");
		goto out;
	}

	/* Stash each GTDT interrupt on the child for the attach routine. */
	arm_tmr_acpi_add_irq(parent, dev, GT_PHYS_SECURE,
	    gtdt->SecureEl1Interrupt);
	arm_tmr_acpi_add_irq(parent, dev, GT_PHYS_NONSECURE,
	    gtdt->NonSecureEl1Interrupt);
	arm_tmr_acpi_add_irq(parent, dev, GT_VIRT,
	    gtdt->VirtualTimerInterrupt);
	arm_tmr_acpi_add_irq(parent, dev, GT_HYP_PHYS,
	    gtdt->NonSecureEl2Interrupt);

out:
	/* The table mapping is only needed during identify. */
	acpi_unmap_table(gtdt);
}
573
574 static int
arm_tmr_acpi_probe(device_t dev)575 arm_tmr_acpi_probe(device_t dev)
576 {
577
578 device_set_desc(dev, "ARM Generic Timer");
579 return (BUS_PROBE_NOWILDCARD);
580 }
581
/*
 * ACPI attach: allocate every IRQ from arm_tmr_irq_defs[] (the rids
 * were set in identify) and hand off to the common arm_tmr_attach().
 * On failure all allocated IRQ resources are released.
 */
static int
arm_tmr_acpi_attach(device_t dev)
{
	const struct arm_tmr_irq_defs *irq_def;
	struct arm_tmr_softc *sc;
	int error;

	sc = device_get_softc(dev);
	for (int i = 0; i < nitems(arm_tmr_irq_defs); i++) {
		irq_def = &arm_tmr_irq_defs[i];
		error = arm_tmr_attach_irq(dev, sc, irq_def, irq_def->idx,
		    irq_def->flags);
		if (error != 0)
			goto out;
	}

	error = arm_tmr_attach(dev);
out:
	/* Unwind: release every IRQ resource we managed to allocate. */
	if (error != 0) {
		for (int i = 0; i < sc->irq_count; i++) {
			bus_release_resource(dev, SYS_RES_IRQ,
			    sc->irqs[i].rid, sc->irqs[i].res);
		}
	}
	return (error);
}
608 #endif
609
610 static int
arm_tmr_attach(device_t dev)611 arm_tmr_attach(device_t dev)
612 {
613 struct arm_tmr_softc *sc;
614 #ifdef INVARIANTS
615 const struct arm_tmr_irq_defs *irq_def;
616 #endif
617 #ifdef FDT
618 phandle_t node;
619 pcell_t clock;
620 #endif
621 int error;
622 int i, first_timer, last_timer;
623
624 sc = device_get_softc(dev);
625 if (arm_tmr_sc)
626 return (ENXIO);
627
628 sc->get_cntxct = &get_cntxct;
629 #ifdef FDT
630 /* Get the base clock frequency */
631 node = ofw_bus_get_node(dev);
632 if (node > 0) {
633 error = OF_getencprop(node, "clock-frequency", &clock,
634 sizeof(clock));
635 if (error > 0)
636 sc->clkfreq = clock;
637
638 if (OF_hasprop(node, "allwinner,sun50i-a64-unstable-timer")) {
639 sc->get_cntxct = &get_cntxct_a64_unstable;
640 if (bootverbose)
641 device_printf(dev,
642 "Enabling allwinner unstable timer workaround\n");
643 }
644 }
645 #endif
646
647 if (sc->clkfreq == 0) {
648 /* Try to get clock frequency from timer */
649 sc->clkfreq = get_freq();
650 }
651
652 if (sc->clkfreq == 0) {
653 device_printf(dev, "No clock frequency specified\n");
654 return (ENXIO);
655 }
656
657 #ifdef INVARIANTS
658 /* Confirm that non-optional irqs were allocated before coming in. */
659 for (i = 0; i < nitems(arm_tmr_irq_defs); i++) {
660 int j;
661
662 irq_def = &arm_tmr_irq_defs[i];
663
664 /* Skip optional interrupts */
665 if ((irq_def->flags & RF_OPTIONAL) != 0)
666 continue;
667
668 for (j = 0; j < sc->irq_count; j++) {
669 if (sc->irqs[j].idx == irq_def->idx)
670 break;
671 }
672 KASSERT(j < sc->irq_count, ("%s: Missing required interrupt %s",
673 __func__, irq_def->name));
674 }
675 #endif
676
677 #ifdef __aarch64__
678 /*
679 * Use the virtual timer when we can't use the hypervisor.
680 * A hypervisor guest may change the virtual timer registers while
681 * executing so any use of the virtual timer interrupt needs to be
682 * coordinated with the virtual machine manager.
683 */
684 if (!HAS_PHYS) {
685 sc->physical = false;
686 first_timer = GT_VIRT;
687 last_timer = GT_VIRT;
688 } else
689 #endif
690 /* Otherwise set up the secure and non-secure physical timers. */
691 {
692 sc->physical = true;
693 first_timer = GT_PHYS_SECURE;
694 last_timer = GT_PHYS_NONSECURE;
695 }
696
697 arm_tmr_sc = sc;
698
699 /* Setup secure, non-secure and virtual IRQs handler */
700 for (i = 0; i < sc->irq_count; i++) {
701 /* Only enable IRQs on timers we expect to use */
702 if (sc->irqs[i].idx < first_timer ||
703 sc->irqs[i].idx > last_timer)
704 continue;
705 error = bus_setup_intr(dev, sc->irqs[i].res, INTR_TYPE_CLK,
706 arm_tmr_intr, NULL, sc, &sc->irqs[i].ihl);
707 if (error) {
708 device_printf(dev, "Unable to alloc int resource.\n");
709 for (int j = 0; j < i; j++)
710 bus_teardown_intr(dev, sc->irqs[j].res,
711 &sc->irqs[j].ihl);
712 return (ENXIO);
713 }
714 }
715
716 /* Disable the timers until we are ready */
717 arm_tmr_disable(false);
718 if (HAS_PHYS)
719 arm_tmr_disable(true);
720
721 arm_tmr_timecount.tc_frequency = sc->clkfreq;
722 tc_init(&arm_tmr_timecount);
723
724 sc->et.et_name = "ARM MPCore Eventtimer";
725 sc->et.et_flags = ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU;
726 sc->et.et_quality = 1000;
727
728 sc->et.et_frequency = sc->clkfreq;
729 sc->et.et_min_period = (0x00000010LLU << 32) / sc->et.et_frequency;
730 sc->et.et_max_period = (0xfffffffeLLU << 32) / sc->et.et_frequency;
731 sc->et.et_start = arm_tmr_start;
732 sc->et.et_stop = arm_tmr_stop;
733 sc->et.et_priv = sc;
734 et_register(&sc->et);
735
736 #if defined(__arm__)
737 arm_set_delay(arm_tmr_do_delay, sc);
738 #endif
739
740 return (0);
741 }
742
743 #ifdef FDT
/* device(9) method table for the FDT-enumerated generic timer. */
static device_method_t arm_tmr_fdt_methods[] = {
	DEVMETHOD(device_probe, arm_tmr_fdt_probe),
	DEVMETHOD(device_attach, arm_tmr_fdt_attach),
	{ 0, 0 }
};

static DEFINE_CLASS_0(generic_timer, arm_tmr_fdt_driver, arm_tmr_fdt_methods,
    sizeof(struct arm_tmr_softc));

/* Attach early (timer bus pass) under both simplebus and ofwbus. */
EARLY_DRIVER_MODULE(timer, simplebus, arm_tmr_fdt_driver, 0, 0,
    BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
EARLY_DRIVER_MODULE(timer, ofwbus, arm_tmr_fdt_driver, 0, 0,
    BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
757 #endif
758
759 #ifdef DEV_ACPI
/* device(9) method table for the ACPI (GTDT) enumerated generic timer. */
static device_method_t arm_tmr_acpi_methods[] = {
	DEVMETHOD(device_identify, arm_tmr_acpi_identify),
	DEVMETHOD(device_probe, arm_tmr_acpi_probe),
	DEVMETHOD(device_attach, arm_tmr_acpi_attach),
	{ 0, 0 }
};

static DEFINE_CLASS_0(generic_timer, arm_tmr_acpi_driver, arm_tmr_acpi_methods,
    sizeof(struct arm_tmr_softc));

/* Attach early (timer bus pass) under the acpi bus. */
EARLY_DRIVER_MODULE(timer, acpi, arm_tmr_acpi_driver, 0, 0,
    BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
772 #endif
773
774 static void
arm_tmr_do_delay(int usec,void * arg)775 arm_tmr_do_delay(int usec, void *arg)
776 {
777 struct arm_tmr_softc *sc = arg;
778 int32_t counts, counts_per_usec;
779 uint32_t first, last;
780
781 /* Get the number of times to count */
782 counts_per_usec = ((arm_tmr_timecount.tc_frequency / 1000000) + 1);
783
784 /*
785 * Clamp the timeout at a maximum value (about 32 seconds with
786 * a 66MHz clock). *Nobody* should be delay()ing for anywhere
787 * near that length of time and if they are, they should be hung
788 * out to dry.
789 */
790 if (usec >= (0x80000000U / counts_per_usec))
791 counts = (0x80000000U / counts_per_usec) - 1;
792 else
793 counts = usec * counts_per_usec;
794
795 first = sc->get_cntxct(sc->physical);
796
797 while (counts > 0) {
798 last = sc->get_cntxct(sc->physical);
799 counts -= (int32_t)(last - first);
800 first = last;
801 }
802 }
803
804 #if defined(__aarch64__)
/*
 * DELAY(9) implementation for arm64.  Before the timer attaches we fall
 * back to a crude spin loop; afterwards we busy-wait on the counter.
 */
void
DELAY(int usec)
{
	int32_t counts;

	TSENTER();
	/*
	 * Check the timers are setup, if not just
	 * use a for loop for the meantime
	 */
	if (arm_tmr_sc == NULL) {
		/* NOTE(review): 200 iterations/usec is uncalibrated — best effort only. */
		for (; usec > 0; usec--)
			for (counts = 200; counts > 0; counts--)
				/*
				 * Prevent the compiler from optimizing
				 * out the loop
				 */
				cpufunc_nullop();
	} else
		arm_tmr_do_delay(usec, arm_tmr_sc);
	TSEXIT();
}
827 #endif
828
829 static uint32_t
arm_tmr_fill_vdso_timehands(struct vdso_timehands * vdso_th,struct timecounter * tc)830 arm_tmr_fill_vdso_timehands(struct vdso_timehands *vdso_th,
831 struct timecounter *tc)
832 {
833
834 vdso_th->th_algo = VDSO_TH_ALGO_ARM_GENTIM;
835 vdso_th->th_physical = arm_tmr_sc->physical;
836 bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
837 return (1);
838 }
839