/*
 *  linux/arch/arm/kernel/arch_timer.c
 *
 *  Copyright (C) 2011 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/io.h>

#include <asm/delay.h>
#include <asm/localtimer.h>
#include <asm/arch_timer.h>
#include <asm/sched_clock.h>

static unsigned long arch_timer_rate;

enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu **arch_timer_evt;
static struct delay_timer arch_delay_timer;

static bool arch_timer_use_virtual = true;

/*
 * Architected system timer support.
 */

#define ARCH_TIMER_CTRL_ENABLE		(1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK		(1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT		(1 << 2)

#define ARCH_TIMER_REG_CTRL		0
#define ARCH_TIMER_REG_FREQ		1
#define ARCH_TIMER_REG_TVAL		2

#define ARCH_TIMER_PHYS_ACCESS		0
#define ARCH_TIMER_VIRT_ACCESS		1

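/*
 * The accessors below use the ARMv7 generic timer CP15 encodings:
 * CNTP_CTL and CNTP_TVAL (c14, c2) for the physical timer, CNTV_CTL
 * and CNTV_TVAL (c14, c3) for the virtual timer, and CNTFRQ
 * (c14, c0, 0) for the counter frequency.
 */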
/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code. At least it does so with a recent GCC (4.6.3).
 */
static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
			break;
		}
	}

	if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
			break;
		}
	}

	isb();
}

static inline u32 arch_timer_reg_read(const int access, const int reg)
{
	u32 val = 0;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
			break;
		case ARCH_TIMER_REG_FREQ:
			asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
			break;
		}
	}

	if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
			break;
		}
	}

	return val;
}

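/*
 * The 64-bit counters are read in one go with a single mrrc:
 * CNTPCT via "mrrc p15, 0, ..., c14", CNTVCT via "mrrc p15, 1, ..., c14".
 */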
static inline cycle_t arch_timer_counter_read(const int access)
{
	cycle_t cval = 0;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));

	if (access == ARCH_TIMER_VIRT_ACCESS)
		asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));

	return cval;
}

static inline cycle_t arch_counter_get_cntpct(void)
{
	return arch_timer_counter_read(ARCH_TIMER_PHYS_ACCESS);
}

static inline cycle_t arch_counter_get_cntvct(void)
{
	return arch_timer_counter_read(ARCH_TIMER_VIRT_ACCESS);
}

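/*
 * The timer interrupt is asserted for as long as ISTATUS is set, so
 * the handler masks the timer (IMASK) before calling the event
 * handler; the next set_next_event() unmasks it again. An interrupt
 * with ISTATUS clear is not ours and is reported as IRQ_NONE.
 */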
static inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

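/*
 * Only shutdown needs any work here: clearing the enable bit stops the
 * timer. Oneshot (re)programming is done in set_next_event() instead.
 */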
static inline void timer_set_mode(const int access, int mode)
{
	unsigned long ctrl;

	switch (mode) {
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
		ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
		break;
	default:
		break;
	}
}

static void arch_timer_set_mode_virt(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
}

static void arch_timer_set_mode_phys(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
}

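/*
 * TVAL is a signed 32-bit down-counter: writing "evt" makes the timer
 * fire "evt" ticks from now. TVAL is programmed before the CTRL write
 * that enables and unmasks the timer, so a stale compare value cannot
 * raise an interrupt first.
 */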
static inline void set_next_event(const int access, unsigned long evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *unused)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *unused)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
	return 0;
}

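/*
 * Per-CPU clockevent setup, run on each CPU as it is brought up. The
 * maximum delta of 0x7fffffff matches the positive range of the signed
 * 32-bit TVAL register; the minimum of 0xf guards against programming
 * an expiry too close to "now". CLOCK_EVT_FEAT_C3STOP tells the
 * clockevents core this timer may stop in deep power states.
 */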
static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
	clk->name = "arch_sys_timer";
	clk->rating = 450;
	if (arch_timer_use_virtual) {
		clk->irq = arch_timer_ppi[VIRT_PPI];
		clk->set_mode = arch_timer_set_mode_virt;
		clk->set_next_event = arch_timer_set_next_event_virt;
	} else {
		clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
		clk->set_mode = arch_timer_set_mode_phys;
		clk->set_next_event = arch_timer_set_next_event_phys;
	}

	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);

	clockevents_config_and_register(clk, arch_timer_rate,
					0xf, 0x7fffffff);

	*__this_cpu_ptr(arch_timer_evt) = clk;

	if (arch_timer_use_virtual) {
		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
	} else {
		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
	}

	return 0;
}

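/*
 * Resolve the timer frequency: a device tree "clock-frequency"
 * property (picked up in arch_timer_of_register()) takes precedence;
 * otherwise fall back to the CNTFRQ register, which firmware is
 * expected to have programmed.
 */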
static int arch_timer_available(void)
{
	unsigned long freq;

	if (arch_timer_rate == 0) {
		freq = arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS,
					   ARCH_TIMER_REG_FREQ);

		/* Check the timer frequency. */
		if (freq == 0) {
			pr_warn("Architected timer frequency not available\n");
			return -EINVAL;
		}

		arch_timer_rate = freq;
	}

	pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
		     arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100,
		     arch_timer_use_virtual ? "virt" : "phys");
	return 0;
}

static u32 notrace arch_counter_get_cntpct32(void)
{
	cycle_t cnt = arch_counter_get_cntpct();

	/*
	 * The sched_clock infrastructure only knows about counters
	 * with at most 32 bits. Forget about the upper 24 of the 56
	 * significant counter bits for the time being...
	 */
	return (u32)cnt;
}

static u32 notrace arch_counter_get_cntvct32(void)
{
	cycle_t cnt = arch_counter_get_cntvct();

	/*
	 * The sched_clock infrastructure only knows about counters
	 * with at most 32 bits. Forget about the upper 24 of the 56
	 * significant counter bits for the time being...
	 */
	return (u32)cnt;
}

306 
307 static cycle_t arch_counter_read(struct clocksource *cs)
308 {
309 	/*
310 	 * Always use the physical counter for the clocksource.
311 	 * CNTHCTL.PL1PCTEN must be set to 1.
312 	 */
313 	return arch_counter_get_cntpct();
314 }
315 
316 static unsigned long arch_timer_read_current_timer(void)
317 {
318 	return arch_counter_get_cntpct();
319 }
320 
321 static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
322 {
323 	/*
324 	 * Always use the physical counter for the clocksource.
325 	 * CNTHCTL.PL1PCTEN must be set to 1.
326 	 */
327 	return arch_counter_get_cntpct();
328 }
329 
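/*
 * A 56-bit mask is used rather than the full 64: the architecture only
 * guarantees a counter roll-over time of no less than 40 years, which
 * at typical counter frequencies corresponds to roughly 56 significant
 * bits.
 */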
static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

static struct timecounter timecounter;

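/*
 * The timecounter wraps the same physical counter for code that wants
 * to convert raw counter deltas to nanoseconds itself (virtualization
 * code, for instance).
 */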
struct timecounter *arch_timer_get_timecounter(void)
{
	return &timecounter;
}

static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_stop: disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	if (arch_timer_use_virtual) {
		disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
	} else {
		disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
	}

	clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}

365 
366 static struct local_timer_ops arch_timer_ops __cpuinitdata = {
367 	.setup	= arch_timer_setup,
368 	.stop	= arch_timer_stop,
369 };
370 
371 static struct clock_event_device arch_timer_global_evt;
372 
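/*
 * Bring everything up: register the counter as a clocksource, request
 * the per-CPU timer IRQs, then try to register as the local (per-CPU)
 * tick device, falling back to a single global clockevent on UP or
 * when another local timer already claimed that role. Finally, plug
 * the counter into the delay loop so udelay() needs no calibration.
 */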
static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	err = arch_timer_available();
	if (err)
		goto out;

	arch_timer_evt = alloc_percpu(struct clock_event_device *);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&timecounter, &cyclecounter,
			 arch_counter_get_cntpct());

	if (arch_timer_use_virtual) {
		ppi = arch_timer_ppi[VIRT_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
	} else {
		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = local_timer_register(&arch_timer_ops);
	if (err) {
		/*
		 * We couldn't register as a local timer (could be
		 * because we're on a UP platform, or because some
		 * other local timer is already present...). Try as a
		 * global timer instead.
		 */
		arch_timer_global_evt.cpumask = cpumask_of(0);
		err = arch_timer_setup(&arch_timer_global_evt);
	}
	if (err)
		goto out_free_irq;

	/* Use the architected timer for the delay loop. */
	arch_delay_timer.read_current_timer = &arch_timer_read_current_timer;
	arch_delay_timer.freq = arch_timer_rate;
	register_current_timer_delay(&arch_delay_timer);
	return 0;

out_free_irq:
	if (arch_timer_use_virtual) {
		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
	} else {
		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
				arch_timer_evt);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
					arch_timer_evt);
	}

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible	= "arm,armv7-timer",	},
	{},
};

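/*
 * A typical (illustrative) device tree node this probe expects, with
 * the interrupts listed in enum ppi_nr order. The PPI numbers below
 * are the conventional GIC ones but are ultimately SoC-specific, and
 * the clock-frequency value is just an example:
 *
 *	timer {
 *		compatible = "arm,armv7-timer";
 *		interrupts = <1 13 0xf08>,	(secure physical)
 *			     <1 14 0xf08>,	(non-secure physical)
 *			     <1 11 0xf08>,	(virtual)
 *			     <1 10 0xf08>;	(hypervisor)
 *		clock-frequency = <24000000>;	(optional, overrides CNTFRQ)
 *	};
 */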
int __init arch_timer_of_register(void)
{
	struct device_node *np;
	u32 freq;
	int i;

	np = of_find_matching_node(NULL, arch_timer_of_match);
	if (!np) {
		pr_err("arch_timer: can't find DT node\n");
		return -ENODEV;
	}

	/* Try to determine the frequency from the device tree or CNTFRQ */
	if (!of_property_read_u32(np, "clock-frequency", &freq))
		arch_timer_rate = freq;

	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	of_node_put(np);

	/*
	 * If no interrupt was provided for the virtual timer, we'll
	 * have to stick to the physical timer. It'd better be
	 * accessible...
	 */
	if (!arch_timer_ppi[VIRT_PPI]) {
		arch_timer_use_virtual = false;

		if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return -EINVAL;
		}
	}

	return arch_timer_register();
}

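/*
 * This kernel's sched_clock infrastructure only takes a 32-bit counter
 * read function (see setup_sched_clock()), hence the truncating cnt32
 * helpers above. The same virt/phys choice is made as for the
 * clockevents so everything runs off the same counter.
 */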
int __init arch_timer_sched_clock_init(void)
{
	u32 (*cnt32)(void);
	int err;

	err = arch_timer_available();
	if (err)
		return err;

	if (arch_timer_use_virtual)
		cnt32 = arch_counter_get_cntvct32;
	else
		cnt32 = arch_counter_get_cntpct32;

	setup_sched_clock(cnt32, 32, arch_timer_rate);
	return 0;
}