/*
 *  linux/arch/arm/kernel/arch_timer.c
 *
 *  Copyright (C) 2011 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/io.h>

#include <asm/delay.h>
#include <asm/arch_timer.h>
#include <asm/sched_clock.h>

static u32 arch_timer_rate;

enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;
static struct delay_timer arch_delay_timer;

static bool arch_timer_use_virtual = true;

/*
 * Architected system timer support.
 */

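/*
 * Shared tick handler for the virtual and physical timers. The CNTx_CTL
 * control register layout is architectural: bit 0 is ENABLE, bit 1 is
 * IMASK and bit 2 is the read-only ISTATUS flag. If ISTATUS is set the
 * interrupt is ours: mask it (the next set_next_event() call clears
 * IMASK again) and run the clockevent handler.
 */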
static inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

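/*
 * Only shutdown requests need any work here: clearing ENABLE in CNTx_CTL
 * stops the timer from asserting its interrupt. One-shot reprogramming is
 * handled entirely in set_next_event(), so the remaining modes are no-ops.
 */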
static inline void timer_set_mode(const int access, int mode)
{
	unsigned long ctrl;

	switch (mode) {
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
		ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
		break;
	default:
		break;
	}
}

static void arch_timer_set_mode_virt(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
}

static void arch_timer_set_mode_phys(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
}

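/*
 * Program a one-shot expiry "evt" ticks from now. Writing TVAL loads the
 * 32-bit downcounter, which fires once it counts down through zero; the
 * subsequent CTRL write enables the timer and unmasks the interrupt that
 * timer_handler() masked on the previous tick.
 */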
static inline void set_next_event(const int access, unsigned long evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *unused)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *unused)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
	return 0;
}

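/*
 * Per-CPU clockevent setup, run on each CPU as it comes online. C3STOP is
 * advertised because the timer may stop in deep idle states, and the high
 * rating (450) makes this the preferred tick device. The max_delta of
 * 0x7fffffff reflects TVAL being a signed 32-bit downcounter.
 */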
static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
	clk->name = "arch_sys_timer";
	clk->rating = 450;
	if (arch_timer_use_virtual) {
		clk->irq = arch_timer_ppi[VIRT_PPI];
		clk->set_mode = arch_timer_set_mode_virt;
		clk->set_next_event = arch_timer_set_next_event_virt;
	} else {
		clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
		clk->set_mode = arch_timer_set_mode_phys;
		clk->set_next_event = arch_timer_set_next_event_phys;
	}

	clk->cpumask = cpumask_of(smp_processor_id());

	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);

	clockevents_config_and_register(clk, arch_timer_rate,
					0xf, 0x7fffffff);

	if (arch_timer_use_virtual)
		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
	else {
		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
	}

	arch_counter_set_user_access();

	return 0;
}

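/*
 * Determine the timer frequency, falling back to the CNTFRQ register when
 * the device tree did not provide a "clock-frequency" property. As an
 * example of the rate printout below, a 24000000 Hz counter is reported
 * as "24.00MHz".
 */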
static int arch_timer_available(void)
{
	u32 freq;

	if (arch_timer_rate == 0) {
		freq = arch_timer_get_cntfrq();

		/* Check the timer frequency. */
		if (freq == 0) {
			pr_warn("Architected timer frequency not available\n");
			return -EINVAL;
		}

		arch_timer_rate = freq;
	}

	pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
		     (unsigned long)arch_timer_rate / 1000000,
		     (unsigned long)(arch_timer_rate / 10000) % 100,
		     arch_timer_use_virtual ? "virt" : "phys");
	return 0;
}

/*
 * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
 * call it before it has been initialised. Rather than incur a performance
 * penalty checking for initialisation, provide a default implementation that
 * won't lead to time appearing to jump backwards.
 */
static u64 arch_timer_read_zero(void)
{
	return 0;
}

u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;

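/*
 * The wrappers below adapt arch_timer_read_counter() to the differing
 * callback signatures expected by sched_clock (32-bit, truncating the
 * counter), the clocksource core, the delay timer and the cyclecounter.
 */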
static u32 arch_timer_read_counter32(void)
{
	return arch_timer_read_counter();
}

static cycle_t arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static unsigned long arch_timer_read_current_timer(void)
{
	return arch_timer_read_counter();
}

static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

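/*
 * The architecture guarantees at least 56 valid bits of system counter,
 * hence CLOCKSOURCE_MASK(56) on both the clocksource and the cyclecounter.
 * The timecounter built on top is made available to other subsystems via
 * arch_timer_get_timecounter().
 */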
static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

static struct timecounter timecounter;

struct timecounter *arch_timer_get_timecounter(void)
{
	return &timecounter;
}

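/*
 * Per-CPU teardown, run on a CPU as it goes offline: disable the PPI(s)
 * and shut the timer down so a dying CPU cannot take further ticks.
 */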
static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	if (arch_timer_use_virtual)
		disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
	else {
		disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
	}

	clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}

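/*
 * CPU hotplug notifier: set the timer up on a CPU as it starts and tear
 * it down as it dies. Both callbacks run on the affected CPU itself,
 * which matters because the timer registers are banked per CPU.
 */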
static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
					   unsigned long action, void *hcpu)
{
	struct clock_event_device *evt = this_cpu_ptr(arch_timer_evt);

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		arch_timer_setup(evt);
		break;
	case CPU_DYING:
		arch_timer_stop(evt);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
	.notifier_call = arch_timer_cpu_notify,
};

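/*
 * One-time registration: allocate the per-cpu clockevent devices, register
 * the counter as a clocksource, request the per-cpu timer IRQ(s), hook up
 * the hotplug notifier, set the boot CPU up directly (it is already past
 * CPU_STARTING) and switch the delay loop over to the counter.
 */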
static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	err = arch_timer_available();
	if (err)
		goto out;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&timecounter, &cyclecounter,
			 arch_counter_get_cntpct());

	if (arch_timer_use_virtual) {
		ppi = arch_timer_ppi[VIRT_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
	} else {
		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&arch_timer_cpu_nb);
	if (err)
		goto out_free_irq;

	/* Immediately configure the timer on the boot CPU */
	arch_timer_setup(this_cpu_ptr(arch_timer_evt));

	/* Use the architected timer for the delay loop. */
	arch_delay_timer.read_current_timer = &arch_timer_read_current_timer;
	arch_delay_timer.freq = arch_timer_rate;
	register_current_timer_delay(&arch_delay_timer);
	return 0;

out_free_irq:
	if (arch_timer_use_virtual)
		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
	else {
		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
				arch_timer_evt);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
					arch_timer_evt);
	}

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible	= "arm,armv7-timer",	},
	{},
};

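/*
 * Device tree probe. Per the "arm,armv7-timer" binding the interrupts are
 * listed in the order given by enum ppi_nr: secure physical, non-secure
 * physical, virtual, hypervisor. The virtual timer is preferred when its
 * PPI is present, since the physical timer may not be accessible to a
 * kernel running under a hypervisor.
 */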
int __init arch_timer_of_register(void)
{
	struct device_node *np;
	u32 freq;
	int i;

	np = of_find_matching_node(NULL, arch_timer_of_match);
	if (!np) {
		pr_err("arch_timer: can't find DT node\n");
		return -ENODEV;
	}

	/* Try to determine the frequency from the device tree or CNTFRQ */
	if (!of_property_read_u32(np, "clock-frequency", &freq))
		arch_timer_rate = freq;

	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	of_node_put(np);

	/*
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 */
	if (!arch_timer_ppi[VIRT_PPI]) {
		arch_timer_use_virtual = false;

		if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return -EINVAL;
		}
	}

	if (arch_timer_use_virtual)
		arch_timer_read_counter = arch_counter_get_cntvct;
	else
		arch_timer_read_counter = arch_counter_get_cntpct;

	return arch_timer_register();
}

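/*
 * Hook the counter up as the sched_clock source. The setup_sched_clock()
 * interface takes a 32-bit read function, so the truncating
 * arch_timer_read_counter32() wrapper is used; the sched_clock core
 * extends it and handles the wraparound.
 */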
int __init arch_timer_sched_clock_init(void)
{
	int err;

	err = arch_timer_available();
	if (err)
		return err;

	setup_sched_clock(arch_timer_read_counter32,
			  32, arch_timer_rate);
	return 0;
}