/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 *	File:		arm_timer.c
 *	Purpose:	Routines for handling the machine-independent
 *				event timer.
 */

#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/macro_help.h>
#include <kern/spl.h>
#include <kern/timer_queue.h>
#include <kern/timer_call.h>

#include <machine/commpage.h>
#include <machine/machine_routines.h>

#include <sys/kdebug.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>

/*
 *      Event timer interrupt.
 *
 * XXX a drawback of this implementation is that events serviced earlier must not set deadlines
 *     that occur before the entire chain completes.
 *
 * XXX a better implementation would use a set of generic callouts and iterate over them
 */
void
timer_intr(__unused int inuser, __unused uint64_t iaddr)
{
	uint64_t        abstime, new_idle_timeout_ticks;
	rtclock_timer_t *mytimer;
	cpu_data_t     *cpu_data_ptr;
	processor_t     processor;

	cpu_data_ptr = getCpuDatap();
	mytimer = &cpu_data_ptr->rtclock_timer; /* Point to the event timer */
	abstime = mach_absolute_time(); /* Get the time now */

	/* is it time for an idle timer event? */
	if ((cpu_data_ptr->idle_timer_deadline > 0) && (cpu_data_ptr->idle_timer_deadline <= abstime)) {
		cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		new_idle_timeout_ticks = 0x0ULL;

		KDBG_RELEASE(DECR_PM_DEADLINE | DBG_FUNC_START);
		cpu_data_ptr->idle_timer_notify(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks);
		KDBG_RELEASE(DECR_PM_DEADLINE | DBG_FUNC_END);

		/* if a new idle timeout was requested set the new idle timer deadline */
		if (new_idle_timeout_ticks != 0x0ULL) {
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}

		abstime = mach_absolute_time(); /* Get the time again since we ran a bit */
	}

	/* has a pending clock timer expired? */
	if (mytimer->deadline <= abstime) {     /* Have we expired the
		                                 * deadline? */
		mytimer->has_expired = TRUE;    /* Remember that we popped */
		mytimer->deadline = EndOfAllTime;       /* Set timer request to
		                                         * the end of all time
		                                         * in case we have no
		                                         * more events */
		mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
		mytimer->has_expired = FALSE;
		abstime = mach_absolute_time(); /* Get the time again since we ran a bit */
	}

	processor = PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data_ptr);
	(void)running_timers_expire(processor, abstime);
	/*
	 * No need to update abstime.
	 */

	/* Force reload our next deadline */
	cpu_data_ptr->rtcPop = EndOfAllTime;
	/* schedule our next deadline */
	timer_resync_deadlines();
}
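
/*
 * Illustrative sketch (not part of the original file): how an event
 * typically lands in the per-CPU rtclock_timer queue that timer_intr()
 * drains through timer_queue_expire(). timer_call_setup(),
 * timer_call_enter(), and clock_interval_to_deadline() are the
 * <kern/timer_call.h> / <kern/clock.h> interfaces; my_call, my_callback,
 * and the 10 ms interval are hypothetical.
 *
 *	static timer_call_data_t my_call;
 *
 *	static void
 *	my_callback(timer_call_param_t p0, timer_call_param_t p1)
 *	{
 *		// runs when timer_queue_expire() pops this deadline
 *	}
 *
 *	uint64_t deadline;
 *
 *	timer_call_setup(&my_call, my_callback, NULL);
 *	clock_interval_to_deadline(10, NSEC_PER_MSEC, &deadline);
 *	timer_call_enter(&my_call, deadline, TIMER_CALL_SYS_NORMAL);
 */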

/*
 * Set the clock deadline
 */
void
timer_set_deadline(uint64_t deadline)
{
	rtclock_timer_t *mytimer;
	spl_t           s;
	cpu_data_t     *cpu_data_ptr;

	s = splclock();         /* no interruptions */
	cpu_data_ptr = getCpuDatap();

	mytimer = &cpu_data_ptr->rtclock_timer; /* Point to the timer itself */
	mytimer->deadline = deadline;   /* Set the new expiration time */

	timer_resync_deadlines();

	splx(s);
}
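
/*
 * Illustrative sketch (not part of the original file): pulling the
 * per-CPU event timer forward when a sooner event is armed;
 * interval_abs is a hypothetical interval in mach absolute time units.
 *
 *	uint64_t deadline;
 *
 *	clock_absolutetime_interval_to_deadline(interval_abs, &deadline);
 *	timer_set_deadline(deadline);
 */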

/*
 * Re-evaluate the outstanding deadlines and select the most proximate.
 *
 * Should be called at splclock.
 */
void
timer_resync_deadlines(void)
{
	uint64_t        deadline;
	rtclock_timer_t *mytimer;
	spl_t           s = splclock(); /* No interruptions please */
	cpu_data_t     *cpu_data_ptr;

	cpu_data_ptr = getCpuDatap();

	deadline = 0;

	/* if we have a clock timer set sooner, pop on that */
	mytimer = &cpu_data_ptr->rtclock_timer; /* Point to the timer itself */
	if ((!mytimer->has_expired) && (mytimer->deadline > 0)) {
		deadline = mytimer->deadline;
	}
	/* if we have an idle timer event coming up, how about that? */
	if ((cpu_data_ptr->idle_timer_deadline > 0)
	    && (cpu_data_ptr->idle_timer_deadline < deadline)) {
		deadline = cpu_data_ptr->idle_timer_deadline;
	}

	uint64_t run_deadline = running_timers_deadline(
		PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data_ptr));
	if (run_deadline < deadline) {
		deadline = run_deadline;
	}

	if ((deadline == EndOfAllTime)
	    || ((deadline > 0) && (cpu_data_ptr->rtcPop != deadline))) {
		int             decr;

		decr = setPop(deadline);

		KDBG_RELEASE(DECR_SET_DEADLINE | DBG_FUNC_NONE, decr, 2);
	}
	splx(s);
}

void
timer_queue_expire_local(
	__unused void                   *arg)
{
	rtclock_timer_t         *mytimer = &getCpuDatap()->rtclock_timer;
	uint64_t                abstime;

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    DECR_TIMER_EXPIRE_LOCAL | DBG_FUNC_START,
	    mytimer->deadline, 0, 0, 0, 0);

	abstime = mach_absolute_time();
	mytimer->has_expired = TRUE;
	mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
	mytimer->has_expired = FALSE;

	timer_resync_deadlines();

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    DECR_TIMER_EXPIRE_LOCAL | DBG_FUNC_END,
	    mytimer->deadline, 0, 0, 0, 0);
}

boolean_t
timer_resort_threshold(__unused uint64_t skew)
{
	return FALSE;
}

mpqueue_head_t *
timer_queue_assign(
	uint64_t                deadline)
{
	cpu_data_t                              *cpu_data_ptr = getCpuDatap();
	mpqueue_head_t          *queue;

	if (cpu_data_ptr->cpu_running) {
		queue = &cpu_data_ptr->rtclock_timer.queue;

		if (deadline < cpu_data_ptr->rtclock_timer.deadline) {
			timer_set_deadline(deadline);
		}
	} else {
		/*
		 * No timers should be armed by powered-down CPUs, except
		 * by already badly behaved code in the hibernation path,
		 * which runs on master_cpu.
		 */
		assert(ml_is_quiescing());

		queue = &cpu_datap(master_cpu)->rtclock_timer.queue;
	}

	return queue;
}

void
timer_queue_cancel(
	mpqueue_head_t          *queue,
	uint64_t                deadline,
	uint64_t                new_deadline)
{
	if (queue == &getCpuDatap()->rtclock_timer.queue) {
		if (deadline < new_deadline) {
			timer_set_deadline(new_deadline);
		}
	}
}

mpqueue_head_t *
timer_queue_cpu(int cpu)
{
	return &cpu_datap(cpu)->rtclock_timer.queue;
}

void
timer_call_cpu(int cpu, void (*fn)(void *), void *arg)
{
	cpu_signal(cpu_datap(cpu), SIGPxcall, (void *) fn, arg);
}

void
timer_call_nosync_cpu(int cpu, void (*fn)(void *), void *arg)
{
	/* XXX Needs error checking and retry */
	cpu_signal(cpu_datap(cpu), SIGPxcall, (void *) fn, arg);
}
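
/*
 * Illustrative sketch (not part of the original file): driving a handler
 * on another CPU through the SIGPxcall path above, e.g. expiring that
 * CPU's local timer queue with timer_queue_expire_local() from earlier
 * in this file; target_cpu is a hypothetical CPU number.
 *
 *	timer_call_cpu(target_cpu, timer_queue_expire_local, NULL);
 */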


static timer_coalescing_priority_params_ns_t tcoal_prio_params_init =
{
	.idle_entry_timer_processing_hdeadline_threshold_ns = 5000ULL * NSEC_PER_USEC,
	.interrupt_timer_coalescing_ilat_threshold_ns = 30ULL * NSEC_PER_USEC,
	.timer_resort_threshold_ns = 50 * NSEC_PER_MSEC,
	.timer_coalesce_rt_shift = 0,
	.timer_coalesce_bg_shift = -5,
	.timer_coalesce_kt_shift = 3,
	.timer_coalesce_fp_shift = 3,
	.timer_coalesce_ts_shift = 3,
	.timer_coalesce_rt_ns_max = 0ULL,
	.timer_coalesce_bg_ns_max = 100 * NSEC_PER_MSEC,
	.timer_coalesce_kt_ns_max = 1 * NSEC_PER_MSEC,
	.timer_coalesce_fp_ns_max = 1 * NSEC_PER_MSEC,
	.timer_coalesce_ts_ns_max = 1 * NSEC_PER_MSEC,
#if XNU_TARGET_OS_OSX
	.latency_qos_scale = {3, 2, 1, -2, 3, 3},
	.latency_qos_ns_max = {1 * NSEC_PER_MSEC, 5 * NSEC_PER_MSEC, 20 * NSEC_PER_MSEC,
		               75 * NSEC_PER_MSEC, 1 * NSEC_PER_MSEC, 1 * NSEC_PER_MSEC},
	.latency_tier_rate_limited = {FALSE, FALSE, FALSE, FALSE, FALSE, FALSE},
#else /* XNU_TARGET_OS_OSX */
	.latency_qos_scale = {3, 2, 1, -2, -15, -15},
	.latency_qos_ns_max = {1 * NSEC_PER_MSEC, 5 * NSEC_PER_MSEC, 20 * NSEC_PER_MSEC,
		               75 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC},
	.latency_tier_rate_limited = {FALSE, FALSE, FALSE, FALSE, TRUE, TRUE},
#endif /* XNU_TARGET_OS_OSX */
};
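
/*
 * Illustrative sketch (not part of the original file) of how the
 * shift/max pairs above are typically consumed: a non-negative shift
 * allows a timer to be coalesced by interval >> shift, a negative shift
 * by interval << -shift, clamped to the matching *_ns_max ceiling. This
 * assumes the slop computation in kern/timer_call.c; shift, interval,
 * and ns_max here are hypothetical stand-ins for one timer class.
 *
 *	uint64_t slop;
 *
 *	if (shift >= 0) {
 *		slop = interval >> shift;
 *	} else {
 *		slop = interval << -shift;
 *	}
 *	if (slop > ns_max) {
 *		slop = ns_max;
 *	}
 */
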
timer_coalescing_priority_params_ns_t *
timer_call_get_priority_params(void)
{
	return &tcoal_prio_params_init;
}