xref: /linux-6.15/arch/um/kernel/time.c (revision ebef8ea2)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
4  * Copyright (C) 2015 Thomas Meyer ([email protected])
5  * Copyright (C) 2012-2014 Cisco Systems
6  * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
7  * Copyright (C) 2019 Intel Corporation
8  */
9 
10 #include <linux/clockchips.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/jiffies.h>
14 #include <linux/mm.h>
15 #include <linux/sched.h>
16 #include <linux/spinlock.h>
17 #include <linux/threads.h>
18 #include <asm/irq.h>
19 #include <asm/param.h>
20 #include <kern_util.h>
21 #include <os.h>
22 #include <linux/time-internal.h>
23 #include <linux/um_timetravel.h>
24 #include <shared/init.h>
25 
26 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
27 enum time_travel_mode time_travel_mode;
28 EXPORT_SYMBOL_GPL(time_travel_mode);
29 
30 static bool time_travel_start_set;
31 static unsigned long long time_travel_start;
32 static unsigned long long time_travel_time;
33 static LIST_HEAD(time_travel_events);
34 static unsigned long long time_travel_timer_interval;
35 static unsigned long long time_travel_next_event;
36 static struct time_travel_event time_travel_timer_event;
37 static int time_travel_ext_fd = -1;
38 static unsigned int time_travel_ext_waiting;
39 static bool time_travel_ext_prev_request_valid;
40 static unsigned long long time_travel_ext_prev_request;
41 static bool time_travel_ext_free_until_valid;
42 static unsigned long long time_travel_ext_free_until;
43 
44 static void time_travel_set_time(unsigned long long ns)
45 {
46 	if (unlikely(ns < time_travel_time))
47 		panic("time-travel: time goes backwards %lld -> %lld\n",
48 		      time_travel_time, ns);
49 	time_travel_time = ns;
50 }
51 
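/*
 * Modes for time_travel_handle_message():
 *  TTMH_IDLE - called from the idle loop with interrupts disabled; they are
 *              enabled around the poll so that e.g. virtio interrupts can
 *              still be delivered while we wait
 *  TTMH_POLL - poll for a message before reading it
 *  TTMH_READ - only read the message, used while waiting for an ACK
 */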
52 enum time_travel_message_handling {
53 	TTMH_IDLE,
54 	TTMH_POLL,
55 	TTMH_READ,
56 };
57 
58 static void time_travel_handle_message(struct um_timetravel_msg *msg,
59 				       enum time_travel_message_handling mode)
60 {
61 	struct um_timetravel_msg resp = {
62 		.op = UM_TIMETRAVEL_ACK,
63 	};
64 	int ret;
65 
66 	/*
67 	 * Poll outside the locked section (if we're not called to only read
68 	 * the response) so we can get interrupts for e.g. virtio while we're
69 	 * here, but then we need to lock to not get interrupted between the
70 	 * read of the message and write of the ACK.
71 	 */
72 	if (mode != TTMH_READ) {
73 		bool disabled = irqs_disabled();
74 
75 		BUG_ON(mode == TTMH_IDLE && !disabled);
76 
77 		if (disabled)
78 			local_irq_enable();
79 		while (os_poll(1, &time_travel_ext_fd) != 0) {
80 			/* nothing */
81 		}
82 		if (disabled)
83 			local_irq_disable();
84 	}
85 
86 	ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));
87 
88 	if (ret == 0)
89 		panic("time-travel external link is broken\n");
90 	if (ret != sizeof(*msg))
91 		panic("invalid time-travel message - %d bytes\n", ret);
92 
93 	switch (msg->op) {
94 	default:
95 		WARN_ONCE(1, "time-travel: unexpected message %lld\n",
96 			  (unsigned long long)msg->op);
97 		break;
98 	case UM_TIMETRAVEL_ACK:
99 		return;
100 	case UM_TIMETRAVEL_RUN:
101 		time_travel_set_time(msg->time);
102 		break;
103 	case UM_TIMETRAVEL_FREE_UNTIL:
104 		time_travel_ext_free_until_valid = true;
105 		time_travel_ext_free_until = msg->time;
106 		break;
107 	}
108 
109 	os_write_file(time_travel_ext_fd, &resp, sizeof(resp));
110 }
111 
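/*
 * Send a message to the external scheduler and read messages until the
 * matching ACK (checked via the sequence number) arrives.  Interrupts stay
 * disabled for the whole round trip; for UM_TIMETRAVEL_GET the ACK carries
 * the current time, which is stored via time_travel_set_time().
 */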
112 static u64 time_travel_ext_req(u32 op, u64 time)
113 {
114 	static int seq;
115 	int mseq = ++seq;
116 	struct um_timetravel_msg msg = {
117 		.op = op,
118 		.time = time,
119 		.seq = mseq,
120 	};
121 	unsigned long flags;
122 
123 	/*
124 	 * We need to save interrupts here and only restore when we
125 	 * got the ACK - otherwise we can get interrupted and send
126 	 * another request while we're still waiting for an ACK, but
127 	 * the peer doesn't know we got interrupted and will send
128 	 * the ACKs in the same order as the messages, while we'd need
129 	 * to see them in the opposite order ...
130 	 *
131 	 * This wouldn't matter *too* much, but some ACKs carry the
132 	 * current time (for UM_TIMETRAVEL_GET) and getting another
133 	 * ACK without a time would confuse us a lot!
134 	 *
135 	 * The sequence number assignment that happens here lets us
136 	 * debug such message handling issues more easily.
137 	 */
138 	local_irq_save(flags);
139 	os_write_file(time_travel_ext_fd, &msg, sizeof(msg));
140 
141 	while (msg.op != UM_TIMETRAVEL_ACK)
142 		time_travel_handle_message(&msg, TTMH_READ);
143 
144 	if (msg.seq != mseq)
145 		panic("time-travel: ACK message has different seqno! op=%d, seq=%d != %d time=%lld\n",
146 		      msg.op, msg.seq, mseq, msg.time);
147 
148 	if (op == UM_TIMETRAVEL_GET)
149 		time_travel_set_time(msg.time);
150 	local_irq_restore(flags);
151 
152 	return msg.time;
153 }
154 
155 void __time_travel_wait_readable(int fd)
156 {
157 	int fds[2] = { fd, time_travel_ext_fd };
158 	int ret;
159 
160 	if (time_travel_mode != TT_MODE_EXTERNAL)
161 		return;
162 
163 	while ((ret = os_poll(2, fds))) {
164 		struct um_timetravel_msg msg;
165 
166 		if (ret == 1)
167 			time_travel_handle_message(&msg, TTMH_READ);
168 	}
169 }
170 EXPORT_SYMBOL_GPL(__time_travel_wait_readable);
171 
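/*
 * Ask the external scheduler to run us again at 'time', unless the still
 * valid previous request already asked for exactly that time.
 */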
172 static void time_travel_ext_update_request(unsigned long long time)
173 {
174 	if (time_travel_mode != TT_MODE_EXTERNAL)
175 		return;
176 
177 	/* asked for exactly this time previously */
178 	if (time_travel_ext_prev_request_valid &&
179 	    time == time_travel_ext_prev_request)
180 		return;
181 
182 	time_travel_ext_prev_request = time;
183 	time_travel_ext_prev_request_valid = true;
184 	time_travel_ext_req(UM_TIMETRAVEL_REQUEST, time);
185 }
186 
187 void __time_travel_propagate_time(void)
188 {
189 	time_travel_ext_req(UM_TIMETRAVEL_UPDATE, time_travel_time);
190 }
191 EXPORT_SYMBOL_GPL(__time_travel_propagate_time);
192 
193 /* returns true if we must issue a wait request to the simtime device */
194 static bool time_travel_ext_request(unsigned long long time)
195 {
196 	/*
197 	 * If we received an external sync point ("free until") then we
198 	 * don't have to request/wait for anything until then, unless
199 	 * we're already waiting.
200 	 */
201 	if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
202 	    time < time_travel_ext_free_until)
203 		return false;
204 
205 	time_travel_ext_update_request(time);
206 	return true;
207 }
208 
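/*
 * Give up the CPU to the external scheduler (UM_TIMETRAVEL_WAIT) and keep
 * handling incoming messages until it tells us to run again
 * (UM_TIMETRAVEL_RUN).
 */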
209 static void time_travel_ext_wait(bool idle)
210 {
211 	struct um_timetravel_msg msg = {
212 		.op = UM_TIMETRAVEL_ACK,
213 	};
214 
215 	time_travel_ext_prev_request_valid = false;
216 	time_travel_ext_waiting++;
217 
218 	time_travel_ext_req(UM_TIMETRAVEL_WAIT, -1);
219 
220 	/*
221 	 * Here we are deep in the idle loop, so we have to break out of the
222 	 * kernel abstraction in a sense and implement this in terms of the
223 	 * UML system waiting on the VQ interrupt while sleeping; when we get
224 	 * the signal it'll call time_travel_ext_vq_notify_done(), completing the
225 	 * call.
226 	 */
227 	while (msg.op != UM_TIMETRAVEL_RUN)
228 		time_travel_handle_message(&msg, idle ? TTMH_IDLE : TTMH_POLL);
229 
230 	time_travel_ext_waiting--;
231 
232 	/* we might request more stuff while polling - reset when we run */
233 	time_travel_ext_prev_request_valid = false;
234 }
235 
236 static void time_travel_ext_get_time(void)
237 {
238 	time_travel_ext_req(UM_TIMETRAVEL_GET, -1);
239 }
240 
241 static void __time_travel_update_time(unsigned long long ns, bool idle)
242 {
243 	if (time_travel_mode == TT_MODE_EXTERNAL && time_travel_ext_request(ns))
244 		time_travel_ext_wait(idle);
245 	else
246 		time_travel_set_time(ns);
247 }
248 
249 static struct time_travel_event *time_travel_first_event(void)
250 {
251 	return list_first_entry_or_null(&time_travel_events,
252 					struct time_travel_event,
253 					list);
254 }
255 
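/*
 * Insert the event into the list, which is kept sorted by time, and let the
 * external scheduler know about the (possibly new) earliest expiry.
 */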
256 static void __time_travel_add_event(struct time_travel_event *e,
257 				    unsigned long long time)
258 {
259 	struct time_travel_event *tmp;
260 	bool inserted = false;
261 
262 	if (WARN(time_travel_mode == TT_MODE_BASIC &&
263 		 e != &time_travel_timer_event,
264 		 "only timer events can be handled in basic mode"))
265 		return;
266 
267 	if (e->pending)
268 		return;
269 
270 	e->pending = true;
271 	e->time = time;
272 
273 	list_for_each_entry(tmp, &time_travel_events, list) {
274 		/*
275 		 * Add the new entry before one with higher time,
276 		 * or if they're equal and both on stack, because
277 		 * in that case we need to unwind the stack in the
278 		 * right order, and the later event (timer sleep
279 		 * or such) must be dequeued first.
280 		 */
281 		if ((tmp->time > e->time) ||
282 		    (tmp->time == e->time && tmp->onstack && e->onstack)) {
283 			list_add_tail(&e->list, &tmp->list);
284 			inserted = true;
285 			break;
286 		}
287 	}
288 
289 	if (!inserted)
290 		list_add_tail(&e->list, &time_travel_events);
291 
292 	tmp = time_travel_first_event();
293 	time_travel_ext_update_request(tmp->time);
294 	time_travel_next_event = tmp->time;
295 }
296 
297 static void time_travel_add_event(struct time_travel_event *e,
298 				  unsigned long long time)
299 {
300 	if (WARN_ON(!e->fn))
301 		return;
302 
303 	__time_travel_add_event(e, time);
304 }
305 
306 static void time_travel_periodic_timer(struct time_travel_event *e)
307 {
308 	time_travel_add_event(&time_travel_timer_event,
309 			      time_travel_time + time_travel_timer_interval);
310 	deliver_alarm();
311 }
312 
313 static void time_travel_deliver_event(struct time_travel_event *e)
314 {
315 	if (e == &time_travel_timer_event) {
316 		/*
317 		 * deliver_alarm() does the irq_enter/irq_exit
318 		 * by itself, so must handle it specially here
319 		 */
320 		e->fn(e);
321 	} else {
322 		unsigned long flags;
323 
324 		local_irq_save(flags);
325 		irq_enter();
326 		e->fn(e);
327 		irq_exit();
328 		local_irq_restore(flags);
329 	}
330 }
331 
332 static bool time_travel_del_event(struct time_travel_event *e)
333 {
334 	if (!e->pending)
335 		return false;
336 	list_del(&e->list);
337 	e->pending = false;
338 	return true;
339 }
340 
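/*
 * Advance simulated time to 'next': an on-stack sentinel event is queued for
 * that time and all earlier events are delivered in order.  When called from
 * the idle loop only a single wait/delivery step is done; otherwise we loop
 * until the sentinel itself is reached.
 */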
341 static void time_travel_update_time(unsigned long long next, bool idle)
342 {
343 	struct time_travel_event ne = {
344 		.onstack = true,
345 	};
346 	struct time_travel_event *e;
347 	bool finished = idle;
348 
349 	/* add it without a handler - we deal with that specifically below */
350 	__time_travel_add_event(&ne, next);
351 
352 	do {
353 		e = time_travel_first_event();
354 
355 		BUG_ON(!e);
356 		__time_travel_update_time(e->time, idle);
357 
358 		/* new events may have been inserted while we were waiting */
359 		if (e == time_travel_first_event()) {
360 			BUG_ON(!time_travel_del_event(e));
361 			BUG_ON(time_travel_time != e->time);
362 
363 			if (e == &ne) {
364 				finished = true;
365 			} else {
366 				if (e->onstack)
367 					panic("On-stack event dequeued outside of the stack! time=%lld, event time=%lld, event=%pS\n",
368 					      time_travel_time, e->time, e);
369 				time_travel_deliver_event(e);
370 			}
371 		}
372 
373 		e = time_travel_first_event();
374 		if (e)
375 			time_travel_ext_update_request(e->time);
376 	} while (ne.pending && !finished);
377 
378 	time_travel_del_event(&ne);
379 }
380 
381 void time_travel_ndelay(unsigned long nsec)
382 {
383 	time_travel_update_time(time_travel_time + nsec, false);
384 }
385 EXPORT_SYMBOL(time_travel_ndelay);
386 
387 void time_travel_add_irq_event(struct time_travel_event *e)
388 {
389 	BUG_ON(time_travel_mode != TT_MODE_EXTERNAL);
390 
391 	time_travel_ext_get_time();
392 	/*
393 	 * We could model interrupt latency here, for now just
394 	 * don't have any latency at all and request the exact
395 	 * same time (again) to run the interrupt...
396 	 */
397 	time_travel_add_event(e, time_travel_time);
398 }
399 EXPORT_SYMBOL_GPL(time_travel_add_irq_event);
400 
401 static void time_travel_oneshot_timer(struct time_travel_event *e)
402 {
403 	deliver_alarm();
404 }
405 
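/*
 * Sleep for 'duration' of simulated time.  In basic mode the real OS timer
 * is stopped across the skip and re-armed afterwards to match whatever timer
 * event is still pending.
 */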
406 void time_travel_sleep(unsigned long long duration)
407 {
408 	unsigned long long next = time_travel_time + duration;
409 
410 	if (time_travel_mode == TT_MODE_BASIC)
411 		os_timer_disable();
412 
413 	time_travel_update_time(next, true);
414 
415 	if (time_travel_mode == TT_MODE_BASIC &&
416 	    time_travel_timer_event.pending) {
417 		if (time_travel_timer_event.fn == time_travel_periodic_timer) {
418 			/*
419 			 * This is somewhat wrong - we should get the first
420 			 * one sooner like the os_timer_one_shot() below...
421 			 */
422 			os_timer_set_interval(time_travel_timer_interval);
423 		} else {
424 			os_timer_one_shot(time_travel_timer_event.time - next);
425 		}
426 	}
427 }
428 
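/*
 * Called from the real timer signal in basic mode: jump the simulated clock
 * to the expected expiry and re-queue the timer event if it is periodic.
 */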
429 static void time_travel_handle_real_alarm(void)
430 {
431 	time_travel_set_time(time_travel_next_event);
432 
433 	time_travel_del_event(&time_travel_timer_event);
434 
435 	if (time_travel_timer_event.fn == time_travel_periodic_timer)
436 		time_travel_add_event(&time_travel_timer_event,
437 				      time_travel_time +
438 				      time_travel_timer_interval);
439 }
440 
441 static void time_travel_set_interval(unsigned long long interval)
442 {
443 	time_travel_timer_interval = interval;
444 }
445 
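/*
 * Parse the "[ID:]socket" argument of time-travel=ext:, connect to the
 * scheduler socket and announce ourselves with UM_TIMETRAVEL_START, passing
 * the optional ID (all ones if none was given).
 */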
446 static int time_travel_connect_external(const char *socket)
447 {
448 	const char *sep;
449 	unsigned long long id = (unsigned long long)-1;
450 	int rc;
451 
452 	if ((sep = strchr(socket, ':'))) {
453 		char buf[25] = {};
454 		if (sep - socket > sizeof(buf) - 1)
455 			goto invalid_number;
456 
457 		memcpy(buf, socket, sep - socket);
458 		if (kstrtoull(buf, 0, &id)) {
459 invalid_number:
460 			panic("time-travel: invalid external ID in string '%s'\n",
461 			      socket);
462 			return -EINVAL;
463 		}
464 
465 		socket = sep + 1;
466 	}
467 
468 	rc = os_connect_socket(socket);
469 	if (rc < 0) {
470 		panic("time-travel: failed to connect to external socket %s\n",
471 		      socket);
472 		return rc;
473 	}
474 
475 	time_travel_ext_fd = rc;
476 
477 	time_travel_ext_req(UM_TIMETRAVEL_START, id);
478 
479 	return 1;
480 }
481 #else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
482 #define time_travel_start_set 0
483 #define time_travel_start 0
484 #define time_travel_time 0
485 
486 static inline void time_travel_update_time(unsigned long long ns, bool retearly)
487 {
488 }
489 
490 static inline void time_travel_handle_real_alarm(void)
491 {
492 }
493 
494 static void time_travel_set_interval(unsigned long long interval)
495 {
496 }
497 
498 /* fail link if this actually gets used */
499 extern u64 time_travel_ext_req(u32 op, u64 time);
500 
501 /* these are empty macros so the struct/fn need not exist */
502 #define time_travel_add_event(e, time) do { } while (0)
503 #define time_travel_del_event(e) do { } while (0)
504 #endif
505 
506 void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
507 {
508 	unsigned long flags;
509 
510 	/*
511 	 * In basic time-travel mode we still get real interrupts
512 	 * (signals) but since we don't read time from the OS, we
513 	 * must update the simulated time here to the expiry when
514 	 * we get a signal.
515 	 * This is not the case in inf-cpu mode, since there we
516 	 * never get any real signals from the OS.
517 	 */
518 	if (time_travel_mode == TT_MODE_BASIC)
519 		time_travel_handle_real_alarm();
520 
521 	local_irq_save(flags);
522 	do_IRQ(TIMER_IRQ, regs);
523 	local_irq_restore(flags);
524 }
525 
526 static int itimer_shutdown(struct clock_event_device *evt)
527 {
528 	if (time_travel_mode != TT_MODE_OFF)
529 		time_travel_del_event(&time_travel_timer_event);
530 
531 	if (time_travel_mode != TT_MODE_INFCPU &&
532 	    time_travel_mode != TT_MODE_EXTERNAL)
533 		os_timer_disable();
534 
535 	return 0;
536 }
537 
538 static int itimer_set_periodic(struct clock_event_device *evt)
539 {
540 	unsigned long long interval = NSEC_PER_SEC / HZ;
541 
542 	if (time_travel_mode != TT_MODE_OFF) {
543 		time_travel_del_event(&time_travel_timer_event);
544 		time_travel_set_event_fn(&time_travel_timer_event,
545 					 time_travel_periodic_timer);
546 		time_travel_set_interval(interval);
547 		time_travel_add_event(&time_travel_timer_event,
548 				      time_travel_time + interval);
549 	}
550 
551 	if (time_travel_mode != TT_MODE_INFCPU &&
552 	    time_travel_mode != TT_MODE_EXTERNAL)
553 		os_timer_set_interval(interval);
554 
555 	return 0;
556 }
557 
558 static int itimer_next_event(unsigned long delta,
559 			     struct clock_event_device *evt)
560 {
561 	delta += 1;
562 
563 	if (time_travel_mode != TT_MODE_OFF) {
564 		time_travel_del_event(&time_travel_timer_event);
565 		time_travel_set_event_fn(&time_travel_timer_event,
566 					 time_travel_oneshot_timer);
567 		time_travel_add_event(&time_travel_timer_event,
568 				      time_travel_time + delta);
569 	}
570 
571 	if (time_travel_mode != TT_MODE_INFCPU &&
572 	    time_travel_mode != TT_MODE_EXTERNAL)
573 		return os_timer_one_shot(delta);
574 
575 	return 0;
576 }
577 
578 static int itimer_one_shot(struct clock_event_device *evt)
579 {
580 	return itimer_next_event(0, evt);
581 }
582 
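/*
 * With .mult = 1 and .shift = 0 the clockevent deltas are effectively
 * programmed in nanoseconds; TIMER_MIN_DELTA (presumably provided by the
 * time-internal header included above) bounds the smallest delta.
 */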
583 static struct clock_event_device timer_clockevent = {
584 	.name			= "posix-timer",
585 	.rating			= 250,
586 	.cpumask		= cpu_possible_mask,
587 	.features		= CLOCK_EVT_FEAT_PERIODIC |
588 				  CLOCK_EVT_FEAT_ONESHOT,
589 	.set_state_shutdown	= itimer_shutdown,
590 	.set_state_periodic	= itimer_set_periodic,
591 	.set_state_oneshot	= itimer_one_shot,
592 	.set_next_event		= itimer_next_event,
593 	.shift			= 0,
594 	.max_delta_ns		= 0xffffffff,
595 	.max_delta_ticks	= 0xffffffff,
596 	.min_delta_ns		= TIMER_MIN_DELTA,
597 	.min_delta_ticks	= TIMER_MIN_DELTA, // microsecond resolution should be enough for anyone, same as 640K RAM
598 	.irq			= 0,
599 	.mult			= 1,
600 };
601 
602 static irqreturn_t um_timer(int irq, void *dev)
603 {
604 	if (get_current()->mm != NULL)
605 	{
606 		/* userspace - relay signal, results in correct userspace timers */
607 		os_alarm_process(get_current()->mm->context.id.u.pid);
608 	}
609 
610 	(*timer_clockevent.event_handler)(&timer_clockevent);
611 
612 	return IRQ_HANDLED;
613 }
614 
615 static u64 timer_read(struct clocksource *cs)
616 {
617 	if (time_travel_mode != TT_MODE_OFF) {
618 		/*
619 		 * We make reading the timer cost a bit so that we don't get
620 		 * stuck in loops that expect time to move more than the
621 		 * exact requested sleep amount, e.g. python's socket server,
622 		 * see https://bugs.python.org/issue37026.
623 		 *
624 		 * However, don't do that when we're in interrupt or such as
625 		 * then we might recurse into our own processing, and get to
626 		 * even more waiting, and that's not good - it messes up the
627 		 * "what do I do next" and onstack event we use to know when
628 		 * to return from time_travel_update_time().
629 		 */
630 		if (!irqs_disabled() && !in_interrupt() && !in_softirq())
631 			time_travel_update_time(time_travel_time +
632 						TIMER_MULTIPLIER,
633 						false);
634 		return time_travel_time / TIMER_MULTIPLIER;
635 	}
636 
637 	return os_nsecs() / TIMER_MULTIPLIER;
638 }
639 
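/*
 * The clocksource is registered at NSEC_PER_SEC / TIMER_MULTIPLIER Hz in
 * um_timer_setup(), matching the division by TIMER_MULTIPLIER in
 * timer_read().
 */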
640 static struct clocksource timer_clocksource = {
641 	.name		= "timer",
642 	.rating		= 300,
643 	.read		= timer_read,
644 	.mask		= CLOCKSOURCE_MASK(64),
645 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
646 };
647 
648 static void __init um_timer_setup(void)
649 {
650 	int err;
651 
652 	err = request_irq(TIMER_IRQ, um_timer, IRQF_TIMER, "hr timer", NULL);
653 	if (err != 0)
654 		printk(KERN_ERR "register_timer : request_irq failed - "
655 		       "errno = %d\n", -err);
656 
657 	err = os_timer_create();
658 	if (err != 0) {
659 		printk(KERN_ERR "creation of timer failed - errno = %d\n", -err);
660 		return;
661 	}
662 
663 	err = clocksource_register_hz(&timer_clocksource, NSEC_PER_SEC/TIMER_MULTIPLIER);
664 	if (err) {
665 		printk(KERN_ERR "clocksource_register_hz returned %d\n", err);
666 		return;
667 	}
668 	clockevents_register_device(&timer_clockevent);
669 }
670 
671 void read_persistent_clock64(struct timespec64 *ts)
672 {
673 	long long nsecs;
674 
675 	if (time_travel_start_set)
676 		nsecs = time_travel_start + time_travel_time;
677 	else if (time_travel_mode == TT_MODE_EXTERNAL)
678 		nsecs = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
679 	else
680 		nsecs = os_persistent_clock_emulation();
681 
682 	set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
683 				  nsecs % NSEC_PER_SEC);
684 }
685 
686 void __init time_init(void)
687 {
688 	timer_set_signal_handler();
689 	late_time_init = um_timer_setup;
690 }
691 
692 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
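/*
 * Skip the BogoMIPS delay calibration when time is fully simulated (inf-cpu
 * or external mode), where a calibration busy loop would be meaningless and
 * could easily get stuck.
 */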
693 unsigned long calibrate_delay_is_known(void)
694 {
695 	if (time_travel_mode == TT_MODE_INFCPU ||
696 	    time_travel_mode == TT_MODE_EXTERNAL)
697 		return 1;
698 	return 0;
699 }
700 
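/*
 * "time-travel" command line parsing: an empty value selects basic mode,
 * "=inf-cpu" infinite-CPU mode and "=ext:[ID:]socket" external mode, as
 * described in the help text below.
 */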
701 int setup_time_travel(char *str)
702 {
703 	if (strcmp(str, "=inf-cpu") == 0) {
704 		time_travel_mode = TT_MODE_INFCPU;
705 		timer_clockevent.name = "time-travel-timer-infcpu";
706 		timer_clocksource.name = "time-travel-clock";
707 		return 1;
708 	}
709 
710 	if (strncmp(str, "=ext:", 5) == 0) {
711 		time_travel_mode = TT_MODE_EXTERNAL;
712 		timer_clockevent.name = "time-travel-timer-external";
713 		timer_clocksource.name = "time-travel-clock-external";
714 		return time_travel_connect_external(str + 5);
715 	}
716 
717 	if (!*str) {
718 		time_travel_mode = TT_MODE_BASIC;
719 		timer_clockevent.name = "time-travel-timer";
720 		timer_clocksource.name = "time-travel-clock";
721 		return 1;
722 	}
723 
724 	return -EINVAL;
725 }
726 
727 __setup("time-travel", setup_time_travel);
728 __uml_help(setup_time_travel,
729 "time-travel\n"
730 "This option just enables basic time travel mode, in which the clock/timers\n"
731 "inside the UML instance skip forward when there's nothing to do, rather than\n"
732 "waiting for real time to elapse. However, instance CPU speed is limited by\n"
733 "the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n"
734 "clock (but quicker when there's nothing to do).\n"
735 "\n"
736 "time-travel=inf-cpu\n"
737 "This enables time travel mode with infinite processing power, in which there\n"
738 "are no wall clock timers, and any CPU processing happens - as seen from the\n"
739 "guest - instantly. This can be useful for accurate simulation regardless of\n"
740 "debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
741 "easily lead to getting stuck (e.g. if anything in the system busy loops).\n"
742 "\n"
743 "time-travel=ext:[ID:]/path/to/socket\n"
744 "This enables time travel mode similar to =inf-cpu, except the system will\n"
745 "use the given socket to coordinate with a central scheduler, in order to\n"
746 "have more than one system simultaneously be on simulated time. The virtio\n"
747 "driver code in UML knows about this so you can also simulate networks and\n"
748 "devices using it, assuming the device has the right capabilities.\n"
749 "The optional ID is a 64-bit integer that's sent to the central scheduler.\n");
750 
751 int setup_time_travel_start(char *str)
752 {
753 	int err;
754 
755 	err = kstrtoull(str, 0, &time_travel_start);
756 	if (err)
757 		return err;
758 
759 	time_travel_start_set = 1;
760 	return 1;
761 }
762 
763 __setup("time-travel-start", setup_time_travel_start);
764 __uml_help(setup_time_travel_start,
765 "time-travel-start=<seconds>\n"
766 "Configure the UML instance's wall clock to start at this value rather than\n"
767 "the host's wall clock at the time of UML boot.\n");
768 #endif
769