/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_STATE_H
#define _LINUX_CONTEXT_TRACKING_STATE_H

#include <linux/percpu.h>
#include <linux/static_key.h>
#include <linux/context_tracking_irq.h>

enum ctx_state {
	CONTEXT_DISABLED = -1,	/* returned by ct_state() if unknown */
	CONTEXT_KERNEL = 0,
	CONTEXT_USER,
	CONTEXT_GUEST,
};

/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
#define DYNTICK_IRQ_NONIDLE	((LONG_MAX / 2) + 1)

struct context_tracking {
#ifdef CONFIG_CONTEXT_TRACKING_USER
	/*
	 * When active is false, probes are unset in order
	 * to minimize overhead: TIF flags are cleared
	 * and calls to user_enter/exit are ignored. This
	 * may be further optimized using static keys.
	 */
	bool active;
	int recursion;
	enum ctx_state state;
#endif
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
	atomic_t dynticks;		/* Even value for idle, else odd. */
	long dynticks_nesting;		/* Track process nesting level. */
	long dynticks_nmi_nesting;	/* Track irq/NMI nesting level. */
#endif
};

#ifdef CONFIG_CONTEXT_TRACKING
DECLARE_PER_CPU(struct context_tracking, context_tracking);
#endif

#ifdef CONFIG_CONTEXT_TRACKING_IDLE
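/*
 * Read-only accessors for the dynticks counter declared above: the
 * value is even while the CPU is idle, odd otherwise (see the field
 * comments in struct context_tracking).
 */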
static __always_inline int ct_dynticks(void)
{
	return atomic_read(this_cpu_ptr(&context_tracking.dynticks));
}

static __always_inline int ct_dynticks_cpu(int cpu)
{
	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);

	return atomic_read(&ct->dynticks);
}

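/*
 * Like ct_dynticks_cpu() but with acquire ordering: accesses following
 * this read are ordered after the counter read itself.
 */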
static __always_inline int ct_dynticks_cpu_acquire(int cpu)
{
	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);

	return atomic_read_acquire(&ct->dynticks);
}

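/*
 * Nesting accessors: dynticks_nesting tracks process-level (task) idle
 * nesting, dynticks_nmi_nesting tracks irq/NMI nesting (see
 * DYNTICK_IRQ_NONIDLE above).
 */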
static __always_inline long ct_dynticks_nesting(void)
{
	return __this_cpu_read(context_tracking.dynticks_nesting);
}

static __always_inline long ct_dynticks_nesting_cpu(int cpu)
{
	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);

	return ct->dynticks_nesting;
}

static __always_inline long ct_dynticks_nmi_nesting(void)
{
	return __this_cpu_read(context_tracking.dynticks_nmi_nesting);
}

static __always_inline long ct_dynticks_nmi_nesting_cpu(int cpu)
{
	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);

	return ct->dynticks_nmi_nesting;
}
#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */

#ifdef CONFIG_CONTEXT_TRACKING_USER
extern struct static_key_false context_tracking_key;

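/*
 * context_tracking_enabled() checks only the global static key; the
 * _cpu() and _this_cpu() variants additionally require the per-CPU
 * ->active flag to be set.
 */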
static __always_inline bool context_tracking_enabled(void)
{
	return static_branch_unlikely(&context_tracking_key);
}

static __always_inline bool context_tracking_enabled_cpu(int cpu)
{
	return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
}

static inline bool context_tracking_enabled_this_cpu(void)
{
	return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
}

#else
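/* Stubs: with CONFIG_CONTEXT_TRACKING_USER=n these compile to constant false. */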
static __always_inline bool context_tracking_enabled(void) { return false; }
static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; }
static __always_inline bool context_tracking_enabled_this_cpu(void) { return false; }
#endif /* CONFIG_CONTEXT_TRACKING_USER */

#endif