// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/profile.c
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 *
 *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 *  Consolidation of architecture support code for profiling,
 *	Nadia Yvette Chambers, Oracle, July 2004
 *  Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, Nadia Yvette Chambers,
 *	Oracle, 2004
 */

#include <linux/export.h>
#include <linux/profile.h>
#include <linux/memblock.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched/stat.h>

#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT	3
#define PROFILE_GRPSZ		(1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT		(PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP		(NR_PROFILE_HIT/PROFILE_GRPSZ)

static atomic_t *prof_buffer;
static unsigned long prof_len;
static unsigned short int prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */

int profile_setup(char *str)
{
	static const char schedstr[] = "schedule";
	static const char sleepstr[] = "sleep";
	static const char kvmstr[] = "kvm";
	const char *select = NULL;
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		force_schedstat_enabled();
		prof_on = SLEEP_PROFILING;
		select = sleepstr;
#else
		pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		select = schedstr;
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		select = kvmstr;
	} else if (get_option(&str, &par)) {
		prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		prof_on = CPU_PROFILING;
		pr_info("kernel profiling enabled (shift: %u)\n",
			prof_shift);
	}

	if (select) {
		if (str[strlen(select)] == ',')
			str += strlen(select) + 1;
		if (get_option(&str, &par))
			prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		pr_info("kernel %s profiling enabled (shift: %u)\n",
			select, prof_shift);
	}

	return 1;
}
__setup("profile=", profile_setup);
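/*
 * Illustrative examples of the "profile=" syntax parsed above (the
 * shift values are arbitrary; "resolution" means 1 << prof_shift bytes
 * of kernel text per histogram bucket):
 *
 *	profile=2		CPU-time profiling, 4-byte resolution
 *	profile=schedule,4	schedule() call profiling, 16-byte resolution
 *	profile=sleep,8		sleep profiling (requires CONFIG_SCHEDSTATS)
 *	profile=kvm		KVM profiling at the default shift of 0
 */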


int __ref profile_init(void)
{
	int buffer_bytes;
	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;

	if (!prof_len) {
		pr_warn("profiling shift: %u too large\n", prof_shift);
		prof_on = 0;
		return -EINVAL;
	}

	buffer_bytes = prof_len*sizeof(atomic_t);

	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = vzalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	return -ENOMEM;
}
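
/*
 * Sizing example for the allocation fallback above (hypothetical
 * numbers): with 16 MB of kernel text and prof_shift == 2, prof_len is
 * 4M buckets and buffer_bytes is 16 MB, large enough that kzalloc()
 * may fail and alloc_pages_exact()/vzalloc() are tried in turn.
 */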

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;
	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	if (pc < prof_len)
		atomic_add(nr_hits, &prof_buffer[pc]);
}
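
/*
 * Worked example of the bucket computation above (hypothetical
 * addresses, assuming prof_shift == 2): with _stext at
 * 0xffffffff81000000, a sample at __pc == 0xffffffff81000123 lands in
 * bucket (0x123 >> 2) == 0x48, so each atomic_t counter covers one
 * 4-byte slice of the kernel text.
 */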

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	if (prof_on != type || !prof_buffer)
		return;
	do_profile_hits(type, __pc, nr_hits);
}
EXPORT_SYMBOL_GPL(profile_hits);

void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	/* This is the old kernel-only legacy profiling */
	if (!user_mode(regs))
		profile_hit(type, (void *)profile_pc(regs));
}

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned long sample_step = 1UL << prof_shift;

	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}
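
/*
 * A minimal userspace sketch (not kernel code) of consuming the binary
 * format produced by read_profile() above; readprofile(8) from
 * util-linux remains the recommended tool. The first unsigned int read
 * is the sample step (1 << prof_shift), followed by one hit counter
 * per bucket:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned int step, count;
 *		unsigned long addr = 0;		// byte offset from _stext
 *		FILE *fp = fopen("/proc/profile", "r");
 *
 *		if (!fp || fread(&step, sizeof(step), 1, fp) != 1)
 *			return 1;
 *		while (fread(&count, sizeof(count), 1, fp) == 1) {
 *			if (count)
 *				printf("text+%#lx: %u hits\n", addr, count);
 *			addr += step;
 *		}
 *		fclose(fp);
 *		return 0;
 *	}
 */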

/* default is to not implement this call */
int __weak setup_profiling_timer(unsigned mult)
{
	return -EINVAL;
}

/*
 * Writing to /proc/profile resets the counters
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}
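
/*
 * Illustrative use of the write path above: any write clears the
 * histogram, e.g. from a shell
 *
 *	echo > /proc/profile
 *
 * while writing exactly sizeof(int) bytes is additionally treated as a
 * profiling-timer multiplier on SMP kernels whose architecture
 * implements setup_profiling_timer().
 */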

static const struct proc_ops profile_proc_ops = {
	.proc_read	= read_profile,
	.proc_write	= write_profile,
	.proc_lseek	= default_llseek,
};

int __ref create_proc_profile(void)
{
	struct proc_dir_entry *entry;
	int err = 0;

	if (!prof_on)
		return 0;
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &profile_proc_ops);
	if (entry)
		proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
	return err;
}
subsys_initcall(create_proc_profile);
#endif /* CONFIG_PROC_FS */
246