xref: /linux-6.15/kernel/kcsan/debugfs.c (revision a4e74fa5)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <linux/atomic.h>
4 #include <linux/bsearch.h>
5 #include <linux/bug.h>
6 #include <linux/debugfs.h>
7 #include <linux/init.h>
8 #include <linux/kallsyms.h>
9 #include <linux/sched.h>
10 #include <linux/seq_file.h>
11 #include <linux/slab.h>
12 #include <linux/sort.h>
13 #include <linux/string.h>
14 #include <linux/uaccess.h>
15 
16 #include "kcsan.h"
17 
/*
 * Statistics counters.
 *
 * counter_names[] must stay in sync with enum kcsan_counter_id (see kcsan.h);
 * the static_assert below ensures every counter has a name.
 */
static atomic_long_t counters[KCSAN_COUNTER_COUNT];
static const char *const counter_names[] = {
	[KCSAN_COUNTER_USED_WATCHPOINTS]		= "used_watchpoints",
	[KCSAN_COUNTER_SETUP_WATCHPOINTS]		= "setup_watchpoints",
	[KCSAN_COUNTER_DATA_RACES]			= "data_races",
	[KCSAN_COUNTER_ASSERT_FAILURES]			= "assert_failures",
	[KCSAN_COUNTER_NO_CAPACITY]			= "no_capacity",
	[KCSAN_COUNTER_REPORT_RACES]			= "report_races",
	[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]		= "races_unknown_origin",
	[KCSAN_COUNTER_UNENCODABLE_ACCESSES]		= "unencodable_accesses",
	[KCSAN_COUNTER_ENCODING_FALSE_POSITIVES]	= "encoding_false_positives",
};
static_assert(ARRAY_SIZE(counter_names) == KCSAN_COUNTER_COUNT);
34 
/*
 * Addresses for filtering functions from reporting. This list can be used as a
 * whitelist or blacklist.
 */
static struct {
	unsigned long	*addrs;		/* array of addresses */
	size_t		size;		/* current size */
	int		used;		/* number of elements used */
	bool		sorted;		/* if elements are sorted */
	bool		whitelist;	/* if list is a blacklist or whitelist */
} report_filterlist = {
	.addrs		= NULL,		/* allocated lazily on first insert */
	.size		= 8,		/* small initial size */
	.used		= 0,
	.sorted		= false,
	.whitelist	= false,	/* default is blacklist */
};
/* Protects all fields of report_filterlist. */
static DEFINE_SPINLOCK(report_filterlist_lock);
53 
54 void kcsan_counter_inc(enum kcsan_counter_id id)
55 {
56 	atomic_long_inc(&counters[id]);
57 }
58 
59 void kcsan_counter_dec(enum kcsan_counter_id id)
60 {
61 	atomic_long_dec(&counters[id]);
62 }
63 
/*
 * The microbenchmark allows benchmarking KCSAN core runtime only. To run
 * multiple threads, pipe 'microbench=<iters>' from multiple tasks into the
 * debugfs file. This will not generate any conflicts, and tests fast-path only.
 *
 * NOTE(review): kcsan_enabled is global state, so any other task running
 * concurrently also has KCSAN disabled for the duration of the benchmark.
 */
static noinline void microbenchmark(unsigned long iters)
{
	/* Saved so the caller's context can be restored on return. */
	const struct kcsan_ctx ctx_save = current->kcsan_ctx;
	const bool was_enabled = READ_ONCE(kcsan_enabled);
	cycles_t cycles;

	/* We may have been called from an atomic region; reset context. */
	memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
	/*
	 * Disable to benchmark fast-path for all accesses, and (expected
	 * negligible) call into slow-path, but never set up watchpoints.
	 */
	WRITE_ONCE(kcsan_enabled, false);

	pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);

	cycles = get_cycles();
	while (iters--) {
		/* Spread checked addresses across a 256-page range. */
		unsigned long addr = iters & ((PAGE_SIZE << 8) - 1);
		/* Mix access types: ~1/128 atomic, ~1/16 write, rest plain reads. */
		int type = !(iters & 0x7f) ? KCSAN_ACCESS_ATOMIC :
				(!(iters & 0xf) ? KCSAN_ACCESS_WRITE : 0);
		__kcsan_check_access((void *)addr, sizeof(long), type);
	}
	cycles = get_cycles() - cycles;

	pr_info("KCSAN: %s end   | cycles: %llu\n", __func__, cycles);

	WRITE_ONCE(kcsan_enabled, was_enabled);
	/* restore context */
	current->kcsan_ctx = ctx_save;
}
100 
/*
 * Simple test to create conflicting accesses. Write 'test=<iters>' to KCSAN's
 * debugfs file from multiple tasks to generate real conflicts and show reports.
 */
static long test_dummy;		/* target for plain read/write checks */
static long test_flags;		/* target for ASSERT_EXCLUSIVE_BITS checks */
static long test_scoped;	/* target for scoped-access assertions */
static noinline void test_thread(unsigned long iters)
{
	/* The bits this thread flips each iteration in test_flags. */
	const long CHANGE_BITS = 0xff00ff00ff00ff00L;
	const struct kcsan_ctx ctx_save = current->kcsan_ctx;
	cycles_t cycles;

	/* We may have been called from an atomic region; reset context. */
	memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));

	pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
	/* %px prints raw addresses, so reports from multiple tasks can be correlated. */
	pr_info("test_dummy@%px, test_flags@%px, test_scoped@%px,\n",
		&test_dummy, &test_flags, &test_scoped);

	cycles = get_cycles();
	while (iters--) {
		/* These all should generate reports. */
		__kcsan_check_read(&test_dummy, sizeof(test_dummy));
		ASSERT_EXCLUSIVE_WRITER(test_dummy);
		ASSERT_EXCLUSIVE_ACCESS(test_dummy);

		ASSERT_EXCLUSIVE_BITS(test_flags, ~CHANGE_BITS); /* no report */
		__kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */

		ASSERT_EXCLUSIVE_BITS(test_flags, CHANGE_BITS); /* report */
		__kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */

		/* not actually instrumented */
		WRITE_ONCE(test_dummy, iters);  /* to observe value-change */
		__kcsan_check_write(&test_dummy, sizeof(test_dummy));

		test_flags ^= CHANGE_BITS; /* generate value-change */
		__kcsan_check_write(&test_flags, sizeof(test_flags));

		/* Scoped accesses must not be active outside the block below. */
		BUG_ON(current->kcsan_ctx.scoped_accesses.prev);
		{
			/* Should generate reports anywhere in this block. */
			ASSERT_EXCLUSIVE_WRITER_SCOPED(test_scoped);
			ASSERT_EXCLUSIVE_ACCESS_SCOPED(test_scoped);
			BUG_ON(!current->kcsan_ctx.scoped_accesses.prev);
			/* Unrelated accesses. */
			__kcsan_check_access(&cycles, sizeof(cycles), 0);
			__kcsan_check_access(&cycles, sizeof(cycles), KCSAN_ACCESS_ATOMIC);
		}
		BUG_ON(current->kcsan_ctx.scoped_accesses.prev);
	}
	cycles = get_cycles() - cycles;

	pr_info("KCSAN: %s end   | cycles: %llu\n", __func__, cycles);

	/* restore context */
	current->kcsan_ctx = ctx_save;
}
160 
/*
 * Comparator for sort() and bsearch() over the filterlist addresses.
 *
 * Fix: the original named the first parameter 'rhs' and the second 'lhs',
 * the reverse of convention. Harmless for this symmetric total order, but
 * misleading; renamed so the first argument is the left-hand side.
 */
static int cmp_filterlist_addrs(const void *lhs, const void *rhs)
{
	const unsigned long a = *(const unsigned long *)lhs;
	const unsigned long b = *(const unsigned long *)rhs;

	return a < b ? -1 : a == b ? 0 : 1;
}
168 
/*
 * Return true if reports for the function containing @func_addr should be
 * skipped, according to the current filterlist and whitelist/blacklist mode.
 * An empty filterlist never skips anything, regardless of mode.
 */
bool kcsan_skip_report_debugfs(unsigned long func_addr)
{
	unsigned long symbolsize, offset;
	unsigned long flags;
	bool ret = false;

	/* Addresses not resolvable to a symbol are never filtered. */
	if (!kallsyms_lookup_size_offset(func_addr, &symbolsize, &offset))
		return false;
	func_addr -= offset; /* Get function start */

	spin_lock_irqsave(&report_filterlist_lock, flags);
	if (report_filterlist.used == 0)
		goto out;

	/* Sort array if it is unsorted, and then do a binary search. */
	if (!report_filterlist.sorted) {
		sort(report_filterlist.addrs, report_filterlist.used,
		     sizeof(unsigned long), cmp_filterlist_addrs, NULL);
		report_filterlist.sorted = true;
	}
	ret = !!bsearch(&func_addr, report_filterlist.addrs,
			report_filterlist.used, sizeof(unsigned long),
			cmp_filterlist_addrs);
	/* In whitelist mode, only the listed functions are reported. */
	if (report_filterlist.whitelist)
		ret = !ret;

out:
	spin_unlock_irqrestore(&report_filterlist_lock, flags);
	return ret;
}
199 
/* Switch the filterlist between whitelist (true) and blacklist (false) mode. */
static void set_report_filterlist_whitelist(bool whitelist)
{
	unsigned long flags;

	spin_lock_irqsave(&report_filterlist_lock, flags);
	report_filterlist.whitelist = whitelist;
	spin_unlock_irqrestore(&report_filterlist_lock, flags);
}
208 
209 /* Returns 0 on success, error-code otherwise. */
210 static ssize_t insert_report_filterlist(const char *func)
211 {
212 	unsigned long flags;
213 	unsigned long addr = kallsyms_lookup_name(func);
214 	ssize_t ret = 0;
215 
216 	if (!addr) {
217 		pr_err("KCSAN: could not find function: '%s'\n", func);
218 		return -ENOENT;
219 	}
220 
221 	spin_lock_irqsave(&report_filterlist_lock, flags);
222 
223 	if (report_filterlist.addrs == NULL) {
224 		/* initial allocation */
225 		report_filterlist.addrs =
226 			kmalloc_array(report_filterlist.size,
227 				      sizeof(unsigned long), GFP_ATOMIC);
228 		if (report_filterlist.addrs == NULL) {
229 			ret = -ENOMEM;
230 			goto out;
231 		}
232 	} else if (report_filterlist.used == report_filterlist.size) {
233 		/* resize filterlist */
234 		size_t new_size = report_filterlist.size * 2;
235 		unsigned long *new_addrs =
236 			krealloc(report_filterlist.addrs,
237 				 new_size * sizeof(unsigned long), GFP_ATOMIC);
238 
239 		if (new_addrs == NULL) {
240 			/* leave filterlist itself untouched */
241 			ret = -ENOMEM;
242 			goto out;
243 		}
244 
245 		report_filterlist.size = new_size;
246 		report_filterlist.addrs = new_addrs;
247 	}
248 
249 	/* Note: deduplicating should be done in userspace. */
250 	report_filterlist.addrs[report_filterlist.used++] =
251 		kallsyms_lookup_name(func);
252 	report_filterlist.sorted = false;
253 
254 out:
255 	spin_unlock_irqrestore(&report_filterlist_lock, flags);
256 
257 	return ret;
258 }
259 
/*
 * seq_file show callback: print the enabled state, all statistics counters,
 * and the current filterlist (mode plus resolved function names).
 */
static int show_info(struct seq_file *file, void *v)
{
	int i;
	unsigned long flags;

	/* show stats */
	seq_printf(file, "enabled: %i\n", READ_ONCE(kcsan_enabled));
	for (i = 0; i < KCSAN_COUNTER_COUNT; ++i)
		seq_printf(file, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

	/* show filter functions, and filter type */
	spin_lock_irqsave(&report_filterlist_lock, flags);
	seq_printf(file, "\n%s functions: %s\n",
		   report_filterlist.whitelist ? "whitelisted" : "blacklisted",
		   report_filterlist.used == 0 ? "none" : "");
	/* %ps resolves each stored address back to a symbol name. */
	for (i = 0; i < report_filterlist.used; ++i)
		seq_printf(file, " %ps\n", (void *)report_filterlist.addrs[i]);
	spin_unlock_irqrestore(&report_filterlist_lock, flags);

	return 0;
}
281 
/* open callback: hook show_info() up to the seq_file single_* helpers. */
static int debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_info, NULL);
}
286 
287 static ssize_t
288 debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
289 {
290 	char kbuf[KSYM_NAME_LEN];
291 	char *arg;
292 	int read_len = count < (sizeof(kbuf) - 1) ? count : (sizeof(kbuf) - 1);
293 
294 	if (copy_from_user(kbuf, buf, read_len))
295 		return -EFAULT;
296 	kbuf[read_len] = '\0';
297 	arg = strstrip(kbuf);
298 
299 	if (!strcmp(arg, "on")) {
300 		WRITE_ONCE(kcsan_enabled, true);
301 	} else if (!strcmp(arg, "off")) {
302 		WRITE_ONCE(kcsan_enabled, false);
303 	} else if (str_has_prefix(arg, "microbench=")) {
304 		unsigned long iters;
305 
306 		if (kstrtoul(&arg[strlen("microbench=")], 0, &iters))
307 			return -EINVAL;
308 		microbenchmark(iters);
309 	} else if (str_has_prefix(arg, "test=")) {
310 		unsigned long iters;
311 
312 		if (kstrtoul(&arg[strlen("test=")], 0, &iters))
313 			return -EINVAL;
314 		test_thread(iters);
315 	} else if (!strcmp(arg, "whitelist")) {
316 		set_report_filterlist_whitelist(true);
317 	} else if (!strcmp(arg, "blacklist")) {
318 		set_report_filterlist_whitelist(false);
319 	} else if (arg[0] == '!') {
320 		ssize_t ret = insert_report_filterlist(&arg[1]);
321 
322 		if (ret < 0)
323 			return ret;
324 	} else {
325 		return -EINVAL;
326 	}
327 
328 	return count;
329 }
330 
/* File operations for the "kcsan" debugfs file; reads go through seq_file. */
static const struct file_operations debugfs_ops =
{
	.read	 = seq_read,
	.open	 = debugfs_open,
	.write	 = debugfs_write,
	.release = single_release
};
338 
/* Create the "kcsan" file (mode 0644) at the debugfs root. */
void __init kcsan_debugfs_init(void)
{
	debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
}
343