1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef RESCTRL_H
3 #define RESCTRL_H
4 #include <stdio.h>
5 #include <math.h>
6 #include <errno.h>
7 #include <sched.h>
8 #include <stdlib.h>
9 #include <unistd.h>
10 #include <string.h>
11 #include <signal.h>
12 #include <dirent.h>
13 #include <stdbool.h>
14 #include <sys/stat.h>
15 #include <sys/ioctl.h>
16 #include <sys/mount.h>
17 #include <sys/types.h>
18 #include <sys/wait.h>
19 #include <sys/select.h>
20 #include <sys/time.h>
21 #include <sys/eventfd.h>
22 #include <asm/unistd.h>
23 #include <linux/perf_event.h>
24 #include "../kselftest.h"
25 
26 #define MB			(1024 * 1024)
27 #define RESCTRL_PATH		"/sys/fs/resctrl"
28 #define PHYS_ID_PATH		"/sys/devices/system/cpu/cpu"
29 #define INFO_PATH		"/sys/fs/resctrl/info"
30 
31 /*
32  * CPU vendor IDs
33  *
34  * Define as bits because they're used for vendor_specific bitmask in
35  * the struct resctrl_test.
36  */
37 #define ARCH_INTEL     1
38 #define ARCH_AMD       2
39 
40 #define END_OF_TESTS	1
41 
42 #define BENCHMARK_ARGS		64
43 
44 #define DEFAULT_SPAN		(250 * MB)
45 
46 /*
47  * fill_buf_param:	"fill_buf" benchmark parameters
48  * @buf_size:		Size (in bytes) of buffer used in benchmark.
49  *			"fill_buf" allocates and initializes buffer of
50  *			@buf_size. User can change value via command line.
51  * @memflush:		If false the buffer will not be flushed after
52  *			allocation and initialization, otherwise the
53  *			buffer will be flushed. User can change value via
54  *			command line (via integers with 0 interpreted as
55  *			false and anything else as true).
56  */
57 struct fill_buf_param {
58 	size_t		buf_size;
59 	bool		memflush;
60 };
61 
62 /*
63  * user_params:		User supplied parameters
64  * @cpu:		CPU number to which the benchmark will be bound to
65  * @bits:		Number of bits used for cache allocation size
66  * @benchmark_cmd:	Benchmark command to run during (some of the) tests
67  * @fill_buf:		Pointer to user provided parameters for "fill_buf",
68  *			NULL if user did not provide parameters and test
69  *			specific defaults should be used.
70  */
71 struct user_params {
72 	int cpu;
73 	int bits;
74 	const char *benchmark_cmd[BENCHMARK_ARGS];
75 	const struct fill_buf_param *fill_buf;
76 };
77 
78 /*
79  * resctrl_test:	resctrl test definition
80  * @name:		Test name
81  * @group:		Test group - a common name for tests that share some characteristic
82  *			(e.g., L3 CAT test belongs to the CAT group). Can be NULL
83  * @resource:		Resource to test (e.g., MB, L3, L2, etc.)
84  * @vendor_specific:	Bitmask for vendor-specific tests (can be 0 for universal tests)
85  * @disabled:		Test is disabled
86  * @feature_check:	Callback to check required resctrl features
87  * @run_test:		Callback to run the test
88  * @cleanup:		Callback to cleanup after the test
89  */
90 struct resctrl_test {
91 	const char	*name;
92 	const char	*group;
93 	const char	*resource;
94 	unsigned int	vendor_specific;
95 	bool		disabled;
96 	bool		(*feature_check)(const struct resctrl_test *test);
97 	int		(*run_test)(const struct resctrl_test *test,
98 				    const struct user_params *uparams);
99 	void		(*cleanup)(void);
100 };
101 
102 /*
103  * resctrl_val_param:	resctrl test parameters
104  * @ctrlgrp:		Name of the control monitor group (con_mon grp)
105  * @mongrp:		Name of the monitor group (mon grp)
106  * @filename:		Name of file to which the o/p should be written
107  * @init:		Callback function to initialize test environment
108  * @setup:		Callback function to setup per test run environment
109  * @measure:		Callback that performs the measurement (a single test)
110  * @fill_buf:		Parameters for default "fill_buf" benchmark.
111  *			Initialized with user provided parameters, possibly
112  *			adapted to be relevant to the test. If user does
113  *			not provide parameters for "fill_buf" nor a
114  *			replacement benchmark then initialized with defaults
115  *			appropriate for test. NULL if user provided
116  *			benchmark.
117  */
118 struct resctrl_val_param {
119 	const char		*ctrlgrp;
120 	const char		*mongrp;
121 	char			filename[64];
122 	unsigned long		mask;
123 	int			num_of_runs;
124 	int			(*init)(const struct resctrl_val_param *param,
125 					int domain_id);
126 	int			(*setup)(const struct resctrl_test *test,
127 					 const struct user_params *uparams,
128 					 struct resctrl_val_param *param);
129 	int			(*measure)(const struct user_params *uparams,
130 					   struct resctrl_val_param *param,
131 					   pid_t bm_pid);
132 	struct fill_buf_param	*fill_buf;
133 };
134 
/*
 * perf_event_read:	Buffer layout for read() on a perf event fd.
 *
 * NOTE(review): layout appears to match the perf_event_open(2) read
 * format for a group of events (nr followed by per-event values), with
 * room for two grouped events — confirm against
 * perf_event_initialize_read_format() and the attr setup in
 * perf_event_attr_initialize().
 */
struct perf_event_read {
	__u64 nr;			/* The number of events */
	struct {
		__u64 value;		/* The value of the event */
	} values[2];
};
141 
142 /*
143  * Memory location that consumes values compiler must not optimize away.
144  * Volatile ensures writes to this location cannot be optimized away by
145  * compiler.
146  */
147 extern volatile int *value_sink;
148 
149 extern char llc_occup_path[1024];
150 
151 int get_vendor(void);
152 bool check_resctrlfs_support(void);
153 int filter_dmesg(void);
154 int get_domain_id(const char *resource, int cpu_no, int *domain_id);
155 int mount_resctrlfs(void);
156 int umount_resctrlfs(void);
157 bool resctrl_resource_exists(const char *resource);
158 bool resctrl_mon_feature_exists(const char *resource, const char *feature);
159 bool resource_info_file_exists(const char *resource, const char *file);
160 bool test_resource_feature_check(const struct resctrl_test *test);
161 char *fgrep(FILE *inf, const char *str);
162 int taskset_benchmark(pid_t bm_pid, int cpu_no, cpu_set_t *old_affinity);
163 int taskset_restore(pid_t bm_pid, cpu_set_t *old_affinity);
164 int write_schemata(const char *ctrlgrp, char *schemata, int cpu_no,
165 		   const char *resource);
166 int write_bm_pid_to_resctrl(pid_t bm_pid, const char *ctrlgrp, const char *mongrp);
167 int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
168 		    int group_fd, unsigned long flags);
169 unsigned char *alloc_buffer(size_t buf_size, bool memflush);
170 void mem_flush(unsigned char *buf, size_t buf_size);
171 void fill_cache_read(unsigned char *buf, size_t buf_size, bool once);
172 int initialize_read_mem_bw_imc(void);
173 int measure_read_mem_bw(const struct user_params *uparams,
174 			struct resctrl_val_param *param, pid_t bm_pid);
175 void initialize_mem_bw_resctrl(const struct resctrl_val_param *param,
176 			       int domain_id);
177 int resctrl_val(const struct resctrl_test *test,
178 		const struct user_params *uparams,
179 		struct resctrl_val_param *param);
180 unsigned long create_bit_mask(unsigned int start, unsigned int len);
181 unsigned int count_contiguous_bits(unsigned long val, unsigned int *start);
182 int get_full_cbm(const char *cache_type, unsigned long *mask);
183 int get_mask_no_shareable(const char *cache_type, unsigned long *mask);
184 int get_cache_size(int cpu_no, const char *cache_type, unsigned long *cache_size);
185 int resource_info_unsigned_get(const char *resource, const char *filename, unsigned int *val);
186 void ctrlc_handler(int signum, siginfo_t *info, void *ptr);
187 int signal_handler_register(const struct resctrl_test *test);
188 void signal_handler_unregister(void);
189 unsigned int count_bits(unsigned long n);
190 
191 void perf_event_attr_initialize(struct perf_event_attr *pea, __u64 config);
192 void perf_event_initialize_read_format(struct perf_event_read *pe_read);
193 int perf_open(struct perf_event_attr *pea, pid_t pid, int cpu_no);
194 int perf_event_reset_enable(int pe_fd);
195 int perf_event_measure(int pe_fd, struct perf_event_read *pe_read,
196 		       const char *filename, pid_t bm_pid);
197 int measure_llc_resctrl(const char *filename, pid_t bm_pid);
198 void show_cache_info(int no_of_bits, __u64 avg_llc_val, size_t cache_span, bool lines);
199 
200 /*
201  * cache_portion_size - Calculate the size of a cache portion
202  * @cache_size:		Total cache size in bytes
203  * @portion_mask:	Cache portion mask
204  * @full_cache_mask:	Full Cache Bit Mask (CBM) for the cache
205  *
206  * Return: The size of the cache portion in bytes.
207  */
208 static inline unsigned long cache_portion_size(unsigned long cache_size,
209 					       unsigned long portion_mask,
210 					       unsigned long full_cache_mask)
211 {
212 	unsigned int bits = count_bits(full_cache_mask);
213 
214 	/*
215 	 * With no bits the full CBM, assume cache cannot be split into
216 	 * smaller portions. To avoid divide by zero, return cache_size.
217 	 */
218 	if (!bits)
219 		return cache_size;
220 
221 	return cache_size * count_bits(portion_mask) / bits;
222 }
223 
224 extern struct resctrl_test mbm_test;
225 extern struct resctrl_test mba_test;
226 extern struct resctrl_test cmt_test;
227 extern struct resctrl_test l3_cat_test;
228 extern struct resctrl_test l3_noncont_cat_test;
229 extern struct resctrl_test l2_noncont_cat_test;
230 
231 #endif /* RESCTRL_H */
232