xref: /linux-6.15/include/linux/resctrl.h (revision c32a7d77)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _RESCTRL_H
3 #define _RESCTRL_H
4 
5 #include <linux/cacheinfo.h>
6 #include <linux/kernel.h>
7 #include <linux/list.h>
8 #include <linux/pid.h>
9 #include <linux/resctrl_types.h>
10 
11 /* CLOSID, RMID value used by the default control group */
12 #define RESCTRL_RESERVED_CLOSID		0
13 #define RESCTRL_RESERVED_RMID		0
14 
15 #define RESCTRL_PICK_ANY_CPU		-1
16 
17 #ifdef CONFIG_PROC_CPU_RESCTRL
18 
19 int proc_resctrl_show(struct seq_file *m,
20 		      struct pid_namespace *ns,
21 		      struct pid *pid,
22 		      struct task_struct *tsk);
23 
24 #endif
25 
26 /* max value for struct rdt_ctrl_domain's mbps_val */
27 #define MBA_MAX_MBPS   U32_MAX
28 
29 /* Walk all possible resources, with variants for only controls or monitors. */
30 #define for_each_rdt_resource(_r)						\
31 	for ((_r) = resctrl_arch_get_resource(0);				\
32 	     (_r) && (_r)->rid < RDT_NUM_RESOURCES;				\
33 	     (_r) = resctrl_arch_get_resource((_r)->rid + 1))
34 
35 #define for_each_capable_rdt_resource(r)				      \
36 	for_each_rdt_resource((r))					      \
37 		if ((r)->alloc_capable || (r)->mon_capable)
38 
39 #define for_each_alloc_capable_rdt_resource(r)				      \
40 	for_each_rdt_resource((r))					      \
41 		if ((r)->alloc_capable)
42 
43 #define for_each_mon_capable_rdt_resource(r)				      \
44 	for_each_rdt_resource((r))					      \
45 		if ((r)->mon_capable)
46 
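/*
 * Example (illustrative sketch, not part of this header): walking the
 * alloc-capable resources with the macros above. Each walker expands to
 * a plain for/if pair, so a single statement or a braced block can
 * follow it; the pr_info() message here is purely for demonstration.
 *
 *	struct rdt_resource *r;
 *
 *	for_each_alloc_capable_rdt_resource(r)
 *		pr_info("resctrl: %s supports allocation\n", r->name);
 */
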
47 /**
48  * enum resctrl_conf_type - The type of configuration.
49  * @CDP_NONE:	No prioritisation, both code and data are controlled or monitored.
50  * @CDP_CODE:	Configuration applies to instruction fetches.
51  * @CDP_DATA:	Configuration applies to reads and writes.
52  */
53 enum resctrl_conf_type {
54 	CDP_NONE,
55 	CDP_CODE,
56 	CDP_DATA,
57 };
58 
59 #define CDP_NUM_TYPES	(CDP_DATA + 1)
60 
61 /**
62  * struct resctrl_staged_config - parsed configuration to be applied
63  * @new_ctrl:		new ctrl value to be loaded
64  * @have_new_ctrl:	whether the user provided new_ctrl is valid
65  */
66 struct resctrl_staged_config {
67 	u32			new_ctrl;
68 	bool			have_new_ctrl;
69 };
70 
71 enum resctrl_domain_type {
72 	RESCTRL_CTRL_DOMAIN,
73 	RESCTRL_MON_DOMAIN,
74 };
75 
76 /**
77  * struct rdt_domain_hdr - common header for different domain types
78  * @list:		all instances of this resource
79  * @id:			unique id for this instance
80  * @type:		type of this instance
81  * @cpu_mask:		which CPUs share this resource
82  */
83 struct rdt_domain_hdr {
84 	struct list_head		list;
85 	int				id;
86 	enum resctrl_domain_type	type;
87 	struct cpumask			cpu_mask;
88 };
89 
90 /**
91  * struct rdt_ctrl_domain - group of CPUs sharing a resctrl control resource
92  * @hdr:		common header for different domain types
93  * @plr:		pseudo-locked region (if any) associated with domain
94  * @staged_config:	parsed configuration to be applied
95  * @mbps_val:		When mba_sc is enabled, this holds the array of user
96  *			specified control values for mba_sc in MBps, indexed
97  *			by closid
98  */
99 struct rdt_ctrl_domain {
100 	struct rdt_domain_hdr		hdr;
101 	struct pseudo_lock_region	*plr;
102 	struct resctrl_staged_config	staged_config[CDP_NUM_TYPES];
103 	u32				*mbps_val;
104 };
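
/*
 * Example (illustrative sketch): roughly how a parsed control value is
 * staged in a control domain before the architecture is asked to apply
 * it. The CDP type and value are made up for illustration:
 *
 *	struct resctrl_staged_config *cfg = &d->staged_config[CDP_CODE];
 *
 *	cfg->new_ctrl = 0x3f;
 *	cfg->have_new_ctrl = true;
 *
 * The staged values are later picked up and written to hardware by
 * resctrl_arch_update_domains().
 */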
105 
106 /**
107  * struct rdt_mon_domain - group of CPUs sharing a resctrl monitor resource
108  * @hdr:		common header for different domain types
109  * @ci:			cache info for this domain
110  * @rmid_busy_llc:	bitmap of which limbo RMIDs are above threshold
111  * @mbm_total:		saved state for MBM total bandwidth
112  * @mbm_local:		saved state for MBM local bandwidth
113  * @mbm_over:		worker to periodically read MBM h/w counters
114  * @cqm_limbo:		worker to periodically read CQM h/w counters
115  * @mbm_work_cpu:	worker CPU for MBM h/w counters
116  * @cqm_work_cpu:	worker CPU for CQM h/w counters
117  */
118 struct rdt_mon_domain {
119 	struct rdt_domain_hdr		hdr;
120 	struct cacheinfo		*ci;
121 	unsigned long			*rmid_busy_llc;
122 	struct mbm_state		*mbm_total;
123 	struct mbm_state		*mbm_local;
124 	struct delayed_work		mbm_over;
125 	struct delayed_work		cqm_limbo;
126 	int				mbm_work_cpu;
127 	int				cqm_work_cpu;
128 };
129 
130 /**
131  * struct resctrl_cache - Cache allocation related data
132  * @cbm_len:		Length of the cache bit mask
133  * @min_cbm_bits:	Minimum number of consecutive bits to be set.
134  *			The value 0 means the architecture can support
135  *			zero CBM.
136  * @shareable_bits:	Bitmask of shareable resource with other
137  *			executing entities
138  * @arch_has_sparse_bitmasks:	True if a bitmask like f00f is valid.
139  * @arch_has_per_cpu_cfg:	True if QOS_CFG register for this cache
140  *				level has CPU scope.
141  */
142 struct resctrl_cache {
143 	unsigned int	cbm_len;
144 	unsigned int	min_cbm_bits;
145 	unsigned int	shareable_bits;
146 	bool		arch_has_sparse_bitmasks;
147 	bool		arch_has_per_cpu_cfg;
148 };
149 
150 /**
151  * enum membw_throttle_mode - System's memory bandwidth throttling mode
152  * @THREAD_THROTTLE_UNDEFINED:	Not relevant to the system
153  * @THREAD_THROTTLE_MAX:	Memory bandwidth is throttled at the core,
154  *				always using the smallest bandwidth percentage
155  *				assigned to its threads, aka "max throttling"
156  * @THREAD_THROTTLE_PER_THREAD:	Memory bandwidth is throttled at the thread
157  */
158 enum membw_throttle_mode {
159 	THREAD_THROTTLE_UNDEFINED = 0,
160 	THREAD_THROTTLE_MAX,
161 	THREAD_THROTTLE_PER_THREAD,
162 };
163 
164 /**
165  * struct resctrl_membw - Memory bandwidth allocation related data
166  * @min_bw:		Minimum memory bandwidth percentage user can request
167  * @max_bw:		Maximum memory bandwidth value, used as the reset value
168  * @bw_gran:		Granularity at which the memory bandwidth is allocated
169  * @delay_linear:	True if memory B/W delay is in linear scale
170  * @arch_needs_linear:	True if we can't configure non-linear resources
171  * @throttle_mode:	Bandwidth throttling mode when threads request
172  *			different memory bandwidths
173  * @mba_sc:		True if MBA software controller (mba_sc) is enabled
174  * @mb_map:		Mapping of memory B/W percentage to memory B/W delay
175  */
176 struct resctrl_membw {
177 	u32				min_bw;
178 	u32				max_bw;
179 	u32				bw_gran;
180 	u32				delay_linear;
181 	bool				arch_needs_linear;
182 	enum membw_throttle_mode	throttle_mode;
183 	bool				mba_sc;
184 	u32				*mb_map;
185 };
186 
187 struct resctrl_schema;
188 
189 enum resctrl_scope {
190 	RESCTRL_L2_CACHE = 2,
191 	RESCTRL_L3_CACHE = 3,
192 	RESCTRL_L3_NODE,
193 };
194 
195 /**
196  * enum resctrl_schema_fmt - The format user-space provides for a schema.
197  * @RESCTRL_SCHEMA_BITMAP:	The schema is a bitmap in hex.
198  * @RESCTRL_SCHEMA_RANGE:	The schema is a decimal number.
199  */
200 enum resctrl_schema_fmt {
201 	RESCTRL_SCHEMA_BITMAP,
202 	RESCTRL_SCHEMA_RANGE,
203 };
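
/*
 * Example: what the two formats look like on a "schemata" file line,
 * one entry per domain. A bitmap schema (e.g. L3 cache allocation)
 * takes a hex mask, a range schema (e.g. memory bandwidth) takes a
 * decimal value:
 *
 *	L3:0=7ff;1=3ff
 *	MB:0=100;1=50
 */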
204 
205 /**
206  * struct rdt_resource - attributes of a resctrl resource
207  * @rid:		The index of the resource
208  * @alloc_capable:	Is allocation available on this machine
209  * @mon_capable:	Is monitor feature available on this machine
210  * @num_rmid:		Number of RMIDs available
211  * @ctrl_scope:		Scope of this resource for control functions
212  * @mon_scope:		Scope of this resource for monitor functions
213  * @cache:		Cache allocation related data
214  * @membw:		If the component has bandwidth controls, their properties.
215  * @ctrl_domains:	RCU list of all control domains for this resource
216  * @mon_domains:	RCU list of all monitor domains for this resource
217  * @name:		Name to use in "schemata" file.
218  * @schema_fmt:		Which format string and parser is used for this schema.
219  * @evt_list:		List of monitoring events
220  * @mbm_cfg_mask:	Bandwidth sources that can be tracked when bandwidth
221  *			monitoring events can be configured.
222  * @cdp_capable:	Is the CDP feature available on this resource
223  */
224 struct rdt_resource {
225 	int			rid;
226 	bool			alloc_capable;
227 	bool			mon_capable;
228 	int			num_rmid;
229 	enum resctrl_scope	ctrl_scope;
230 	enum resctrl_scope	mon_scope;
231 	struct resctrl_cache	cache;
232 	struct resctrl_membw	membw;
233 	struct list_head	ctrl_domains;
234 	struct list_head	mon_domains;
235 	char			*name;
236 	enum resctrl_schema_fmt	schema_fmt;
237 	struct list_head	evt_list;
238 	unsigned int		mbm_cfg_mask;
239 	bool			cdp_capable;
240 };
241 
242 /*
243  * Get the resource that exists at this level. If the level is not supported,
244  * a dummy/not-capable resource can be returned. Levels >= RDT_NUM_RESOURCES
245  * will return NULL.
246  */
247 struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l);
248 
249 /**
250  * struct resctrl_schema - configuration abilities of a resource presented to
251  *			   user-space
252  * @list:	Member of resctrl_schema_all.
253  * @name:	The name to use in the "schemata" file.
254  * @fmt_str:	Format string to show domain value.
255  * @conf_type:	Whether this schema is specific to code/data.
256  * @res:	The resource structure exported by the architecture to describe
257  *		the hardware that is configured by this schema.
258  * @num_closid:	The number of closids that can be used with this schema. When
259  *		features like CDP are enabled, this will be lower than what the
260  *		hardware supports for the resource.
261  */
262 struct resctrl_schema {
263 	struct list_head		list;
264 	char				name[8];
265 	const char			*fmt_str;
266 	enum resctrl_conf_type		conf_type;
267 	struct rdt_resource		*res;
268 	u32				num_closid;
269 };
270 
271 struct resctrl_cpu_defaults {
272 	u32 closid;
273 	u32 rmid;
274 };
275 
276 struct resctrl_mon_config_info {
277 	struct rdt_resource	*r;
278 	struct rdt_mon_domain	*d;
279 	u32			evtid;
280 	u32			mon_config;
281 };
282 
283 /**
284  * resctrl_arch_sync_cpu_closid_rmid() - Refresh this CPU's CLOSID and RMID.
285  *					 Call via IPI.
286  * @info:	If non-NULL, a pointer to a struct resctrl_cpu_defaults
287  *		specifying the new CLOSID and RMID for tasks in the default
288  *		resctrl ctrl and mon group when running on this CPU.  If NULL,
289  *		this CPU is not re-assigned to a different default group.
290  *
291  * Propagates reassignment of CPUs and/or tasks to different resctrl groups
292  * when requested by the resctrl core code.
293  *
294  * This function records the per-cpu defaults specified by @info (if any),
295  * and then reconfigures the CPU's hardware CLOSID and RMID for subsequent
296  * execution based on @current, in the same way as during a task switch.
297  */
298 void resctrl_arch_sync_cpu_closid_rmid(void *info);
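
/*
 * Example (illustrative sketch): pushing a new default group assignment
 * to a set of CPUs. The cpumask and the CLOSID/RMID values here are
 * made up for illustration:
 *
 *	struct resctrl_cpu_defaults defaults = {
 *		.closid = 1,
 *		.rmid = 3,
 *	};
 *
 *	on_each_cpu_mask(cpus, resctrl_arch_sync_cpu_closid_rmid,
 *			 &defaults, 1);
 */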
299 
300 /**
301  * resctrl_get_default_ctrl() - Return the default control value for this
302  *                              resource.
303  * @r:		The resource whose default control type is queried.
304  */
305 static inline u32 resctrl_get_default_ctrl(struct rdt_resource *r)
306 {
307 	switch (r->schema_fmt) {
308 	case RESCTRL_SCHEMA_BITMAP:
309 		return BIT_MASK(r->cache.cbm_len) - 1;
310 	case RESCTRL_SCHEMA_RANGE:
311 		return r->membw.max_bw;
312 	}
313 
314 	return WARN_ON_ONCE(1);
315 }
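
/*
 * Example: for a bitmap-format cache resource with cbm_len = 11 the
 * default control is BIT_MASK(11) - 1 = 0x7ff, i.e. all ways usable.
 * For a range-format bandwidth resource the default is membw.max_bw,
 * e.g. 100 (percent) on a linear-scale system.
 */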
316 
317 /* The number of closids supported by this resource regardless of CDP */
318 u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
319 u32 resctrl_arch_system_num_rmid_idx(void);
320 int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
321 
322 __init bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt);
323 
324 /**
325  * resctrl_arch_mon_event_config_write() - Write the config for an event.
326  * @config_info: struct resctrl_mon_config_info describing the resource, domain
327  *		 and event.
328  *
329  * Reads the resource, domain and eventid from @config_info and writes
330  * config_info->mon_config for that event into hardware.
331  *
332  * Called via IPI to reach a CPU that is a member of the specified domain.
333  */
334 void resctrl_arch_mon_event_config_write(void *config_info);
335 
336 /**
337  * resctrl_arch_mon_event_config_read() - Read the config for an event.
338  * @config_info: struct resctrl_mon_config_info describing the resource, domain
339  *		 and event.
340  *
341  * Reads resource, domain and eventid from @config_info and reads the
342  * hardware config value into config_info->mon_config.
343  *
344  * Called via IPI to reach a CPU that is a member of the specified domain.
345  */
346 void resctrl_arch_mon_event_config_read(void *config_info);
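
/*
 * Example (illustrative sketch): the two helpers above take a single
 * pointer so that they can be used directly as IPI callbacks. The
 * resource/domain variables and the event choice are assumptions:
 *
 *	struct resctrl_mon_config_info mon_info = {
 *		.r = r,
 *		.d = d,
 *		.evtid = QOS_L3_MBM_TOTAL_EVENT_ID,
 *	};
 *
 *	smp_call_function_any(&d->hdr.cpu_mask,
 *			      resctrl_arch_mon_event_config_read,
 *			      &mon_info, 1);
 *
 * On return, mon_info.mon_config holds the value read on a CPU that is
 * a member of domain @d.
 */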
347 
348 /*
349  * Update the ctrl_val and apply this config right now.
350  * Must be called on one of the domain's CPUs.
351  */
352 int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_ctrl_domain *d,
353 			    u32 closid, enum resctrl_conf_type t, u32 cfg_val);
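
/*
 * Example (illustrative sketch): a caller that is not already running
 * on one of the domain's CPUs would typically bounce the update over
 * an IPI. The wrapper and argument struct below are hypothetical:
 *
 *	static void example_apply(void *arg)
 *	{
 *		struct example_args *a = arg;
 *
 *		a->err = resctrl_arch_update_one(a->r, a->d, a->closid,
 *						 CDP_NONE, a->cfg_val);
 *	}
 *
 *	smp_call_function_any(&d->hdr.cpu_mask, example_apply, &args, 1);
 */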
354 
355 u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
356 			    u32 closid, enum resctrl_conf_type type);
357 int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
358 int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
359 void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
360 void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
361 void resctrl_online_cpu(unsigned int cpu);
362 void resctrl_offline_cpu(unsigned int cpu);
363 
364 /**
365  * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid
366  *			      for this resource and domain.
367  * @r:			resource that the counter should be read from.
368  * @d:			domain that the counter should be read from.
369  * @closid:		closid that matches the rmid. Depending on the architecture, the
370  *			counter may match traffic of both @closid and @rmid, or @rmid
371  *			only.
372  * @rmid:		rmid of the counter to read.
373  * @eventid:		eventid to read, e.g. L3 occupancy.
374  * @val:		result of the counter read in bytes.
375  * @arch_mon_ctx:	An architecture specific value from
376  *			resctrl_arch_mon_ctx_alloc(), for MPAM this identifies
377  *			the hardware monitor allocated for this read request.
378  *
379  * Some architectures need to sleep when first programming some of the counters
380  * (specifically, arm64's MPAM cache occupancy counters can return 'not ready'
381  * for a short period of time). Call from a non-migratable process context on
382  * a CPU that belongs to domain @d, e.g. use smp_call_on_cpu() or
383  * schedule_work_on(). This function can be called with interrupts masked,
384  * e.g. using smp_call_function_any(), but may consistently return an error.
385  *
386  * Return:
387  * 0 on success, or -EIO, -EINVAL etc on error.
388  */
389 int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
390 			   u32 closid, u32 rmid, enum resctrl_event_id eventid,
391 			   u64 *val, void *arch_mon_ctx);
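
/*
 * Example (illustrative sketch): reading a counter from a context that
 * is allowed to sleep, as recommended above. The wrapper and argument
 * struct are hypothetical:
 *
 *	static int example_read(void *arg)
 *	{
 *		struct example_rmid_read *rr = arg;
 *
 *		return resctrl_arch_rmid_read(rr->r, rr->d, rr->closid,
 *					      rr->rmid, rr->evtid,
 *					      &rr->val, rr->arch_mon_ctx);
 *	}
 *
 *	err = smp_call_on_cpu(cpumask_any(&d->hdr.cpu_mask),
 *			      example_read, &rr, false);
 */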
392 
393 /**
394  * resctrl_arch_rmid_read_context_check()  - warn about invalid contexts
395  *
396  * When built with CONFIG_DEBUG_ATOMIC_SLEEP generate a warning when
397  * resctrl_arch_rmid_read() is called with preemption disabled.
398  *
399  * The contract with resctrl_arch_rmid_read() is that if interrupts
400  * are unmasked, it can sleep. This allows NOHZ_FULL systems to use an
401  * IPI (and fail if the call needed to sleep), while most of the time
402  * the work is scheduled, allowing the call to sleep.
403  */
404 static inline void resctrl_arch_rmid_read_context_check(void)
405 {
406 	if (!irqs_disabled())
407 		might_sleep();
408 }
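
/*
 * Example: an architecture's resctrl_arch_rmid_read() would typically
 * invoke the check above on entry, so that CONFIG_DEBUG_ATOMIC_SLEEP
 * builds warn if it is reached with preemption disabled but interrupts
 * unmasked:
 *
 *	resctrl_arch_rmid_read_context_check();
 */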
409 
410 /**
411  * resctrl_find_domain() - Search for a domain id in a resource domain list.
412  * @h:		The domain list to search.
413  * @id:		The domain id to search for.
414  * @pos:	A pointer to the position in the list where @id should be inserted.
415  *
416  * Search the domain list for the domain id. If the domain id is found,
417  * return the domain; NULL otherwise. If the domain id is not found
418  * (and NULL is returned), the position of the first domain with an id
419  * bigger than the input id is returned to the caller via @pos.
420  */
421 struct rdt_domain_hdr *resctrl_find_domain(struct list_head *h, int id,
422 					   struct list_head **pos);
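
/*
 * Example (illustrative sketch): the usual add-or-find pattern @pos
 * enables when a domain comes online. The allocation and error handling
 * here are simplified:
 *
 *	struct rdt_ctrl_domain *d;
 *	struct rdt_domain_hdr *hdr;
 *	struct list_head *pos = NULL;
 *
 *	hdr = resctrl_find_domain(&r->ctrl_domains, id, &pos);
 *	if (hdr) {
 *		d = container_of(hdr, struct rdt_ctrl_domain, hdr);
 *	} else {
 *		d = kzalloc(sizeof(*d), GFP_KERNEL);
 *		if (!d)
 *			return -ENOMEM;
 *		d->hdr.id = id;
 *		list_add_tail_rcu(&d->hdr.list, pos);
 *	}
 */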
423 
424 /**
425  * resctrl_arch_reset_rmid() - Reset any private state associated with rmid
426  *			       and eventid.
427  * @r:		The domain's resource.
428  * @d:		The rmid's domain.
429  * @closid:	closid that matches the rmid. Depending on the architecture, the
430  *		counter may match traffic of both @closid and @rmid, or @rmid only.
431  * @rmid:	The rmid whose counter values should be reset.
432  * @eventid:	The eventid whose counter values should be reset.
433  *
434  * This can be called from any CPU.
435  */
436 void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
437 			     u32 closid, u32 rmid,
438 			     enum resctrl_event_id eventid);
439 
440 /**
441  * resctrl_arch_reset_rmid_all() - Reset all private state associated with
442  *				   all rmids and eventids.
443  * @r:		The resctrl resource.
444  * @d:		The domain for which all architectural counter state will
445  *		be cleared.
446  *
447  * This can be called from any CPU.
448  */
449 void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_mon_domain *d);
450 
451 /**
452  * resctrl_arch_reset_all_ctrls() - Reset the control for each CLOSID to its
453  *				    default.
454  * @r:		The resctrl resource to reset.
455  *
456  * This can be called from any CPU.
457  */
458 void resctrl_arch_reset_all_ctrls(struct rdt_resource *r);
459 
460 extern unsigned int resctrl_rmid_realloc_threshold;
461 extern unsigned int resctrl_rmid_realloc_limit;
462 
463 int __init resctrl_init(void);
464 void __exit resctrl_exit(void);
465 
466 #endif /* _RESCTRL_H */
467