xref: /linux-6.15/include/linux/resctrl.h (revision bb9343c8)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RESCTRL_H
#define _RESCTRL_H

#include <linux/cacheinfo.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pid.h>

/* CLOSID, RMID value used by the default control group */
#define RESCTRL_RESERVED_CLOSID		0
#define RESCTRL_RESERVED_RMID		0

#define RESCTRL_PICK_ANY_CPU		-1

#ifdef CONFIG_PROC_CPU_RESCTRL

int proc_resctrl_show(struct seq_file *m,
		      struct pid_namespace *ns,
		      struct pid *pid,
		      struct task_struct *tsk);

#endif

/* max value for struct rdt_ctrl_domain's mbps_val */
#define MBA_MAX_MBPS   U32_MAX

/**
 * enum resctrl_conf_type - The type of configuration.
 * @CDP_NONE:	No prioritisation, both code and data are controlled or monitored.
 * @CDP_CODE:	Configuration applies to instruction fetches.
 * @CDP_DATA:	Configuration applies to reads and writes.
 */
enum resctrl_conf_type {
	CDP_NONE,
	CDP_CODE,
	CDP_DATA,
};

enum resctrl_res_level {
	RDT_RESOURCE_L3,
	RDT_RESOURCE_L2,
	RDT_RESOURCE_MBA,
	RDT_RESOURCE_SMBA,

	/* Must be the last */
	RDT_NUM_RESOURCES,
};

#define CDP_NUM_TYPES	(CDP_DATA + 1)

/*
 * Event IDs. The values match those used to program IA32_QM_EVTSEL before
 * reading IA32_QM_CTR on RDT systems.
 */
enum resctrl_event_id {
	QOS_L3_OCCUP_EVENT_ID		= 0x01,
	QOS_L3_MBM_TOTAL_EVENT_ID	= 0x02,
	QOS_L3_MBM_LOCAL_EVENT_ID	= 0x03,
};
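
/*
 * Illustrative sketch, not part of this header: on x86 the event IDs above
 * are written to IA32_QM_EVTSEL together with the RMID, and the count is
 * then read back from IA32_QM_CTR, roughly:
 *
 *	wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
 *	rdmsrl(MSR_IA32_QM_CTR, msr_val);
 *
 * Handling of the error/unavailable bits in IA32_QM_CTR is left to the
 * architecture code.
 */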

/**
 * struct resctrl_staged_config - parsed configuration to be applied
 * @new_ctrl:		new ctrl value to be loaded
 * @have_new_ctrl:	whether the user provided new_ctrl is valid
 */
struct resctrl_staged_config {
	u32			new_ctrl;
	bool			have_new_ctrl;
};

enum resctrl_domain_type {
	RESCTRL_CTRL_DOMAIN,
	RESCTRL_MON_DOMAIN,
};

/**
 * struct rdt_domain_hdr - common header for different domain types
 * @list:		all instances of this resource
 * @id:			unique id for this instance
 * @type:		type of this instance
 * @cpu_mask:		which CPUs share this resource
 */
struct rdt_domain_hdr {
	struct list_head		list;
	int				id;
	enum resctrl_domain_type	type;
	struct cpumask			cpu_mask;
};

/**
 * struct rdt_ctrl_domain - group of CPUs sharing a resctrl control resource
 * @hdr:		common header for different domain types
 * @plr:		pseudo-locked region (if any) associated with domain
 * @staged_config:	parsed configuration to be applied
 * @mbps_val:		When mba_sc is enabled, this holds the array of user
 *			specified control values for mba_sc in MBps, indexed
 *			by closid
 */
struct rdt_ctrl_domain {
	struct rdt_domain_hdr		hdr;
	struct pseudo_lock_region	*plr;
	struct resctrl_staged_config	staged_config[CDP_NUM_TYPES];
	u32				*mbps_val;
};
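
/*
 * Illustrative use of @mbps_val (hypothetical snippet, for clarity only):
 * the array is indexed by closid and is only meaningful while the MBA
 * software controller is enabled, i.e. while r->membw.mba_sc is true:
 *
 *	u32 user_mbps = d->mbps_val[closid];
 */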

/**
 * struct rdt_mon_domain - group of CPUs sharing a resctrl monitor resource
 * @hdr:		common header for different domain types
 * @ci:			cache info for this domain
 * @rmid_busy_llc:	bitmap of which limbo RMIDs are above threshold
 * @mbm_total:		saved state for MBM total bandwidth
 * @mbm_local:		saved state for MBM local bandwidth
 * @mbm_over:		worker to periodically read MBM h/w counters
 * @cqm_limbo:		worker to periodically read CQM h/w counters
 * @mbm_work_cpu:	worker CPU for MBM h/w counters
 * @cqm_work_cpu:	worker CPU for CQM h/w counters
 */
struct rdt_mon_domain {
	struct rdt_domain_hdr		hdr;
	struct cacheinfo		*ci;
	unsigned long			*rmid_busy_llc;
	struct mbm_state		*mbm_total;
	struct mbm_state		*mbm_local;
	struct delayed_work		mbm_over;
	struct delayed_work		cqm_limbo;
	int				mbm_work_cpu;
	int				cqm_work_cpu;
};

/**
 * struct resctrl_cache - Cache allocation related data
 * @cbm_len:		Length of the cache bit mask
 * @min_cbm_bits:	Minimum number of consecutive bits to be set.
 *			The value 0 means the architecture can support
 *			a zero CBM.
 * @shareable_bits:	Bitmask of the portions of the resource that are
 *			shareable with other executing entities
 * @arch_has_sparse_bitmasks:	True if a bitmask like f00f is valid.
 * @arch_has_per_cpu_cfg:	True if QOS_CFG register for this cache
 *				level has CPU scope.
 */
struct resctrl_cache {
	unsigned int	cbm_len;
	unsigned int	min_cbm_bits;
	unsigned int	shareable_bits;
	bool		arch_has_sparse_bitmasks;
	bool		arch_has_per_cpu_cfg;
};
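
/*
 * Minimal sketch (hypothetical helper, not part of the resctrl API): check
 * that a candidate capacity bitmask fits within @cbm_len. Real validation
 * additionally enforces @min_cbm_bits contiguous set bits and, unless
 * @arch_has_sparse_bitmasks is true, rejects bitmasks with holes.
 */
static inline bool example_cbm_in_range(const struct resctrl_cache *c, u32 cbm)
{
	u32 max_cbm = (c->cbm_len < 32) ? ((1U << c->cbm_len) - 1) : ~0U;

	/* No bits may be set beyond the width the hardware supports */
	return (cbm & ~max_cbm) == 0;
}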

/**
 * enum membw_throttle_mode - System's memory bandwidth throttling mode
 * @THREAD_THROTTLE_UNDEFINED:	Not relevant to the system
 * @THREAD_THROTTLE_MAX:	Memory bandwidth is throttled at the core,
 *				always using the smallest bandwidth percentage
 *				assigned to any of its threads, aka "max throttling"
 * @THREAD_THROTTLE_PER_THREAD:	Memory bandwidth is throttled at the thread
 */
enum membw_throttle_mode {
	THREAD_THROTTLE_UNDEFINED = 0,
	THREAD_THROTTLE_MAX,
	THREAD_THROTTLE_PER_THREAD,
};

/**
 * struct resctrl_membw - Memory bandwidth allocation related data
 * @min_bw:		Minimum memory bandwidth percentage user can request
 * @bw_gran:		Granularity at which the memory bandwidth is allocated
 * @delay_linear:	True if memory B/W delay is in linear scale
 * @arch_needs_linear:	True if we can't configure non-linear resources
 * @throttle_mode:	Bandwidth throttling mode when threads request
 *			different memory bandwidths
 * @mba_sc:		True if MBA software controller (mba_sc) is enabled
 * @mb_map:		Mapping of memory B/W percentage to memory B/W delay
 */
struct resctrl_membw {
	u32				min_bw;
	u32				bw_gran;
	u32				delay_linear;
	bool				arch_needs_linear;
	enum membw_throttle_mode	throttle_mode;
	bool				mba_sc;
	u32				*mb_map;
};
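
/*
 * Minimal sketch (hypothetical helper, not part of the resctrl API): how a
 * caller might clamp a requested bandwidth percentage to @min_bw and round
 * it to the @bw_gran granularity before programming it. Assumes roundup()
 * from <linux/math.h> (pulled in via <linux/kernel.h>).
 */
static inline u32 example_round_bw(const struct resctrl_membw *m, u32 bw)
{
	if (bw < m->min_bw)
		bw = m->min_bw;

	return roundup(bw, m->bw_gran);
}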

struct resctrl_schema;

enum resctrl_scope {
	RESCTRL_L2_CACHE = 2,
	RESCTRL_L3_CACHE = 3,
	RESCTRL_L3_NODE,
};

/**
 * enum resctrl_schema_fmt - The format user-space provides for a schema.
 * @RESCTRL_SCHEMA_BITMAP:	The schema is a bitmap in hex.
 * @RESCTRL_SCHEMA_RANGE:	The schema is a decimal number.
 */
enum resctrl_schema_fmt {
	RESCTRL_SCHEMA_BITMAP,
	RESCTRL_SCHEMA_RANGE,
};

/**
 * struct rdt_resource - attributes of a resctrl resource
 * @rid:		The index of the resource
 * @alloc_capable:	Is allocation available on this machine
 * @mon_capable:	Is monitor feature available on this machine
 * @num_rmid:		Number of RMIDs available
 * @ctrl_scope:		Scope of this resource for control functions
 * @mon_scope:		Scope of this resource for monitor functions
 * @cache:		Cache allocation related data
 * @membw:		If the component has bandwidth controls, their properties.
 * @ctrl_domains:	RCU list of all control domains for this resource
 * @mon_domains:	RCU list of all monitor domains for this resource
 * @name:		Name to use in "schemata" file.
 * @data_width:		Character width of data when displaying
 * @default_ctrl:	Specifies default cache cbm or memory B/W percent.
 * @schema_fmt:		Which format string and parser is used for this schema.
 * @evt_list:		List of monitoring events
 * @cdp_capable:	Is the CDP feature available on this resource
 */
struct rdt_resource {
	int			rid;
	bool			alloc_capable;
	bool			mon_capable;
	int			num_rmid;
	enum resctrl_scope	ctrl_scope;
	enum resctrl_scope	mon_scope;
	struct resctrl_cache	cache;
	struct resctrl_membw	membw;
	struct list_head	ctrl_domains;
	struct list_head	mon_domains;
	char			*name;
	int			data_width;
	u32			default_ctrl;
	enum resctrl_schema_fmt	schema_fmt;
	struct list_head	evt_list;
	bool			cdp_capable;
};

/*
 * Get the resource that exists at this level. If the level is not supported,
 * a dummy/not-capable resource can be returned. Levels >= RDT_NUM_RESOURCES
 * will return NULL.
 */
struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l);
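
/*
 * Example use (illustrative only): look up the L3 resource and check whether
 * allocation is supported before walking its domain lists:
 *
 *	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
 *
 *	if (r->alloc_capable)
 *		... iterate r->ctrl_domains under the appropriate locking ...
 */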

/**
 * struct resctrl_schema - configuration abilities of a resource presented to
 *			   user-space
 * @list:	Member of resctrl_schema_all.
 * @name:	The name to use in the "schemata" file.
 * @fmt_str:	Format string to show domain value.
 * @conf_type:	Whether this schema is specific to code/data.
 * @res:	The resource structure exported by the architecture to describe
 *		the hardware that is configured by this schema.
 * @num_closid:	The number of closids that can be used with this schema. When
 *		features like CDP are enabled, this will be lower than what the
 *		hardware supports for the resource.
 */
struct resctrl_schema {
	struct list_head		list;
	char				name[8];
	const char			*fmt_str;
	enum resctrl_conf_type		conf_type;
	struct rdt_resource		*res;
	u32				num_closid;
};

/* The number of closids supported by this resource regardless of CDP */
u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
u32 resctrl_arch_system_num_rmid_idx(void);
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);

/*
 * Update the ctrl_val and apply this config right now.
 * Must be called on one of the domain's CPUs.
 */
int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_ctrl_domain *d,
			    u32 closid, enum resctrl_conf_type t, u32 cfg_val);

u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
			    u32 closid, enum resctrl_conf_type type);
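
/*
 * Minimal sketch (hypothetical helper, not part of the resctrl API): apply a
 * single control value to one domain and read it back. As noted above,
 * resctrl_arch_update_one() must run on one of the CPUs in @d->hdr.cpu_mask.
 */
static inline bool example_set_and_verify(struct rdt_resource *r,
					  struct rdt_ctrl_domain *d,
					  u32 closid, u32 val)
{
	/* CDP_NONE: the combined code+data configuration */
	if (resctrl_arch_update_one(r, d, closid, CDP_NONE, val))
		return false;

	return resctrl_arch_get_config(r, d, closid, CDP_NONE) == val;
}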

int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
void resctrl_online_cpu(unsigned int cpu);
void resctrl_offline_cpu(unsigned int cpu);

/**
 * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid
 *			      for this resource and domain.
 * @r:			resource that the counter should be read from.
 * @d:			domain that the counter should be read from.
 * @closid:		closid that matches the rmid. Depending on the architecture, the
 *			counter may match traffic of both @closid and @rmid, or @rmid
 *			only.
 * @rmid:		rmid of the counter to read.
 * @eventid:		eventid to read, e.g. L3 occupancy.
 * @val:		result of the counter read in bytes.
 * @arch_mon_ctx:	An architecture specific value from
 *			resctrl_arch_mon_ctx_alloc(), for MPAM this identifies
 *			the hardware monitor allocated for this read request.
 *
 * Some architectures need to sleep when first programming some of the counters
 * (specifically, arm64's MPAM cache occupancy counters can return 'not ready'
 * for a short period of time). Call from a non-migrateable process context on
 * a CPU that belongs to domain @d, e.g. use smp_call_on_cpu() or
 * schedule_work_on(). This function can be called with interrupts masked,
 * e.g. using smp_call_function_any(), but may consistently return an error.
 *
 * Return:
 * 0 on success, or -EIO, -EINVAL etc on error.
 */
int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
			   u32 closid, u32 rmid, enum resctrl_event_id eventid,
			   u64 *val, void *arch_mon_ctx);
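
/*
 * Illustrative calling pattern (sketch only; the wrapper and argument struct
 * below are hypothetical, not part of this header). To satisfy the context
 * rules above, a read is typically bounced to a CPU in the domain:
 *
 *	static int read_one_rmid(void *info)
 *	{
 *		struct rmid_read_args *a = info;	// hypothetical struct
 *
 *		return resctrl_arch_rmid_read(a->r, a->d, a->closid, a->rmid,
 *					      a->eventid, &a->val,
 *					      a->arch_mon_ctx);
 *	}
 *
 *	// Runs read_one_rmid() in process context on @cpu, so it may sleep:
 *	err = smp_call_on_cpu(cpu, read_one_rmid, &args, false);
 */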

/**
 * resctrl_arch_rmid_read_context_check() - warn about invalid contexts
 *
 * When built with CONFIG_DEBUG_ATOMIC_SLEEP, generate a warning when
 * resctrl_arch_rmid_read() is called with preemption disabled.
 *
 * The contract with resctrl_arch_rmid_read() is that if interrupts
 * are unmasked, it can sleep. This allows NOHZ_FULL systems to use an
 * IPI (and fail if the call needed to sleep), while most of the time
 * the work is scheduled, allowing the call to sleep.
 */
static inline void resctrl_arch_rmid_read_context_check(void)
{
	if (!irqs_disabled())
		might_sleep();
}

/**
 * resctrl_arch_reset_rmid() - Reset any private state associated with rmid
 *			       and eventid.
 * @r:		The domain's resource.
 * @d:		The rmid's domain.
 * @closid:	closid that matches the rmid. Depending on the architecture, the
 *		counter may match traffic of both @closid and @rmid, or @rmid only.
 * @rmid:	The rmid whose counter values should be reset.
 * @eventid:	The eventid whose counter values should be reset.
 *
 * This can be called from any CPU.
 */
void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
			     u32 closid, u32 rmid,
			     enum resctrl_event_id eventid);

/**
 * resctrl_arch_reset_rmid_all() - Reset all private state associated with
 *				   all rmids and eventids.
 * @r:		The resctrl resource.
 * @d:		The domain for which all architectural counter state will
 *		be cleared.
 *
 * This can be called from any CPU.
 */
void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_mon_domain *d);

extern unsigned int resctrl_rmid_realloc_threshold;
extern unsigned int resctrl_rmid_realloc_limit;

#endif /* _RESCTRL_H */