/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RESCTRL_H
#define _RESCTRL_H

#include <linux/cacheinfo.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pid.h>

/* CLOSID, RMID value used by the default control group */
#define RESCTRL_RESERVED_CLOSID		0
#define RESCTRL_RESERVED_RMID		0

#define RESCTRL_PICK_ANY_CPU		-1

#ifdef CONFIG_PROC_CPU_RESCTRL

int proc_resctrl_show(struct seq_file *m,
		      struct pid_namespace *ns,
		      struct pid *pid,
		      struct task_struct *tsk);

#endif

/* max value for struct rdt_ctrl_domain's mbps_val */
#define MBA_MAX_MBPS		U32_MAX

/**
 * enum resctrl_conf_type - The type of configuration.
 * @CDP_NONE:	No prioritisation, both code and data are controlled or monitored.
 * @CDP_CODE:	Configuration applies to instruction fetches.
 * @CDP_DATA:	Configuration applies to reads and writes.
 */
enum resctrl_conf_type {
	CDP_NONE,
	CDP_CODE,
	CDP_DATA,
};

enum resctrl_res_level {
	RDT_RESOURCE_L3,
	RDT_RESOURCE_L2,
	RDT_RESOURCE_MBA,
	RDT_RESOURCE_SMBA,

	/* Must be the last */
	RDT_NUM_RESOURCES,
};

#define CDP_NUM_TYPES		(CDP_DATA + 1)

/*
 * Event IDs, the values match those used to program IA32_QM_EVTSEL before
 * reading IA32_QM_CTR on RDT systems.
 */
enum resctrl_event_id {
	QOS_L3_OCCUP_EVENT_ID		= 0x01,
	QOS_L3_MBM_TOTAL_EVENT_ID	= 0x02,
	QOS_L3_MBM_LOCAL_EVENT_ID	= 0x03,
};

/**
 * struct resctrl_staged_config - parsed configuration to be applied
 * @new_ctrl:		new ctrl value to be loaded
 * @have_new_ctrl:	whether the user provided new_ctrl is valid
 */
struct resctrl_staged_config {
	u32			new_ctrl;
	bool			have_new_ctrl;
};

enum resctrl_domain_type {
	RESCTRL_CTRL_DOMAIN,
	RESCTRL_MON_DOMAIN,
};

/**
 * struct rdt_domain_hdr - common header for different domain types
 * @list:		all instances of this resource
 * @id:			unique id for this instance
 * @type:		type of this instance
 * @cpu_mask:		which CPUs share this resource
 */
struct rdt_domain_hdr {
	struct list_head		list;
	int				id;
	enum resctrl_domain_type	type;
	struct cpumask			cpu_mask;
};

/**
 * struct rdt_ctrl_domain - group of CPUs sharing a resctrl control resource
 * @hdr:		common header for different domain types
 * @plr:		pseudo-locked region (if any) associated with domain
 * @staged_config:	parsed configuration to be applied
 * @mbps_val:		When mba_sc is enabled, this holds the array of user
 *			specified control values for mba_sc in MBps, indexed
 *			by closid
 */
struct rdt_ctrl_domain {
	struct rdt_domain_hdr		hdr;
	struct pseudo_lock_region	*plr;
	struct resctrl_staged_config	staged_config[CDP_NUM_TYPES];
	u32				*mbps_val;
};

/**
 * struct rdt_mon_domain - group of CPUs sharing a resctrl monitor resource
 * @hdr:		common header for different domain types
 * @ci:			cache info for this domain
 * @rmid_busy_llc:	bitmap of which limbo RMIDs are above threshold
 * @mbm_total:		saved state for MBM total bandwidth
 * @mbm_local:		saved state for MBM local bandwidth
 * @mbm_over:		worker to periodically read MBM h/w counters
 * @cqm_limbo:		worker to periodically read CQM h/w counters
 * @mbm_work_cpu:	worker CPU for MBM h/w counters
 * @cqm_work_cpu:	worker CPU for CQM h/w counters
 */
struct rdt_mon_domain {
	struct rdt_domain_hdr		hdr;
	struct cacheinfo		*ci;
	unsigned long			*rmid_busy_llc;
	struct mbm_state		*mbm_total;
	struct mbm_state		*mbm_local;
	struct delayed_work		mbm_over;
	struct delayed_work		cqm_limbo;
	int				mbm_work_cpu;
	int				cqm_work_cpu;
};
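
/*
 * Illustrative sketch, not part of the resctrl interface: both domain types
 * embed struct rdt_domain_hdr, so a caller that holds the appropriate
 * protection for one of the per-resource domain lists (declared in
 * struct rdt_resource below) could locate the domain covering a given CPU
 * roughly like this. The helper name is hypothetical.
 */
static inline struct rdt_domain_hdr *
resctrl_example_find_domain_hdr(struct list_head *domain_list, unsigned int cpu)
{
	struct rdt_domain_hdr *hdr;

	list_for_each_entry(hdr, domain_list, list) {
		/* Each online CPU belongs to exactly one domain of a resource. */
		if (cpumask_test_cpu(cpu, &hdr->cpu_mask))
			return hdr;
	}

	return NULL;
}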

/**
 * struct resctrl_cache - Cache allocation related data
 * @cbm_len:		Length of the cache bit mask
 * @min_cbm_bits:	Minimum number of consecutive bits to be set.
 *			The value 0 means the architecture can support
 *			zero CBM.
 * @shareable_bits:	Bitmask of shareable resource with other
 *			executing entities
 * @arch_has_sparse_bitmasks:	True if a bitmask like f00f is valid.
 * @arch_has_per_cpu_cfg:	True if QOS_CFG register for this cache
 *				level has CPU scope.
 */
struct resctrl_cache {
	unsigned int	cbm_len;
	unsigned int	min_cbm_bits;
	unsigned int	shareable_bits;
	bool		arch_has_sparse_bitmasks;
	bool		arch_has_per_cpu_cfg;
};

/**
 * enum membw_throttle_mode - System's memory bandwidth throttling mode
 * @THREAD_THROTTLE_UNDEFINED:	Not relevant to the system
 * @THREAD_THROTTLE_MAX:	Memory bandwidth is throttled at the core
 *				always using smallest bandwidth percentage
 *				assigned to threads, aka "max throttling"
 * @THREAD_THROTTLE_PER_THREAD:	Memory bandwidth is throttled at the thread
 */
enum membw_throttle_mode {
	THREAD_THROTTLE_UNDEFINED = 0,
	THREAD_THROTTLE_MAX,
	THREAD_THROTTLE_PER_THREAD,
};

/**
 * struct resctrl_membw - Memory bandwidth allocation related data
 * @min_bw:		Minimum memory bandwidth percentage user can request
 * @bw_gran:		Granularity at which the memory bandwidth is allocated
 * @delay_linear:	True if memory B/W delay is in linear scale
 * @arch_needs_linear:	True if we can't configure non-linear resources
 * @throttle_mode:	Bandwidth throttling mode when threads request
 *			different memory bandwidths
 * @mba_sc:		True if MBA software controller (mba_sc) is enabled
 * @mb_map:		Mapping of memory B/W percentage to memory B/W delay
 */
struct resctrl_membw {
	u32			min_bw;
	u32			bw_gran;
	u32			delay_linear;
	bool			arch_needs_linear;
	enum membw_throttle_mode throttle_mode;
	bool			mba_sc;
	u32			*mb_map;
};
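
/*
 * Illustrative sketch, not part of the resctrl interface: schemata writes for
 * a bandwidth resource are validated against these fields; roughly, a
 * percentage below @min_bw is rejected and an accepted value is rounded up to
 * a multiple of @bw_gran. The helper below is hypothetical and only shows how
 * the fields relate.
 */
static inline bool resctrl_example_bw_valid(const struct resctrl_membw *membw,
					    u32 requested_bw, u32 *rounded_bw)
{
	if (requested_bw < membw->min_bw)
		return false;

	/* An accepted value is rounded up to the allocation granularity. */
	*rounded_bw = roundup(requested_bw, membw->bw_gran);
	return true;
}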

struct rdt_parse_data;
struct resctrl_schema;

enum resctrl_scope {
	RESCTRL_L2_CACHE = 2,
	RESCTRL_L3_CACHE = 3,
	RESCTRL_L3_NODE,
};

/**
 * struct rdt_resource - attributes of a resctrl resource
 * @rid:		The index of the resource
 * @alloc_capable:	Is allocation available on this machine
 * @mon_capable:	Is monitor feature available on this machine
 * @num_rmid:		Number of RMIDs available
 * @ctrl_scope:		Scope of this resource for control functions
 * @mon_scope:		Scope of this resource for monitor functions
 * @cache:		Cache allocation related data
 * @membw:		If the component has bandwidth controls, their properties.
 * @ctrl_domains:	RCU list of all control domains for this resource
 * @mon_domains:	RCU list of all monitor domains for this resource
 * @name:		Name to use in "schemata" file.
 * @data_width:		Character width of data when displaying
 * @default_ctrl:	Specifies default cache cbm or memory B/W percent.
 * @format_str:		Per resource format string to show domain value
 * @parse_ctrlval:	Per resource function pointer to parse control values
 * @evt_list:		List of monitoring events
 * @cdp_capable:	Is the CDP feature available on this resource
 */
struct rdt_resource {
	int			rid;
	bool			alloc_capable;
	bool			mon_capable;
	int			num_rmid;
	enum resctrl_scope	ctrl_scope;
	enum resctrl_scope	mon_scope;
	struct resctrl_cache	cache;
	struct resctrl_membw	membw;
	struct list_head	ctrl_domains;
	struct list_head	mon_domains;
	char			*name;
	int			data_width;
	u32			default_ctrl;
	const char		*format_str;
	int			(*parse_ctrlval)(struct rdt_parse_data *data,
						 struct resctrl_schema *s,
						 struct rdt_ctrl_domain *d);
	struct list_head	evt_list;
	bool			cdp_capable;
};

/*
 * Get the resource that exists at this level. If the level is not supported,
 * a dummy/not-capable resource can be returned. Levels >= RDT_NUM_RESOURCES
 * will return NULL.
 */
struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l);

/**
 * struct resctrl_schema - configuration abilities of a resource presented to
 *			   user-space
 * @list:	Member of resctrl_schema_all.
 * @name:	The name to use in the "schemata" file.
 * @conf_type:	Whether this schema is specific to code/data.
 * @res:	The resource structure exported by the architecture to describe
 *		the hardware that is configured by this schema.
 * @num_closid:	The number of closid that can be used with this schema. When
 *		features like CDP are enabled, this will be lower than the
 *		hardware supports for the resource.
 */
struct resctrl_schema {
	struct list_head		list;
	char				name[8];
	enum resctrl_conf_type		conf_type;
	struct rdt_resource		*res;
	u32				num_closid;
};

/* The number of closid supported by this resource regardless of CDP */
u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
u32 resctrl_arch_system_num_rmid_idx(void);
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);

/*
 * Update the ctrl_val and apply this config right now.
 * Must be called on one of the domain's CPUs.
 */
int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_ctrl_domain *d,
			    u32 closid, enum resctrl_conf_type t, u32 cfg_val);

u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
			    u32 closid, enum resctrl_conf_type type);
int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
void resctrl_online_cpu(unsigned int cpu);
void resctrl_offline_cpu(unsigned int cpu);

/**
 * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid
 *			      for this resource and domain.
 * @r:			resource that the counter should be read from.
 * @d:			domain that the counter should be read from.
 * @closid:		closid that matches the rmid. Depending on the
 *			architecture, the counter may match traffic of both
 *			@closid and @rmid, or @rmid only.
 * @rmid:		rmid of the counter to read.
 * @eventid:		eventid to read, e.g. L3 occupancy.
 * @val:		result of the counter read in bytes.
 * @arch_mon_ctx:	An architecture specific value from
 *			resctrl_arch_mon_ctx_alloc(), for MPAM this identifies
 *			the hardware monitor allocated for this read request.
 *
 * Some architectures need to sleep when first programming some of the counters
 * (specifically, arm64's MPAM cache occupancy counters can return 'not ready'
 * for a short period of time). Call from a non-migratable process context on
 * a CPU that belongs to domain @d, e.g. using smp_call_on_cpu() or
 * schedule_work_on(). This function can be called with interrupts masked,
 * e.g. using smp_call_function_any(), but may consistently return an error.
 *
 * Return:
 * 0 on success, or -EIO, -EINVAL etc on error.
 */
int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
			   u32 closid, u32 rmid, enum resctrl_event_id eventid,
			   u64 *val, void *arch_mon_ctx);
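
/*
 * Illustrative sketch, not part of the resctrl interface: because of the
 * calling context rules above, a reader that is not already running on a CPU
 * in @d can bounce the read with smp_call_on_cpu() (declared in <linux/smp.h>,
 * assumed to be pulled in via <linux/cacheinfo.h>), which runs the callback
 * from a context that may sleep. The wrapper and its argument structure are
 * hypothetical; @arch_mon_ctx is assumed to have been obtained by the caller
 * as described above.
 */
struct resctrl_example_read_args {
	struct rdt_resource	*r;
	struct rdt_mon_domain	*d;
	u32			closid, rmid;
	enum resctrl_event_id	eventid;
	void			*arch_mon_ctx;
	u64			val;
};

static inline int resctrl_example_do_read(void *arg)
{
	struct resctrl_example_read_args *a = arg;

	return resctrl_arch_rmid_read(a->r, a->d, a->closid, a->rmid,
				      a->eventid, &a->val, a->arch_mon_ctx);
}

static inline int resctrl_example_read_on_domain(struct resctrl_example_read_args *a)
{
	/* Pick any CPU inside the domain; the callback is allowed to sleep. */
	unsigned int cpu = cpumask_any(&a->d->hdr.cpu_mask);

	return smp_call_on_cpu(cpu, resctrl_example_do_read, a, false);
}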

/**
 * resctrl_arch_rmid_read_context_check() - warn about invalid contexts
 *
 * When built with CONFIG_DEBUG_ATOMIC_SLEEP generate a warning when
 * resctrl_arch_rmid_read() is called with preemption disabled.
 *
 * The contract with resctrl_arch_rmid_read() is that if interrupts
 * are unmasked, it can sleep. This allows NOHZ_FULL systems to use an
 * IPI (and fail if the call needed to sleep), while most of the time
 * the work is scheduled, allowing the call to sleep.
 */
static inline void resctrl_arch_rmid_read_context_check(void)
{
	if (!irqs_disabled())
		might_sleep();
}

/**
 * resctrl_arch_reset_rmid() - Reset any private state associated with rmid
 *			       and eventid.
 * @r:		The domain's resource.
 * @d:		The rmid's domain.
 * @closid:	closid that matches the rmid. Depending on the architecture, the
 *		counter may match traffic of both @closid and @rmid, or @rmid only.
 * @rmid:	The rmid whose counter values should be reset.
 * @eventid:	The eventid whose counter values should be reset.
 *
 * This can be called from any CPU.
 */
void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
			     u32 closid, u32 rmid,
			     enum resctrl_event_id eventid);

/**
 * resctrl_arch_reset_rmid_all() - Reset all private state associated with
 *				   all rmids and eventids.
 * @r:		The resctrl resource.
 * @d:		The domain for which all architectural counter state will
 *		be cleared.
 *
 * This can be called from any CPU.
 */
void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_mon_domain *d);

extern unsigned int resctrl_rmid_realloc_threshold;
extern unsigned int resctrl_rmid_realloc_limit;

#endif /* _RESCTRL_H */