// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <[email protected]>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)
#define per_cpu_cacheinfo_idx(cpu, idx)		\
				(per_cpu_cacheinfo(cpu) + (idx))

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non DT/ACPI systems, assume unique level 1 caches,
	 * system-wide shared caches for all other levels.
	 */
	if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)))
		return (this_leaf->level != 1) && (sib_leaf->level != 1);

	if ((sib_leaf->attributes & CACHE_ID) &&
	    (this_leaf->attributes & CACHE_ID))
		return sib_leaf->id == this_leaf->id;

	return sib_leaf->fw_token == this_leaf->fw_token;
}

bool last_level_cache_is_valid(unsigned int cpu)
{
	struct cacheinfo *llc;

	if (!cache_leaves(cpu))
		return false;

	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

	return (llc->attributes & CACHE_ID) || !!llc->fw_token;
}

bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
{
	struct cacheinfo *llc_x, *llc_y;

	if (!last_level_cache_is_valid(cpu_x) ||
	    !last_level_cache_is_valid(cpu_y))
		return false;

	llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
	llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);

	return cache_leaves_are_shared(llc_x, llc_y);
}

#ifdef CONFIG_OF

static bool of_check_cache_nodes(struct device_node *np);

/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}

static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level must setup the cache level correctly
	 * overriding the architecturally specified levels, so
	 * if type is NONE at this stage, it should be unified
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}

static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np, *prev;
	struct cacheinfo *this_leaf;
	unsigned int index = 0;

	np = of_cpu_device_node_get(cpu);
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	if (!of_check_cache_nodes(np)) {
		of_node_put(np);
		return -ENOENT;
	}

	prev = np;

	while (index < cache_leaves(cpu)) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		if (this_leaf->level != 1) {
			np = of_find_next_cache_node(np);
			of_node_put(prev);
			prev = np;
			if (!np)
				break;
		}
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	of_node_put(np);

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}

static bool of_check_cache_nodes(struct device_node *np)
{
	struct device_node *next;

	if (of_property_present(np, "cache-size")   ||
	    of_property_present(np, "i-cache-size") ||
	    of_property_present(np, "d-cache-size") ||
	    of_property_present(np, "cache-unified"))
		return true;

	next = of_find_next_cache_node(np);
	if (next) {
		of_node_put(next);
		return true;
	}

	return false;
}

static int of_count_cache_leaves(struct device_node *np)
{
	unsigned int leaves = 0;

	if (of_property_read_bool(np, "cache-size"))
		++leaves;
	if (of_property_read_bool(np, "i-cache-size"))
		++leaves;
	if (of_property_read_bool(np, "d-cache-size"))
		++leaves;

	if (!leaves) {
		/* The '[i-|d-|]cache-size' property is required, but
		 * if absent, fallback on the 'cache-unified' property.
		 */
		if (of_property_read_bool(np, "cache-unified"))
			return 1;
		else
			return 2;
	}

	return leaves;
}

int init_of_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct device_node *np = of_cpu_device_node_get(cpu);
	struct device_node *prev = NULL;
	unsigned int levels = 0, leaves, level;

	if (!of_check_cache_nodes(np)) {
		of_node_put(np);
		return -ENOENT;
	}

	leaves = of_count_cache_leaves(np);
	if (leaves > 0)
		levels = 1;

	prev = np;
	while ((np = of_find_next_cache_node(np))) {
		of_node_put(prev);
		prev = np;
		if (!of_device_is_compatible(np, "cache"))
			goto err_out;
		if (of_property_read_u32(np, "cache-level", &level))
			goto err_out;
		if (level <= levels)
			goto err_out;

		leaves += of_count_cache_leaves(np);
		levels = level;
	}

	of_node_put(np);
	this_cpu_ci->num_levels = levels;
	this_cpu_ci->num_leaves = leaves;

	return 0;

err_out:
	of_node_put(np);
	return -EINVAL;
}

#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
int init_of_cache_level(unsigned int cpu) { return 0; }
#endif

int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}

unsigned int coherency_max_size;

static int cache_setup_properties(unsigned int cpu)
{
	int ret = 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	return ret;
}

static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index, sib_index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	/*
	 * skip setting up cache properties if LLC is valid, just need
	 * to update the shared cpu_map if the cache attributes were
	 * populated early before all the cpus are brought online
	 */
	if (!last_level_cache_is_valid(cpu)) {
		ret = cache_setup_properties(cpu);
		if (ret)
			return ret;
	}

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = per_cpu_cacheinfo_idx(cpu, index);

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue;/* skip if itself or no cacheinfo */

			for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
				sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
					cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
					cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
					break;
				}
			}
		}
		/* record the maximum cache line size */
		if (this_leaf->coherency_line_size > coherency_max_size)
			coherency_max_size = this_leaf->coherency_line_size;
	}

	return 0;
}

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index, sib_index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci =
						get_cpu_cacheinfo(sibling);

			if (sibling == cpu || !sib_cpu_ci->info_list)
				continue;/* skip if itself or no cacheinfo */

			for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
				sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
					cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
					cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
					break;
				}
			}
		}
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);
}

int __weak early_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

static inline
int allocate_cache_info(int cpu)
{
	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_ATOMIC);
	if (!per_cpu_cacheinfo(cpu)) {
		cache_leaves(cpu) = 0;
		return -ENOMEM;
	}

	return 0;
}

int fetch_cache_info(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int levels = 0, split_levels = 0;
	int ret;

	if (acpi_disabled) {
		ret = init_of_cache_level(cpu);
	} else {
		ret = acpi_get_cache_info(cpu, &levels, &split_levels);
		if (!ret) {
			this_cpu_ci->num_levels = levels;
			/*
			 * This assumes that:
			 * - there cannot be any split caches (data/instruction)
			 *   above a unified cache
			 * - data/instruction caches come by pair
			 */
			this_cpu_ci->num_leaves = levels + split_levels;
		}
	}

	if (ret || !cache_leaves(cpu)) {
		ret = early_cache_level(cpu);
		if (ret)
			return ret;

		if (!cache_leaves(cpu))
			return -ENOENT;

		this_cpu_ci->early_ci_levels = true;
	}

	return allocate_cache_info(cpu);
}

static inline int init_level_allocate_ci(unsigned int cpu)
{
	unsigned int early_leaves = cache_leaves(cpu);

	/* Since early initialization/allocation of the cacheinfo is allowed
	 * via fetch_cache_info() and this also gets called as CPU hotplug
	 * callbacks via cacheinfo_cpu_online, the init/alloc can be skipped
	 * as it will happen only once (the cacheinfo memory is never freed).
	 * Just populate the cacheinfo. However, if the cacheinfo has been
	 * allocated early through the arch-specific early_cache_level() call,
	 * there is a chance the info is wrong (this can happen on arm64). In
	 * that case, call init_cache_level() anyway to give the arch-specific
	 * code a chance to make things right.
	 */
	if (per_cpu_cacheinfo(cpu) && !ci_cacheinfo(cpu)->early_ci_levels)
		return 0;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	/*
	 * Now that we have properly initialized the cache level info, make
	 * sure we don't try to do that again the next time we are called
	 * (e.g. as CPU hotplug callbacks).
	 */
	ci_cacheinfo(cpu)->early_ci_levels = false;

	if (cache_leaves(cpu) <= early_leaves)
		return 0;

	kfree(per_cpu_cacheinfo(cpu));
	return allocate_cache_info(cpu);
}

int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	ret = init_level_allocate_ci(cpu);
	if (ret)
		return ret;

	/*
	 * populate_cache_leaves() may completely setup the cache leaves and
	 * shared_cpu_map or it may leave it partially setup.
	 */
	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;

	/*
	 * For systems using DT for cache hierarchy, fw_token
	 * and shared_cpu_map will be set up here only if they are
	 * not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sysfs_emit(buf, "%u\n", this_leaf->object);	\
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const char *output;

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		output = "Data";
		break;
	case CACHE_TYPE_INST:
		output = "Instruction";
		break;
	case CACHE_TYPE_UNIFIED:
		output = "Unified";
		break;
	default:
		return -EINVAL;
	}

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	const char *output;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		output = "ReadWriteAllocate";
	else if (ci_attr & CACHE_READ_ALLOCATE)
		output = "ReadAllocate";
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		output = "WriteAllocate";
	else
		return 0;

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sysfs_emit(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sysfs_emit(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, i);
		if (this_leaf->disable_sysfs)
			continue;
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}

static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
				 "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);