/linux-6.15/kernel/sched/topology.c
     435  for_each_cpu(i, cpu_map) {   in build_perf_domains()
     448  perf_domain_debug(cpu_map, pd);   in build_perf_domains()
    1503  __sdt_free(cpu_map);   in __free_domain_allocs()
    1515  if (__sdt_alloc(cpu_map))   in __visit_domain_allocation_hell()
    2241  for_each_cpu(j, cpu_map) {   in __sdt_alloc()
    2292  for_each_cpu(j, cpu_map) {   in __sdt_free()
    2405  for_each_cpu(i, cpu_map) {   in build_sched_domains()
    2428  for_each_cpu(i, cpu_map) {   in build_sched_domains()
    2445  for_each_cpu(i, cpu_map) {   in build_sched_domains()
    2512  for_each_cpu(i, cpu_map) {   in build_sched_domains()
    [all …]
/linux-6.15/tools/power/x86/intel-speed-select/isst-config.c
      73  struct _cpu_map *cpu_map;   variable
     333  if (cpu_map && cpu_map[cpu].initialized)   in get_physical_package_id()
     357  if (cpu_map && cpu_map[cpu].initialized)   in get_physical_core_id()
     381  if (cpu_map && cpu_map[cpu].initialized)   in get_physical_die_id()
     410  if (cpu_map && cpu_map[cpu].initialized)   in get_physical_punit_id()
     761  cpu_map = calloc(topo_max_cpus, sizeof(*cpu_map));   in create_cpu_map()
     762  if (!cpu_map)   in create_cpu_map()
     812  update_punit_cpu_info(map.cpu_map[0].physical_cpu, &cpu_map[i]);   in create_cpu_map()
     828  i, cpu_map[i].core_id, cpu_map[i].die_id,   in create_cpu_map()
     829  cpu_map[i].pkg_id, cpu_map[i].punit_id,   in create_cpu_map()
     [all …]
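The isst-config.c hits show a cached-topology pattern: create_cpu_map() calloc()s one slot per possible CPU, and the get_physical_*_id() helpers return the cached value whenever cpu_map[cpu].initialized is set. A minimal sketch of that pattern follows; the struct layout and the sysfs fallback are simplified stand-ins, not the tool's real definitions.

```c
#include <stdio.h>
#include <stdlib.h>

/* simplified stand-in for the tool's per-CPU topology cache */
struct _cpu_map {
	int initialized;
	int pkg_id;
};

static struct _cpu_map *cpu_map;	/* indexed by logical CPU number */

/* illustrative fallback: read the package id straight from sysfs */
static int read_pkg_id_from_sysfs(int cpu)
{
	char path[128];
	int val = -1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

static int get_physical_package_id(int cpu)
{
	/* fast path: the value cached by create_cpu_map() */
	if (cpu_map && cpu_map[cpu].initialized)
		return cpu_map[cpu].pkg_id;
	return read_pkg_id_from_sysfs(cpu);
}

static int create_cpu_map(int topo_max_cpus)
{
	cpu_map = calloc(topo_max_cpus, sizeof(*cpu_map));
	if (!cpu_map)
		return -1;
	/* the real tool fills core/die/punit ids for each CPU here */
	return 0;
}
```

Caching this way avoids re-opening sysfs files on every query, which matters when the tool walks many CPUs per command.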
/linux-6.15/arch/mips/kernel/cacheinfo.c
      58  static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)   in fill_cpumask_siblings() argument
      64  cpumask_set_cpu(cpu1, cpu_map);   in fill_cpumask_siblings()
      67  static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)   in fill_cpumask_cluster() argument
      74  cpumask_set_cpu(cpu1, cpu_map);   in fill_cpumask_cluster()
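fill_cpumask_siblings() and fill_cpumask_cluster() populate a caller-supplied cpumask_t with the CPUs that share a cache level, one cpumask_set_cpu() call per sibling. A rough sketch of the same idea is below; it leans on the generic topology_*() helpers rather than the MIPS-specific sibling data the real file walks, so treat those accessors as illustrative.

```c
#include <linux/cpumask.h>
#include <linux/topology.h>

/* mark every online CPU that sits in the same physical core as @cpu */
static void fill_core_siblings(int cpu, cpumask_t *cpu_map)
{
	int sibling;

	cpumask_clear(cpu_map);
	for_each_online_cpu(sibling) {
		if (topology_physical_package_id(sibling) ==
			    topology_physical_package_id(cpu) &&
		    topology_core_id(sibling) == topology_core_id(cpu))
			cpumask_set_cpu(sibling, cpu_map);
	}
}
```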
/linux-6.15/tools/perf/arch/arm64/util/arm-spe.c
      82  struct perf_cpu_map *cpu_map = arm_spe_find_cpus(evlist);   in arm_spe_info_priv_size() local
      85  if (!cpu_map)   in arm_spe_info_priv_size()
      89  ARM_SPE_CPU_PRIV_MAX * perf_cpu_map__nr(cpu_map);   in arm_spe_info_priv_size()
      92  perf_cpu_map__put(cpu_map);   in arm_spe_info_priv_size()
     146  struct perf_cpu_map *cpu_map;   in arm_spe_info_fill() local
     156  cpu_map = arm_spe_find_cpus(session->evlist);   in arm_spe_info_fill()
     157  if (!cpu_map)   in arm_spe_info_fill()
     165  auxtrace_info->priv[ARM_SPE_CPUS_NUM] = perf_cpu_map__nr(cpu_map);   in arm_spe_info_fill()
     168  perf_cpu_map__for_each_cpu(cpu, i, cpu_map) {   in arm_spe_info_fill()
     179  perf_cpu_map__put(cpu_map);   in arm_spe_info_fill()
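The arm-spe.c hits are standard libperf reference-counted cpu-map usage: obtain a map, size buffers from perf_cpu_map__nr(), walk it with perf_cpu_map__for_each_cpu(), and release it with perf_cpu_map__put(). A small standalone sketch of that API is below, assuming libperf is installed and that perf_cpu_map__new(NULL) still returns the online-CPU map as documented; build with something like `cc example.c -lperf`.

```c
#include <perf/cpumap.h>
#include <stdio.h>

int main(void)
{
	/* NULL cpu list is documented to mean "all online CPUs" */
	struct perf_cpu_map *cpu_map = perf_cpu_map__new(NULL);
	struct perf_cpu cpu;
	int idx;

	if (!cpu_map)
		return 1;

	printf("%d online CPUs\n", perf_cpu_map__nr(cpu_map));
	perf_cpu_map__for_each_cpu(cpu, idx, cpu_map)
		printf("  index %d -> cpu %d\n", idx, cpu.cpu);

	perf_cpu_map__put(cpu_map);	/* drop our reference */
	return 0;
}
```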
/linux-6.15/drivers/hwtracing/coresight/coresight-trace-id.c
      24  .cpu_map = &id_map_default_cpu_ids,
      51  return atomic_read(per_cpu_ptr(id_map->cpu_map, cpu));   in _coresight_trace_id_read_cpu_id()
     137  atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);   in coresight_trace_id_release_all()
     171  atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id);   in _coresight_trace_id_get_cpu_id()
     194  atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);   in _coresight_trace_id_put_cpu_id()
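coresight-trace-id.c keeps its ID map as one atomic_t per CPU (id_map->cpu_map is a percpu pointer): atomic_read() returns the trace ID currently bound to a CPU, and atomic_set() assigns or clears it, with 0 meaning unassigned. A stripped-down sketch of that per-CPU slot pattern, with the surrounding allocator and locking omitted:

```c
#include <linux/atomic.h>
#include <linux/percpu.h>

/* one slot per CPU; 0 means "no trace ID assigned" */
static DEFINE_PER_CPU(atomic_t, trace_id_slot);

static int trace_id_read_cpu_id(int cpu)
{
	return atomic_read(per_cpu_ptr(&trace_id_slot, cpu));
}

static void trace_id_assign_cpu_id(int cpu, int id)
{
	atomic_set(per_cpu_ptr(&trace_id_slot, cpu), id);
}

static void trace_id_release_cpu_id(int cpu)
{
	atomic_set(per_cpu_ptr(&trace_id_slot, cpu), 0);
}
```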
/linux-6.15/tools/testing/selftests/bpf/progs/freplace_progmap.c
      10  } cpu_map SEC(".maps");
      21  return bpf_redirect_map(&cpu_map, 0, XDP_PASS);   in xdp_cpumap_prog()

/linux-6.15/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
      13  } cpu_map SEC(".maps");
      20  return bpf_redirect_map(&cpu_map, 0, 0);   in xdp_redir_prog()

/linux-6.15/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_frags_helpers.c
      13  } cpu_map SEC(".maps");

/linux-6.15/tools/testing/selftests/bpf/progs/xdp_features.c
      52  } cpu_map SEC(".maps");
     222  return bpf_redirect_map(&cpu_map, 0, 0);   in xdp_do_redirect()
/linux-6.15/drivers/gpu/drm/xe/tests/xe_bo.c
      33  u64 *cpu_map;   in ccs_test_migrate() local
      97  cpu_map = kmap_local_page(page);   in ccs_test_migrate()
     100  if (cpu_map[0] != get_val) {   in ccs_test_migrate()
     104  (unsigned long long)cpu_map[0]);   in ccs_test_migrate()
     111  if (cpu_map[offset] != get_val) {   in ccs_test_migrate()
     115  (unsigned long long)cpu_map[offset]);   in ccs_test_migrate()
     119  cpu_map[0] = assign_val;   in ccs_test_migrate()
     120  cpu_map[offset] = assign_val;   in ccs_test_migrate()
     121  kunmap_local(cpu_map);   in ccs_test_migrate()
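ccs_test_migrate() uses the short-lived mapping API: kmap_local_page() maps the page for the current context, the test compares and rewrites a couple of u64 values through that mapping, and kunmap_local() drops it. A condensed sketch of that check-and-patch step, with the KUnit reporting and migration setup left out:

```c
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/types.h>

/* returns 0 if both slots held the expected value and were patched */
static int check_and_patch_page(struct page *page, unsigned long offset,
				u64 expect, u64 assign)
{
	u64 *cpu_map = kmap_local_page(page);	/* temporary, CPU-local mapping */
	int ret = 0;

	if (cpu_map[0] != expect || cpu_map[offset] != expect)
		ret = -EINVAL;

	cpu_map[0] = assign;
	cpu_map[offset] = assign;

	kunmap_local(cpu_map);			/* unmap before returning */
	return ret;
}
```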
/linux-6.15/Documentation/bpf/map_cpumap.rst
     103  ``cpu_map`` and how to redirect packets to a remote CPU using a round robin scheme.
     112  } cpu_map SEC(".maps");
     153  return bpf_redirect_map(&cpu_map, cpu_dest, 0);
     164  int set_max_cpu_entries(struct bpf_map *cpu_map)
     166  if (bpf_map__set_max_entries(cpu_map, libbpf_num_possible_cpus()) < 0) {
     167  fprintf(stderr, "Failed to set max entries for cpu_map map: %s",
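map_cpumap.rst documents pairing a BPF_MAP_TYPE_CPUMAP with bpf_redirect_map() so XDP can steer packets to a chosen remote CPU, round robin in its example. A minimal XDP sketch in the same spirit is below; the map size, counter, and names are illustrative rather than the documentation's exact listing.

```c
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define NR_REDIRECT_CPUS 4	/* illustrative; size to the CPUs you enqueue to */

struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(max_entries, NR_REDIRECT_CPUS);
	__type(key, __u32);
	__type(value, struct bpf_cpumap_val);
} cpu_map SEC(".maps");

static __u32 next_cpu;	/* simple, unsynchronized round-robin cursor */

SEC("xdp")
int xdp_redirect_round_robin(struct xdp_md *ctx)
{
	__u32 cpu_dest = next_cpu++ % NR_REDIRECT_CPUS;

	/* last argument selects flags/fallback action; 0 here */
	return bpf_redirect_map(&cpu_map, cpu_dest, 0);
}

char _license[] SEC("license") = "GPL";
```

User space still has to populate each cpu_map slot before redirects take effect; the documented loader also resizes the map to libbpf_num_possible_cpus() via bpf_map__set_max_entries().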
/linux-6.15/tools/perf/util/mmap.c
     247  struct perf_cpu_map *cpu_map = cpu_map__online();   in build_node_mask() local
     249  if (!cpu_map)   in build_node_mask()
     252  nr_cpus = perf_cpu_map__nr(cpu_map);   in build_node_mask()
     254  cpu = perf_cpu_map__cpu(cpu_map, idx); /* map c index to online cpu index */   in build_node_mask()
     258  perf_cpu_map__put(cpu_map);   in build_node_mask()

/linux-6.15/tools/perf/util/mem-events.c
     258  struct perf_cpu_map *cpu_map = NULL;   in perf_mem_events__record_args() local
     297  ret = perf_cpu_map__merge(&cpu_map, pmu->cpus);   in perf_mem_events__record_args()
     305  if (cpu_map) {   in perf_mem_events__record_args()
     308  if (!perf_cpu_map__equal(cpu_map, online)) {   in perf_mem_events__record_args()
     311  cpu_map__snprint(cpu_map, buf, sizeof(buf));   in perf_mem_events__record_args()
     315  perf_cpu_map__put(cpu_map);   in perf_mem_events__record_args()

/linux-6.15/tools/perf/util/tool.h
      75  cpu_map,   member
/linux-6.15/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
     485  struct isst_if_cpu_map *cpu_map;   in isst_if_proc_phyid_req() local
     487  cpu_map = (struct isst_if_cpu_map *)cmd_ptr;   in isst_if_proc_phyid_req()
     488  if (cpu_map->logical_cpu >= nr_cpu_ids ||   in isst_if_proc_phyid_req()
     489  cpu_map->logical_cpu >= num_possible_cpus())   in isst_if_proc_phyid_req()
     493  cpu_map->physical_cpu = isst_cpu_info[cpu_map->logical_cpu].punit_cpu_id;   in isst_if_proc_phyid_req()
     617  cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map);   in isst_if_def_ioctl()
/linux-6.15/drivers/base/cacheinfo.c
     946  cpumask_t *cpu_map)   in update_per_cpu_data_slice_size() argument
     950  for_each_cpu(icpu, cpu_map) {   in update_per_cpu_data_slice_size()
     961  cpumask_t *cpu_map;   in cacheinfo_cpu_online() local
     968  if (cpu_map_shared_cache(true, cpu, &cpu_map))   in cacheinfo_cpu_online()
     969  update_per_cpu_data_slice_size(true, cpu, cpu_map);   in cacheinfo_cpu_online()
     978  cpumask_t *cpu_map;   in cacheinfo_cpu_pre_down() local
     981  nr_shared = cpu_map_shared_cache(false, cpu, &cpu_map);   in cacheinfo_cpu_pre_down()
     987  update_per_cpu_data_slice_size(false, cpu, cpu_map);   in cacheinfo_cpu_pre_down()
/linux-6.15/kernel/bpf/cpumap.c
      80  struct bpf_cpu_map_entry __rcu **cpu_map;   member
     106  cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *   in cpu_map_alloc()
     109  if (!cmap->cpu_map) {   in cpu_map_alloc()
     551  old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu)));   in __cpu_map_entry_replace()
     630  rcpu = rcu_dereference_raw(cmap->cpu_map[i]);   in cpu_map_free()
     637  bpf_map_area_free(cmap->cpu_map);   in cpu_map_free()
     653  rcpu = rcu_dereference_check(cmap->cpu_map[key],   in __cpu_map_lookup_elem()
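kernel/bpf/cpumap.c keeps its entries in a flat array of __rcu pointers: lookups RCU-dereference cpu_map[key], and updates publish a new entry with xchg(..., RCU_INITIALIZER(new)), unwrapping the displaced pointer with unrcu_pointer() so it can be freed after a grace period. A boiled-down sketch of that pattern with simplified types (not the real bpf_cpu_map layout):

```c
#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct entry {
	int target_cpu;
	/* the real entries also carry a queue, kthread and refcount */
};

struct entry_map {
	unsigned int max_entries;
	struct entry __rcu **slots;	/* one publishable pointer per key */
};

/* reader side: caller must be inside an RCU read-side critical section */
static struct entry *entry_lookup(struct entry_map *map, u32 key)
{
	if (key >= map->max_entries)
		return NULL;
	return rcu_dereference(map->slots[key]);
}

/*
 * update side: atomically publish @new and hand back the displaced entry,
 * which the caller may free only after a grace period (e.g. via call_rcu())
 */
static struct entry *entry_replace(struct entry_map *map, u32 key,
				   struct entry *new)
{
	return unrcu_pointer(xchg(&map->slots[key], RCU_INITIALIZER(new)));
}
```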
/linux-6.15/mm/percpu.c
    2414  ai->groups[0].cpu_map = ptr;   in pcpu_alloc_alloc_info()
    2483  if (gi->cpu_map[unit] != NR_CPUS)   in pcpu_dump_alloc_info()
    2485  cpu_width, gi->cpu_map[unit]);   in pcpu_dump_alloc_info()
    2615  cpu = gi->cpu_map[i];   in pcpu_setup_first_chunk()
    2803  unsigned int *cpu_map;   in pcpu_build_alloc_info() local
    2895  cpu_map = ai->groups[0].cpu_map;   in pcpu_build_alloc_info()
    2898  ai->groups[group].cpu_map = cpu_map;   in pcpu_build_alloc_info()
    2921  gi->cpu_map[gi->nr_units++] = cpu;   in pcpu_build_alloc_info()
    3033  cpu = gi->cpu_map[i];   in pcpu_embed_first_chunk()
    3074  if (gi->cpu_map[i] == NR_CPUS) {   in pcpu_embed_first_chunk()
    [all …]
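In the per-CPU allocator, each group's cpu_map[] maps unit indexes to CPU numbers, and padding units carry NR_CPUS to mark that no CPU sits behind them (see the pcpu_group_info member listed under include/linux/percpu.h below). A small sketch of the walk that pcpu_setup_first_chunk() and pcpu_embed_first_chunk() effectively perform, with the real per-unit work reduced to a debug print:

```c
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/threads.h>	/* NR_CPUS */

/* visit every populated unit of one allocation-info group */
static void walk_group_units(const struct pcpu_group_info *gi)
{
	int unit;

	for (unit = 0; unit < gi->nr_units; unit++) {
		unsigned int cpu = gi->cpu_map[unit];

		if (cpu == NR_CPUS)	/* padding unit, no CPU mapped */
			continue;

		pr_debug("unit %d -> cpu %u\n", unit, cpu);
	}
}
```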
/linux-6.15/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
      41  map_fd = bpf_map__fd(skel->maps.cpu_map);   in test_xdp_with_cpumap_helpers()
     115  map_fd = bpf_map__fd(skel->maps.cpu_map);   in test_xdp_with_cpumap_frags_helpers()
/linux-6.15/tools/perf/tests/cpumap.c
      19  struct perf_record_cpu_map *map_event = &event->cpu_map;   in process_event_mask()
      55  struct perf_record_cpu_map *map_event = &event->cpu_map;   in process_event_cpus()
      81  struct perf_record_cpu_map *map_event = &event->cpu_map;   in process_event_range_cpus()
/linux-6.15/tools/perf/python/twatch.py
      12  cpus = perf.cpu_map()

/linux-6.15/tools/perf/python/tracepoint.py
      17  cpus = perf.cpu_map()
/linux-6.15/tools/perf/arch/arm/util/cs-etm.c
     781  struct perf_cpu_map *cpu_map;   in cs_etm_info_fill() local
     797  cpu_map = online_cpus;   in cs_etm_info_fill()
     805  cpu_map = event_cpus;   in cs_etm_info_fill()
     808  nr_cpu = perf_cpu_map__nr(cpu_map);   in cs_etm_info_fill()
     821  perf_cpu_map__for_each_cpu(cpu, i, cpu_map) {   in cs_etm_info_fill()
/linux-6.15/drivers/gpu/drm/imagination/pvr_queue.c
    1241  void *cpu_map;   in pvr_queue_create() local
    1300  cpu_map = pvr_fw_object_create_and_map(pvr_dev, sizeof(*queue->timeline_ufo.value),   in pvr_queue_create()
    1303  if (IS_ERR(cpu_map)) {   in pvr_queue_create()
    1304  err = PTR_ERR(cpu_map);   in pvr_queue_create()
    1308  queue->timeline_ufo.value = cpu_map;   in pvr_queue_create()
/linux-6.15/include/linux/percpu.h
      81  unsigned int *cpu_map; /* unit->cpu map, empty   member