/linux-6.15/kernel/irq/

cpuhotplug.c
      58: const struct cpumask *affinity;   [in migrate_one_irq(), local]
     105: affinity = irq_desc_get_pending_mask(desc);   [in migrate_one_irq()]
     107: affinity = irq_data_get_affinity_mask(d);   [in migrate_one_irq()]
     113: if (!cpumask_intersects(affinity, cpu_online_mask)) {   [in migrate_one_irq()]
     123: affinity = cpu_online_mask;   [in migrate_one_irq()]
     132: err = irq_do_set_affinity(d, affinity, false);   [in migrate_one_irq()]
     141: d->irq, cpumask_pr_args(affinity));   [in migrate_one_irq()]
     143: affinity = cpu_online_mask;   [in migrate_one_irq()]
     146: err = irq_do_set_affinity(d, affinity, false);   [in migrate_one_irq()]
     208: const struct cpumask *affinity = irq_data_get_affinity_mask(data);   [in irq_restore_affinity_of_irq(), local]
     [all …]
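These hunks show the CPU-hotplug migration path checking whether an interrupt's affinity still intersects cpu_online_mask and, if not, retargeting it to the remaining online CPUs via irq_do_set_affinity(). A minimal sketch of that fallback pattern follows; it is not the real migrate_one_irq() body, and irq_do_set_affinity() is kernel-internal (declared in kernel/irq/internals.h), so this only illustrates the shape of the check:

    /* Hedged sketch: retarget an IRQ whose affinity has gone fully offline. */
    #include <linux/cpumask.h>
    #include <linux/irq.h>
    #include <linux/printk.h>

    static int retarget_irq_sketch(struct irq_data *d)
    {
            const struct cpumask *affinity = irq_data_get_affinity_mask(d);
            int err;

            /* Nothing to do while at least one targeted CPU is still online. */
            if (cpumask_intersects(affinity, cpu_online_mask))
                    return 0;

            /* Otherwise fall back to every CPU that is still online. */
            affinity = cpu_online_mask;
            err = irq_do_set_affinity(d, affinity, false);
            if (err)
                    pr_warn("IRQ%u: affinity fallback failed (%d)\n", d->irq, err);
            return err;
    }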
irqdesc.c
      82: const struct cpumask *affinity)   [in desc_smp_init(), argument]
      84: if (!affinity)   [in desc_smp_init()]
      85: affinity = irq_default_affinity;   [in desc_smp_init()]
      86: cpumask_copy(desc->irq_common_data.affinity, affinity);   [in desc_smp_init()]
     139: desc_smp_init(desc, node, affinity);   [in desc_set_defaults()]
     539: if (affinity) {   [in alloc_descs()]
     550: if (affinity) {   [in alloc_descs()]
     551: if (affinity->is_managed) {   [in alloc_descs()]
     556: mask = &affinity->mask;   [in alloc_descs()]
     558: affinity++;   [in alloc_descs()]
     [all …]
irqdomain.c
      30: bool realloc, const struct irq_affinity_desc *affinity);
     797: const struct irq_affinity_desc *affinity)   [in irq_create_mapping_affinity_locked(), argument]
     806: affinity);   [in irq_create_mapping_affinity_locked()]
     836: const struct irq_affinity_desc *affinity)   [in irq_create_mapping_affinity(), argument]
    1220: int node, const struct irq_affinity_desc *affinity)   [in irq_domain_alloc_descs(), argument]
    1226: affinity);   [in irq_domain_alloc_descs()]
    1232: affinity);   [in irq_domain_alloc_descs()]
    1235: affinity);   [in irq_domain_alloc_descs()]
    1605: bool realloc, const struct irq_affinity_desc *affinity)   [in irq_domain_alloc_irqs_locked(), argument]
    1613: affinity);   [in irq_domain_alloc_irqs_locked()]
    [all …]
/linux-6.15/tools/testing/selftests/rseq/

basic_test.c
      18: cpu_set_t affinity, test_affinity;   [in test_cpu_pointer(), local]
      21: sched_getaffinity(0, sizeof(affinity), &affinity);   [in test_cpu_pointer()]
      24: if (CPU_ISSET(i, &affinity)) {   [in test_cpu_pointer()]
      39: sched_setaffinity(0, sizeof(affinity), &affinity);   [in test_cpu_pointer()]
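The selftest fragment reads the current mask, walks the allowed CPUs, and restores the original mask afterwards. Below is a standalone userspace sketch of the same pattern, using only the standard sched_getaffinity()/sched_setaffinity()/CPU_* interfaces; it is not the rseq test itself:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            cpu_set_t affinity, test_affinity;
            int i;

            if (sched_getaffinity(0, sizeof(affinity), &affinity))
                    return 1;

            for (i = 0; i < CPU_SETSIZE; i++) {
                    if (!CPU_ISSET(i, &affinity))
                            continue;
                    /* Pin the thread to one allowed CPU at a time. */
                    CPU_ZERO(&test_affinity);
                    CPU_SET(i, &test_affinity);
                    if (sched_setaffinity(0, sizeof(test_affinity), &test_affinity) == 0)
                            printf("now restricted to CPU %d\n", i);
            }

            /* Restore the original mask. */
            sched_setaffinity(0, sizeof(affinity), &affinity);
            return 0;
    }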
/linux-6.15/tools/perf/util/

affinity.h
       7: struct affinity {   [struct]
      13: void affinity__cleanup(struct affinity *a);   [argument]
      14: void affinity__set(struct affinity *a, int cpu);
      15: int affinity__setup(struct affinity *a);
affinity.c
      24: int affinity__setup(struct affinity *a)   [in affinity__setup()]
      48: void affinity__set(struct affinity *a, int cpu)   [in affinity__set()]
      71: static void __affinity__cleanup(struct affinity *a)   [in __affinity__cleanup()]
      81: void affinity__cleanup(struct affinity *a)   [in affinity__cleanup()]
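Together, affinity.h and affinity.c form perf's helper for temporarily migrating the calling thread near a CPU before issuing per-CPU work. A sketch of the typical setup/set/cleanup sequence, assuming it is compiled inside the perf tree; the CPU list and the per-CPU work are placeholders:

    #include "affinity.h"

    static int touch_cpus(const int *cpus, int nr_cpus)
    {
            struct affinity affinity;
            int i;

            if (affinity__setup(&affinity) < 0)
                    return -1;

            for (i = 0; i < nr_cpus; i++) {
                    /* Move this thread close to the target CPU if needed. */
                    affinity__set(&affinity, cpus[i]);
                    /* ... do per-CPU work that benefits from locality ... */
            }

            affinity__cleanup(&affinity);
            return 0;
    }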
mmap.c
      97: static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, int affinity)   [in perf_mmap__aio_bind(), argument]
     105: if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {   [in perf_mmap__aio_bind()]
     141: struct perf_cpu cpu __maybe_unused, int affinity __maybe_unused)   [in perf_mmap__aio_bind()]
     175: ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);   [in perf_mmap__aio_mmap()]
     268: if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)   [in perf_mmap__setup_affinity_mask()]
     270: else if (mp->affinity == PERF_AFFINITY_CPU)   [in perf_mmap__setup_affinity_mask()]
     284: if (mp->affinity != PERF_AFFINITY_SYS &&   [in mmap__mmap()]
evlist.h
     187: int affinity, int flush, int comp_level);
     354: struct affinity *affinity;   [member]
     367: #define evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) \   [argument]
     368: for ((evlist_cpu_itr) = evlist__cpu_begin(evlist, affinity); \
     373: struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity);
evlist.c
     359: .affinity = affinity,   [in evlist__cpu_begin()]
     367: if (itr.affinity) {   [in evlist__cpu_begin()]
     398: if (evlist_cpu_itr->affinity)   [in evlist_cpu_iterator__next()]
     444: struct affinity saved_affinity, *affinity = NULL;   [in __evlist__disable(), local]
     451: affinity = &saved_affinity;   [in __evlist__disable()]
     474: affinity__cleanup(affinity);   [in __evlist__disable()]
     514: struct affinity saved_affinity, *affinity = NULL;   [in __evlist__enable(), local]
     520: affinity = &saved_affinity;   [in __evlist__enable()]
     533: affinity__cleanup(affinity);   [in __evlist__enable()]
     961: .affinity = affinity,   [in evlist__mmap_ex()]
     [all …]
/linux-6.15/Documentation/arch/arm64/

asymmetric-32bit.rst
      51: CPU affinity.
      68: On a homogeneous system, the CPU affinity of a task is preserved across
      71: affinity mask contains 64-bit-only CPUs. In this situation, the kernel
      72: determines the new affinity mask as follows:
      74: 1. If the 32-bit-capable subset of the affinity mask is not empty,
      75: then the affinity is restricted to that subset and the old affinity
      84: affinity of the task is then changed to match the 32-bit-capable
      92: affinity of the task using the saved mask if it was previously valid.
      95: with the affinity unchanged.
      99: affinity for the task is updated and any saved mask from a prior
     [all …]
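The documented rule in step 1 is a plain mask intersection: when a task execs a 32-bit image, its affinity is intersected with the set of 32-bit-capable CPUs and restricted to that subset if it is non-empty. A hedged sketch of that computation using generic cpumask helpers; it illustrates the rule, not the arm64 implementation, and "aarch32_capable_mask" is a hypothetical name for the 32-bit-capable set:

    #include <linux/cpumask.h>

    static void restrict_to_aarch32_sketch(struct cpumask *affinity,
                                           const struct cpumask *aarch32_capable_mask)
    {
            struct cpumask subset;   /* on-stack for brevity; real code avoids this */

            /* cpumask_and() returns true only if the intersection is non-empty. */
            if (cpumask_and(&subset, affinity, aarch32_capable_mask))
                    cpumask_copy(affinity, &subset);
            /* Otherwise the later documented fallback steps apply. */
    }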
/linux-6.15/tools/virtio/ringtest/

run-on-all.sh
      20: "$@" --host-affinity $HOST_AFFINITY --guest-affinity $cpu
      24: "$@" --host-affinity $HOST_AFFINITY
/linux-6.15/drivers/infiniband/hw/hfi1/

affinity.c
     964: struct hfi1_affinity_node_list *affinity)   [in find_hw_thread_mask(), argument]
     968: affinity->num_core_siblings /   [in find_hw_thread_mask()]
     971: cpumask_copy(hw_thread_mask, &affinity->proc.mask);   [in find_hw_thread_mask()]
     972: if (affinity->num_core_siblings > 0) {   [in find_hw_thread_mask()]
    1002: struct cpu_mask_set *set = &affinity->proc;   [in hfi1_get_proc_affinity()]
    1060: mutex_lock(&affinity->lock);   [in hfi1_get_proc_affinity()]
    1090: if (affinity->num_core_siblings > 0) {   [in hfi1_get_proc_affinity()]
    1166: mutex_unlock(&affinity->lock);   [in hfi1_get_proc_affinity()]
    1183: struct cpu_mask_set *set = &affinity->proc;   [in hfi1_put_proc_affinity()]
    1188: mutex_lock(&affinity->lock);   [in hfi1_put_proc_affinity()]
    [all …]
/linux-6.15/arch/arm64/kernel/

setup.c
     113: u32 i, affinity, fs[4], bits[4], ls;   [in smp_build_mpidr_hash(), local]
     127: affinity = MPIDR_AFFINITY_LEVEL(mask, i);   [in smp_build_mpidr_hash()]
     133: ls = fls(affinity);   [in smp_build_mpidr_hash()]
     134: fs[i] = affinity ? ffs(affinity) - 1 : 0;   [in smp_build_mpidr_hash()]
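The bit arithmetic here computes how many bits of one MPIDR affinity level actually vary: ffs() - 1 gives the index of the lowest set bit and fls() the position just above the highest, so their difference is the width the hash needs. A small userspace illustration of the same arithmetic using compiler builtins; the kernel uses its own ffs()/fls() helpers, and 0x0c is just an example value:

    #include <stdio.h>

    static int my_ffs(unsigned int x) { return x ? __builtin_ctz(x) + 1 : 0; }
    static int my_fls(unsigned int x) { return x ? 32 - __builtin_clz(x) : 0; }

    int main(void)
    {
            unsigned int affinity = 0x0c;                 /* bits 2 and 3 set */
            int fs = affinity ? my_ffs(affinity) - 1 : 0; /* lowest set bit */
            int ls = my_fls(affinity);                    /* highest set bit + 1 */

            printf("field spans %d bit(s) starting at bit %d\n", ls - fs, fs);
            return 0;
    }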
/linux-6.15/Documentation/devicetree/bindings/interrupt-controller/

apple,aic.yaml
      21: - Per-IRQ affinity setting
      89: FIQ affinity can be expressed as a single "affinities" node,
      91: affinity.
      93: "^.+-affinity$":
     100: the affinity is not the default.
apple,aic2.yaml
      79: FIQ affinity can be expressed as a single "affinities" node,
      81: affinity.
      83: "^.+-affinity$":
      90: the affinity is not the default.
arm,gic-v3.yaml
      43: If the system requires describing PPI affinity, then the value must
     143: PPI affinity can be expressed as a single "ppi-partitions" node,
     150: affinity:
     159: - affinity
     299: affinity = <&cpu0>, <&cpu2>;
     303: affinity = <&cpu1>, <&cpu3>;
/linux-6.15/Documentation/core-api/irq/

irq-affinity.rst
       2: SMP IRQ affinity
      14: IRQ affinity then the value will not change from the default of all cpus.
      16: /proc/irq/default_smp_affinity specifies default affinity mask that applies
      17: to all non-active IRQs. Once IRQ is allocated/activated its affinity bitmask
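This document describes the /proc interface for IRQ affinity: /proc/irq/default_smp_affinity holds the mask applied to newly allocated IRQs, and /proc/irq/<N>/smp_affinity holds the per-IRQ hexadecimal CPU mask. A small userspace sketch that reads the default mask and, as root, writes a new mask for one IRQ; IRQ 19 and the mask value are only examples, and the write fails for IRQs whose affinity cannot be changed:

    #include <stdio.h>

    int main(void)
    {
            char buf[256];
            FILE *f = fopen("/proc/irq/default_smp_affinity", "r");

            if (f && fgets(buf, sizeof(buf), f))
                    printf("default affinity mask: %s", buf);
            if (f)
                    fclose(f);

            f = fopen("/proc/irq/19/smp_affinity", "w");
            if (f) {
                    fputs("3\n", f);              /* hex mask 0x3: CPUs 0 and 1 */
                    if (fclose(f) != 0)
                            perror("writing /proc/irq/19/smp_affinity");
            } else {
                    perror("opening /proc/irq/19/smp_affinity");
            }
            return 0;
    }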
/linux-6.15/kernel/

kthread.c
     382: cpumask_var_t affinity;   [in kthread_affine_node(), local]
     404: kthread_fetch_affinity(kthread, affinity);   [in kthread_affine_node()]
     405: set_cpus_allowed_ptr(current, affinity);   [in kthread_affine_node()]
     408: free_cpumask_var(affinity);   [in kthread_affine_node()]
     860: cpumask_var_t affinity;   [in kthread_affine_preferred(), local]
     888: do_set_cpus_allowed(p, affinity);   [in kthread_affine_preferred()]
     893: free_cpumask_var(affinity);   [in kthread_affine_preferred()]
     907: cpumask_var_t affinity;   [in kthreads_online_cpu(), local]
     927: kthread_fetch_affinity(k, affinity);   [in kthreads_online_cpu()]
     928: set_cpus_allowed_ptr(k->task, affinity);   [in kthreads_online_cpu()]
     [all …]
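All three hunks follow the same cpumask_var_t pattern: allocate a temporary mask, fill it in, apply it with set_cpus_allowed_ptr(), and free it. A reduced kernel-style sketch of that pattern; kthread_fetch_affinity() is internal to kthread.c, so cpumask_of_node() stands in for it here, and error handling is minimal:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/sched.h>
    #include <linux/topology.h>

    static int affine_current_to_node_sketch(int node)
    {
            cpumask_var_t affinity;

            if (!alloc_cpumask_var(&affinity, GFP_KERNEL))
                    return -ENOMEM;

            /* Prefer the online CPUs of the requested node, else all online CPUs. */
            cpumask_and(affinity, cpumask_of_node(node), cpu_online_mask);
            if (cpumask_empty(affinity))
                    cpumask_copy(affinity, cpu_online_mask);

            set_cpus_allowed_ptr(current, affinity);

            free_cpumask_var(affinity);
            return 0;
    }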
/linux-6.15/Documentation/translations/zh_CN/core-api/irq/

irq-affinity.rst
       3: :Original: Documentation/core-api/irq/irq-affinity.rst
       9: .. _cn_irq-affinity.rst:
      23: (IRQ affinity), the default value for all cpus will remain unchanged (i.e. affine to all CPUs).
/linux-6.15/arch/alpha/kernel/

sys_dp264.c
     136: cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)   [in cpu_set_irq_affinity(), argument]
     142: if (cpumask_test_cpu(cpu, &affinity))   [in cpu_set_irq_affinity()]
     151: dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,   [in dp264_set_affinity(), argument]
     155: cpu_set_irq_affinity(d->irq, *affinity);   [in dp264_set_affinity()]
     163: clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,   [in clipper_set_affinity(), argument]
     167: cpu_set_irq_affinity(d->irq - 16, *affinity);   [in clipper_set_affinity()]
sys_titan.c
     135: titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)   [in titan_cpu_set_irq_affinity(), argument]
     140: if (cpumask_test_cpu(cpu, &affinity))   [in titan_cpu_set_irq_affinity()]
     149: titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,   [in titan_set_irq_affinity(), argument]
     154: titan_cpu_set_irq_affinity(irq - 16, *affinity);   [in titan_set_irq_affinity()]
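Both Alpha platform files implement the same shape of irq_chip .irq_set_affinity callback: walk the CPUs and program the platform routing so the interrupt is delivered only to CPUs present in the requested mask. A generic skeleton of that shape; route_irq_to_cpu() is a hypothetical stand-in for the platform-specific register update:

    #include <linux/cpumask.h>
    #include <linux/irq.h>

    static void route_irq_to_cpu(unsigned int irq, int cpu, bool enable)
    {
            /* platform-specific interrupt-routing register update goes here */
    }

    static int example_set_affinity(struct irq_data *d,
                                    const struct cpumask *affinity, bool force)
    {
            int cpu;

            for_each_online_cpu(cpu)
                    route_irq_to_cpu(d->irq, cpu, cpumask_test_cpu(cpu, affinity));

            return IRQ_SET_MASK_OK;
    }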
/linux-6.15/tools/testing/selftests/rcutorture/bin/

kvm-test-1-run-batch.sh
      65: print "echo No CPU-affinity information, so no taskset command.";
      71: print "echo " scenario ": Bogus CPU-affinity information, so no taskset command.";
/linux-6.15/drivers/irqchip/

irq-bcm7038-l1.c
      47: u8 affinity[MAX_WORDS * IRQS_PER_WORD];   [member]
     179: __bcm7038_l1_unmask(d, intc->affinity[d->hwirq]);   [in bcm7038_l1_unmask()]
     189: __bcm7038_l1_mask(d, intc->affinity[d->hwirq]);   [in bcm7038_l1_mask()]
     208: was_disabled = !!(intc->cpus[intc->affinity[hw]]->mask_cache[word] &   [in bcm7038_l1_set_affinity()]
     210: __bcm7038_l1_mask(d, intc->affinity[hw]);   [in bcm7038_l1_set_affinity()]
     211: intc->affinity[hw] = first_cpu;   [in bcm7038_l1_set_affinity()]
/linux-6.15/tools/perf/

builtin-record.c
      98: struct mmap_cpu_mask affinity;   [member]
    1313: opts->nr_cblocks, opts->affinity,   [in record__mmap_evlist()]
    1522: thread->mask->affinity.nbits)) {   [in record__adjust_affinity()]
    1523: bitmap_zero(thread->mask->affinity.bits, thread->mask->affinity.nbits);   [in record__adjust_affinity()]
    1524: bitmap_or(thread->mask->affinity.bits, thread->mask->affinity.bits,   [in record__adjust_affinity()]
    3009: opts->affinity = PERF_AFFINITY_NODE;   [in record__parse_affinity()]
    3011: opts->affinity = PERF_AFFINITY_CPU;   [in record__parse_affinity()]
    3038: mask->affinity.bits = NULL;   [in record__thread_mask_alloc()]
    3730: if (!bitmap_and(thread_mask.affinity.bits, thread_mask.affinity.bits,   [in record__init_thread_masks_spec()]
    3744: if (bitmap_intersects(thread_mask.affinity.bits, full_mask.affinity.bits,   [in record__init_thread_masks_spec()]
    [all …]
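The record__init_thread_masks_spec() hunks validate a user-supplied per-thread affinity spec with plain bitmap operations: the spec must intersect the full mask, and it is then trimmed down to the CPUs that really exist. A small sketch of that check using the same linux/bitmap.h helpers the perf tool builds against; the function and parameter names are illustrative:

    #include <linux/bitmap.h>

    static int check_thread_mask_sketch(unsigned long *thread_bits,
                                        const unsigned long *full_bits,
                                        unsigned int nbits)
    {
            /* Reject a spec that shares no CPU with the full mask. */
            if (!bitmap_intersects(thread_bits, full_bits, nbits))
                    return -1;

            /* Otherwise keep only the CPUs that are actually present. */
            bitmap_and(thread_bits, thread_bits, full_bits, nbits);
            return 0;
    }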
/linux-6.15/arch/arm64/boot/dts/apple/

t6001.dtsi
      52: e-core-pmu-affinity {
      57: p-core-pmu-affinity {