/linux-6.1.9/kernel/irq/ |
D | cpuhotplug.c |
     58   const struct cpumask *affinity;   in migrate_one_irq() local
    105   affinity = irq_desc_get_pending_mask(desc);   in migrate_one_irq()
    107   affinity = irq_data_get_affinity_mask(d);   in migrate_one_irq()
    113   if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {   in migrate_one_irq()
    123   affinity = cpu_online_mask;   in migrate_one_irq()
    132   err = irq_do_set_affinity(d, affinity, false);   in migrate_one_irq()
    192   const struct cpumask *affinity = irq_data_get_affinity_mask(data);   in irq_restore_affinity_of_irq() local
    195   !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))   in irq_restore_affinity_of_irq()
    211   irq_set_affinity_locked(data, affinity, false);   in irq_restore_affinity_of_irq()
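The migrate_one_irq() hits trace the CPU-hotplug fallback policy: take the pending or currently programmed mask (lines 105/107), and if it no longer intersects cpu_online_mask (line 113), retarget to all online CPUs (line 123) before reprogramming via irq_do_set_affinity(). A minimal userspace model of that decision follows, with glibc's cpu_set_t standing in for the kernel's struct cpumask; pick_effective_mask() and the example masks are invented for illustration.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/* Keep the saved IRQ mask if any of its CPUs is still online;
 * otherwise fall back to every online CPU, mirroring the
 * "affinity = cpu_online_mask;" branch quoted above. */
static void pick_effective_mask(const cpu_set_t *affinity,
                                const cpu_set_t *online, cpu_set_t *out)
{
        cpu_set_t both;

        CPU_AND(&both, affinity, online);       /* cpumask_any_and() analogue */
        *out = CPU_COUNT(&both) ? *affinity : *online;
}

int main(void)
{
        cpu_set_t affinity, online, eff;

        CPU_ZERO(&affinity);
        CPU_SET(2, &affinity);          /* IRQ pinned to CPU 2 ...        */
        CPU_ZERO(&online);
        CPU_SET(0, &online);            /* ... but only CPUs 0-1 survive  */
        CPU_SET(1, &online);

        pick_effective_mask(&affinity, &online, &eff);
        printf("CPU0 in effective mask: %d\n", CPU_ISSET(0, &eff));
        return 0;
}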
|
D | irqdesc.c |
     57   if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,   in alloc_masks()
     64   free_cpumask_var(desc->irq_common_data.affinity);   in alloc_masks()
     74   free_cpumask_var(desc->irq_common_data.affinity);   in alloc_masks()
     82   const struct cpumask *affinity)   in desc_smp_init() argument
     84   if (!affinity)   in desc_smp_init()
     85   affinity = irq_default_affinity;   in desc_smp_init()
     86   cpumask_copy(desc->irq_common_data.affinity, affinity);   in desc_smp_init()
    100   desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }   in desc_smp_init() argument
    104   const struct cpumask *affinity, struct module *owner)   in desc_set_defaults() argument
    127   desc_smp_init(desc, node, affinity);   in desc_set_defaults()
    [all …]
|
D | msi.c |
     36   const struct irq_affinity_desc *affinity)   in msi_alloc_desc() argument
     45   if (affinity) {   in msi_alloc_desc()
     46   desc->affinity = kmemdup(affinity, nvec * sizeof(*desc->affinity), GFP_KERNEL);   in msi_alloc_desc()
     47   if (!desc->affinity) {   in msi_alloc_desc()
     57   kfree(desc->affinity);   in msi_free_desc()
     85   desc = msi_alloc_desc(dev, init_desc->nvec_used, init_desc->affinity);   in msi_add_msi_desc()
    895   desc->affinity);   in __msi_domain_alloc_irqs()
|
/linux-6.1.9/tools/testing/selftests/rseq/ |
D | basic_test.c |
     18   cpu_set_t affinity, test_affinity;   in test_cpu_pointer() local
     21   sched_getaffinity(0, sizeof(affinity), &affinity);   in test_cpu_pointer()
     24   if (CPU_ISSET(i, &affinity)) {   in test_cpu_pointer()
     35   sched_setaffinity(0, sizeof(affinity), &affinity);   in test_cpu_pointer()
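These four hits form a complete save/pin/restore pattern: sched_getaffinity() snapshots the allowed mask, CPU_ISSET() filters the loop to permitted CPUs, and sched_setaffinity() pins and later restores. A standalone sketch of the same loop is below; the real selftest validates rseq's cpu_id field at each step, while this version just prints sched_getcpu().

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        cpu_set_t affinity, test_affinity;
        int i;

        /* Save the allowed mask, as the selftest does at line 21. */
        if (sched_getaffinity(0, sizeof(affinity), &affinity))
                exit(EXIT_FAILURE);

        for (i = 0; i < CPU_SETSIZE; i++) {
                if (!CPU_ISSET(i, &affinity))   /* skip disallowed CPUs */
                        continue;
                CPU_ZERO(&test_affinity);
                CPU_SET(i, &test_affinity);     /* pin to one CPU */
                if (sched_setaffinity(0, sizeof(test_affinity),
                                      &test_affinity))
                        exit(EXIT_FAILURE);
                printf("running on CPU %d\n", sched_getcpu());
        }

        /* Restore the original mask, as the selftest does at line 35. */
        sched_setaffinity(0, sizeof(affinity), &affinity);
        return EXIT_SUCCESS;
}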
|
/linux-6.1.9/tools/perf/util/ |
D | affinity.h |
      7   struct affinity {   struct
     13   void affinity__cleanup(struct affinity *a);   argument
     14   void affinity__set(struct affinity *a, int cpu);
     15   int affinity__setup(struct affinity *a);
|
D | affinity.c |
     24   int affinity__setup(struct affinity *a)   in affinity__setup()
     48   void affinity__set(struct affinity *a, int cpu)   in affinity__set()
     71   static void __affinity__cleanup(struct affinity *a)   in __affinity__cleanup()
     81   void affinity__cleanup(struct affinity *a)   in affinity__cleanup()
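Together, affinity.h and affinity.c define a three-call lifecycle: affinity__setup() saves the thread's original mask, affinity__set() migrates the thread to a given CPU, and affinity__cleanup() restores the saved mask. These helpers are internal to perf and not exported, so the sketch below is a self-contained analogue of the same pattern built on sched_getaffinity()/sched_setaffinity(), not perf's actual implementation; all names here are invented.

#define _GNU_SOURCE
#include <sched.h>

struct my_affinity {
        cpu_set_t orig;         /* mask saved by setup, restored by cleanup */
        int changed;
};

static int my_affinity_setup(struct my_affinity *a)
{
        a->changed = 0;
        return sched_getaffinity(0, sizeof(a->orig), &a->orig);
}

static void my_affinity_set(struct my_affinity *a, int cpu)
{
        cpu_set_t mask;

        CPU_ZERO(&mask);
        CPU_SET(cpu, &mask);
        if (!sched_setaffinity(0, sizeof(mask), &mask))
                a->changed = 1; /* remember that cleanup must restore */
}

static void my_affinity_cleanup(struct my_affinity *a)
{
        if (a->changed)
                sched_setaffinity(0, sizeof(a->orig), &a->orig);
}

int main(void)
{
        struct my_affinity a;

        if (my_affinity_setup(&a))
                return 1;
        my_affinity_set(&a, 0); /* do per-CPU work while bound to CPU 0 */
        my_affinity_cleanup(&a);
        return 0;
}

The evlist.c hits further down show the consumer side of this API: evlist__for_each_cpu() calls affinity__set() each time the iterator moves to a new CPU, so per-CPU operations run from a thread already bound to that CPU.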
|
D | evlist.h |
    201   int affinity, int flush, int comp_level);
    363   struct affinity *affinity;   member
    376   #define evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) \   argument
    377   for ((evlist_cpu_itr) = evlist__cpu_begin(evlist, affinity); \
    382   struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity);
|
D | mmap.c |
     97   static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, int affinity)   in perf_mmap__aio_bind() argument
    105   if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {   in perf_mmap__aio_bind()
    141   struct perf_cpu cpu __maybe_unused, int affinity __maybe_unused)   in perf_mmap__aio_bind()
    175   ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);   in perf_mmap__aio_mmap()
    270   if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)   in perf_mmap__setup_affinity_mask()
    272   else if (mp->affinity == PERF_AFFINITY_CPU)   in perf_mmap__setup_affinity_mask()
    286   if (mp->affinity != PERF_AFFINITY_SYS &&   in mmap__mmap()
|
D | evlist.c |
    389   struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity)   in evlist__cpu_begin() argument
    398   .affinity = affinity,   in evlist__cpu_begin()
    406   if (itr.affinity) {   in evlist__cpu_begin()
    408   affinity__set(itr.affinity, itr.cpu.cpu);   in evlist__cpu_begin()
    437   if (evlist_cpu_itr->affinity)   in evlist_cpu_iterator__next()
    438   affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu.cpu);   in evlist_cpu_iterator__next()
    483   struct affinity saved_affinity, *affinity = NULL;   in __evlist__disable() local
    490   affinity = &saved_affinity;   in __evlist__disable()
    495   evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {   in __evlist__disable()
    513   affinity__cleanup(affinity);   in __evlist__disable()
    [all …]
|
/linux-6.1.9/Documentation/arm64/ |
D | asymmetric-32bit.rst |
     51   CPU affinity.
     68   On a homogeneous system, the CPU affinity of a task is preserved across
     71   affinity mask contains 64-bit-only CPUs. In this situation, the kernel
     72   determines the new affinity mask as follows:
     74   1. If the 32-bit-capable subset of the affinity mask is not empty,
     75   then the affinity is restricted to that subset and the old affinity
     84   affinity of the task is then changed to match the 32-bit-capable
     87   3. On failure (i.e. out of memory), the affinity is changed to the set
     91   invalidate the affinity mask saved in (1) and attempt to restore the CPU
     92   affinity of the task using the saved mask if it was previously valid.
    [all …]
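These fragments describe how exec of a 32-bit task on an asymmetric AArch64 system rewrites the affinity mask: restrict to the 32-bit-capable subset if it is non-empty (saving the old mask for later restoration), otherwise fall back to the full 32-bit-capable set. A rough userspace model of that decision is below. Step 2's text is truncated in the hits, so the else branch collapses steps 2-3; fixup_32bit_affinity() is a made-up name, and the kernel works on struct cpumask under proper locking.

#define _GNU_SOURCE
#include <sched.h>

static void fixup_32bit_affinity(const cpu_set_t *cur,
                                 const cpu_set_t *aarch32_cpus,
                                 cpu_set_t *new_mask, cpu_set_t *saved)
{
        cpu_set_t subset;

        CPU_AND(&subset, cur, aarch32_cpus);
        if (CPU_COUNT(&subset)) {
                *saved = *cur;          /* step 1: keep the old mask ...  */
                *new_mask = subset;     /* ... and restrict to the subset */
        } else {
                /* steps 2-3 (partly truncated above): no overlap at all,
                 * so switch to the whole 32-bit-capable set */
                *new_mask = *aarch32_cpus;
        }
}

int main(void)
{
        cpu_set_t cur, cap32, out, saved;

        CPU_ZERO(&cur);   CPU_SET(4, &cur);     /* task allowed on CPU 4   */
        CPU_ZERO(&cap32); CPU_SET(0, &cap32);   /* only CPU 0 runs AArch32 */

        fixup_32bit_affinity(&cur, &cap32, &out, &saved);
        return !CPU_ISSET(0, &out);             /* expect the fallback set */
}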
|
/linux-6.1.9/tools/virtio/ringtest/ |
D | run-on-all.sh |
     20   "$@" --host-affinity $HOST_AFFINITY --guest-affinity $cpu
     24   "$@" --host-affinity $HOST_AFFINITY
|
/linux-6.1.9/drivers/infiniband/hw/hfi1/ |
D | affinity.c |
    966   struct hfi1_affinity_node_list *affinity)   in find_hw_thread_mask() argument
    970   affinity->num_core_siblings /   in find_hw_thread_mask()
    973   cpumask_copy(hw_thread_mask, &affinity->proc.mask);   in find_hw_thread_mask()
    974   if (affinity->num_core_siblings > 0) {   in find_hw_thread_mask()
   1003   struct hfi1_affinity_node_list *affinity = &node_affinity;   in hfi1_get_proc_affinity() local
   1004   struct cpu_mask_set *set = &affinity->proc;   in hfi1_get_proc_affinity()
   1062   mutex_lock(&affinity->lock);   in hfi1_get_proc_affinity()
   1092   if (affinity->num_core_siblings > 0) {   in hfi1_get_proc_affinity()
   1093   for (i = 0; i < affinity->num_core_siblings; i++) {   in hfi1_get_proc_affinity()
   1094   find_hw_thread_mask(i, hw_thread_mask, affinity);   in hfi1_get_proc_affinity()
    [all …]
|
/linux-6.1.9/Documentation/devicetree/bindings/interrupt-controller/ |
D | apple,aic.yaml |
     21   - Per-IRQ affinity setting
     77   FIQ affinity can be expressed as a single "affinities" node,
     79   affinity.
     81   "^.+-affinity$":
     88   the affinity is not the default.
|
D | arm,gic-v3.yaml |
     43   If the system requires describing PPI affinity, then the value must
    137   PPI affinity can be expressed as a single "ppi-partitions" node,
    143   affinity:
    152   - affinity
    281   affinity = <&cpu0>, <&cpu2>;
    285   affinity = <&cpu1>, <&cpu3>;
|
/linux-6.1.9/arch/arm64/kernel/ |
D | setup.c |
    109   u32 i, affinity, fs[4], bits[4], ls;   in smp_build_mpidr_hash() local
    123   affinity = MPIDR_AFFINITY_LEVEL(mask, i);   in smp_build_mpidr_hash()
    129   ls = fls(affinity);   in smp_build_mpidr_hash()
    130   fs[i] = affinity ? ffs(affinity) - 1 : 0;   in smp_build_mpidr_hash()
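smp_build_mpidr_hash() compresses each MPIDR affinity level into the fewest bits that still distinguish CPUs: ffs(affinity) - 1 is the shift down to the first varying bit and fls(affinity) marks the end of the varying range. A small demonstration of that arithmetic follows; fls32() is a stand-in built from __builtin_clz(), since userspace libc provides ffs() but no fls().

#include <stdio.h>
#include <strings.h>            /* ffs() */

/* Kernel-style fls(): position of the highest set bit, 0 if none. */
static int fls32(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
        unsigned int affinity = 0x0c;   /* say bits 2-3 vary across CPUs */
        int fs = affinity ? ffs(affinity) - 1 : 0;  /* shift, as at line 130 */
        int ls = fls32(affinity);                   /* range end, as at 129  */

        /* 0x0c -> shift right by 2 and keep 2 bits */
        printf("shift right by %d, keep %d bits\n", fs, ls - fs);
        return 0;
}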
|
/linux-6.1.9/Documentation/core-api/irq/ |
D | irq-affinity.rst |
      2   SMP IRQ affinity
     14   IRQ affinity then the value will not change from the default of all cpus.
     16   /proc/irq/default_smp_affinity specifies default affinity mask that applies
     17   to all non-active IRQs. Once IRQ is allocated/activated its affinity bitmask
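The documented interface is a pair of hex-bitmask proc files: /proc/irq/default_smp_affinity holds the default applied to newly allocated IRQs, and /proc/irq/<N>/smp_affinity the per-IRQ mask. A minimal reader for the default mask is sketched below; writing the per-IRQ files takes the same hex format but normally requires root.

#include <stdio.h>

int main(void)
{
        char mask[256];
        FILE *f = fopen("/proc/irq/default_smp_affinity", "r");

        if (!f) {
                perror("default_smp_affinity");
                return 1;
        }
        if (fgets(mask, sizeof(mask), f))       /* e.g. "ffffffff" */
                printf("default IRQ affinity mask: %s", mask);
        fclose(f);
        return 0;
}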
|
/linux-6.1.9/Documentation/translations/zh_CN/core-api/irq/ |
D | irq-affinity.rst |
      3   :Original: Documentation/core-api/irq/irq-affinity.rst
      9   .. _cn_irq-affinity.rst:
     23   (IRQ affinity), then the default value for all cpus will remain unchanged (i.e. affine to all CPUs).
|
/linux-6.1.9/arch/alpha/kernel/ |
D | sys_dp264.c |
    136   cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)   in cpu_set_irq_affinity() argument
    142   if (cpumask_test_cpu(cpu, &affinity))   in cpu_set_irq_affinity()
    151   dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,   in dp264_set_affinity() argument
    155   cpu_set_irq_affinity(d->irq, *affinity);   in dp264_set_affinity()
    163   clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,   in clipper_set_affinity() argument
    167   cpu_set_irq_affinity(d->irq - 16, *affinity);   in clipper_set_affinity()
|
D | sys_titan.c |
    135   titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)   in titan_cpu_set_irq_affinity() argument
    140   if (cpumask_test_cpu(cpu, &affinity))   in titan_cpu_set_irq_affinity()
    149   titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,   in titan_set_irq_affinity() argument
    154   titan_cpu_set_irq_affinity(irq - 16, *affinity);   in titan_set_irq_affinity()
|
/linux-6.1.9/tools/testing/selftests/rcutorture/bin/ |
D | kvm-test-1-run-batch.sh |
     64   print "echo No CPU-affinity information, so no taskset command.";
     70   print "echo " scenario ": Bogus CPU-affinity information, so no taskset command.";
|
/linux-6.1.9/drivers/irqchip/ |
D | irq-bcm7038-l1.c |
     48   u8 affinity[MAX_WORDS * IRQS_PER_WORD];   member
    180   __bcm7038_l1_unmask(d, intc->affinity[d->hwirq]);   in bcm7038_l1_unmask()
    190   __bcm7038_l1_mask(d, intc->affinity[d->hwirq]);   in bcm7038_l1_mask()
    209   was_disabled = !!(intc->cpus[intc->affinity[hw]]->mask_cache[word] &   in bcm7038_l1_set_affinity()
    211   __bcm7038_l1_mask(d, intc->affinity[hw]);   in bcm7038_l1_set_affinity()
    212   intc->affinity[hw] = first_cpu;   in bcm7038_l1_set_affinity()
|
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/ |
D | pci_irq.c |
    209   const struct cpumask *affinity)   in mlx5_irq_alloc() argument
    238   if (affinity) {   in mlx5_irq_alloc()
    239   cpumask_copy(irq->mask, affinity);   in mlx5_irq_alloc()
    302   struct cpumask *affinity)   in irq_pool_request_vector() argument
    312   irq = mlx5_irq_alloc(pool, vecidx, affinity);   in irq_pool_request_vector()
    422   struct cpumask *affinity)   in mlx5_irq_request() argument
    429   irq = irq_pool_request_vector(pool, vecidx, affinity);   in mlx5_irq_request()
    433   irq->irqn, cpumask_pr_args(affinity),   in mlx5_irq_request()
|
/linux-6.1.9/tools/perf/ |
D | builtin-record.c |
     99   struct mmap_cpu_mask affinity;   member
   1230   if (opts->affinity != PERF_AFFINITY_SYS)   in record__mmap_evlist()
   1236   opts->nr_cblocks, opts->affinity,   in record__mmap_evlist()
   1472   if (rec->opts.affinity != PERF_AFFINITY_SYS &&   in record__adjust_affinity()
   1473   !bitmap_equal(thread->mask->affinity.bits, map->affinity_mask.bits,   in record__adjust_affinity()
   1474   thread->mask->affinity.nbits)) {   in record__adjust_affinity()
   1475   bitmap_zero(thread->mask->affinity.bits, thread->mask->affinity.nbits);   in record__adjust_affinity()
   1476   bitmap_or(thread->mask->affinity.bits, thread->mask->affinity.bits,   in record__adjust_affinity()
   1477   map->affinity_mask.bits, thread->mask->affinity.nbits);   in record__adjust_affinity()
   1478   sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity),   in record__adjust_affinity()
    [all …]
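record__adjust_affinity() (lines 1472-1478) rebinds the recording thread only when the target mmap's affinity mask differs from the thread's current one, so switching between buffers on the same node costs no syscall. A userspace model of that compare-then-rebind step is below, with cpu_set_t standing in for perf's struct mmap_cpu_mask; adjust_affinity() is a hypothetical name.

#define _GNU_SOURCE
#include <sched.h>

/* Rebind the current thread only when the target mask differs from the
 * one already in use, like the bitmap_equal() test at line 1473. */
static void adjust_affinity(cpu_set_t *thread_mask, const cpu_set_t *map_mask)
{
        if (CPU_EQUAL(thread_mask, map_mask))
                return;                 /* already bound: skip the syscall */
        *thread_mask = *map_mask;       /* bitmap_zero() + bitmap_or()     */
        sched_setaffinity(0, sizeof(*thread_mask), thread_mask);
}

int main(void)
{
        cpu_set_t thread_mask, map_mask;

        sched_getaffinity(0, sizeof(thread_mask), &thread_mask);
        CPU_ZERO(&map_mask);
        CPU_SET(0, &map_mask);  /* pretend the next ring buffer is on CPU 0 */
        adjust_affinity(&thread_mask, &map_mask);
        return 0;
}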
|
/linux-6.1.9/include/ras/ |
D | ras_event.h |
    182   __field(u8, affinity)
    187   __entry->affinity = proc->affinity_level;
    189   __entry->affinity = ~0;
    206   __entry->affinity, __entry->mpidr, __entry->midr,
|
/linux-6.1.9/Documentation/ia64/ |
D | irq-redir.rst |
      2   IRQ affinity on IA64 platforms
     10   that described in Documentation/core-api/irq/irq-affinity.rst for i386 systems.
     77   For systems like the NEC AzusA we get IRQ node-affinity for free. This
|