
Searched refs: next_cpu (Results 1 – 23 of 23), sorted by relevance

/linux-6.6.21/kernel/
watchdog_buddy.c
13 unsigned int next_cpu; in watchdog_next_cpu() local
15 next_cpu = cpumask_next(cpu, &watchdog_cpus); in watchdog_next_cpu()
16 if (next_cpu >= nr_cpu_ids) in watchdog_next_cpu()
17 next_cpu = cpumask_first(&watchdog_cpus); in watchdog_next_cpu()
19 if (next_cpu == cpu) in watchdog_next_cpu()
22 return next_cpu; in watchdog_next_cpu()
32 unsigned int next_cpu; in watchdog_hardlockup_enable() local
50 next_cpu = watchdog_next_cpu(cpu); in watchdog_hardlockup_enable()
51 if (next_cpu < nr_cpu_ids) in watchdog_hardlockup_enable()
52 watchdog_hardlockup_touch_cpu(next_cpu); in watchdog_hardlockup_enable()
[all …]
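The buddy watchdog picks the CPU after the current one in watchdog_cpus, wrapping to the first set bit and reporting "no buddy" when the search comes back to the caller (a single-CPU mask). The same cpumask_next()/cpumask_first() wraparound idiom recurs below in tsc_sync.c, trace_hwlat.c, clocksource.c, hash_utils.c and irq-gic-v3.c. A minimal userspace sketch of the idiom, with a plain bool array standing in for the cpumask (array contents and NR_CPUS are invented for illustration):

```c
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* Stand-in for the watchdog_cpus cpumask (contents hypothetical). */
static bool watchdog_cpus[NR_CPUS] = { [0] = true, [2] = true, [5] = true };

/* Analog of cpumask_next(): first set bit strictly after @cpu, or NR_CPUS. */
static int mask_next(int cpu)
{
	for (int i = cpu + 1; i < NR_CPUS; i++)
		if (watchdog_cpus[i])
			return i;
	return NR_CPUS;
}

/* Analog of cpumask_first(). */
static int mask_first(void)
{
	return mask_next(-1);
}

/* Mirrors watchdog_next_cpu(): wrap around, and report "none" (NR_CPUS)
 * when the only set bit is the caller itself. */
static int watchdog_next_cpu(int cpu)
{
	int next_cpu = mask_next(cpu);

	if (next_cpu >= NR_CPUS)
		next_cpu = mask_first();
	if (next_cpu == cpu)
		return NR_CPUS;
	return next_cpu;
}

int main(void)
{
	printf("buddy of CPU 2 is CPU %d\n", watchdog_next_cpu(2)); /* 5 */
	printf("buddy of CPU 5 is CPU %d\n", watchdog_next_cpu(5)); /* 0 */
	return 0;
}
```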
/linux-6.6.21/arch/x86/platform/uv/
uv_time.c
50 int next_cpu; member
159 head->next_cpu = -1; in uv_rtc_allocate_timers()
176 head->next_cpu = -1; in uv_rtc_find_next_timer()
185 head->next_cpu = bcpu; in uv_rtc_find_next_timer()
209 int next_cpu; in uv_rtc_set_timer() local
213 next_cpu = head->next_cpu; in uv_rtc_set_timer()
217 if (next_cpu < 0 || bcpu == next_cpu || in uv_rtc_set_timer()
218 expires < head->cpu[next_cpu].expires) { in uv_rtc_set_timer()
219 head->next_cpu = bcpu; in uv_rtc_set_timer()
249 if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force) in uv_rtc_unset_timer()
[all …]
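Here uv_time.c keeps a per-node head->next_cpu that caches which CPU owns the soonest-expiring timer; setting a timer only needs to reprogram the hardware when the new expiry beats the cached one, and -1 means no timer is pending. A hedged sketch of that caching decision (struct layout and function names simplified for illustration):

```c
#include <stdint.h>
#include <stdio.h>

#define NCPUS 4

/* Simplified stand-in for the per-node timer head in uv_time.c. */
struct timer_head {
	int next_cpu;             /* CPU with the earliest expiry, or -1 */
	uint64_t expires[NCPUS];  /* per-CPU expiry times */
};

/* Mirrors the uv_rtc_set_timer() decision: adopt @bcpu as next_cpu when
 * no timer is pending, when @bcpu already owns the slot, or when the new
 * expiry is earlier than the cached one. Returns 1 when the caller
 * would reprogram the RTC. */
static int set_timer(struct timer_head *head, int bcpu, uint64_t expires)
{
	int next_cpu = head->next_cpu;

	head->expires[bcpu] = expires;
	if (next_cpu < 0 || bcpu == next_cpu ||
	    expires < head->expires[next_cpu]) {
		head->next_cpu = bcpu;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct timer_head head = { .next_cpu = -1 };

	printf("reprogram=%d\n", set_timer(&head, 1, 1000)); /* 1: first timer */
	printf("reprogram=%d\n", set_timer(&head, 2, 2000)); /* 0: later expiry */
	printf("reprogram=%d\n", set_timer(&head, 3, 500));  /* 1: earlier */
	return 0;
}
```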
/linux-6.6.21/tools/testing/selftests/bpf/
test_lru_map.c
152 int next_cpu = 0; in test_lru_sanity0() local
157 assert(sched_next_online(0, &next_cpu) != -1); in test_lru_sanity0()
245 int next_cpu = 0; in test_lru_sanity1() local
254 assert(sched_next_online(0, &next_cpu) != -1); in test_lru_sanity1()
322 int next_cpu = 0; in test_lru_sanity2() local
331 assert(sched_next_online(0, &next_cpu) != -1); in test_lru_sanity2()
428 int next_cpu = 0; in test_lru_sanity3() local
437 assert(sched_next_online(0, &next_cpu) != -1); in test_lru_sanity3()
491 int next_cpu = 0; in test_lru_sanity4() local
496 assert(sched_next_online(0, &next_cpu) != -1); in test_lru_sanity4()
[all …]
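Each LRU-map test calls a sched_next_online() helper before running, so the test executes pinned to a known online CPU and the cursor advances on each call. A runnable userspace sketch of what such a helper can look like (the real helper lives in the selftest sources; this signature and return convention are assumptions):

```c
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

/* Pin thread/process @pid (0 = caller) to the next CPU at or after
 * *next_cpu that accepts the affinity, updating the cursor so repeated
 * calls walk the online CPUs. Returns the CPU chosen, or -1. */
static int sched_next_online(int pid, int *next_cpu)
{
	int nr = (int)sysconf(_SC_NPROCESSORS_CONF);
	cpu_set_t set;

	for (int cpu = *next_cpu; cpu < nr; cpu++) {
		CPU_ZERO(&set);
		CPU_SET(cpu, &set);
		if (sched_setaffinity(pid, sizeof(set), &set) == 0) {
			*next_cpu = cpu + 1;
			return cpu;
		}
	}
	return -1;
}

int main(void)
{
	int next_cpu = 0;

	if (sched_next_online(0, &next_cpu) == -1)
		return 1;
	printf("pinned to CPU %d\n", next_cpu - 1);
	return 0;
}
```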
bench.c
458 static int next_cpu(struct cpu_set *cpu_set) in next_cpu() function
464 for (i = cpu_set->next_cpu; i < cpu_set->cpus_len; i++) { in next_cpu()
466 cpu_set->next_cpu = i + 1; in next_cpu()
474 return cpu_set->next_cpu++ % env.nr_cpus; in next_cpu()
619 next_cpu(&env.cons_cpus)); in setup_benchmark()
626 env.prod_cpus.next_cpu = env.cons_cpus.next_cpu; in setup_benchmark()
638 next_cpu(&env.prod_cpus)); in setup_benchmark()
bench.h
17 int next_cpu; member
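bench.c's next_cpu() walks an explicit allow-list inside a cpu_set, remembering its position in the cpu_set->next_cpu cursor; when no allow-list is given it falls back to plain round-robin modulo the CPU count. A small sketch of that two-phase cursor (the struct is simplified from bench.h, and a bool array replaces the real bitmap):

```c
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

struct cpu_set {
	bool cpus[NR_CPUS];	/* allow-list; stands in for the real bitmap */
	int cpus_len;		/* 0 means "no allow-list given" */
	int next_cpu;		/* cursor into the allow-list, then a counter */
};

/* First exhaust the explicit allow-list, then fall back to
 * round-robin over all CPUs, as bench.c does. */
static int next_cpu(struct cpu_set *cpu_set)
{
	if (cpu_set->cpus_len) {
		for (int i = cpu_set->next_cpu; i < cpu_set->cpus_len; i++) {
			if (cpu_set->cpus[i]) {
				cpu_set->next_cpu = i + 1;
				return i;
			}
		}
		fprintf(stderr, "no CPU left for the next thread\n");
		return -1;
	}
	return cpu_set->next_cpu++ % NR_CPUS;
}

int main(void)
{
	struct cpu_set set = { .cpus = { [1] = true, [3] = true }, .cpus_len = 4 };

	printf("%d\n", next_cpu(&set));	/* 1 */
	printf("%d\n", next_cpu(&set));	/* 3 */

	struct cpu_set any = { .cpus_len = 0 };
	printf("%d\n", next_cpu(&any));	/* 0 */
	printf("%d\n", next_cpu(&any));	/* 1 */
	return 0;
}
```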
/linux-6.6.21/arch/parisc/kernel/
irq.c
324 static int next_cpu = -1; in txn_alloc_addr() local
326 next_cpu++; /* assign to "next" CPU we want this bugger on */ in txn_alloc_addr()
329 while ((next_cpu < nr_cpu_ids) && in txn_alloc_addr()
330 (!per_cpu(cpu_data, next_cpu).txn_addr || in txn_alloc_addr()
331 !cpu_online(next_cpu))) in txn_alloc_addr()
332 next_cpu++; in txn_alloc_addr()
334 if (next_cpu >= nr_cpu_ids) in txn_alloc_addr()
335 next_cpu = 0; /* nothing else, assign monarch */ in txn_alloc_addr()
337 return txn_affinity_addr(virt_irq, next_cpu); in txn_alloc_addr()
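parisc's txn_alloc_addr() spreads interrupt transaction addresses across CPUs with a static cursor: bump it, skip CPUs that are offline or lack a txn_addr, and fall back to CPU 0 (the monarch) when the scan runs off the end. A sketch of that skip-and-fallback scan (the availability array is an invented stand-in for the per-CPU data):

```c
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* Stand-in for "online and has a txn_addr" per-CPU state. */
static bool cpu_usable[NR_CPUS] = { [0] = true, [3] = true, [6] = true };

/* Mirrors txn_alloc_addr(): a static cursor advances past unusable
 * CPUs and wraps to CPU 0 (the monarch) when it falls off the end. */
static int txn_alloc_cpu(void)
{
	static int next_cpu = -1;

	next_cpu++;	/* assign to the "next" CPU */
	while (next_cpu < NR_CPUS && !cpu_usable[next_cpu])
		next_cpu++;
	if (next_cpu >= NR_CPUS)
		next_cpu = 0;	/* nothing else, assign monarch */
	return next_cpu;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("irq -> CPU %d\n", txn_alloc_cpu()); /* 0 3 6 0 3 */
	return 0;
}
```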
/linux-6.6.21/arch/x86/kernel/
tsc_sync.c
96 int next_cpu; in tsc_sync_check_timer_fn() local
101 next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask); in tsc_sync_check_timer_fn()
102 if (next_cpu >= nr_cpu_ids) in tsc_sync_check_timer_fn()
103 next_cpu = cpumask_first(cpu_online_mask); in tsc_sync_check_timer_fn()
106 add_timer_on(&tsc_sync_check_timer, next_cpu); in tsc_sync_check_timer_fn()
/linux-6.6.21/kernel/trace/
trace_hwlat.c
318 int next_cpu; in move_to_next_cpu() local
330 next_cpu = cpumask_next(raw_smp_processor_id(), current_mask); in move_to_next_cpu()
333 if (next_cpu >= nr_cpu_ids) in move_to_next_cpu()
334 next_cpu = cpumask_first(current_mask); in move_to_next_cpu()
336 if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */ in move_to_next_cpu()
340 cpumask_set_cpu(next_cpu, current_mask); in move_to_next_cpu()
426 int next_cpu; in start_single_kthread() local
443 next_cpu = cpumask_first(current_mask); in start_single_kthread()
445 cpumask_set_cpu(next_cpu, current_mask); in start_single_kthread()
trace_entries.h
143 __field( unsigned int, next_cpu ) \
160 __entry->next_cpu)
178 __entry->next_cpu)
trace_sched_wakeup.c
395 entry->next_cpu = task_cpu(next); in tracing_sched_switch_trace()
423 entry->next_cpu = task_cpu(wakee); in tracing_sched_wakeup_trace()
trace_output.c
1091 field->next_cpu, in trace_ctxwake_print()
1125 field->next_cpu, in trace_ctxwake_raw()
1161 SEQ_PUT_HEX_FIELD(s, field->next_cpu); in trace_ctxwake_hex()
1192 SEQ_PUT_FIELD(s, field->next_cpu); in trace_ctxwake_bin()
trace.c
3689 int next_cpu = -1; in __find_next_entry() local
3719 next_cpu = cpu; in __find_next_entry()
3729 *ent_cpu = next_cpu; in __find_next_entry()
/linux-6.6.21/tools/testing/selftests/kvm/
rseq_test.c
44 static int next_cpu(int cpu) in next_cpu() function
75 for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) { in migration_worker()
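rseq_test's migration worker bounces a task across the usable CPUs, recomputing its affinity each iteration with cpu = next_cpu(cpu). A runnable userspace sketch of that migrate loop (NR_TASK_MIGRATIONS mirrors the test's name but uses a small count here, and the wrap logic is simplified to all online CPUs):

```c
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

#define NR_TASK_MIGRATIONS 16	/* the real test uses a much larger count */

static int nr_cpus;

/* Simplified next_cpu(): cycle through CPUs 0..nr_cpus-1. */
static int next_cpu(int cpu)
{
	return (cpu + 1) % nr_cpus;
}

int main(void)
{
	cpu_set_t set;
	int cpu, i;

	nr_cpus = (int)sysconf(_SC_NPROCESSORS_ONLN);

	for (i = 0, cpu = 0; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) {
		CPU_ZERO(&set);
		CPU_SET(cpu, &set);
		if (sched_setaffinity(0, sizeof(set), &set))
			perror("sched_setaffinity");	/* CPU may be offline */
	}
	printf("done after %d migrations\n", NR_TASK_MIGRATIONS);
	return 0;
}
```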
/linux-6.6.21/kernel/time/
clocksource.c
405 int next_cpu, reset_pending; in clocksource_watchdog() local
567 next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask); in clocksource_watchdog()
568 if (next_cpu >= nr_cpu_ids) in clocksource_watchdog()
569 next_cpu = cpumask_first(cpu_online_mask); in clocksource_watchdog()
577 add_timer_on(&watchdog_timer, next_cpu); in clocksource_watchdog()
tick-broadcast.c
694 int cpu, next_cpu = 0; in tick_handle_oneshot_broadcast() local
723 next_cpu = cpu; in tick_handle_oneshot_broadcast()
760 tick_broadcast_set_event(dev, next_cpu, next_event); in tick_handle_oneshot_broadcast()
/linux-6.6.21/arch/powerpc/lib/
qspinlock.c
705 int next_cpu = next->cpu; in queued_spin_lock_mcs_queue() local
709 if (vcpu_is_preempted(next_cpu)) in queued_spin_lock_mcs_queue()
710 prod_cpu(next_cpu); in queued_spin_lock_mcs_queue()
/linux-6.6.21/block/
blk-mq.c
2204 int next_cpu = hctx->next_cpu; in blk_mq_hctx_next_cpu() local
2211 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask, in blk_mq_hctx_next_cpu()
2213 if (next_cpu >= nr_cpu_ids) in blk_mq_hctx_next_cpu()
2214 next_cpu = blk_mq_first_mapped_cpu(hctx); in blk_mq_hctx_next_cpu()
2222 if (!cpu_online(next_cpu)) { in blk_mq_hctx_next_cpu()
2232 hctx->next_cpu = next_cpu; in blk_mq_hctx_next_cpu()
2237 hctx->next_cpu = next_cpu; in blk_mq_hctx_next_cpu()
2238 return next_cpu; in blk_mq_hctx_next_cpu()
3970 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx); in blk_mq_map_swqueue()
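blk-mq caches a per-hctx next_cpu and advances it with cpumask_next_and() over the intersection of the hctx's CPU mask and the online mask, restarting from the first mapped CPU on wraparound. A sketch of the mask-intersection walk, with uint64_t bitmasks standing in for cpumasks and the online re-check omitted (mask values are invented for illustration):

```c
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8

/* Stand-ins for hctx->cpumask and cpu_online_mask. */
static uint64_t hctx_mask   = 0x36;	/* CPUs 1,2,4,5 */
static uint64_t online_mask = 0x32;	/* CPUs 1,4,5 */

/* Analog of cpumask_next_and(): first bit after @cpu set in both masks. */
static int next_and(int cpu, uint64_t a, uint64_t b)
{
	for (int i = cpu + 1; i < NR_CPUS; i++)
		if ((a >> i) & (b >> i) & 1)
			return i;
	return NR_CPUS;
}

/* Analog of blk_mq_first_mapped_cpu(). */
static int first_mapped(void)
{
	return next_and(-1, hctx_mask, online_mask);
}

/* Mirrors blk_mq_hctx_next_cpu(): advance the cached cursor,
 * wrapping to the first mapped CPU when the scan runs out. */
static int hctx_next_cpu(int *cached)
{
	int next_cpu = next_and(*cached, hctx_mask, online_mask);

	if (next_cpu >= NR_CPUS)
		next_cpu = first_mapped();
	*cached = next_cpu;
	return next_cpu;
}

int main(void)
{
	int cursor = first_mapped();

	for (int i = 0; i < 5; i++)
		printf("run work on CPU %d\n", hctx_next_cpu(&cursor)); /* 4 5 1 4 5 */
	return 0;
}
```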
/linux-6.6.21/arch/powerpc/mm/book3s64/
hash_utils.c
1017 int next_cpu; in stress_hpt_timer_fn() local
1023 next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask); in stress_hpt_timer_fn()
1024 if (next_cpu >= nr_cpu_ids) in stress_hpt_timer_fn()
1025 next_cpu = cpumask_first(cpu_online_mask); in stress_hpt_timer_fn()
1027 add_timer_on(&stress_hpt_timer, next_cpu); in stress_hpt_timer_fn()
/linux-6.6.21/drivers/irqchip/
irq-gic-v3.c
1309 int next_cpu, cpu = *base_cpu; in gic_compute_target_list() local
1318 next_cpu = cpumask_next(cpu, mask); in gic_compute_target_list()
1319 if (next_cpu >= nr_cpu_ids) in gic_compute_target_list()
1321 cpu = next_cpu; in gic_compute_target_list()
/linux-6.6.21/include/linux/
blk-mq.h
317 int next_cpu; member
/linux-6.6.21/net/core/
dev.c
4512 struct rps_dev_flow *rflow, u16 next_cpu) in set_rps_cpu() argument
4514 if (next_cpu < nr_cpu_ids) { in set_rps_cpu()
4527 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); in set_rps_cpu()
4548 per_cpu(softnet_data, next_cpu).input_queue_head; in set_rps_cpu()
4551 rflow->cpu = next_cpu; in set_rps_cpu()
4599 u32 next_cpu; in get_rps_cpu() local
4609 next_cpu = ident & rps_cpu_mask; in get_rps_cpu()
4628 if (unlikely(tcpu != next_cpu) && in get_rps_cpu()
4632 tcpu = next_cpu; in get_rps_cpu()
4633 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); in get_rps_cpu()
/linux-6.6.21/drivers/net/ethernet/mediatek/
mtk_eth_soc.c
2273 u32 next_cpu = desc->txd2; in mtk_poll_tx_qdma() local
2296 cpu = next_cpu; in mtk_poll_tx_qdma()
/linux-6.6.21/kernel/sched/
fair.c
12922 goto next_cpu; in sched_group_set_idle()
12950 next_cpu: in sched_group_set_idle()