
Searched refs:task_cpu (results 1–25 of 37), sorted by relevance


/linux-6.6.21/arch/x86/um/
ptrace_32.c:199 int err, n, cpu = task_cpu(child); in get_fpregs()
216 int n, cpu = task_cpu(child); in set_fpregs()
229 int err, n, cpu = task_cpu(child); in get_fpxregs()
245 int n, cpu = task_cpu(child); in set_fpxregs()
/linux-6.6.21/arch/ia64/include/asm/
switch_to.h:62 (task_cpu(current) != \
64 task_thread_info(current)->last_cpu = task_cpu(current); \
/linux-6.6.21/Documentation/translations/zh_CN/scheduler/
sched-capacity.rst:302 task_util(p) < capacity(task_cpu(p))
358 then the task may become CPU-bound, in other words ``task_util(p) > capacity(task_cpu(p))``; CPU capacity
374 task_uclamp_min(p) <= capacity(task_cpu(p))
387 task_bandwidth(p) < capacity(task_cpu(p))
/linux-6.6.21/kernel/rcu/
tasks.h:912 cpu = task_cpu(t); in rcu_tasks_is_holdout()
988 cpu = task_cpu(t); in check_holdout_task()
1530 int cpu = task_cpu(t); in trc_inspect_reader()
1551 WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t)))); in trc_inspect_reader()
1610 cpu = task_cpu(t); in trc_wait_for_one_reader()
1735 if (task_curr(t) && cpu_online(task_cpu(t))) in trc_check_slow_task()
1754 cpu = task_cpu(t); in show_stalled_task_trace()
tree_stall.h:426 cpu = task_cpu(rcuc); in rcu_is_rcuc_kthread_starving()
534 cpu = gpk ? task_cpu(gpk) : -1; in rcu_check_gp_kthread_starvation()
577 cpu = task_cpu(gpk); in rcu_check_gp_kthread_expired_fqs_timer()
tree_nocb.h:1647 rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1, in show_rcu_nocb_gp_state()
1692 rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1, in show_rcu_nocb_state()
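
The RCU hits above (tasks.h, tree_stall.h, tree_nocb.h) share one diagnostic idiom: task_cpu() names the CPU a task or kthread last ran on, with -1 as the fallback when the kthread has not been spawned yet. A minimal sketch of that idiom, condensed from the tree_nocb.h lines above (the helper name is hypothetical):

#include <linux/printk.h>
#include <linux/sched.h>

/* Hypothetical condensation of the pattern above: -1 means the
 * kthread does not exist yet; otherwise the CPU it last ran on. */
static void report_kthread_cpu(const char *name, struct task_struct *kthread)
{
        int cpu = kthread ? (int)task_cpu(kthread) : -1;

        pr_info("%s last ran on CPU %d\n", name, cpu);
}
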
/linux-6.6.21/kernel/sched/
stop_task.c:15 return task_cpu(p); /* stop tasks as never migrate */ in select_task_rq_stop()
core.c:279 int cpu = task_cpu(p); in sched_core_next()
2198 return cpu_curr(task_cpu(p)) == p; in task_curr()
2528 WARN_ON_ONCE(task_cpu(p) != new_cpu); in move_queued_task()
2622 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) in migration_cpu_stop()
2655 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { in migration_cpu_stop()
2669 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, in migration_cpu_stop()
2972 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { in affine_move_task()
3162 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) { in __set_cpus_allowed_ptr_locked()
3388 if (task_cpu(p) != new_cpu) { in set_task_cpu()
3450 if (task_cpu(arg->dst_task) != arg->dst_cpu) in migrate_swap_stop()
[all …]
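
core.c:2198 above is the entire definition of task_curr(): a task is currently running iff it is what the runqueue of task_cpu(p) has installed as curr. A sketch of that check (hypothetical wrapper name; cpu_curr() is the scheduler's per-CPU rq->curr accessor):

/* Hypothetical restatement of the task_curr() hit at core.c:2198:
 * p runs right now iff its own CPU's runqueue says p is current. */
static inline int task_is_running_now(const struct task_struct *p)
{
        return cpu_curr(task_cpu(p)) == p;
}
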
deadline.c:430 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in task_non_contending()
435 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in task_non_contending()
1414 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in inactive_task_timer()
1423 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in inactive_task_timer()
2127 int cpu = task_cpu(task); in find_later_rq()
2273 WARN_ON_ONCE(rq->cpu != task_cpu(p)); in pick_next_pushable_dl_task()
2507 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in set_cpus_allowed_dl()
2856 int cpus, err = -1, cpu = task_cpu(p); in sched_dl_overflow()
psi.c:899 task->pid, task->comm, task_cpu(task), in psi_flags_change()
910 int cpu = task_cpu(task); in psi_task_change()
931 int cpu = task_cpu(prev); in psi_task_switch()
1007 int cpu = task_cpu(task); in psi_account_irqtime()
idle.c:391 return task_cpu(p); /* IDLE tasks as never migrated */ in select_task_rq_idle()
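
stop_task.c:15 and idle.c:391 above are the same placement rule seen twice: sched classes whose tasks are inherently per-CPU answer select_task_rq with the CPU the task is already on, so they never migrate. A sketch of that shared shape (hypothetical name; the (p, cpu, flags) signature is assumed from the 6.6 sched class API):

/* Hypothetical merge of select_task_rq_stop()/select_task_rq_idle():
 * per-CPU tasks never migrate, so placement is the current CPU. */
static int select_task_rq_pinned(struct task_struct *p, int cpu, int flags)
{
        return task_cpu(p);
}
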
cpudeadline.c:137 (cpu == task_cpu(p) && cap == max_cap)) { in cpudl_find()
cpuacct.c:336 unsigned int cpu = task_cpu(tsk); in cpuacct_charge()
fair.c:2421 .src_cpu = task_cpu(p), in task_numa_migrate()
3474 int src_nid = cpu_to_node(task_cpu(p)); in update_scan_period()
6806 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
6829 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
7573 if (p && task_cpu(p) == cpu && dst_cpu != cpu) in cpu_util()
7575 else if (p && task_cpu(p) != cpu && dst_cpu == cpu) in cpu_util()
7646 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
10035 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
12703 set_task_rq(p, task_cpu(p)); in task_change_group_fair()
/linux-6.6.21/kernel/trace/
trace_sched_wakeup.c:395 entry->next_cpu = task_cpu(next); in tracing_sched_switch_trace()
423 entry->next_cpu = task_cpu(wakee); in tracing_sched_wakeup_trace()
569 wakeup_cpu = task_cpu(p); in probe_wakeup()
/linux-6.6.21/include/linux/
kdb.h:193 unsigned int cpu = task_cpu(p); in kdb_process_cpu()
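
kdb.h:193 feeds task_cpu() to the debugger's process listing. A sketch of the surrounding kdb_process_cpu() shape; the clamp-to-0 bounds check is an assumption about the rest of the function, not quoted from it:

/* Sketch of kdb_process_cpu() (hypothetical name suffix): fall back
 * to CPU 0 if the recorded CPU is implausible (assumed bounds check). */
static inline unsigned int kdb_process_cpu_sketch(const struct task_struct *p)
{
        unsigned int cpu = task_cpu(p);

        if (cpu > num_possible_cpus())
                cpu = 0;
        return cpu;
}
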
sched.h:2263 static inline unsigned int task_cpu(const struct task_struct *p) in task_cpu() function
2272 static inline unsigned int task_cpu(const struct task_struct *p) in task_cpu() function
2316 return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner)); in owner_on_cpu()
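
sched.h:2263 and sched.h:2272 are the two definitions of task_cpu() itself, one per CONFIG_SMP setting, and sched.h:2316 shows its classic consumer: owner_on_cpu() only spins on a lock owner that is both on a CPU and not preempted as a vCPU. A minimal sketch of the two definitions, assuming the usual thread_info layout:

#ifdef CONFIG_SMP
static inline unsigned int task_cpu(const struct task_struct *p)
{
        return READ_ONCE(task_thread_info(p)->cpu);
}
#else
static inline unsigned int task_cpu(const struct task_struct *p)
{
        return 0;       /* UP build: only CPU 0 exists */
}
#endif
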
/linux-6.6.21/include/linux/sched/
topology.h:280 return cpu_to_node(task_cpu(p)); in task_node()
/linux-6.6.21/Documentation/scheduler/
sched-capacity.rst:341 task_util(p) < capacity(task_cpu(p))
404 then it might become CPU-bound, IOW ``task_util(p) > capacity(task_cpu(p))``;
423 task_uclamp_min(p) <= capacity(task_cpu(p))
437 task_bandwidth(p) < capacity(task_cpu(p))
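
The inequalities above define when a task fits the CPU it sits on: utilization, uclamp minimum, and bandwidth must all stay under capacity(task_cpu(p)). In the scheduler the utilization comparison carries headroom; a sketch assuming the 1280/1024 margin of the fits_capacity() macro in kernel/sched/sched.h:

/* Sketch of the fitness test behind task_util(p) < capacity(task_cpu(p)):
 * util must stay under ~80% of capacity (margin assumed from
 * fits_capacity() in kernel/sched/sched.h). */
static inline bool util_fits(unsigned long util, unsigned long capacity)
{
        return util * 1280 < capacity * 1024;
}
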
/linux-6.6.21/include/trace/events/
sched.h:158 __entry->target_cpu = task_cpu(p);
292 __entry->orig_cpu = task_cpu(p);
/linux-6.6.21/kernel/
stop_machine.c:58 struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task)); in print_stop_info()
/linux-6.6.21/arch/parisc/kernel/
traps.c:151 level, task_cpu(current), cr30, cr31); in show_regs()
/linux-6.6.21/arch/mips/kernel/
process.c:850 cpumask_set_cpu(task_cpu(t), &process_cpus); in mips_set_process_fp_mode()
/linux-6.6.21/fs/proc/
array.c:635 seq_put_decimal_ll(m, " ", task_cpu(task)); in do_task_stat()
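
array.c:635 is where task_cpu() reaches userspace: do_task_stat() emits it as the "processor" field, field 39 of /proc/[pid]/stat per proc(5). A minimal userspace sketch reading it back (naive whitespace tokenizing, so it assumes a comm with no embedded spaces):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/self/stat", "r");
        int cpu = -1;

        if (!f)
                return 1;
        for (int i = 0; i < 38; i++)    /* skip fields 1-38 */
                if (fscanf(f, "%*s") == EOF)
                        break;
        if (fscanf(f, "%d", &cpu) == 1)
                printf("last ran on CPU %d\n", cpu);
        fclose(f);
        return 0;
}
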
/linux-6.6.21/arch/powerpc/kernel/
process.c:2147 unsigned long cpu = task_cpu(p); in valid_irq_stack()
2168 unsigned long cpu = task_cpu(p); in valid_emergency_stack()
