/linux-5.19.10/drivers/gpu/drm/scheduler/ |
D | sched_main.c |
     73  static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,   in drm_sched_rq_init() argument
     79  rq->sched = sched;   in drm_sched_rq_init()
     96  atomic_inc(rq->sched->score);   in drm_sched_rq_add_entity()
    115  atomic_dec(rq->sched->score);   in drm_sched_rq_remove_entity()
    175  struct drm_gpu_scheduler *sched = s_fence->sched;   in drm_sched_job_done() local
    177  atomic_dec(&sched->hw_rq_count);   in drm_sched_job_done()
    178  atomic_dec(sched->score);   in drm_sched_job_done()
    185  wake_up_interruptible(&sched->wake_up_worker);   in drm_sched_job_done()
    211  struct drm_gpu_scheduler *sched = entity->rq->sched;   in drm_sched_dependency_optimized() local
    219  if (s_fence && s_fence->sched == sched)   in drm_sched_dependency_optimized()
    [all …]
|
D | sched_entity.c |
    156  struct drm_gpu_scheduler *sched;   in drm_sched_entity_flush() local
    163  sched = entity->rq->sched;   in drm_sched_entity_flush()
    171  sched->job_scheduled,   in drm_sched_entity_flush()
    175  wait_event_killable(sched->job_scheduled,   in drm_sched_entity_flush()
    199  job->sched->ops->free_job(job);   in drm_sched_entity_kill_jobs_work()
    221  if (job->sched->ops->dependency)   in drm_sched_job_dependency()
    222  return job->sched->ops->dependency(job, entity);   in drm_sched_job_dependency()
    276  struct drm_gpu_scheduler *sched = NULL;   in drm_sched_entity_fini() local
    279  sched = entity->rq->sched;   in drm_sched_entity_fini()
    287  if (sched) {   in drm_sched_entity_fini()
    [all …]
|
/linux-5.19.10/tools/perf/ |
D | builtin-sched.c |
    146  int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
    149  int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
    152  int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
    156  int (*fork_event)(struct perf_sched *sched, union perf_event *event,
    159  int (*migrate_task_event)(struct perf_sched *sched,
    304  static void burn_nsecs(struct perf_sched *sched, u64 nsecs)   in burn_nsecs() argument
    310  } while (T1 + sched->run_measurement_overhead < T0 + nsecs);   in burn_nsecs()
    323  static void calibrate_run_measurement_overhead(struct perf_sched *sched)   in calibrate_run_measurement_overhead() argument
    330  burn_nsecs(sched, 0);   in calibrate_run_measurement_overhead()
    335  sched->run_measurement_overhead = min_delta;   in calibrate_run_measurement_overhead()
    [all …]
|
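The builtin-sched.c matches come from 'perf sched replay', which busy-burns the recorded per-task run times and first calibrates the cost of the time measurement itself so the burn loop can compensate for it. Below is a minimal userspace sketch of that calibrate-then-burn idea; the get_nsecs() helper, the loop count and the use of CLOCK_MONOTONIC are illustrative choices, not perf's exact code.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t run_measurement_overhead;

/* Illustrative stand-in for perf's time source. */
static uint64_t get_nsecs(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Spin until roughly 'nsecs' have passed, compensating for the
 * measured cost of the measurement itself. */
static void burn_nsecs(uint64_t nsecs)
{
    uint64_t t0 = get_nsecs(), t1;

    do {
        t1 = get_nsecs();
    } while (t1 + run_measurement_overhead < t0 + nsecs);
}

/* Time a zero-length burn a few times and keep the smallest delta. */
static void calibrate_run_measurement_overhead(void)
{
    uint64_t min_delta = UINT64_MAX;
    int i;

    for (i = 0; i < 10; i++) {
        uint64_t t0 = get_nsecs(), delta;

        burn_nsecs(0);
        delta = get_nsecs() - t0;
        if (delta < min_delta)
            min_delta = delta;
    }
    run_measurement_overhead = min_delta;
    printf("run measurement overhead: %llu ns\n",
           (unsigned long long)min_delta);
}

int main(void)
{
    calibrate_run_measurement_overhead();
    burn_nsecs(1000000);    /* simulate a 1 ms run time */
    return 0;
}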
/linux-5.19.10/tools/testing/selftests/ftrace/test.d/trigger/ |
D | trigger-filter.tc |
    14  echo 'traceoff if child_pid == 0' > events/sched/sched_process_fork/trigger
    23  ! echo 'traceoff if a' > events/sched/sched_process_fork/trigger
    24  ! echo 'traceoff if common_pid=0' > events/sched/sched_process_fork/trigger
    25  ! echo 'traceoff if common_pid==b' > events/sched/sched_process_fork/trigger
    26  echo 'traceoff if common_pid == 0' > events/sched/sched_process_fork/trigger
    27  echo '!traceoff' > events/sched/sched_process_fork/trigger
    28  ! echo 'traceoff if common_pid == child_pid' > events/sched/sched_process_fork/trigger
    29  echo 'traceoff if common_pid <= 0' > events/sched/sched_process_fork/trigger
    30  echo '!traceoff' > events/sched/sched_process_fork/trigger
    31  echo 'traceoff if common_pid >= 0' > events/sched/sched_process_fork/trigger
    [all …]
|
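trigger-filter.tc exercises ftrace's conditional event triggers: writing 'traceoff if <expr>' to an event's trigger file arms the action, a malformed expression is rejected at write time (the '!' prefix on the echo marks commands the test expects to fail), and writing '!traceoff' removes the trigger again. The same write-a-command-to-the-trigger-file mechanism underlies the enable_event, hist, snapshot, stacktrace and traceon/traceoff tests in the entries that follow. A minimal C sketch of the pattern, assuming tracefs is mounted at /sys/kernel/tracing (older systems use /sys/kernel/debug/tracing) and sufficient privileges:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Path assumes tracefs is mounted at /sys/kernel/tracing. */
#define TRIGGER "/sys/kernel/tracing/events/sched/sched_process_fork/trigger"

static int write_trigger(const char *cmd)
{
    int fd = open(TRIGGER, O_WRONLY);
    int err = 0;

    if (fd < 0) {
        perror("open trigger");
        return -1;
    }
    if (write(fd, cmd, strlen(cmd)) < 0) {
        perror(cmd);    /* e.g. a bad filter expression is rejected here */
        err = -1;
    }
    close(fd);
    return err;
}

int main(void)
{
    /* Stop tracing when a sched_process_fork event with common_pid == 0 fires. */
    if (write_trigger("traceoff if common_pid == 0"))
        return 1;

    /* Remove the trigger again, exactly as the selftest does. */
    return write_trigger("!traceoff") ? 1 : 0;
}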
D | trigger-eventonoff.tc |
    12  FEATURE=`grep enable_event events/sched/sched_process_fork/trigger`
    19  echo 0 > events/sched/sched_switch/enable
    20  echo 'enable_event:sched:sched_switch' > events/sched/sched_process_fork/trigger
    22  if [ `cat events/sched/sched_switch/enable` != '1*' ]; then
    29  echo 1 > events/sched/sched_switch/enable
    30  echo 'disable_event:sched:sched_switch' > events/sched/sched_process_fork/trigger
    32  if [ `cat events/sched/sched_switch/enable` != '0*' ]; then
    39  ! echo 'enable_event:nogroup:noevent' > events/sched/sched_process_fork/trigger
    40  ! echo 'disable_event+1' > events/sched/sched_process_fork/trigger
    41  echo 'enable_event:sched:sched_switch' > events/sched/sched_process_fork/trigger
    [all …]
|
D | trigger-multihist.tc |
    14  echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger
    15  echo 'hist:keys=parent_comm:vals=child_pid' >> events/sched/sched_process_fork/trigger
    17  grep parent_pid events/sched/sched_process_fork/hist > /dev/null || \
    19  grep child events/sched/sched_process_fork/hist > /dev/null || \
    22  grep "parent_comm: $COMM" events/sched/sched_process_fork/hist > /dev/null || \
    29  echo 'hist:name=test_hist:keys=common_pid' > events/sched/sched_process_fork/trigger
    31  grep test_hist events/sched/sched_process_fork/hist > /dev/null || \
    36  echo 'hist:name=test_hist:keys=common_pid' > events/sched/sched_process_exit/trigger
    38  grep test_hist events/sched/sched_process_exit/hist > /dev/null || \
    41  diffs=`diff events/sched/sched_process_exit/hist events/sched/sched_process_fork/hist | wc -l`
|
D | trigger-traceonoff.tc |
    13  echo 'traceoff' > events/sched/sched_process_fork/trigger
    23  echo 'traceon' > events/sched/sched_process_fork/trigger
    32  ! echo 'traceoff:badparam' > events/sched/sched_process_fork/trigger
    33  ! echo 'traceoff+0' > events/sched/sched_process_fork/trigger
    34  echo 'traceon' > events/sched/sched_process_fork/trigger
    35  ! echo 'traceon' > events/sched/sched_process_fork/trigger
    36  ! echo 'traceoff' > events/sched/sched_process_fork/trigger
|
D | trigger-snapshot.tc |
    11  FEATURE=`grep snapshot events/sched/sched_process_fork/trigger`
    19  echo 1 > events/sched/sched_process_fork/enable
    21  echo 'snapshot:1' > events/sched/sched_process_fork/trigger
    28  echo 0 > events/sched/sched_process_fork/enable
    32  ! echo "snapshot+1" > events/sched/sched_process_fork/trigger
    33  echo "snapshot" > events/sched/sched_process_fork/trigger
    34  ! echo "snapshot" > events/sched/sched_process_fork/trigger
|
D | trigger-hist.tc |
    14  echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger
    16  grep parent_pid events/sched/sched_process_fork/hist > /dev/null || \
    18  grep child events/sched/sched_process_fork/hist > /dev/null || \
    25  echo 'hist:keys=parent_pid,child_pid' > events/sched/sched_process_fork/trigger
    27  grep '^{ parent_pid:.*, child_pid:.*}' events/sched/sched_process_fork/hist > /dev/null || \
    34  echo 'hist:keys=parent_comm' > events/sched/sched_process_fork/trigger
    37  grep "parent_comm: $COMM" events/sched/sched_process_fork/hist > /dev/null || \
    62  echo 'hist:keys=parent_pid,child_pid:sort=child_pid.ascending' > events/sched/sched_process_fork/tr…
    73  events/sched/sched_process_fork/hist | cut -d: -f2 ` ||
|
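trigger-hist.tc drives the histogram triggers: a 'hist:keys=...' command attached to sched_process_fork aggregates events by the chosen fields, and the event's hist file then reports entries of the form '{ parent_pid: ..., child_pid: ... } hitcount: ...'. A small C sketch of setting a compound-key histogram, generating one fork event, and reading the result back, under the same tracefs-mount assumption as above and with error handling kept minimal:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

#define EVT "/sys/kernel/tracing/events/sched/sched_process_fork/"

static void echo_to(const char *path, const char *s)
{
    int fd = open(path, O_WRONLY);

    if (fd < 0 || write(fd, s, strlen(s)) < 0)
        perror(path);
    if (fd >= 0)
        close(fd);
}

int main(void)
{
    char buf[4096];
    ssize_t n;
    int fd;

    /* Aggregate fork events by (parent_pid, child_pid), as the test does. */
    echo_to(EVT "trigger", "hist:keys=parent_pid,child_pid");

    if (fork() == 0)        /* produce one sched_process_fork event */
        _exit(0);
    wait(NULL);

    /* Dump the histogram; entries look like "{ parent_pid: ..., child_pid: ... }". */
    fd = open(EVT "hist", O_RDONLY);
    while (fd >= 0 && (n = read(fd, buf, sizeof(buf))) > 0)
        fwrite(buf, 1, n, stdout);
    if (fd >= 0)
        close(fd);

    /* Prefixing the same command with '!' removes the trigger again. */
    echo_to(EVT "trigger", "!hist:keys=parent_pid,child_pid");
    return 0;
}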
D | trigger-stacktrace.tc |
    11  FEATURE=`grep stacktrace events/sched/sched_process_fork/trigger`
    20  echo 'stacktrace' > events/sched/sched_process_fork/trigger
    29  ! echo "stacktrace:foo" > events/sched/sched_process_fork/trigger
    30  echo "stacktrace" > events/sched/sched_process_fork/trigger
    31  ! echo "stacktrace" > events/sched/sched_process_fork/trigger
|
/linux-5.19.10/drivers/slimbus/ |
D | sched.c |
    29  struct slim_sched *sched = &ctrl->sched;   in slim_ctrl_clk_pause() local
    38  mutex_lock(&sched->m_reconf);   in slim_ctrl_clk_pause()
    40  if (sched->clk_state == SLIM_CLK_ACTIVE) {   in slim_ctrl_clk_pause()
    41  mutex_unlock(&sched->m_reconf);   in slim_ctrl_clk_pause()
    49  ret = wait_for_completion_timeout(&sched->pause_comp,   in slim_ctrl_clk_pause()
    52  mutex_unlock(&sched->m_reconf);   in slim_ctrl_clk_pause()
    63  if (sched->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup)   in slim_ctrl_clk_pause()
    66  sched->clk_state = SLIM_CLK_ACTIVE;   in slim_ctrl_clk_pause()
    67  mutex_unlock(&sched->m_reconf);   in slim_ctrl_clk_pause()
    73  if (ctrl->sched.clk_state == SLIM_CLK_PAUSED) {   in slim_ctrl_clk_pause()
    [all …]
|
/linux-5.19.10/net/netfilter/ipvs/ |
D | ip_vs_sched.c |
     61  struct ip_vs_scheduler *sched)   in ip_vs_unbind_scheduler() argument
     70  if (sched->done_service)   in ip_vs_unbind_scheduler()
     71  sched->done_service(svc);   in ip_vs_unbind_scheduler()
     81  struct ip_vs_scheduler *sched;   in ip_vs_sched_getbyname() local
     87  list_for_each_entry(sched, &ip_vs_schedulers, n_list) {   in ip_vs_sched_getbyname()
     91  if (sched->module && !try_module_get(sched->module)) {   in ip_vs_sched_getbyname()
     97  if (strcmp(sched_name, sched->name)==0) {   in ip_vs_sched_getbyname()
    100  return sched;   in ip_vs_sched_getbyname()
    102  module_put(sched->module);   in ip_vs_sched_getbyname()
    115  struct ip_vs_scheduler *sched;   in ip_vs_scheduler_get() local
    [all …]
|
/linux-5.19.10/Documentation/scheduler/ |
D | index.rst |
    10  sched-arch
    11  sched-bwc
    12  sched-deadline
    13  sched-design-CFS
    14  sched-domains
    15  sched-capacity
    16  sched-energy
    18  sched-nice-design
    19  sched-rt-group
    20  sched-stats
    [all …]
|
/linux-5.19.10/Documentation/translations/zh_CN/scheduler/ |
D | index.rst |
    22  sched-arch
    23  sched-bwc
    24  sched-design-CFS
    25  sched-domains
    26  sched-capacity
    27  sched-energy
    29  sched-nice-design
    30  sched-stats
    31  sched-debug
    35  sched-deadline
    [all …]
|
/linux-5.19.10/crypto/ |
D | fcrypt.c |
     54  __be32 sched[ROUNDS];   member
    223  #define F_ENCRYPT(R, L, sched) \   argument
    226  u.l = sched ^ R; \
    242  F_ENCRYPT(X.r, X.l, ctx->sched[0x0]);   in fcrypt_encrypt()
    243  F_ENCRYPT(X.l, X.r, ctx->sched[0x1]);   in fcrypt_encrypt()
    244  F_ENCRYPT(X.r, X.l, ctx->sched[0x2]);   in fcrypt_encrypt()
    245  F_ENCRYPT(X.l, X.r, ctx->sched[0x3]);   in fcrypt_encrypt()
    246  F_ENCRYPT(X.r, X.l, ctx->sched[0x4]);   in fcrypt_encrypt()
    247  F_ENCRYPT(X.l, X.r, ctx->sched[0x5]);   in fcrypt_encrypt()
    248  F_ENCRYPT(X.r, X.l, ctx->sched[0x6]);   in fcrypt_encrypt()
    [all …]
|
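fcrypt is the DES-like Feistel cipher kept around for RxRPC/AFS compatibility; the unrolled F_ENCRYPT() calls above alternate which 32-bit half of the block is XOR-updated from the other half combined with one word of the per-context key schedule. The loop below is only a structural illustration of that alternation, with a toy mixing function standing in for fcrypt's real S-box round and with the big-endian details of the actual code omitted:

#include <stdint.h>
#include <stdio.h>

#define ROUNDS 16    /* fcrypt uses a 16-word key schedule */

/* Toy stand-in for fcrypt's round function (byte-wise S-box lookups). */
static uint32_t F(uint32_t x)
{
    x ^= x >> 13;
    x *= 0x9e3779b1u;
    return x ^ (x >> 16);
}

/* Mirror of the alternating F_ENCRYPT(X.r, X.l, ...) / F_ENCRYPT(X.l, X.r, ...)
 * pattern: even rounds update l from r, odd rounds update r from l. */
static void feistel_encrypt(uint32_t *l, uint32_t *r,
                            const uint32_t sched[ROUNDS])
{
    for (int i = 0; i < ROUNDS; i++) {
        if (i & 1)
            *r ^= F(*l ^ sched[i]);
        else
            *l ^= F(*r ^ sched[i]);
    }
}

int main(void)
{
    uint32_t sched[ROUNDS], l = 0x01234567u, r = 0x89abcdefu;

    for (int i = 0; i < ROUNDS; i++)
        sched[i] = 0x1000u * (i + 1);    /* placeholder key schedule */

    feistel_encrypt(&l, &r, sched);
    printf("l=%08x r=%08x\n", l, r);
    return 0;
}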
/linux-5.19.10/drivers/net/wireless/ath/ath9k/ |
D | channel.c |
    261  if (likely(sc->sched.channel_switch_time))   in ath_chanctx_check_active()
    263  usecs_to_jiffies(sc->sched.channel_switch_time);   in ath_chanctx_check_active()
    311  ictx->flush_timeout = usecs_to_jiffies(sc->sched.channel_switch_time);   in ath_chanctx_check_active()
    382  mod_timer(&sc->sched.timer, jiffies + timeout);   in ath_chanctx_setup_timer()
    399  if (ctx->active && sc->sched.extend_absence) {   in ath_chanctx_handle_bmiss()
    401  sc->sched.extend_absence = false;   in ath_chanctx_handle_bmiss()
    408  if (ctx->active && sc->sched.beacon_miss >= 2) {   in ath_chanctx_handle_bmiss()
    410  sc->sched.extend_absence = true;   in ath_chanctx_handle_bmiss()
    423  avp->offchannel_duration = sc->sched.offchannel_duration;   in ath_chanctx_offchannel_noa()
    451  if (sc->sched.extend_absence)   in ath_chanctx_set_periodic_noa()
    [all …]
|
/linux-5.19.10/include/drm/ |
D | gpu_scheduler.h |
    215  struct drm_gpu_scheduler *sched;   member
    252  struct drm_gpu_scheduler *sched;   member
    289  struct drm_gpu_scheduler *sched;   member
    464  int drm_sched_init(struct drm_gpu_scheduler *sched,
    470  void drm_sched_fini(struct drm_gpu_scheduler *sched);
    487  void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
    488  void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
    489  void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
    490  void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
    491  void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max);
    [all …]
|
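gpu_scheduler.h carries the driver-facing API of the DRM GPU scheduler. One common use of the stop/resubmit/start trio is the job-timeout (GPU reset) path that drivers such as amdgpu implement in their timedout_job callback. The outline below is a schematic against the 5.19 prototypes shown above, not a drop-in handler: reset_hw() is a placeholder for the driver's engine reset, and real drivers add locking, dependency handling and error paths around these calls.

#include <drm/gpu_scheduler.h>

static void reset_hw(struct drm_gpu_scheduler *sched);    /* driver-specific placeholder */

/* Schematic drm_sched_backend_ops.timedout_job handler. */
static enum drm_gpu_sched_stat example_timedout_job(struct drm_sched_job *bad)
{
    struct drm_gpu_scheduler *sched = bad->sched;

    /* Park the scheduler thread and pull pending jobs off the hardware. */
    drm_sched_stop(sched, bad);

    /* Driver-specific hardware/engine reset goes here. */
    reset_hw(sched);

    /* Re-queue the jobs that had been submitted but not completed ... */
    drm_sched_resubmit_jobs(sched);

    /* ... and let the scheduler run again, restarting the timeout. */
    drm_sched_start(sched, true);

    return DRM_GPU_SCHED_STAT_NOMINAL;
}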
/linux-5.19.10/tools/perf/Documentation/ |
D | perf-sched.txt |
     1  perf-sched(1)
     6  perf-sched - Tool to trace/measure scheduler properties (latencies)
    11  'perf sched' {record|latency|map|replay|script|timehist}
    15  There are several variants of 'perf sched':
    17  'perf sched record <command>' to record the scheduling events
    20  'perf sched latency' to report the per task scheduling latencies
    23  'perf sched script' to see a detailed trace of the workload that
    26  'perf sched replay' to simulate the workload that was recorded
    27  via perf sched record. (this is done by starting up mockup threads
    33  'perf sched map' to print a textual context-switching outline of
    [all …]
|
D | perf-daemon.txt |
     33  …916507 916509 ... \_ perf record --control=fifo:control,ack -m 20M -e sched:* --overwrite --swit…
    123  [session-sched]
    124  run = -m 20M -e sched:* --overwrite --switch-output -a
    137  [603351:sched] perf record -m 20M -e sched:* --overwrite --switch-output -a
    155  [603351:sched] perf record -m 20M -e sched:* --overwrite --switch-output -a
    156  base: /opt/perfdata/session-sched
    157  output: /opt/perfdata/session-sched/output
    158  control: /opt/perfdata/session-sched/control
    159  ack: /opt/perfdata/session-sched/ack
    174  OK sched
    [all …]
|
/linux-5.19.10/net/sctp/ |
D | stream_sched.c |
    116  void sctp_sched_ops_register(enum sctp_sched_type sched,   in sctp_sched_ops_register() argument
    119  sctp_sched_ops[sched] = sched_ops;   in sctp_sched_ops_register()
    130  enum sctp_sched_type sched)   in sctp_sched_set_sched() argument
    132  struct sctp_sched_ops *n = sctp_sched_ops[sched];   in sctp_sched_set_sched()
    133  struct sctp_sched_ops *old = asoc->outqueue.sched;   in sctp_sched_set_sched()
    141  if (sched > SCTP_SS_MAX)   in sctp_sched_set_sched()
    157  asoc->outqueue.sched = n;   in sctp_sched_set_sched()
    180  asoc->outqueue.sched = &sctp_sched_fcfs; /* Always safe */   in sctp_sched_set_sched()
    190  if (asoc->outqueue.sched == sctp_sched_ops[i])   in sctp_sched_get_sched()
    210  return asoc->outqueue.sched->set(&asoc->stream, sid, value, gfp);   in sctp_sched_set_value()
    [all …]
|
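sctp_sched_set_sched() is the kernel side of choosing an RFC 8260 stream scheduler (first-come-first-served, priority, or round-robin) for an association's outqueue. From userspace the selection is made with the SCTP_STREAM_SCHEDULER socket option, with per-stream weights set via SCTP_STREAM_SCHEDULER_VALUE. The sketch below assumes the option and enum names exported by the uapi <linux/sctp.h> (SCTP_SS_FCFS, SCTP_SS_PRIO, SCTP_SS_RR); depending on your distribution you may need the lksctp-tools <netinet/sctp.h> instead, so treat the header choice as an assumption.

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/sctp.h>        /* SCTP_STREAM_SCHEDULER, SCTP_SS_PRIO, ... */

int main(void)
{
    struct sctp_assoc_value av;
    int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

    if (fd < 0) {
        perror("socket");
        return 1;
    }

    /* Ask for the priority scheduler; SCTP_SS_FCFS is the default and
     * SCTP_SS_RR (round-robin) is the third option.  assoc_id 0 applies
     * to this one-to-one style socket's (future) association. */
    memset(&av, 0, sizeof(av));
    av.assoc_id = 0;
    av.assoc_value = SCTP_SS_PRIO;

    if (setsockopt(fd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER,
                   &av, sizeof(av)) < 0)
        perror("setsockopt(SCTP_STREAM_SCHEDULER)");

    return 0;
}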
/linux-5.19.10/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_job.c |
     35  struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);   in amdgpu_job_timedout()
     44  __func__, s_job->sched->name);   in amdgpu_job_timedout()
     55  s_job->sched->name);   in amdgpu_job_timedout()
     61  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),   in amdgpu_job_timedout()
     71  drm_sched_suspend_timeout(&ring->sched);   in amdgpu_job_timedout()
     95  (*job)->base.sched = &adev->rings[0]->sched;   in amdgpu_job_alloc()
    126  struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);   in amdgpu_job_free_resources()
    197  job->base.sched = &ring->sched;   in amdgpu_job_submit_direct()
    213  struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);   in amdgpu_job_dependency()
    241  struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);   in amdgpu_job_run()
    [all …]
|
/linux-5.19.10/tools/testing/selftests/ftrace/test.d/trigger/inter-event/ |
D | trigger-field-variable-support.tc |
    14  echo 'hist:keys=comm:ts0=common_timestamp.usecs if comm=="ping"' > events/sched/sched_waking/trigger
    15  …ext_pid,sched.sched_waking.prio,next_comm) if next_comm=="ping"' > events/sched/sched_switch/trigg…
    23  if ! grep -q "synthetic_prio=prio" events/sched/sched_waking/hist; then
    27  …xt_pid,sched.sched_waking.prio,next_comm) if next_comm=="ping"' >> events/sched/sched_switch/trigg…
    29  if grep -q "synthetic_prio=prio" events/sched/sched_waking/hist; then
|
/linux-5.19.10/tools/perf/scripts/python/bin/ |
D | sched-migration-record |
    2  perf record -m 16384 -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched…
|
/linux-5.19.10/net/sched/ |
D | sch_taprio.c |
     91  static ktime_t sched_base_time(const struct sched_gate_list *sched)   in sched_base_time() argument
     93  if (!sched)   in sched_base_time()
     96  return ns_to_ktime(sched->base_time);   in sched_base_time()
    119  struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);   in taprio_free_sched_cb() local
    122  list_for_each_entry_safe(entry, n, &sched->entries, list) {   in taprio_free_sched_cb()
    127  kfree(sched);   in taprio_free_sched_cb()
    145  static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)   in get_cycle_time_elapsed() argument
    150  time_since_sched_start = ktime_sub(time, sched->base_time);   in get_cycle_time_elapsed()
    151  div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);   in get_cycle_time_elapsed()
    156  static ktime_t get_interval_end_time(struct sched_gate_list *sched,   in get_interval_end_time() argument
    [all …]
|
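The taprio (IEEE 802.1Qbv time-aware shaping) qdisc keeps a gate list with a base_time and a cycle_time; get_cycle_time_elapsed() above reduces the current time modulo cycle_time, relative to base_time, to locate the position inside the running cycle. The userspace fragment below only illustrates that arithmetic: ktime values are modelled as signed 64-bit nanoseconds and the entry layout is a simplification, not the kernel's sched_gate_list.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct gate_entry {
    uint32_t gate_mask;      /* which traffic classes are open */
    int64_t  interval_ns;    /* how long this entry lasts */
};

/* Offset of 'now' within the currently running cycle, as in
 * get_cycle_time_elapsed(): (now - base_time) mod cycle_time. */
static int64_t cycle_time_elapsed(int64_t base_time, int64_t cycle_time,
                                  int64_t now)
{
    return (now - base_time) % cycle_time;
}

/* Walk the gate list to find the entry active at 'now'. */
static const struct gate_entry *
active_entry(const struct gate_entry *entries, int n,
             int64_t base_time, int64_t cycle_time, int64_t now)
{
    int64_t off = cycle_time_elapsed(base_time, cycle_time, now);
    int i;

    for (i = 0; i < n; i++) {
        if (off < entries[i].interval_ns)
            return &entries[i];
        off -= entries[i].interval_ns;
    }
    return NULL;    /* reached only if cycle_time exceeds the summed intervals */
}

int main(void)
{
    const struct gate_entry sched[] = {
        { 0x1, 300000 },   /* TC0 open for 300 us */
        { 0x2, 300000 },   /* TC1 open for 300 us */
        { 0x4, 400000 },   /* TC2 open for 400 us */
    };
    int64_t base = 0, cycle = 1000000, now = 1750000;
    const struct gate_entry *e = active_entry(sched, 3, base, cycle, now);

    if (e)
        printf("at t=%" PRId64 " ns, open gates mask=0x%x\n", now, e->gate_mask);
    return 0;
}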
/linux-5.19.10/arch/x86/events/ |
D | core.c |
    807  static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,   in perf_sched_init() argument
    812  memset(sched, 0, sizeof(*sched));   in perf_sched_init()
    813  sched->max_events = num;   in perf_sched_init()
    814  sched->max_weight = wmax;   in perf_sched_init()
    815  sched->max_gp = gpmax;   in perf_sched_init()
    816  sched->constraints = constraints;   in perf_sched_init()
    823  sched->state.event = idx; /* start with min weight */   in perf_sched_init()
    824  sched->state.weight = wmin;   in perf_sched_init()
    825  sched->state.unassigned = num;   in perf_sched_init()
    828  static void perf_sched_save_state(struct perf_sched *sched)   in perf_sched_save_state() argument
    [all …]
|
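This perf_sched state machine in the x86 PMU code assigns events to hardware counters: events are visited in order of increasing constraint weight (how many counters an event is allowed on), so the most constrained events claim counters first, and perf_sched_save_state() lets the walk backtrack when a later event cannot be placed. The program below shows only the weight-ordered greedy core of that idea over plain bitmasks; the backtracking and the real event_constraint handling are intentionally left out, so do not read it as the kernel's exact algorithm.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_COUNTERS 8

/* Popcount of an event's allowed-counter mask is its "weight". */
static int weight(uint32_t mask)
{
    return __builtin_popcount(mask);
}

/* Greedy weight-ordered assignment: place events whose constraint allows
 * the fewest counters first, taking the lowest free counter each time.
 * Returns true if every event found a home. */
static bool assign_counters(const uint32_t *constraint, int nevents, int *assign)
{
    uint32_t used = 0;
    int w, i, c;

    for (w = 1; w <= MAX_COUNTERS; w++) {          /* start with min weight */
        for (i = 0; i < nevents; i++) {
            if (weight(constraint[i]) != w)
                continue;
            for (c = 0; c < MAX_COUNTERS; c++) {
                if ((constraint[i] & (1u << c)) && !(used & (1u << c)))
                    break;
            }
            if (c == MAX_COUNTERS)
                return false;                      /* the kernel would backtrack here */
            used |= 1u << c;
            assign[i] = c;
        }
    }
    return true;
}

int main(void)
{
    /* event 0 may only use counter 2; event 1 may use 0-3; event 2 only 0 or 2 */
    const uint32_t constraint[] = { 0x4, 0xf, 0x5 };
    int assign[3], i;

    if (assign_counters(constraint, 3, assign))
        for (i = 0; i < 3; i++)
            printf("event %d -> counter %d\n", i, assign[i]);
    return 0;
}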