use core::intrinsics::unlikely;

use alloc::sync::Arc;

use crate::process::ProcessControlBlock;

use super::{
    fair::{CfsRunQueue, FairSchedEntity},
    CpuRunQueue, LoadWeight, SchedPolicy, SCHED_CAPACITY_SCALE, SCHED_CAPACITY_SHIFT,
};

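/// Precomputed decay factors, identical to the table used by Linux's PELT
/// code: `RUNNABLE_AVG_Y_N_INV[n]` is approximately `y^n * 2^32`, where `y`
/// is chosen so that `y^32 == 0.5`. Multiplying a value by an entry and
/// shifting right by 32 bits therefore decays it by `n` periods
/// (see `decay_load`).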
const RUNNABLE_AVG_Y_N_INV: [u32; 32] = [
    0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6, 0xe0ccdeeb, 0xdbfbb796,
    0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85, 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46,
    0xb504f333, 0xb123f581, 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
    0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80, 0x85aac367, 0x82cd8698,
];

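// One PELT period is 1024us (~1ms), and y^LOAD_AVG_PERIOD == 0.5: a task's
// contribution halves every 32 periods. LOAD_AVG_MAX is the value the series
// 1024 * (y^0 + y^1 + ...) converges to under the fixed-point arithmetic used
// here, i.e. the largest value any of the `*_sum` fields can reach.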
pub const LOAD_AVG_PERIOD: u64 = 32;
pub const LOAD_AVG_MAX: usize = 47742;
pub const PELT_MIN_DIVIDER: usize = LOAD_AVG_MAX - 1024;

#[derive(Debug, Default)]
pub struct SchedulerAvg {
    /// Time at which these averages were last updated
    pub last_update_time: u64,
    /// Sum of the load of all runnable tasks
    pub load_sum: u64,
    /// Sum of the runnable time of all runnable tasks
    pub runnable_sum: u64,
    /// Sum of the running time of all running tasks
    pub util_sum: u64,
    /// Contribution accumulated in the current, still incomplete 1024us
    /// period; used when computing the averages
    pub period_contrib: u32,

    pub load_avg: usize,
    pub runnable_avg: usize,
    pub util_avg: usize,
}

impl SchedulerAvg {
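    /// Divider used to turn the `*_sum` fields into `*_avg` values.
    ///
    /// A fully loaded history converges to `LOAD_AVG_MAX - 1024 +
    /// period_contrib` (a complete series minus the still incomplete current
    /// period), so dividing by this keeps the averages roughly within
    /// `[0, SCHED_CAPACITY_SCALE]` (or `[0, weight]` for the load average).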
    #[inline]
    pub fn get_pelt_divider(&self) -> usize {
        return PELT_MIN_DIVIDER + self.period_contrib as usize;
    }

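    /// Accumulate the time elapsed since `last_update_time` into the sums.
    ///
    /// `load`, `runnable` and `running` describe the entity's state over that
    /// window (zero means it does not contribute to the respective sum).
    /// Returns `true` when at least one full 1024us period was crossed, i.e.
    /// when the averages are worth recomputing.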
    pub fn update_load_sum(
        &mut self,
        now: u64,
        load: u32,
        mut runnable: u32,
        mut running: u32,
    ) -> bool {
        if now < self.last_update_time {
            // The clock went backwards; resynchronize and report no progress.
            self.last_update_time = now;
            return false;
        }

        let mut delta = now - self.last_update_time;
        // Approximate ns -> us by shifting instead of dividing by 1000.
        delta >>= 10;

        if delta == 0 {
            return false;
        }

        self.last_update_time += delta << 10;

        if load == 0 {
            runnable = 0;
            running = 0;
        }

        self.accumulate_sum(delta, load, runnable, running) != 0
    }

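    /// Step the sums forward by `delta` microseconds (see `update_load_sum`).
    ///
    /// Any complete 1024us periods that elapsed first decay the existing
    /// sums; the elapsed time is then added back in, weighted by `load`,
    /// `runnable` and `running`. Returns the number of complete periods that
    /// were crossed.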
    pub fn accumulate_sum(
        &mut self,
        mut delta: u64,
        load: u32,
        runnable: u32,
        running: u32,
    ) -> u64 {
        let mut contrib = delta as u32;

        delta += self.period_contrib as u64;

        let periods = delta / 1024;

        if periods > 0 {
            self.load_sum = Self::decay_load(self.load_sum, periods);
            self.runnable_sum = Self::decay_load(self.runnable_sum, periods);
            self.util_sum = Self::decay_load(self.util_sum, periods);

            delta %= 1024;
            if load > 0 {
                contrib = Self::accumulate_pelt_segments(
                    periods,
                    1024 - self.period_contrib,
                    delta as u32,
                );
            }
        }

        self.period_contrib = delta as u32;

        if load > 0 {
            self.load_sum += contrib as u64 * load as u64;
        }
        if runnable > 0 {
            // Multiply before shifting, in u64, so the scaled contribution
            // cannot overflow.
            self.runnable_sum += (runnable as u64 * contrib as u64) << SCHED_CAPACITY_SHIFT;
        }

        if running > 0 {
            self.util_sum += (contrib << SCHED_CAPACITY_SHIFT) as u64;
        }

        return periods;
    }

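    /// Decay `val` by `n` periods, i.e. compute `val * y^n` with
    /// `y^LOAD_AVG_PERIOD == 0.5`.
    ///
    /// Whole multiples of 32 periods are handled with a cheap right shift
    /// (each one halves the value); the remainder is a 32.32 fixed-point
    /// multiplication against `RUNNABLE_AVG_Y_N_INV`.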
    fn decay_load(mut val: u64, n: u64) -> u64 {
        // After 63 half-life windows (63 * 32 periods) everything has decayed
        // to zero anyway, so short-circuit, as Linux's decay_load() does.
        if unlikely(n > LOAD_AVG_PERIOD * 63) {
            return 0;
        }

        let mut local_n = n;

        if unlikely(local_n >= LOAD_AVG_PERIOD) {
            val >>= local_n / LOAD_AVG_PERIOD;
            local_n %= LOAD_AVG_PERIOD;
        }

        ((val as i128 * RUNNABLE_AVG_Y_N_INV[local_n as usize] as i128) >> 32) as u64
    }

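    /// Sum up the three segments of an update window that crosses `periods`
    /// period boundaries (the picture mirrors the one in Linux's
    /// accumulate_sum()):
    ///
    /// ```text
    ///       d1          d2           d3
    ///       ^           ^            ^
    ///       |           |            |
    ///     |<->|<----------------->|<--->|
    /// ... |---x---|------| ... |------|-----x (now)
    /// ```
    ///
    /// `d1` finishes the previously incomplete period (decayed by `y^p`),
    /// `d2` covers the `p - 1` complete periods in between, and `d3` is the
    /// newly started, still incomplete period (not decayed at all).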
    fn accumulate_pelt_segments(periods: u64, d1: u32, d3: u32) -> u32 {
        /* y^0 == 1 */
        let c3 = d3;

        /*
         * c1 = d1 y^p
         */
        let c1 = Self::decay_load(d1 as u64, periods) as u32;

        /*
         *            p-1
         * c2 = 1024 \Sum y^n
         *            n=1
         *
         *              inf        inf
         *       = 1024 ( \Sum y^n - \Sum y^n - y^0 )
         *              n=0        n=p
         */
        let c2 = LOAD_AVG_MAX as u32 - Self::decay_load(LOAD_AVG_MAX as u64, periods) as u32 - 1024;

        return c1 + c2 + c3;
    }

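    /// Recompute `load_avg`, `runnable_avg` and `util_avg` from the running
    /// sums using the current PELT divider. `load` is the (scaled-down)
    /// weight the load sum is expressed in.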
    pub fn update_load_avg(&mut self, load: u64) {
        let divider = self.get_pelt_divider();

        self.load_avg = (load * self.load_sum) as usize / divider;
        self.runnable_avg = self.runnable_sum as usize / divider;
        self.util_avg = self.util_sum as usize / divider;
    }

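    /// Give a newly created task an initial utilization instead of starting
    /// from zero: it inherits a share of its cfs_rq's `util_avg` proportional
    /// to its weight, capped at half of the CPU capacity the cfs_rq leaves
    /// unused.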
    #[allow(dead_code)]
    pub fn post_init_entity_util_avg(pcb: &Arc<ProcessControlBlock>) {
        let se = pcb.sched_info().sched_entity();
        let cfs_rq = se.cfs_rq();
        let sa = &mut se.force_mut().avg;

        // TODO: this is architecture dependent
        let cpu_scale = SCHED_CAPACITY_SCALE;

        let cap = (cpu_scale as isize - cfs_rq.avg.util_avg as isize) / 2;

        if pcb.sched_info().policy() != SchedPolicy::CFS {
            sa.last_update_time = cfs_rq.cfs_rq_clock_pelt();
        }

        if cap > 0 {
            if cfs_rq.avg.util_avg != 0 {
                sa.util_avg = cfs_rq.avg.util_avg * se.load.weight as usize;
                sa.util_avg /= cfs_rq.avg.load_avg + 1;

                if sa.util_avg as isize > cap {
                    sa.util_avg = cap as usize;
                }
            } else {
                sa.util_avg = cap as usize;
            }
        }

        sa.runnable_avg = sa.util_avg;
    }
}

impl CpuRunQueue {
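    /// The run queue clock as PELT sees it: `clock_pelt` corrected by the
    /// idle time recorded as lost (mirrors Linux's rq_clock_pelt()).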
    pub fn rq_clock_pelt(&self) -> u64 {
        self.clock_pelt - self.lost_idle_time
    }
}

impl CfsRunQueue {
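    /// The PELT clock as seen by this cfs_rq: time spent throttled is
    /// subtracted, so entities do not keep decaying while the queue cannot
    /// run.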
    pub fn cfs_rq_clock_pelt(&self) -> u64 {
        if unlikely(self.throttled_count > 0) {
            return self.throttled_clock_pelt - self.throttled_clock_pelt_time;
        }

        let rq = self.rq();
        let (rq, _guard) = rq.self_lock();

        return rq.rq_clock_pelt() - self.throttled_clock_pelt_time;
    }
}

impl FairSchedEntity {
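    /// Advance this entity's PELT sums to `now` and, if at least one full
    /// period elapsed, recompute its averages against its (scaled-down)
    /// weight. Returns `true` when the averages were updated.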
    pub fn update_load_avg(&mut self, cfs_rq: &mut CfsRunQueue, now: u64) -> bool {
        if self.avg.update_load_sum(
            now,
            self.on_rq as u32,
            self.runnable() as u32,
            cfs_rq.is_curr(&self.self_arc()) as u32,
        ) {
            self.avg
                .update_load_avg(LoadWeight::scale_load_down(self.load.weight));

            return true;
        }

        return false;
    }
}

bitflags! {
    pub struct UpdateAvgFlags: u8 {
        /// Update task group information as well
        const UPDATE_TG = 0x1;

        /// Skip aging and updating the load averages
        const SKIP_AGE_LOAD = 0x2;
        /// Perform an attach operation
        const DO_ATTACH = 0x4;
        /// Perform a detach operation
        const DO_DETACH = 0x8;
    }
}

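/// Add a signed delta to `x`, clamping the result at zero; PELT sums and
/// averages must never go negative.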
pub fn add_positive(x: &mut isize, y: isize) {
    let res = *x + y;
    *x = res.max(0);
}

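/// Subtract `y` from `x`, saturating at zero (the unsigned counterpart of
/// `add_positive`).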
pub fn sub_positive(x: &mut usize, y: usize) {
    if *x > y {
        *x -= y;
    } else {
        *x = 0;
    }
}
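
// A minimal, illustrative sanity check of the decay math above. It assumes
// this module can be compiled by a host-side `cargo test` run, which may not
// hold for the kernel build; treat it as a sketch, not part of the kernel's
// test suite.
#[cfg(test)]
mod pelt_decay_tests {
    use super::*;

    #[test]
    fn decay_by_one_half_life_roughly_halves_the_value() {
        // y^32 == 0.5, so decaying by LOAD_AVG_PERIOD periods should halve
        // the value, give or take the fixed-point truncation in
        // RUNNABLE_AVG_Y_N_INV.
        let half = SchedulerAvg::decay_load(1024, LOAD_AVG_PERIOD);
        assert!(half == 511 || half == 512);
    }

    #[test]
    fn decay_beyond_the_cutoff_is_zero() {
        // Far enough in the past everything has decayed away and decay_load
        // short-circuits to zero.
        assert_eq!(
            SchedulerAvg::decay_load(u64::MAX, LOAD_AVG_PERIOD * 63 + 1),
            0
        );
    }
}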