1 use core::sync::atomic::compiler_fence;
2
3 use alloc::{boxed::Box, sync::Arc, vec::Vec};
4
5 use crate::{
6 arch::CurrentIrqArch,
7 exception::InterruptArch,
8 include::bindings::bindings::MAX_CPU_NUM,
9 kBUG,
10 libs::{
11 rbtree::RBTree,
12 spinlock::{SpinLock, SpinLockGuard},
13 },
14 process::{
15 ProcessControlBlock, ProcessFlags, ProcessManager, ProcessSchedulerInfo, ProcessState,
16 },
17 smp::{core::smp_get_processor_id, cpu::ProcessorId},
18 };
19
20 use super::{
21 core::{sched_enqueue, Scheduler},
22 SchedPriority,
23 };
24
/// Global CFS scheduler instance (one per system, holding the per-CPU queues).
pub static mut CFS_SCHEDULER_PTR: Option<Box<SchedulerCFS>> = None;

/// Returns a mutable reference to the global CFS scheduler instance.
///
/// # Panics
/// Panics if the scheduler has not been initialized via `sched_cfs_init()`.
///
/// NOTE(review): handing out `&'static mut` from a `static mut` is aliasing UB
/// if two cores call this concurrently — presumably callers rely on disabled
/// IRQs plus the per-queue spinlocks for exclusion; confirm.
#[inline]
pub fn __get_cfs_scheduler() -> &'static mut SchedulerCFS {
    return unsafe { CFS_SCHEDULER_PTR.as_mut().unwrap() };
}
33
34 /// @brief 初始化cfs调度器
sched_cfs_init()35 pub unsafe fn sched_cfs_init() {
36 if CFS_SCHEDULER_PTR.is_none() {
37 CFS_SCHEDULER_PTR = Some(Box::new(SchedulerCFS::new()));
38 } else {
39 kBUG!("Try to init CFS Scheduler twice.");
40 panic!("Try to init CFS Scheduler twice.");
41 }
42 }
43
/// A per-CPU CFS run queue.
#[derive(Debug)]
struct CFSQueue {
    /// Remaining time slice (in jiffies) of the process currently running on this CPU.
    cpu_exec_proc_jiffies: i64,
    /// Run queue keyed by virtual runtime, protected by a spinlock.
    locked_queue: SpinLock<RBTree<i64, Arc<ProcessControlBlock>>>,
    /// PCB of the IDLE process dedicated to this core's queue.
    idle_pcb: Arc<ProcessControlBlock>,
}
54
55 impl CFSQueue {
new(idle_pcb: Arc<ProcessControlBlock>) -> CFSQueue56 pub fn new(idle_pcb: Arc<ProcessControlBlock>) -> CFSQueue {
57 CFSQueue {
58 cpu_exec_proc_jiffies: 0,
59 locked_queue: SpinLock::new(RBTree::new()),
60 idle_pcb,
61 }
62 }
63
64 /// @brief 将pcb加入队列
enqueue(&mut self, pcb: Arc<ProcessControlBlock>)65 pub fn enqueue(&mut self, pcb: Arc<ProcessControlBlock>) {
66 let mut queue = self.locked_queue.lock_irqsave();
67
68 // 如果进程是IDLE进程,那么就不加入队列
69 if pcb.pid().into() == 0 {
70 return;
71 }
72
73 queue.insert(pcb.sched_info().virtual_runtime() as i64, pcb.clone());
74 }
75
76 /// @brief 将pcb从调度队列中弹出,若队列为空,则返回IDLE进程的pcb
dequeue(&mut self) -> Arc<ProcessControlBlock>77 pub fn dequeue(&mut self) -> Arc<ProcessControlBlock> {
78 let res: Arc<ProcessControlBlock>;
79 let mut queue = self.locked_queue.lock_irqsave();
80 if !queue.is_empty() {
81 // 队列不为空,返回下一个要执行的pcb
82 res = queue.pop_first().unwrap().1;
83 } else {
84 // 如果队列为空,则返回IDLE进程的pcb
85 res = self.idle_pcb.clone();
86 }
87 return res;
88 }
89
90 /// @brief 获取cfs队列的最小运行时间
91 ///
92 /// @return Option<i64> 如果队列不为空,那么返回队列中,最小的虚拟运行时间;否则返回None
min_vruntime( queue: &SpinLockGuard<RBTree<i64, Arc<ProcessControlBlock>>>, ) -> Option<i64>93 pub fn min_vruntime(
94 queue: &SpinLockGuard<RBTree<i64, Arc<ProcessControlBlock>>>,
95 ) -> Option<i64> {
96 if !queue.is_empty() {
97 return Some(queue.get_first().unwrap().1.sched_info().virtual_runtime() as i64);
98 } else {
99 return None;
100 }
101 }
102 /// 获取运行队列的长度
103 #[allow(dead_code)]
get_cfs_queue_size( queue: &SpinLockGuard<RBTree<i64, Arc<ProcessControlBlock>>>, ) -> usize104 pub fn get_cfs_queue_size(
105 queue: &SpinLockGuard<RBTree<i64, Arc<ProcessControlBlock>>>,
106 ) -> usize {
107 return queue.len();
108 }
109 }
110
/// The CFS (Completely Fair Scheduler) scheduler.
///
/// Holds one leaked (`'static`) `CFSQueue` per CPU core, indexed by cpu id.
pub struct SchedulerCFS {
    cpu_queue: Vec<&'static mut CFSQueue>,
}
115
impl SchedulerCFS {
    /// Creates the CFS scheduler, building one run queue per CPU core.
    pub fn new() -> SchedulerCFS {
        // Core count is taken from the hard-coded MAX_CPU_NUM for now.
        // todo: obtain the core count from the cpu module.
        let mut result = SchedulerCFS {
            cpu_queue: Default::default(),
        };

        // Create a queue for every CPU core; each queue is Box::leak'ed to
        // obtain the `'static` lifetime the `cpu_queue` field requires.
        // (After the process-management refactor the idle pcb may become
        // directly initializable here.)
        for i in 0..MAX_CPU_NUM {
            let idle_pcb = ProcessManager::idle_pcb()[i as usize].clone();
            result
                .cpu_queue
                .push(Box::leak(Box::new(CFSQueue::new(idle_pcb))));
        }

        return result;
    }

    /// Recomputes the time slice granted to the process about to run on this
    /// CPU and stores it into the queue.
    ///
    /// Non-idle processes get a fixed 10 jiffies; the IDLE process gets 0.
    #[inline]
    fn update_cpu_exec_proc_jiffies(
        _priority: SchedPriority,
        cfs_queue: &mut CFSQueue,
        is_idle: bool,
    ) -> &mut CFSQueue {
        // todo: factor in the scheduling period and the priorities of all
        // runnable processes when computing the slice.
        if !is_idle {
            cfs_queue.cpu_exec_proc_jiffies = 10;
        } else {
            cfs_queue.cpu_exec_proc_jiffies = 0;
        }

        return cfs_queue;
    }

    /// Called by the sched core on every timer tick: burns one jiffy of the
    /// current process's slice and credits one tick of virtual runtime.
    ///
    /// Sets `ProcessFlags::NEED_SCHEDULE` on the current pcb once the slice
    /// is exhausted.
    pub fn timer_update_jiffies(&mut self, sched_info: &ProcessSchedulerInfo) {
        let current_cpu_queue: &mut CFSQueue =
            self.cpu_queue[smp_get_processor_id().data() as usize];
        // todo: factor in the scheduling period and process priorities when
        // updating the executable time.

        // Bounded try-lock: we are in interrupt context, so spinning forever
        // on the queue lock is not acceptable.
        let mut queue = None;
        for _ in 0..10 {
            if let Ok(q) = current_cpu_queue.locked_queue.try_lock_irqsave() {
                queue = Some(q);
                break;
            }
        }
        // Lock contended after 10 attempts: skip this tick entirely — both
        // the jiffies decrement and the vruntime credit below are lost.
        if queue.is_none() {
            return;
        }
        let queue = queue.unwrap();
        // Burn one jiffy of the current process's remaining slice.
        // NOTE(review): the guard `queue` is never read — it is held only to
        // serialize this update, presumably against concurrent queue
        // operations; confirm this is the intended locking protocol.
        current_cpu_queue.cpu_exec_proc_jiffies -= 1;
        // Slice exhausted: mark the current process as needing a reschedule.
        if current_cpu_queue.cpu_exec_proc_jiffies <= 0 {
            ProcessManager::current_pcb()
                .flags()
                .insert(ProcessFlags::NEED_SCHEDULE);
        }
        drop(queue);

        // Credit one tick of virtual runtime to the current process.
        sched_info.increase_virtual_runtime(1);
    }

    /// Enqueues `pcb` on its CPU's CFS queue after resetting its virtual
    /// runtime to the queue's current minimum.
    ///
    /// # Panics
    /// Panics if `pcb.sched_info().on_cpu()` is `None`.
    pub fn enqueue_reset_vruntime(&mut self, pcb: Arc<ProcessControlBlock>) {
        let cpu_queue = &mut self.cpu_queue[pcb.sched_info().on_cpu().unwrap().data() as usize];
        let queue = cpu_queue.locked_queue.lock_irqsave();
        // Only reset when the queue is non-empty; min_vruntime() is Some in
        // that case, so the unwrap_or(0) fallback is never actually taken.
        if queue.len() > 0 {
            pcb.sched_info()
                .set_virtual_runtime(CFSQueue::min_vruntime(&queue).unwrap_or(0) as isize)
        }
        drop(queue);
        cpu_queue.enqueue(pcb);
    }

    /// Sets the IDLE process pcb for the given CPU's queue.
    #[allow(dead_code)]
    pub fn set_cpu_idle(&mut self, cpu_id: usize, pcb: Arc<ProcessControlBlock>) {
        // kdebug!("set cpu idle: id={}", cpu_id);
        self.cpu_queue[cpu_id].idle_pcb = pcb;
    }

    /// Returns the number of runnable processes on the given CPU's run queue.
    pub fn get_cfs_queue_len(&mut self, cpu_id: ProcessorId) -> usize {
        let queue = self.cpu_queue[cpu_id.data() as usize]
            .locked_queue
            .lock_irqsave();
        return CFSQueue::get_cfs_queue_size(&queue);
    }
}
209
impl Scheduler for SchedulerCFS {
    /// Performs scheduling on the current CPU.
    ///
    /// IRQs MUST be disabled before entering this function (asserted below).
    ///
    /// Returns `Some(next_pcb)` when a context switch should happen, `None`
    /// when the current process keeps running.
    fn sched(&mut self) -> Option<Arc<ProcessControlBlock>> {
        assert!(CurrentIrqArch::is_irq_enabled() == false);

        ProcessManager::current_pcb()
            .flags()
            .remove(ProcessFlags::NEED_SCHEDULE);

        let current_cpu_id = smp_get_processor_id().data() as usize;

        let current_cpu_queue: &mut CFSQueue = self.cpu_queue[current_cpu_id];

        // Candidate: the runnable process with the smallest vruntime, or the
        // IDLE pcb when the queue is empty.
        let proc: Arc<ProcessControlBlock> = current_cpu_queue.dequeue();

        compiler_fence(core::sync::atomic::Ordering::SeqCst);
        // Switch when the current process is not Runnable, or when its
        // virtual runtime is >= the candidate's.
        let state = ProcessManager::current_pcb()
            .sched_info()
            .inner_lock_read_irqsave()
            .state();
        if (state != ProcessState::Runnable)
            || (ProcessManager::current_pcb().sched_info().virtual_runtime()
                >= proc.sched_info().virtual_runtime())
        {
            compiler_fence(core::sync::atomic::Ordering::SeqCst);
            // If this switch was triggered by slice expiry (process is still
            // Runnable), re-enqueue the outgoing process; otherwise another
            // subsystem now manages it.
            if state == ProcessState::Runnable {
                sched_enqueue(ProcessManager::current_pcb(), false);
                compiler_fence(core::sync::atomic::Ordering::SeqCst);
            }
            compiler_fence(core::sync::atomic::Ordering::SeqCst);
            // Refill the executable time slice for the incoming process.
            if current_cpu_queue.cpu_exec_proc_jiffies <= 0 {
                SchedulerCFS::update_cpu_exec_proc_jiffies(
                    proc.sched_info().priority(),
                    current_cpu_queue,
                    Arc::ptr_eq(&proc, &current_cpu_queue.idle_pcb),
                );
            }

            compiler_fence(core::sync::atomic::Ordering::SeqCst);

            return Some(proc);
        } else {
            // No switch: the current process continues running.

            // Refill the current process's slice if it ran out.
            compiler_fence(core::sync::atomic::Ordering::SeqCst);
            if current_cpu_queue.cpu_exec_proc_jiffies <= 0 {
                SchedulerCFS::update_cpu_exec_proc_jiffies(
                    ProcessManager::current_pcb().sched_info().priority(),
                    current_cpu_queue,
                    Arc::ptr_eq(&proc, &current_cpu_queue.idle_pcb),
                );
                // kdebug!("cpu:{:?}",current_cpu_id);
            }

            compiler_fence(core::sync::atomic::Ordering::SeqCst);
            // Put the dequeued candidate back — it was not chosen.
            sched_enqueue(proc, false);
            compiler_fence(core::sync::atomic::Ordering::SeqCst);
        }
        compiler_fence(core::sync::atomic::Ordering::SeqCst);

        return None;
    }

    /// Adds `pcb` to the CFS queue of the CPU it is assigned to.
    ///
    /// # Panics
    /// Panics if `pcb.sched_info().on_cpu()` is `None`.
    fn enqueue(&mut self, pcb: Arc<ProcessControlBlock>) {
        let cpu_queue = &mut self.cpu_queue[pcb.sched_info().on_cpu().unwrap().data() as usize];

        cpu_queue.enqueue(pcb);
    }
}
284