use core::{ptr::null_mut, sync::atomic::compiler_fence};

use alloc::{boxed::Box, vec::Vec};

use crate::{
    arch::asm::current::current_pcb,
    include::bindings::bindings::{
        initial_proc_union, process_control_block, MAX_CPU_NUM, PF_NEED_SCHED, PROC_RUNNING,
    },
    kBUG,
    libs::{rbtree::RBTree, spinlock::RawSpinlock},
    smp::core::smp_get_processor_id,
};

use super::core::{sched_enqueue, Scheduler};
/// The global CFS scheduler instance
pub static mut CFS_SCHEDULER_PTR: *mut SchedulerCFS = null_mut();

/// @brief Get a mutable reference to the CFS scheduler instance
#[inline]
pub fn __get_cfs_scheduler() -> &'static mut SchedulerCFS {
    return unsafe { CFS_SCHEDULER_PTR.as_mut().unwrap() };
}
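
// A minimal usage sketch (hypothetical caller; `some_pcb` stands in for a
// `&'static mut process_control_block` obtained elsewhere):
//
//     let cfs = __get_cfs_scheduler();
//     cfs.enqueue(some_pcb);
//
// Callers must make sure sched_cfs_init() has already run; otherwise the
// unwrap() above panics on the null pointer.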

/// @brief Initialize the CFS scheduler
pub unsafe fn sched_cfs_init() {
    if CFS_SCHEDULER_PTR.is_null() {
        CFS_SCHEDULER_PTR = Box::leak(Box::new(SchedulerCFS::new()));
    } else {
        kBUG!("Try to init CFS Scheduler twice.");
        panic!("Try to init CFS Scheduler twice.");
    }
}
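
// Sketch of the assumed boot-time ordering (not enforced by the type system):
//
//     unsafe { sched_cfs_init() };     // installs the leaked global instance
//     let cfs = __get_cfs_scheduler(); // safe to call from this point on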

/// @brief CFS run queue (per-CPU)
#[derive(Debug)]
struct CFSQueue {
    /// Remaining time slice (in jiffies) of the process currently running on this CPU
    cpu_exec_proc_jiffies: i64,
    /// Lock protecting this queue
    lock: RawSpinlock,
    /// Runnable processes, keyed by virtual runtime
    queue: RBTree<i64, &'static mut process_control_block>,
    /// PCB of the IDLE process dedicated to this core's queue
    idle_pcb: *mut process_control_block,
}

impl CFSQueue {
    pub fn new(idle_pcb: *mut process_control_block) -> CFSQueue {
        CFSQueue {
            cpu_exec_proc_jiffies: 0,
            lock: RawSpinlock::INIT,
            queue: RBTree::new(),
            idle_pcb,
        }
    }

    /// @brief Add a pcb to the queue
    pub fn enqueue(&mut self, pcb: &'static mut process_control_block) {
        let mut rflags = 0u64;
        self.lock.lock_irqsave(&mut rflags);

        // The IDLE process (pid 0) never joins the queue
        if pcb.pid == 0 {
            self.lock.unlock_irqrestore(&rflags);
            return;
        }

        self.queue.insert(pcb.virtual_runtime, pcb);

        self.lock.unlock_irqrestore(&rflags);
    }

    /// @brief Pop the next pcb from the queue; if the queue is empty, return the IDLE process's pcb
    pub fn dequeue(&mut self) -> &'static mut process_control_block {
        let res: &'static mut process_control_block;
        let mut rflags = 0u64;
        self.lock.lock_irqsave(&mut rflags);
        if !self.queue.is_empty() {
            // Queue is not empty: return the next pcb to run
            res = self.queue.pop_first().unwrap().1;
        } else {
            // Queue is empty: fall back to the IDLE process's pcb
            res = unsafe { self.idle_pcb.as_mut().unwrap() };
        }
        self.lock.unlock_irqrestore(&rflags);
        return res;
    }

    /// @brief Get the minimum virtual runtime in this CFS queue
    ///
    /// @return Option<i64> The smallest virtual runtime in the queue if it is non-empty; None otherwise
    pub fn min_vruntime(&self) -> Option<i64> {
        if !self.queue.is_empty() {
            return Some(self.queue.get_first().unwrap().1.virtual_runtime);
        } else {
            return None;
        }
    }

    /// Get the length of the run queue
    pub fn get_cfs_queue_size(&mut self) -> usize {
        return self.queue.len();
    }
}
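
// Behavior sketch for CFSQueue: because the RBTree is keyed by virtual
// runtime, dequeue() always pops the task that has run the least. Given
// three hypothetical pcbs with virtual_runtime 30, 10 and 20:
//
//     q.enqueue(pcb_a); // vruntime 30
//     q.enqueue(pcb_b); // vruntime 10
//     q.enqueue(pcb_c); // vruntime 20
//     let next = q.dequeue(); // pcb_b, the smallest vruntime
//
// and an empty queue yields idle_pcb instead of blocking.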

/// @brief The CFS scheduler
pub struct SchedulerCFS {
    cpu_queue: Vec<&'static mut CFSQueue>,
}

impl SchedulerCFS {
    pub fn new() -> SchedulerCFS {
        // Hard-code the number of cores for now
        // todo: get the core count from the cpu module
        let mut result = SchedulerCFS {
            cpu_queue: Default::default(),
        };

        // Create a queue for each cpu core
        for _ in 0..MAX_CPU_NUM {
            result
                .cpu_queue
                .push(Box::leak(Box::new(CFSQueue::new(null_mut()))));
        }
        // Set the IDLE pcb for cpu0
        result.cpu_queue[0].idle_pcb = unsafe { &mut initial_proc_union.pcb };

        return result;
    }

    /// @brief Update the executable time granted to this process on this cpu.
    #[inline]
    fn update_cpu_exec_proc_jiffies(_priority: i64, cfs_queue: &mut CFSQueue) -> &mut CFSQueue {
        // todo: compute the slice from a scheduling period and the weights of all
        // processes, then set the executable time granted to the process
        cfs_queue.cpu_exec_proc_jiffies = 10;

        return cfs_queue;
    }
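
    // The todo above points at a weighted slice, roughly in the spirit of
    // classic CFS. A sketch under assumed names (not current behavior):
    //
    //     fn proposed_slice(sched_period: i64, weight: i64, total_weight: i64) -> i64 {
    //         // each task gets a share of the period proportional to its weight
    //         core::cmp::max(1, sched_period * weight / total_weight)
    //     }
    //
    // Until something like that lands, every task receives a fixed 10-jiffy slice.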

    /// @brief Called by the sched core module on every timer interrupt to update the CFS process's executable time
    pub fn timer_update_jiffies(&mut self) {
        let current_cpu_queue: &mut CFSQueue = self.cpu_queue[current_pcb().cpu_id as usize];
        // todo: compute from a scheduling period and the weights of all processes, then set the process's executable time

        // Update the process's remaining executable time
        current_cpu_queue.lock.lock();
        current_cpu_queue.cpu_exec_proc_jiffies -= 1;
        // Time slice exhausted: mark the process as needing to be rescheduled
        if current_cpu_queue.cpu_exec_proc_jiffies <= 0 {
            current_pcb().flags |= PF_NEED_SCHED as u64;
        }
        current_cpu_queue.lock.unlock();

        // Update the current process's virtual runtime
        current_pcb().virtual_runtime += 1;
    }
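
    // Worked example of the tick accounting: with the fixed 10-jiffy slice set
    // by update_cpu_exec_proc_jiffies(), a process running alone on a cpu is
    // marked PF_NEED_SCHED on its 10th consecutive tick, by which point its
    // virtual_runtime has grown by 10.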

    /// @brief Add a process to the cpu's CFS run queue, resetting its virtual runtime to the queue's current minimum
    pub fn enqueue_reset_vruntime(&mut self, pcb: &'static mut process_control_block) {
        let cpu_queue = &mut self.cpu_queue[pcb.cpu_id as usize];
        if cpu_queue.queue.len() > 0 {
            pcb.virtual_runtime = cpu_queue.min_vruntime().unwrap();
        }

        cpu_queue.enqueue(pcb);
    }
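
    // Placing the task at the queue's minimum mirrors how Linux CFS positions
    // woken tasks near min_vruntime: a task whose stale virtual_runtime sits far
    // below everyone else's would otherwise win every dequeue until it caught
    // up, starving the rest of the queue.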

    /// @brief Set the IDLE process's pcb for a cpu's queue
    pub fn set_cpu_idle(&mut self, cpu_id: usize, pcb: *mut process_control_block) {
        // kdebug!("set cpu idle: id={}", cpu_id);
        self.cpu_queue[cpu_id].idle_pcb = pcb;
    }

    /// Get the number of processes in a cpu's run queue
    pub fn get_cfs_queue_len(&mut self, cpu_id: u32) -> usize {
        return self.cpu_queue[cpu_id as usize].get_cfs_queue_size();
    }
}

impl Scheduler for SchedulerCFS {
    /// @brief Schedule on the current cpu.
    /// Note: interrupts must be disabled before entering this function
    fn sched(&mut self) -> Option<&'static mut process_control_block> {
        current_pcb().flags &= !(PF_NEED_SCHED as u64);

        let current_cpu_id = smp_get_processor_id() as usize;

        let current_cpu_queue: &mut CFSQueue = self.cpu_queue[current_cpu_id];

        let proc: &'static mut process_control_block = current_cpu_queue.dequeue();

        compiler_fence(core::sync::atomic::Ordering::SeqCst);
        // Switch if the current process is not in the running state, or if its
        // virtual runtime is greater than or equal to the next process's.
        if (current_pcb().state & (PROC_RUNNING as u64)) == 0
            || current_pcb().virtual_runtime >= proc.virtual_runtime
        {
            compiler_fence(core::sync::atomic::Ordering::SeqCst);
            // If this switch was triggered by time-slice expiry, put the current
            // process back on the ready queue; otherwise leave it to whichever
            // other module now manages it
            if current_pcb().state & (PROC_RUNNING as u64) != 0 {
                sched_enqueue(current_pcb(), false);
                compiler_fence(core::sync::atomic::Ordering::SeqCst);
            }

            compiler_fence(core::sync::atomic::Ordering::SeqCst);
            // Set the time the process may execute
            if current_cpu_queue.cpu_exec_proc_jiffies <= 0 {
                SchedulerCFS::update_cpu_exec_proc_jiffies(proc.priority, current_cpu_queue);
            }

            compiler_fence(core::sync::atomic::Ordering::SeqCst);

            return Some(proc);
        } else {
            // No switch is performed

            // Set the time the process may execute
            compiler_fence(core::sync::atomic::Ordering::SeqCst);
            if current_cpu_queue.cpu_exec_proc_jiffies <= 0 {
                SchedulerCFS::update_cpu_exec_proc_jiffies(proc.priority, current_cpu_queue);
                // kdebug!("cpu:{:?}", current_cpu_id);
            }

            compiler_fence(core::sync::atomic::Ordering::SeqCst);
            sched_enqueue(proc, false);
            compiler_fence(core::sync::atomic::Ordering::SeqCst);
        }
        compiler_fence(core::sync::atomic::Ordering::SeqCst);

        return None;
    }
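
    // Decision summary for sched(), with cur = current_pcb() and next = the
    // dequeued proc:
    //
    //     cur not PROC_RUNNING                 -> switch to next (cur is not re-queued here)
    //     cur running, cur.vruntime >= next's  -> re-queue cur, switch to next
    //     cur running, cur.vruntime <  next's  -> keep running cur, re-queue next, return None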

    fn enqueue(&mut self, pcb: &'static mut process_control_block) {
        let cpu_queue = &mut self.cpu_queue[pcb.cpu_id as usize];
        cpu_queue.enqueue(pcb);
    }
}