use core::{ptr::null_mut, sync::atomic::compiler_fence};

use alloc::{boxed::Box, vec::Vec};

use crate::{
    arch::asm::current::current_pcb,
    include::bindings::bindings::{
        initial_proc_union, process_control_block, MAX_CPU_NUM, PF_NEED_SCHED, PROC_RUNNING,
    },
    kBUG,
    libs::{rbtree::RBTree, spinlock::RawSpinlock},
    smp::core::smp_get_processor_id,
};

use super::core::{sched_enqueue, Scheduler};

/// Declare the global CFS scheduler instance
pub static mut CFS_SCHEDULER_PTR: Option<Box<SchedulerCFS>> = None;

/// @brief Get a mutable reference to the CFS scheduler instance
#[inline]
pub fn __get_cfs_scheduler() -> &'static mut SchedulerCFS {
    return unsafe { CFS_SCHEDULER_PTR.as_mut().unwrap() };
}
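
// A minimal usage sketch (illustrative, not code from this module): it assumes
// `sched_cfs_init()` has already run, since calling this any earlier panics on
// the `unwrap()` above.
//
//     let cfs = __get_cfs_scheduler();
//     let n_ready = cfs.get_cfs_queue_len(smp_get_processor_id());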

/// @brief Initialize the CFS scheduler
pub unsafe fn sched_cfs_init() {
    if CFS_SCHEDULER_PTR.is_none() {
        CFS_SCHEDULER_PTR = Some(Box::new(SchedulerCFS::new()));
    } else {
        kBUG!("Try to init CFS Scheduler twice.");
        panic!("Try to init CFS Scheduler twice.");
    }
}

/// @brief CFS run queue (per-CPU)
#[derive(Debug)]
struct CFSQueue {
    /// Remaining time slice of the process currently running on this CPU
    cpu_exec_proc_jiffies: i64,
    /// Lock protecting the queue
    lock: RawSpinlock,
    /// Run queue of processes, keyed by virtual runtime
    queue: RBTree<i64, &'static mut process_control_block>,
    /// PCB of the IDLE process dedicated to this core's queue
    idle_pcb: *mut process_control_block,
}
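
// Design note: like Linux's CFS, each per-CPU queue keeps runnable tasks in a
// red-black tree ordered by virtual runtime, so the leftmost node is always
// the task that has run the least and is the one to pick next.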

impl CFSQueue {
    pub fn new(idle_pcb: *mut process_control_block) -> CFSQueue {
        CFSQueue {
            cpu_exec_proc_jiffies: 0,
            lock: RawSpinlock::INIT,
            queue: RBTree::new(),
            idle_pcb,
        }
    }

    /// @brief Enqueue a PCB
    pub fn enqueue(&mut self, pcb: &'static mut process_control_block) {
        let mut rflags = 0usize;
        self.lock.lock_irqsave(&mut rflags);

        // The IDLE process (pid 0) is never enqueued
        if pcb.pid == 0 {
            self.lock.unlock_irqrestore(rflags);
            return;
        }

        self.queue.insert(pcb.virtual_runtime, pcb);

        self.lock.unlock_irqrestore(rflags);
    }

    /// @brief Pop a PCB from the run queue; if the queue is empty, return the IDLE process's PCB
    pub fn dequeue(&mut self) -> &'static mut process_control_block {
        let res: &'static mut process_control_block;
        let mut rflags = 0usize;
        self.lock.lock_irqsave(&mut rflags);
        if !self.queue.is_empty() {
            // Queue is non-empty: return the next PCB to run (the one with the smallest virtual runtime)
            res = self.queue.pop_first().unwrap().1;
        } else {
            // Queue is empty: fall back to the IDLE process's PCB
            res = unsafe { self.idle_pcb.as_mut().unwrap() };
        }
        self.lock.unlock_irqrestore(rflags);
        return res;
    }

    /// @brief Get the minimum virtual runtime in the CFS queue
    ///
    /// @return Option<i64> The smallest virtual runtime in the queue if it is non-empty; otherwise None
    pub fn min_vruntime(&self) -> Option<i64> {
        if !self.queue.is_empty() {
            return Some(self.queue.get_first().unwrap().1.virtual_runtime);
        } else {
            return None;
        }
    }

    /// Get the length of the run queue
    fn get_cfs_queue_size(&mut self) -> usize {
        return self.queue.len();
    }
}
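
// Pick-next sketch (illustrative values): with vruntimes {4, 7, 9} in the
// tree, `min_vruntime()` reports Some(4) and `dequeue()` pops that same task;
// with an empty tree, `dequeue()` hands back `idle_pcb`, so the scheduler
// always has something to run.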

/// @brief The CFS scheduler
pub struct SchedulerCFS {
    /// Per-CPU run queues, indexed by CPU id
    cpu_queue: Vec<&'static mut CFSQueue>,
}

impl SchedulerCFS {
    pub fn new() -> SchedulerCFS {
        // The number of CPU cores is specified manually for now
        // todo: get the core count from the cpu module
        let mut result = SchedulerCFS {
            cpu_queue: Default::default(),
        };

        // Create a run queue for each CPU core
        for _ in 0..MAX_CPU_NUM {
            result
                .cpu_queue
                .push(Box::leak(Box::new(CFSQueue::new(null_mut()))));
        }
        // Set the idle PCB for cpu0
        result.cpu_queue[0].idle_pcb = unsafe { &mut initial_proc_union.pcb };

        return result;
    }

    /// @brief Update the time slice granted to the current process on this CPU.
    #[inline]
    fn update_cpu_exec_proc_jiffies(_priority: i64, cfs_queue: &mut CFSQueue) -> &mut CFSQueue {
        // todo: compute the slice from the scheduling period and the priorities of all processes
        cfs_queue.cpu_exec_proc_jiffies = 10;

        return cfs_queue;
    }
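
    // For comparison, mainline Linux CFS sizes the slice in proportion to task
    // weight, roughly slice = period * weight / total_weight; the fixed 10
    // jiffies above is a stand-in until the todo is implemented.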

    /// @brief Called by the sched core module when a timer interrupt arrives, to update the remaining time slice of the current CFS process
    pub fn timer_update_jiffies(&mut self) {
        let current_cpu_queue: &mut CFSQueue = self.cpu_queue[current_pcb().cpu_id as usize];
        // todo: compute the slice from the scheduling period and the priorities of all processes

        // Update the process's remaining time slice
        current_cpu_queue.lock.lock();
        current_cpu_queue.cpu_exec_proc_jiffies -= 1;
        // Time slice exhausted: mark the process as needing to be rescheduled
        if current_cpu_queue.cpu_exec_proc_jiffies <= 0 {
            current_pcb().flags |= PF_NEED_SCHED as u64;
        }
        current_cpu_queue.lock.unlock();

        // Update the current process's virtual runtime
        current_pcb().virtual_runtime += 1;
    }
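
    // Worked example (illustrative numbers): with cpu_exec_proc_jiffies = 10,
    // a freshly picked task absorbs 10 timer ticks (its vruntime growing by 1
    // per tick) before the counter reaches 0 and PF_NEED_SCHED is set; the
    // actual switch then happens on the next pass through `sched()`.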

    /// @brief Enqueue a process on its CPU's CFS run queue, resetting its virtual runtime to the queue's current minimum
    pub fn enqueue_reset_vruntime(&mut self, pcb: &'static mut process_control_block) {
        let cpu_queue = &mut self.cpu_queue[pcb.cpu_id as usize];
        if !cpu_queue.queue.is_empty() {
            pcb.virtual_runtime = cpu_queue.min_vruntime().unwrap();
        }

        cpu_queue.enqueue(pcb);
    }
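
    // Rationale: clamping a newly enqueued task's vruntime to the queue minimum
    // mirrors how Linux CFS handles sleepers; without it, a task that slept for
    // a long time would wake with a far smaller vruntime than its peers and
    // monopolize the CPU until it caught up.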

    /// @brief Set the IDLE process PCB for a CPU's run queue
    pub fn set_cpu_idle(&mut self, cpu_id: usize, pcb: *mut process_control_block) {
        // kdebug!("set cpu idle: id={}", cpu_id);
        self.cpu_queue[cpu_id].idle_pcb = pcb;
    }

    /// Get the number of processes in a CPU's run queue
    pub fn get_cfs_queue_len(&mut self, cpu_id: u32) -> usize {
        return self.cpu_queue[cpu_id as usize].get_cfs_queue_size();
    }
}

impl Scheduler for SchedulerCFS {
    /// @brief Perform scheduling on the current CPU.
    /// Note: interrupts must be disabled before entering this function.
    fn sched(&mut self) -> Option<&'static mut process_control_block> {
        current_pcb().flags &= !(PF_NEED_SCHED as u64);

        let current_cpu_id = smp_get_processor_id() as usize;

        let current_cpu_queue: &mut CFSQueue = self.cpu_queue[current_cpu_id];

        let proc: &'static mut process_control_block = current_cpu_queue.dequeue();

        compiler_fence(core::sync::atomic::Ordering::SeqCst);
        // A switch is needed if the current process is not in the running state,
        // or if its virtual runtime is greater than or equal to the next process's.
        if (current_pcb().state & (PROC_RUNNING as u64)) == 0
            || current_pcb().virtual_runtime >= proc.virtual_runtime
        {
            compiler_fence(core::sync::atomic::Ordering::SeqCst);
            // If this switch was triggered by time-slice expiry, put the current
            // process back on the ready queue; otherwise leave it to be managed
            // by other subsystems.
            if current_pcb().state & (PROC_RUNNING as u64) != 0 {
                sched_enqueue(current_pcb(), false);
                compiler_fence(core::sync::atomic::Ordering::SeqCst);
            }

            compiler_fence(core::sync::atomic::Ordering::SeqCst);
            // Set the time the next process is allowed to run
            if current_cpu_queue.cpu_exec_proc_jiffies <= 0 {
                SchedulerCFS::update_cpu_exec_proc_jiffies(proc.priority, current_cpu_queue);
            }

            compiler_fence(core::sync::atomic::Ordering::SeqCst);

            return Some(proc);
        } else {
            // No switch: the current process keeps the CPU

            // Set the time the process is allowed to run
            compiler_fence(core::sync::atomic::Ordering::SeqCst);
            if current_cpu_queue.cpu_exec_proc_jiffies <= 0 {
                SchedulerCFS::update_cpu_exec_proc_jiffies(proc.priority, current_cpu_queue);
                // kdebug!("cpu:{:?}",current_cpu_id);
            }

            compiler_fence(core::sync::atomic::Ordering::SeqCst);
            // Not switching: put the dequeued candidate back on the queue
            sched_enqueue(proc, false);
            compiler_fence(core::sync::atomic::Ordering::SeqCst);
        }
        compiler_fence(core::sync::atomic::Ordering::SeqCst);

        return None;
    }
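
    // Decision summary for `sched()` above:
    //   current not RUNNING                     -> switch to `proc` (current is not re-enqueued)
    //   current RUNNING, vruntime >= `proc`'s   -> re-enqueue current, switch to `proc`
    //   current RUNNING, vruntime <  `proc`'s   -> keep current, put `proc` back on the queue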

    fn enqueue(&mut self, pcb: &'static mut process_control_block) {
        let cpu_queue = &mut self.cpu_queue[pcb.cpu_id as usize];
        cpu_queue.enqueue(pcb);
    }
}