use core::{ptr::null_mut, sync::atomic::compiler_fence};

use alloc::{boxed::Box, collections::LinkedList, vec::Vec};

use crate::{
    arch::asm::current::current_pcb,
    include::bindings::bindings::{
        process_control_block, MAX_CPU_NUM, PF_NEED_SCHED, SCHED_FIFO, SCHED_RR,
    },
    kBUG, kdebug,
    libs::spinlock::RawSpinlock,
};

use super::core::{sched_enqueue, Scheduler};
/// Global instance of the RT scheduler
pub static mut RT_SCHEDULER_PTR: *mut SchedulerRT = null_mut();

/// @brief Get a mutable reference to the RT scheduler instance
#[inline]
pub fn __get_rt_scheduler() -> &'static mut SchedulerRT {
    return unsafe { RT_SCHEDULER_PTR.as_mut().unwrap() };
}

/// @brief Initialize the RT scheduler
pub unsafe fn sched_rt_init() {
    kdebug!("rt scheduler init");
    if RT_SCHEDULER_PTR.is_null() {
        // Leak the box: the scheduler instance lives for the rest of the kernel's lifetime
        RT_SCHEDULER_PTR = Box::leak(Box::new(SchedulerRT::new()));
    } else {
        kBUG!("Try to init RT Scheduler twice.");
        panic!("Try to init RT Scheduler twice.");
    }
}
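
// A minimal usage sketch (hypothetical boot path; the real call site lives in
// the scheduler core's initialization code, not in this file):
//
//     unsafe { sched_rt_init() };
//     let rt = __get_rt_scheduler();
//     assert_eq!(rt.rt_queue_len(0), 0); // no RT tasks queued on core 0 yet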

/// @brief RT ready queue (one per priority level, per CPU)
#[derive(Debug)]
struct RTQueue {
    /// Lock protecting the queue
    lock: RawSpinlock,
    /// Doubly-linked list holding the queued processes
    queue: LinkedList<&'static mut process_control_block>,
}

impl RTQueue {
    pub fn new() -> RTQueue {
        RTQueue {
            queue: LinkedList::new(),
            lock: RawSpinlock::INIT,
        }
    }

    /// @brief Append a pcb to the tail of the queue
    pub fn enqueue(&mut self, pcb: &'static mut process_control_block) {
        let mut rflags = 0u64;
        self.lock.lock_irqsave(&mut rflags);

        // The IDLE process (pid 0) is never queued
        if pcb.pid == 0 {
            self.lock.unlock_irqrestore(&rflags);
            return;
        }
        self.queue.push_back(pcb);
        self.lock.unlock_irqrestore(&rflags);
    }

    /// @brief Pop a pcb from the head of the queue; returns None if the queue is empty
    pub fn dequeue(&mut self) -> Option<&'static mut process_control_block> {
        let mut rflags = 0u64;
        self.lock.lock_irqsave(&mut rflags);
        // pop_front already yields None for an empty queue
        let res = self.queue.pop_front();
        self.lock.unlock_irqrestore(&rflags);
        return res;
    }

    /// @brief Push a pcb onto the head of the queue
    pub fn enqueue_front(&mut self, pcb: &'static mut process_control_block) {
        let mut rflags = 0u64;
        self.lock.lock_irqsave(&mut rflags);

        // The IDLE process (pid 0) is never queued
        if pcb.pid == 0 {
            self.lock.unlock_irqrestore(&rflags);
            return;
        }
        self.queue.push_front(pcb);
        self.lock.unlock_irqrestore(&rflags);
    }

    /// @brief Get the number of processes currently in the queue
    pub fn get_rt_queue_size(&mut self) -> usize {
        return self.queue.len();
    }
}
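
// Usage sketch for RTQueue (illustrative only; `some_pcb` is a placeholder):
// every operation takes the spinlock with interrupts saved and disabled, so
// the queue can be touched from interrupt context without deadlocking on its
// own lock:
//
//     let mut q = RTQueue::new();
//     q.enqueue(some_pcb);              // locks, pushes to the tail, unlocks
//     if let Some(next) = q.dequeue() { // locks, pops from the head, unlocks
//         /* run `next` */
//     }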

/// @brief The RT scheduler
pub struct SchedulerRT {
    /// Ready queues: one RTQueue per priority level, per CPU core
    cpu_queue: Vec<Vec<&'static mut RTQueue>>,
    /// Per-CPU load-tracking timestamp lists
    load_list: Vec<&'static mut LinkedList<u64>>,
}
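
// Indexing convention (descriptive note): cpu_queue[cpu_id][priority] is the
// ready queue for one priority level on one core, e.g. cpu_queue[0][99] holds
// the priority-99 RT tasks of core 0.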

impl SchedulerRT {
    /// Time slice handed to a SCHED_RR task when its slice is refilled
    const RR_TIMESLICE: i64 = 100;
    /// Number of RT priority levels
    const MAX_RT_PRIO: i64 = 100;

    pub fn new() -> SchedulerRT {
        // The number of cores is hard-coded for now.
        // TODO: obtain the core count from the cpu module.
        let mut result = SchedulerRT {
            cpu_queue: Default::default(),
            load_list: Default::default(),
        };

        // Create the ready queues for every CPU core
        for cpu_id in 0..MAX_CPU_NUM {
            result.cpu_queue.push(Vec::new());
            // Each CPU gets MAX_RT_PRIO priority queues
            for _ in 0..SchedulerRT::MAX_RT_PRIO {
                result.cpu_queue[cpu_id as usize].push(Box::leak(Box::new(RTQueue::new())));
            }
        }
        // Create the load-statistics list for every CPU core
        for _ in 0..MAX_CPU_NUM {
            result
                .load_list
                .push(Box::leak(Box::new(LinkedList::new())));
        }
        return result;
    }

    /// @brief Pick the next runnable RT process, or None if every queue is empty
    pub fn pick_next_task_rt(&mut self, cpu_id: u32) -> Option<&'static mut process_control_block> {
        // Scan this CPU's priority queues in index order until a runnable pcb is found
        for i in 0..SchedulerRT::MAX_RT_PRIO {
            let cpu_queue_i: &mut RTQueue = self.cpu_queue[cpu_id as usize][i as usize];
            let proc: Option<&'static mut process_control_block> = cpu_queue_i.dequeue();
            if proc.is_some() {
                return proc;
            }
        }
        // Every queue is empty
        None
    }

    /// @brief Total number of queued RT processes on the given CPU
    pub fn rt_queue_len(&mut self, cpu_id: u32) -> usize {
        let mut sum = 0;
        for prio in 0..SchedulerRT::MAX_RT_PRIO {
            sum += self.cpu_queue[cpu_id as usize][prio as usize].get_rt_queue_size();
        }
        return sum;
    }

    #[allow(dead_code)]
    #[inline]
    pub fn load_list_len(&mut self, cpu_id: u32) -> usize {
        return self.load_list[cpu_id as usize].len();
    }

    /// @brief Put a pcb back at the head of its priority queue
    pub fn enqueue_front(&mut self, pcb: &'static mut process_control_block) {
        self.cpu_queue[pcb.cpu_id as usize][pcb.priority as usize].enqueue_front(pcb);
    }
}

impl Scheduler for SchedulerRT {
    /// @brief Perform scheduling on the current CPU.
    /// Note: interrupts must be disabled before entering this function.
    fn sched(&mut self) -> Option<&'static mut process_control_block> {
        current_pcb().flags &= !(PF_NEED_SCHED as u64);
        // On the normal path a next pcb is always found; None here is a hard error
        let cpu_id = current_pcb().cpu_id;
        let proc: &'static mut process_control_block =
            self.pick_next_task_rt(cpu_id).expect("No RT process found");

        // Under the FIFO policy a task keeps the CPU until a strictly
        // higher-priority task becomes ready (equal priority is not enough)
        // or it gives the CPU up voluntarily (e.g. to wait for a resource)
        if proc.policy == SCHED_FIFO {
            // The picked process does not have a higher priority than the
            // current one, so do not switch
            if proc.priority <= current_pcb().priority {
                sched_enqueue(proc, false);
            } else {
                // Put the current process back on the queue
                sched_enqueue(current_pcb(), false);
                compiler_fence(core::sync::atomic::Ordering::SeqCst);
                return Some(proc);
            }
        }
        // The RR policy must also account for the time slice
        else if proc.policy == SCHED_RR {
            // Equal or higher priority: consider switching
            if proc.priority >= current_pcb().priority {
                // If the picked process has used up its time slice,
                // refill the slice, mark it as needing scheduling, and re-enqueue it
                if proc.rt_time_slice <= 0 {
                    proc.rt_time_slice = SchedulerRT::RR_TIMESLICE;
                    proc.flags |= PF_NEED_SCHED as u64;
                    sched_enqueue(proc, false);
                }
                // The picked process still has time left: switch to it
                else {
                    // Put the current process back on the queue
                    sched_enqueue(current_pcb(), false);
                    compiler_fence(core::sync::atomic::Ordering::SeqCst);
                    return Some(proc);
                }
            }
            // The current process has the higher priority (so it must itself be
            // a real-time process); put the picked process back at the head of
            // its priority queue
            else {
                self.cpu_queue[cpu_id as usize][proc.priority as usize].enqueue_front(proc);
            }
        }
        return None;
    }
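
    // Decision summary for sched() (derived from the branches above; larger
    // priority values are treated as higher priority throughout this file):
    //
    //   policy      comparison                        action
    //   ----------  --------------------------------  ---------------------------------
    //   SCHED_FIFO  proc.priority <= curr.priority    keep curr; re-enqueue proc
    //   SCHED_FIFO  proc.priority >  curr.priority    switch to proc; re-enqueue curr
    //   SCHED_RR    prio >= curr, slice used up       refill slice; re-enqueue proc
    //   SCHED_RR    prio >= curr, slice remaining     switch to proc; re-enqueue curr
    //   SCHED_RR    prio <  curr                      keep curr; proc back to queue head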

    fn enqueue(&mut self, pcb: &'static mut process_control_block) {
        let cpu_id = pcb.cpu_id;
        let priority = pcb.priority;
        let cpu_queue = &mut self.cpu_queue[cpu_id as usize];
        cpu_queue[priority as usize].enqueue(pcb);
        // // Get the current timestamp
        // let time = unsafe { _rdtsc() };
        // let freq = unsafe { Cpu_tsc_freq };
        // // kdebug!("this is timeeeeeeer {},freq is {}, {}", time, freq, cpu_id);
        // // Append the current time to the load-tracking list
        // self.load_list[cpu_id as usize].push_back(time);
        // // Drop entries from the front while they are older than the sampling window
        // while self.load_list[cpu_id as usize].len() > 1
        //     && (time - *self.load_list[cpu_id as usize].front().unwrap() > 10000000000)
        // {
        //     self.load_list[cpu_id as usize].pop_front();
        // }
    }
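
    // Sketch of how the scheduler core might drive this implementation
    // (hypothetical call site; `woken_pcb` is a placeholder, and the names
    // follow this file rather than a confirmed external API):
    //
    //     let rt = __get_rt_scheduler();
    //     rt.enqueue(woken_pcb);           // make an RT task runnable
    //     if let Some(next) = rt.sched() { // interrupts already disabled
    //         /* context-switch to `next` */
    //     }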
}