use core::sync::atomic::compiler_fence;

use alloc::{boxed::Box, collections::LinkedList, vec::Vec};

use crate::{
    arch::asm::current::current_pcb,
    include::bindings::bindings::{
        process_control_block, MAX_CPU_NUM, PF_NEED_SCHED, SCHED_FIFO, SCHED_RR,
    },
    kBUG, kdebug,
    libs::spinlock::RawSpinlock,
};

use super::core::{sched_enqueue, Scheduler};

/// Global RT scheduler instance
pub static mut RT_SCHEDULER_PTR: Option<Box<SchedulerRT>> = None;

/// @brief Get a mutable reference to the RT scheduler instance
#[inline]
pub fn __get_rt_scheduler() -> &'static mut SchedulerRT {
    return unsafe { RT_SCHEDULER_PTR.as_mut().unwrap() };
}

/// @brief Initialize the RT scheduler
pub unsafe fn sched_rt_init() {
    kdebug!("rt scheduler init");
    if RT_SCHEDULER_PTR.is_none() {
        RT_SCHEDULER_PTR = Some(Box::new(SchedulerRT::new()));
    } else {
        kBUG!("Try to init RT Scheduler twice.");
        panic!("Try to init RT Scheduler twice.");
    }
}
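
// Note: `RT_SCHEDULER_PTR` is a plain `static mut`, so `sched_rt_init` is
// presumably expected to run exactly once, early in kernel startup, before
// any cpu can call into the scheduler; the check above only catches a second
// sequential call, not a concurrent one.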

/// @brief RT run queue (per-cpu)
#[derive(Debug)]
struct RTQueue {
    /// Lock protecting the queue
    lock: RawSpinlock,
    /// Doubly-linked list holding the queued processes
    queue: LinkedList<&'static mut process_control_block>,
}

impl RTQueue {
    pub fn new() -> RTQueue {
        RTQueue {
            queue: LinkedList::new(),
            lock: RawSpinlock::INIT,
        }
    }

    /// @brief Append a pcb to the tail of the queue
    pub fn enqueue(&mut self, pcb: &'static mut process_control_block) {
        let mut rflags = 0usize;
        self.lock.lock_irqsave(&mut rflags);

        // The IDLE process (pid 0) is never queued
        if pcb.pid == 0 {
            self.lock.unlock_irqrestore(rflags);
            return;
        }
        self.queue.push_back(pcb);
        self.lock.unlock_irqrestore(rflags);
    }

    /// @brief Pop a pcb from the head of the queue; returns None when the queue is empty
    pub fn dequeue(&mut self) -> Option<&'static mut process_control_block> {
        let mut rflags = 0usize;
        self.lock.lock_irqsave(&mut rflags);
        // pop_front already yields None on an empty queue
        let res = self.queue.pop_front();
        self.lock.unlock_irqrestore(rflags);
        return res;
    }

    /// @brief Push a pcb onto the head of the queue
    pub fn enqueue_front(&mut self, pcb: &'static mut process_control_block) {
        let mut rflags = 0usize;
        self.lock.lock_irqsave(&mut rflags);

        // The IDLE process (pid 0) is never queued
        if pcb.pid == 0 {
            self.lock.unlock_irqrestore(rflags);
            return;
        }
        self.queue.push_front(pcb);
        self.lock.unlock_irqrestore(rflags);
    }

    pub fn get_rt_queue_size(&mut self) -> usize {
        return self.queue.len();
    }
}
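
// A minimal usage sketch of RTQueue (illustrative only; real callers live in
// SchedulerRT below, and `pcb` must be a valid &'static mut process_control_block):
//
//     let mut q = RTQueue::new();
//     q.enqueue(pcb);                 // appends at the tail, under the spinlock
//     if let Some(next) = q.dequeue() {
//         // `next` was at the head of the queue
//     }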

/// @brief The RT scheduler
pub struct SchedulerRT {
    cpu_queue: Vec<Vec<&'static mut RTQueue>>,
    load_list: Vec<&'static mut LinkedList<u64>>,
}
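
// Layout notes (inferred from the code below):
//   cpu_queue[cpu_id][priority] is the ready queue for one priority level on
//   one cpu; pick_next_task_rt() scans it from index 0 upwards.
//   load_list[cpu_id] collects timestamps of recent enqueues; see the
//   commented-out draft in Scheduler::enqueue() below.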

impl SchedulerRT {
    const RR_TIMESLICE: i64 = 100;
    const MAX_RT_PRIO: i64 = 100;

    pub fn new() -> SchedulerRT {
        // The number of cores is hard-coded for now
        // todo: get the core count from the cpu module
        let mut result = SchedulerRT {
            cpu_queue: Default::default(),
            load_list: Default::default(),
        };

        // Create the ready queues for every cpu core
        for cpu_id in 0..MAX_CPU_NUM {
            result.cpu_queue.push(Vec::new());
            // Each cpu gets MAX_RT_PRIO priority queues
            for _ in 0..SchedulerRT::MAX_RT_PRIO {
                result.cpu_queue[cpu_id as usize].push(Box::leak(Box::new(RTQueue::new())));
            }
        }
        // Create the load-tracking list for every cpu core
        for _ in 0..MAX_CPU_NUM {
            result
                .load_list
                .push(Box::leak(Box::new(LinkedList::new())));
        }
        return result;
    }
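
    // Note: Box::leak above intentionally leaks each queue so the struct
    // fields can hold the &'static mut references they require; the scheduler
    // lives for the kernel's whole lifetime, so the memory is never freed.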

    /// @brief Pick the next runnable RT process, or None if every queue is empty
    pub fn pick_next_task_rt(&mut self, cpu_id: u32) -> Option<&'static mut process_control_block> {
        // Scan this cpu's queues priority by priority; return the first pcb found
        for i in 0..SchedulerRT::MAX_RT_PRIO {
            let cpu_queue_i: &mut RTQueue = self.cpu_queue[cpu_id as usize][i as usize];
            let proc: Option<&'static mut process_control_block> = cpu_queue_i.dequeue();
            if proc.is_some() {
                return proc;
            }
        }
        // Every queue was empty
        None
    }
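
    // Note: this scan treats index 0 as the highest priority, while the
    // comparisons in sched() below treat a larger `priority` value as higher;
    // the two conventions look inconsistent and deserve a second look.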

    pub fn rt_queue_len(&mut self, cpu_id: u32) -> usize {
        let mut sum = 0;
        for prio in 0..SchedulerRT::MAX_RT_PRIO {
            sum += self.cpu_queue[cpu_id as usize][prio as usize].get_rt_queue_size();
        }
        return sum;
    }

    #[allow(dead_code)]
    #[inline]
    pub fn load_list_len(&mut self, cpu_id: u32) -> usize {
        return self.load_list[cpu_id as usize].len();
    }

    pub fn enqueue_front(&mut self, pcb: &'static mut process_control_block) {
        self.cpu_queue[pcb.cpu_id as usize][pcb.priority as usize].enqueue_front(pcb);
    }
}

impl Scheduler for SchedulerRT {
    /// @brief Schedule on the current cpu.
    /// Note: interrupts must be disabled before entering this function.
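    ///
    /// Policy summary, as implemented below:
    /// - SCHED_FIFO: switch only when the picked task's priority is strictly
    ///   higher than the current task's; otherwise the picked task is re-enqueued.
    /// - SCHED_RR: at equal or higher priority, switch when the picked task still
    ///   has timeslice left; when the slice is exhausted, refill it and re-enqueue.
    ///   At lower priority, the picked task goes back to the head of its queue.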
    fn sched(&mut self) -> Option<&'static mut process_control_block> {
        current_pcb().flags &= !(PF_NEED_SCHED as u64);
        // On the normal path a next pcb is always found; a None here is a bug
        let cpu_id = current_pcb().cpu_id;
        let proc: &'static mut process_control_block =
            self.pick_next_task_rt(cpu_id).expect("No RT process found");

        // Under the FIFO policy, a task keeps the cpu until a strictly
        // higher-priority task becomes ready (equal priority is not enough)
        // or it gives up the cpu voluntarily (e.g. to wait for a resource)
        if proc.policy == SCHED_FIFO {
            // If the picked process's priority is not higher than the current
            // one's, do not switch; put the picked process back
            if proc.priority <= current_pcb().priority {
                sched_enqueue(proc, false);
            } else {
                // Re-enqueue the current process
                sched_enqueue(current_pcb(), false);
                compiler_fence(core::sync::atomic::Ordering::SeqCst);
                return Some(proc);
            }
        }
        // The RR policy also has to account for the timeslice
        else if proc.policy == SCHED_RR {
            // At equal or higher priority, consider switching
            if proc.priority >= current_pcb().priority {
                // If the picked process has exhausted its timeslice, refill it,
                // mark the process as needing rescheduling, and re-enqueue it
                if proc.rt_time_slice <= 0 {
                    proc.rt_time_slice = SchedulerRT::RR_TIMESLICE;
                    proc.flags |= PF_NEED_SCHED as u64;
                    sched_enqueue(proc, false);
                }
                // The picked process still has timeslice left: switch to it
                else {
                    // Re-enqueue the current process
                    sched_enqueue(current_pcb(), false);
                    compiler_fence(core::sync::atomic::Ordering::SeqCst);
                    return Some(proc);
                }
            }
            // The current process has the higher priority (so it must itself be a
            // realtime process); put the picked process back at the head of its queue
            else {
                self.enqueue_front(proc);
            }
        }
        return None;
    }

    fn enqueue(&mut self, pcb: &'static mut process_control_block) {
        let cpu_id = pcb.cpu_id;
        let cpu_queue = &mut self.cpu_queue[cpu_id as usize];
        // Index the per-cpu vector by the pcb's priority level
        cpu_queue[pcb.priority as usize].enqueue(pcb);
        // // Get the current time
        // let time = unsafe { _rdtsc() };
        // let freq = unsafe { Cpu_tsc_freq };
        // // kdebug!("this is timeeeeeeer {},freq is {}, {}", time, freq, cpu_id);
        // // Append the current time to the load-tracking list
        // self.load_list[cpu_id as usize].push_back(time);
        // // Drop front entries older than the threshold
        // while self.load_list[cpu_id as usize].len() > 1
        //     && (time - *self.load_list[cpu_id as usize].front().unwrap() > 10000000000)
        // {
        //     self.load_list[cpu_id as usize].pop_front();
        // }
    }
}
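
// A minimal sketch of how the scheduler core might drive this module
// (hypothetical call site, for illustration; the real dispatch lives in
// super::core, and `switch_to` is a made-up name for the context switch):
//
//     let rt = __get_rt_scheduler();
//     // sched() panics when every queue is empty, so check first
//     if rt.rt_queue_len(current_pcb().cpu_id) > 0 {
//         if let Some(next) = rt.sched() {
//             // switch_to(next);
//         }
//     }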