use core::sync::atomic::compiler_fence;

use crate::{
    arch::asm::current::current_pcb,
    include::bindings::bindings::smp_get_total_cpu,
    include::bindings::bindings::{
        process_control_block, MAX_CPU_NUM, PF_NEED_MIGRATE, PROC_RUNNING, SCHED_FIFO,
        SCHED_NORMAL, SCHED_RR,
    },
    kinfo,
    process::process::process_cpu,
    syscall::SystemError,
};

use super::cfs::{sched_cfs_init, SchedulerCFS, __get_cfs_scheduler};
use super::rt::{sched_rt_init, SchedulerRT, __get_rt_scheduler};

/// @brief Get the pcb of the process currently executing on the specified cpu
#[inline]
pub fn cpu_executing(cpu_id: u32) -> &'static mut process_control_block {
    // todo: once per_cpu is introduced, make this function actually return the pcb executing on the specified cpu

    if cpu_id == process_cpu(current_pcb()) {
        return current_pcb();
    } else {
        todo!()
    }
}
// Get the load of a cpu and return the current load; cpu_id is the id of the cpu to query
// TODO: change the load metric to the number of processes run over a recent period of time
pub fn get_cpu_loads(cpu_id: u32) -> u32 {
    let cfs_scheduler = __get_cfs_scheduler();
    let rt_scheduler = __get_rt_scheduler();
    let len_cfs = cfs_scheduler.get_cfs_queue_len(cpu_id);
    let len_rt = rt_scheduler.rt_queue_len(cpu_id);
    // let load_rt = rt_scheduler.get_load_list_len(cpu_id);
    // kdebug!("this cpu_id {} is load rt {}", cpu_id, load_rt);

    return (len_rt + len_cfs) as u32;
}
// Load balancing
pub fn loads_balance(pcb: &mut process_control_block) {
    // Adjust the migration state of the pcb
    // Get the total number of CPUs
    let cpu_num = unsafe { smp_get_total_cpu() };
    // Find the id of the CPU with the smallest current load
    let mut min_loads_cpu_id = pcb.cpu_id;
    let mut min_loads = get_cpu_loads(pcb.cpu_id);
    for cpu_id in 0..cpu_num {
        let tmp_cpu_loads = get_cpu_loads(cpu_id);
        if tmp_cpu_loads < min_loads {
            min_loads_cpu_id = cpu_id;
            min_loads = tmp_cpu_loads;
        }
    }

    // Migrate the current pcb to the CPU with the smallest load.
    // If PF_NEED_MIGRATE is already set on the pcb, do not migrate it again.
    if (min_loads_cpu_id != pcb.cpu_id) && (pcb.flags & (PF_NEED_MIGRATE as u64)) == 0 {
        // sched_migrate_process(pcb, min_loads_cpu_id as usize);
        pcb.flags |= PF_NEED_MIGRATE as u64;
        pcb.migrate_to = min_loads_cpu_id;
        // kdebug!("set migrating, pcb:{:?}", pcb);
    }
}
/// @brief The trait that a concrete scheduler should implement
pub trait Scheduler {
    /// @brief Called when a scheduling decision is made with this scheduler
    fn sched(&mut self) -> Option<&'static mut process_control_block>;

    /// @brief Add the pcb to this scheduler's run queue
    fn enqueue(&mut self, pcb: &'static mut process_control_block);
}
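
// A minimal sketch of what an implementor of `Scheduler` could look like. `SchedulerFoo` and
// its `queue` field are hypothetical, shown only to illustrate the trait's shape; the real
// implementors are `SchedulerCFS` and `SchedulerRT` from this module's submodules.
//
//     impl Scheduler for SchedulerFoo {
//         fn sched(&mut self) -> Option<&'static mut process_control_block> {
//             // Pop the next runnable pcb from this scheduler's queue, or None if it is empty.
//             self.queue.pop()
//         }
//
//         fn enqueue(&mut self, pcb: &'static mut process_control_block) {
//             // Put the pcb at the back of this scheduler's run queue.
//             self.queue.push(pcb);
//         }
//     }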

/// @brief Make a scheduling decision and return the next process to run.
/// Real-time tasks are checked first and take priority over CFS tasks.
pub fn do_sched() -> Option<&'static mut process_control_block> {
    compiler_fence(core::sync::atomic::Ordering::SeqCst);
    let cfs_scheduler: &mut SchedulerCFS = __get_cfs_scheduler();
    let rt_scheduler: &mut SchedulerRT = __get_rt_scheduler();
    compiler_fence(core::sync::atomic::Ordering::SeqCst);

    let next: &'static mut process_control_block;
    match rt_scheduler.pick_next_task_rt(current_pcb().cpu_id) {
        Some(p) => {
            next = p;
            // kdebug!("next pcb is {}",next.pid);
            // Put the picked process back where it was
            rt_scheduler.enqueue_front(next);

            return rt_scheduler.sched();
        }
        None => {
            return cfs_scheduler.sched();
        }
    }
}

/// @brief Add a process to the run queue
///
/// @param pcb The pcb of the process to be enqueued
/// @param reset_time Whether to reset the virtual runtime
#[allow(dead_code)]
#[no_mangle]
pub extern "C" fn sched_enqueue(pcb: &'static mut process_control_block, mut reset_time: bool) {
    compiler_fence(core::sync::atomic::Ordering::SeqCst);

    // The scheduler does not handle processes whose running bit is 0
    if pcb.state & (PROC_RUNNING as u64) == 0 {
        return;
    }
    let cfs_scheduler = __get_cfs_scheduler();
    let rt_scheduler = __get_rt_scheduler();

    // Every process except IDLE goes through load balancing
    if pcb.pid > 0 {
        loads_balance(pcb);
    }
    compiler_fence(core::sync::atomic::Ordering::SeqCst);

    if (pcb.flags & (PF_NEED_MIGRATE as u64)) != 0 {
        // kdebug!("migrating pcb:{:?}", pcb);
        pcb.flags &= !(PF_NEED_MIGRATE as u64);
        pcb.cpu_id = pcb.migrate_to;
        reset_time = true;
    }
    compiler_fence(core::sync::atomic::Ordering::SeqCst);

    if pcb.policy == SCHED_NORMAL {
        if reset_time {
            cfs_scheduler.enqueue_reset_vruntime(pcb);
        } else {
            cfs_scheduler.enqueue(pcb);
        }
    } else if pcb.policy == SCHED_FIFO || pcb.policy == SCHED_RR {
        rt_scheduler.enqueue(pcb);
    } else {
        panic!("This policy is not supported at this time");
    }
}
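
// A hedged usage sketch for sched_enqueue(), based only on the checks above: the caller must
// set the running bit first, or the enqueue is silently skipped. The surrounding wakeup logic
// is hypothetical and not part of this file.
//
//     pcb.state |= PROC_RUNNING as u64; // mark the process runnable first
//     sched_enqueue(pcb, false);        // false: keep the current virtual runtime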

/// @brief Initialize the process scheduler module
#[allow(dead_code)]
#[no_mangle]
pub extern "C" fn sched_init() {
    kinfo!("Initializing schedulers...");
    unsafe {
        sched_cfs_init();
        sched_rt_init();
    }
    kinfo!("Schedulers initialized");
}

/// @brief Update the time slice when a timer interrupt arrives.
/// Note that this function may only be called by the timer interrupt handler.
#[allow(dead_code)]
#[no_mangle]
pub extern "C" fn sched_update_jiffies() {
    match current_pcb().policy {
        SCHED_NORMAL => {
            __get_cfs_scheduler().timer_update_jiffies();
        }
        SCHED_FIFO | SCHED_RR => {
            current_pcb().rt_time_slice -= 1;
        }
        _ => {
            todo!()
        }
    }
}

/// @brief Set the idle process pcb for the specified cpu (delegated to the CFS scheduler)
#[allow(dead_code)]
#[no_mangle]
pub extern "C" fn sched_set_cpu_idle(cpu_id: usize, pcb: *mut process_control_block) {
    __get_cfs_scheduler().set_cpu_idle(cpu_id, pcb);
}

/// @brief Mark a process as waiting to be migrated to another cpu core.
/// When the process is re-enqueued, its cpu_id will be updated and it will join the correct queue.
///
/// @return i32 Returns 0 on success, otherwise a posix error code
#[allow(dead_code)]
#[no_mangle]
pub extern "C" fn sched_migrate_process(
    pcb: &'static mut process_control_block,
    target: usize,
) -> i32 {
    // Valid cpu ids range from 0 to MAX_CPU_NUM - 1
    if target >= MAX_CPU_NUM.try_into().unwrap() {
        // panic!("sched_migrate_process: target > MAX_CPU_NUM");
        return SystemError::EINVAL.to_posix_errno();
    }

    pcb.flags |= PF_NEED_MIGRATE as u64;
    pcb.migrate_to = target as u32;
    // kdebug!("pid:{} migrate to cpu:{}", pcb.pid, target);
    return 0;
}
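
// A hedged sketch of the migration flow, based only on the code in this file: a caller marks
// the pcb for migration here, and the move takes effect the next time the process is
// re-enqueued, when sched_enqueue() clears PF_NEED_MIGRATE, rewrites pcb.cpu_id from
// pcb.migrate_to and resets the virtual runtime.
//
//     if sched_migrate_process(pcb, 1) == 0 {
//         // pcb now has PF_NEED_MIGRATE set and migrate_to == 1; it will land on CPU 1's
//         // run queue at its next sched_enqueue().
//     }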