use core::{
    intrinsics::unlikely,
    sync::atomic::{compiler_fence, Ordering},
};

use alloc::{sync::Arc, vec::Vec};

use crate::{
    kinfo,
    mm::percpu::PerCpu,
    process::{AtomicPid, Pid, ProcessControlBlock, ProcessFlags, ProcessManager, ProcessState},
    smp::{core::smp_get_processor_id, cpu::ProcessorId},
};

use super::rt::{sched_rt_init, SchedulerRT, __get_rt_scheduler};
use super::{
    cfs::{sched_cfs_init, SchedulerCFS, __get_cfs_scheduler},
    SchedPolicy,
};

lazy_static! {
    /// Records the pid of the process currently executing on each CPU.
    pub static ref CPU_EXECUTING: CpuExecuting = CpuExecuting::new();
}

#[derive(Debug)]
pub struct CpuExecuting {
    data: Vec<AtomicPid>,
}

impl CpuExecuting {
    pub fn new() -> Self {
        let mut data = Vec::new();
        for _ in 0..PerCpu::MAX_CPU_NUM {
            data.push(AtomicPid::new(Pid::new(0)));
        }
        Self { data }
    }

    #[inline(always)]
    pub fn set(&self, cpu_id: ProcessorId, pid: Pid) {
        self.data[cpu_id.data() as usize].store(pid, Ordering::SeqCst);
    }

    #[inline(always)]
    pub fn get(&self, cpu_id: ProcessorId) -> Pid {
        self.data[cpu_id.data() as usize].load(Ordering::SeqCst)
    }
}
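
// A minimal usage sketch (illustrative only, not how the kernel actually
// wires it up): a context-switch path would record the incoming task's pid
// for the current CPU, and other subsystems could query it afterwards.
// `next_pcb` here is a hypothetical `Arc<ProcessControlBlock>` chosen by
// the scheduler.
//
//     let cpu = smp_get_processor_id();
//     CPU_EXECUTING.set(cpu, next_pcb.pid());
//     debug_assert_eq!(CPU_EXECUTING.get(cpu), next_pcb.pid());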

// Get the load of a given CPU and return it; `cpu_id` is the id of the CPU
// whose load is being queried.
// TODO: Change the load metric to the number of processes that ran over a
// recent time window.
#[allow(dead_code)]
pub fn get_cpu_loads(cpu_id: ProcessorId) -> u32 {
    let cfs_scheduler = __get_cfs_scheduler();
    let rt_scheduler = __get_rt_scheduler();
    let len_cfs = cfs_scheduler.get_cfs_queue_len(cpu_id);
    let len_rt = rt_scheduler.rt_queue_len(cpu_id);
    // let load_rt = rt_scheduler.get_load_list_len(cpu_id);
    // kdebug!("this cpu_id {} is load rt {}", cpu_id, load_rt);

    return (len_rt + len_cfs) as u32;
}
// Load balancing
pub fn loads_balance(pcb: Arc<ProcessControlBlock>) {
    // FIXME: Load balancing currently pushes the task straight onto the
    // target CPU's queue, so timing races can leave the process present on
    // two CPUs at once. Until the scheduling subsystem is rewritten or
    // improved, pin processes to CPU 0 instead.
    // Load balancing is disabled for now because of this scheduler issue; see
    // https://github.com/DragonOS-Community/DragonOS/issues/571
    let min_loads_cpu_id = ProcessorId::new(0);

    // Get the total number of CPUs
    // let cpu_num = unsafe { smp_get_total_cpu() };
    // Find the id of the CPU with the lowest current load
    // let mut min_loads = get_cpu_loads(smp_get_processor_id());
    // for cpu_id in 0..cpu_num {
    //     let cpu_id = ProcessorId::new(cpu_id);
    //     let tmp_cpu_loads = get_cpu_loads(cpu_id);
    //     if min_loads - tmp_cpu_loads > 0 {
    //         min_loads_cpu_id = cpu_id;
    //         min_loads = tmp_cpu_loads;
    //     }
    // }
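
    // A sketch of what the search above could look like once re-enabled
    // (assumes the `smp_get_total_cpu()` helper referenced above; note that
    // the original `min_loads - tmp_cpu_loads > 0` test would underflow on
    // unsigned loads, so a plain `<` comparison is used instead):
    //
    //     let cpu_num = unsafe { smp_get_total_cpu() };
    //     let mut min_loads_cpu_id = smp_get_processor_id();
    //     let mut min_loads = get_cpu_loads(min_loads_cpu_id);
    //     for id in 0..cpu_num {
    //         let candidate = ProcessorId::new(id);
    //         let loads = get_cpu_loads(candidate);
    //         if loads < min_loads {
    //             min_loads_cpu_id = candidate;
    //             min_loads = loads;
    //         }
    //     }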

    let pcb_cpu = pcb.sched_info().on_cpu();
    // Migrate the pcb to the least-loaded CPU.
    // If the pcb's NEED_MIGRATE flag is already set, skip the migration.
    if pcb_cpu.is_none()
        || (min_loads_cpu_id != pcb_cpu.unwrap()
            && !pcb.flags().contains(ProcessFlags::NEED_MIGRATE))
    {
        pcb.flags().insert(ProcessFlags::NEED_MIGRATE);
        pcb.sched_info().set_migrate_to(Some(min_loads_cpu_id));
        // kdebug!("set migrating, pcb:{:?}", pcb);
    }
}
/// @brief Trait that every concrete scheduler must implement
pub trait Scheduler {
    /// @brief Called when a scheduling decision is requested from this scheduler
    fn sched(&mut self) -> Option<Arc<ProcessControlBlock>>;

    /// @brief Add the pcb to this scheduler's run queue
    fn enqueue(&mut self, pcb: Arc<ProcessControlBlock>);
}
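
// A minimal sketch of a conforming implementation (a hypothetical
// `SchedulerNoop` with a single FIFO queue, for illustration only):
//
//     struct SchedulerNoop {
//         queue: alloc::collections::VecDeque<Arc<ProcessControlBlock>>,
//     }
//
//     impl Scheduler for SchedulerNoop {
//         fn sched(&mut self) -> Option<Arc<ProcessControlBlock>> {
//             self.queue.pop_front()
//         }
//
//         fn enqueue(&mut self, pcb: Arc<ProcessControlBlock>) {
//             self.queue.push_back(pcb);
//         }
//     }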

pub fn do_sched() -> Option<Arc<ProcessControlBlock>> {
    // The current process is holding locks; don't switch away, to avoid deadlock.
    if ProcessManager::current_pcb().preempt_count() != 0 {
        let binding = ProcessManager::current_pcb();
        let guard = binding
            .sched_info()
            .inner_lock_try_upgradable_read_irqsave(5);
        if unlikely(guard.is_none()) {
            return None;
        }

        let mut guard = guard.unwrap();

        let state = guard.state();
        if state.is_blocked() {
            // try to upgrade
            for _ in 0..50 {
                match guard.try_upgrade() {
                    Ok(mut writer) => {
                        // A process that was marked asleep but is still inside a
                        // critical section is set back to Runnable.
                        writer.set_state(ProcessState::Runnable);
                        break;
                    }
                    Err(s) => {
                        guard = s;
                    }
                }
            }
        }
        return None;
    }

    compiler_fence(core::sync::atomic::Ordering::SeqCst);
    let cfs_scheduler: &mut SchedulerCFS = __get_cfs_scheduler();
    let rt_scheduler: &mut SchedulerRT = __get_rt_scheduler();
    compiler_fence(core::sync::atomic::Ordering::SeqCst);

    let next: Arc<ProcessControlBlock>;
    match rt_scheduler.pick_next_task_rt(smp_get_processor_id()) {
        Some(p) => {
            next = p;
            // Put the picked process back; the pick only served as a peek to
            // decide that the RT scheduler should run.
            rt_scheduler.enqueue_front(next);

            return rt_scheduler.sched();
        }
        None => {
            return cfs_scheduler.sched();
        }
    }
}
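
// How a caller might consume the result (hypothetical glue code; the real
// context-switch entry point lives elsewhere in the kernel):
//
//     if let Some(next) = do_sched() {
//         CPU_EXECUTING.set(smp_get_processor_id(), next.pid());
//         // ...hand `next` over to the arch-specific switch routine...
//     }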

/// @brief Add a process to a run queue
///
/// @param pcb The pcb to enqueue
/// @param reset_time Whether to reset its virtual runtime
pub fn sched_enqueue(pcb: Arc<ProcessControlBlock>, mut reset_time: bool) {
    compiler_fence(core::sync::atomic::Ordering::SeqCst);
    if pcb.sched_info().inner_lock_read_irqsave().state() != ProcessState::Runnable {
        return;
    }
    let cfs_scheduler = __get_cfs_scheduler();
    let rt_scheduler = __get_rt_scheduler();
    // Every process except IDLE takes part in load balancing.
    if pcb.pid().into() > 0 {
        loads_balance(pcb.clone());
    }

    if pcb.flags().contains(ProcessFlags::NEED_MIGRATE) {
        // kdebug!("migrating pcb:{:?}", pcb);
        pcb.flags().remove(ProcessFlags::NEED_MIGRATE);
        pcb.sched_info().set_on_cpu(pcb.sched_info().migrate_to());
        reset_time = true;
    }

    assert!(pcb.sched_info().on_cpu().is_some());

    match pcb.sched_info().inner_lock_read_irqsave().policy() {
        SchedPolicy::CFS => {
            if reset_time {
                cfs_scheduler.enqueue_reset_vruntime(pcb.clone());
            } else {
                cfs_scheduler.enqueue(pcb.clone());
            }
        }
        SchedPolicy::FIFO | SchedPolicy::RR => rt_scheduler.enqueue(pcb.clone()),
    }
}
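
// A typical call pattern (illustrative; whether `reset_time` is set depends
// on the call site, and `wakee` is a hypothetical `Arc<ProcessControlBlock>`):
//
//     // Wake a sleeping task: mark it Runnable first, then put it back on a
//     // run queue, resetting its vruntime so it is neither starved nor
//     // unfairly favored by its accumulated sleep time.
//     sched_enqueue(wakee.clone(), true);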

/// Initialize the process scheduler module
#[inline(never)]
pub fn sched_init() {
    kinfo!("Initializing schedulers...");
    unsafe {
        sched_cfs_init();
        sched_rt_init();
    }
    kinfo!("Schedulers initialized");
}

/// @brief Update the time slice when a timer interrupt arrives.
/// Note: this function may only be called from the timer interrupt handler.
#[inline(never)]
pub fn sched_update_jiffies() {
    let binding = ProcessManager::current_pcb();
    let guard = binding.sched_info().inner_lock_try_read_irqsave(10);
    if unlikely(guard.is_none()) {
        return;
    }
    let guard = guard.unwrap();
    let policy = guard.policy();
    drop(guard);
    match policy {
        SchedPolicy::CFS => {
            __get_cfs_scheduler().timer_update_jiffies(binding.sched_info());
        }
        SchedPolicy::FIFO | SchedPolicy::RR => {
            __get_rt_scheduler().timer_update_jiffies();
        }
    }
}