1 use core::{
2 fmt,
3 hash::Hash,
4 hint::spin_loop,
5 intrinsics::{likely, unlikely},
6 mem::ManuallyDrop,
7 sync::atomic::{compiler_fence, fence, AtomicBool, AtomicUsize, Ordering},
8 };
9
10 use alloc::{
11 ffi::CString,
12 string::{String, ToString},
13 sync::{Arc, Weak},
14 vec::Vec,
15 };
16 use cred::INIT_CRED;
17 use hashbrown::HashMap;
18 use log::{debug, error, info, warn};
19 use system_error::SystemError;
20
21 use crate::{
22 arch::{
23 cpu::current_cpu_id,
24 ipc::signal::{AtomicSignal, SigSet, Signal},
25 process::ArchPCBInfo,
26 CurrentIrqArch,
27 },
28 driver::tty::tty_core::TtyCore,
29 exception::InterruptArch,
30 filesystem::{
31 procfs::procfs_unregister_pid,
32 vfs::{file::FileDescriptorVec, FileType},
33 },
34 ipc::signal_types::{SigInfo, SigPending, SignalStruct},
35 libs::{
36 align::AlignedBox,
37 casting::DowncastArc,
38 futex::{
39 constant::{FutexFlag, FUTEX_BITSET_MATCH_ANY},
40 futex::{Futex, RobustListHead},
41 },
42 lock_free_flags::LockFreeFlags,
43 rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard},
44 spinlock::{SpinLock, SpinLockGuard},
45 wait_queue::WaitQueue,
46 },
47 mm::{
48 percpu::{PerCpu, PerCpuVar},
49 set_IDLE_PROCESS_ADDRESS_SPACE,
50 ucontext::AddressSpace,
51 VirtAddr,
52 },
53 namespaces::{mnt_namespace::FsStruct, pid_namespace::PidStrcut, NsProxy},
54 net::socket::SocketInode,
55 sched::{
56 completion::Completion, cpu_rq, fair::FairSchedEntity, prio::MAX_PRIO, DequeueFlag,
57 EnqueueFlag, OnRq, SchedMode, WakeupFlags, __schedule,
58 },
59 smp::{
60 core::smp_get_processor_id,
61 cpu::{AtomicProcessorId, ProcessorId},
62 kick_cpu,
63 },
64 syscall::{user_access::clear_user, Syscall},
65 };
66 use timer::AlarmTimer;
67
68 use self::{cred::Cred, kthread::WorkerPrivate};
69
70 pub mod abi;
71 pub mod c_adapter;
72 pub mod cred;
73 pub mod exec;
74 pub mod exit;
75 pub mod fork;
76 pub mod idle;
77 pub mod kthread;
78 pub mod pid;
79 pub mod resource;
80 pub mod stdio;
81 pub mod syscall;
82 pub mod timer;
83 pub mod utils;
84
85 /// 系统中所有进程的pcb
86 static ALL_PROCESS: SpinLock<Option<HashMap<Pid, Arc<ProcessControlBlock>>>> = SpinLock::new(None);
87
88 pub static mut PROCESS_SWITCH_RESULT: Option<PerCpuVar<SwitchResult>> = None;
89
90 /// 一个只改变1次的全局变量,标志进程管理器是否已经初始化完成
91 static mut __PROCESS_MANAGEMENT_INIT_DONE: bool = false;
92
93 pub struct SwitchResult {
94 pub prev_pcb: Option<Arc<ProcessControlBlock>>,
95 pub next_pcb: Option<Arc<ProcessControlBlock>>,
96 }
97
98 impl SwitchResult {
99 pub fn new() -> Self {
100 Self {
101 prev_pcb: None,
102 next_pcb: None,
103 }
104 }
105 }
106
107 #[derive(Debug)]
108 pub struct ProcessManager;
109 impl ProcessManager {
110 #[inline(never)]
111 fn init() {
112 static INIT_FLAG: AtomicBool = AtomicBool::new(false);
113 if INIT_FLAG
114 .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
115 .is_err()
116 {
117 panic!("ProcessManager has been initialized!");
118 }
119
120 unsafe {
121 compiler_fence(Ordering::SeqCst);
122 debug!("To create address space for INIT process.");
123 // test_buddy();
124 set_IDLE_PROCESS_ADDRESS_SPACE(
125 AddressSpace::new(true).expect("Failed to create address space for INIT process."),
126 );
127 debug!("INIT process address space created.");
128 compiler_fence(Ordering::SeqCst);
129 };
130
131 ALL_PROCESS.lock_irqsave().replace(HashMap::new());
132 Self::init_switch_result();
133 Self::arch_init();
134 debug!("process arch init done.");
135 Self::init_idle();
136 debug!("process idle init done.");
137
138 unsafe { __PROCESS_MANAGEMENT_INIT_DONE = true };
139 info!("Process Manager initialized.");
140 }
141
142 fn init_switch_result() {
143 let mut switch_res_vec: Vec<SwitchResult> = Vec::new();
144 for _ in 0..PerCpu::MAX_CPU_NUM {
145 switch_res_vec.push(SwitchResult::new());
146 }
147 unsafe {
148 PROCESS_SWITCH_RESULT = Some(PerCpuVar::new(switch_res_vec).unwrap());
149 }
150 }
151
152 /// 判断进程管理器是否已经初始化完成
153 #[allow(dead_code)]
154 pub fn initialized() -> bool {
155 unsafe { __PROCESS_MANAGEMENT_INIT_DONE }
156 }
157
158 /// 获取当前进程的pcb
159 pub fn current_pcb() -> Arc<ProcessControlBlock> {
160 if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) {
161 error!("ProcessManager::current_pcb(): __PROCESS_MANAGEMENT_INIT_DONE == false");
162 loop {
163 spin_loop();
164 }
165 }
166 return ProcessControlBlock::arch_current_pcb();
167 }
168
169 /// 获取当前进程的pid
170 ///
171 /// 如果进程管理器未初始化完成,那么返回0
172 pub fn current_pid() -> Pid {
173 if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) {
174 return Pid(0);
175 }
176
177 return ProcessManager::current_pcb().pid();
178 }
179
180 /// 增加当前进程的锁持有计数
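///
/// ## 示例
///
/// 与 `preempt_enable` 成对使用的最小示意(仅作说明,非权威用法):
///
/// ```rust,ignore
/// ProcessManager::preempt_disable();
/// // ... 临界区: 期间当前任务不应被抢占 ...
/// ProcessManager::preempt_enable();
/// ```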
181 #[inline(always)]
182 pub fn preempt_disable() {
183 if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) {
184 ProcessManager::current_pcb().preempt_disable();
185 }
186 }
187
188 /// 减少当前进程的锁持有计数
189 #[inline(always)]
190 pub fn preempt_enable() {
191 if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) {
192 ProcessManager::current_pcb().preempt_enable();
193 }
194 }
195
196 /// 根据pid获取进程的pcb
197 ///
198 /// ## 参数
199 ///
200 /// - `pid` : 进程的pid
201 ///
202 /// ## 返回值
203 ///
204 /// 如果找到了对应的进程,那么返回该进程的pcb,否则返回None
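///
/// ## 示例
///
/// 一个最小的查找示意(仅作说明;`Pid(1)` 即init进程的pid):
///
/// ```rust,ignore
/// if let Some(init_pcb) = ProcessManager::find(Pid(1)) {
///     debug!("init process name: {}", init_pcb.basic().name());
/// }
/// ```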
205 pub fn find(pid: Pid) -> Option<Arc<ProcessControlBlock>> {
206 return ALL_PROCESS.lock_irqsave().as_ref()?.get(&pid).cloned();
207 }
208
209 /// 向系统中添加一个进程的pcb
210 ///
211 /// ## 参数
212 ///
213 /// - `pcb` : 进程的pcb
214 ///
215 /// ## 返回值
216 ///
217 /// 无
218 pub fn add_pcb(pcb: Arc<ProcessControlBlock>) {
219 ALL_PROCESS
220 .lock_irqsave()
221 .as_mut()
222 .unwrap()
223 .insert(pcb.pid(), pcb.clone());
224 }
225
226 /// 唤醒一个进程
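///
/// ## 示例
///
/// 唤醒方的最小示意(仅作说明;`target_pcb` 为假设的变量名,错误处理从简):
///
/// ```rust,ignore
/// if ProcessManager::wakeup(&target_pcb).is_err() {
///     warn!("failed to wakeup pid {:?}", target_pcb.pid());
/// }
/// ```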
227 pub fn wakeup(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> {
228 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
229 let state = pcb.sched_info().inner_lock_read_irqsave().state();
230 if state.is_blocked() {
231 let mut writer = pcb.sched_info().inner_lock_write_irqsave();
232 let state = writer.state();
233 if state.is_blocked() {
234 writer.set_state(ProcessState::Runnable);
235 writer.set_wakeup();
236
237 // avoid deadlock
238 drop(writer);
239
240 let rq =
241 cpu_rq(pcb.sched_info().on_cpu().unwrap_or(current_cpu_id()).data() as usize);
242
243 let (rq, _guard) = rq.self_lock();
244 rq.update_rq_clock();
245 rq.activate_task(
246 pcb,
247 EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK,
248 );
249
250 rq.check_preempt_currnet(pcb, WakeupFlags::empty());
251
252 // sched_enqueue(pcb.clone(), true);
253 return Ok(());
254 } else if state.is_exited() {
255 return Err(SystemError::EINVAL);
256 } else {
257 return Ok(());
258 }
259 } else if state.is_exited() {
260 return Err(SystemError::EINVAL);
261 } else {
262 return Ok(());
263 }
264 }
265
266 /// 唤醒暂停的进程
267 pub fn wakeup_stop(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> {
268 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
269 let state = pcb.sched_info().inner_lock_read_irqsave().state();
270 if let ProcessState::Stopped = state {
271 let mut writer = pcb.sched_info().inner_lock_write_irqsave();
272 let state = writer.state();
273 if let ProcessState::Stopped = state {
274 writer.set_state(ProcessState::Runnable);
275 // avoid deadlock
276 drop(writer);
277
278 let rq = cpu_rq(pcb.sched_info().on_cpu().unwrap().data() as usize);
279
280 let (rq, _guard) = rq.self_lock();
281 rq.update_rq_clock();
282 rq.activate_task(
283 pcb,
284 EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK,
285 );
286
287 rq.check_preempt_currnet(pcb, WakeupFlags::empty());
288
289 // sched_enqueue(pcb.clone(), true);
290 return Ok(());
291 } else if state.is_runnable() {
292 return Ok(());
293 } else {
294 return Err(SystemError::EINVAL);
295 }
296 } else if state.is_runnable() {
297 return Ok(());
298 } else {
299 return Err(SystemError::EINVAL);
300 }
301 }
302
303 /// 标志当前进程永久睡眠,但是发起调度的工作,应该由调用者完成
304 ///
305 /// ## 注意
306 ///
307 /// - 进入当前函数之前,不能持有sched_info的锁
308 /// - 进入当前函数之前,必须关闭中断
309 /// - 进入当前函数之后必须保证逻辑的正确性,避免被重复加入调度队列
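///
/// ## 示例
///
/// "标记睡眠 + 主动调度"的典型配对示意(仅作说明,实际调用处可能有所不同):
///
/// ```rust,ignore
/// let _irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
/// ProcessManager::mark_sleep(true).expect("mark_sleep failed");
/// // 由调用者发起调度,真正让出CPU
/// __schedule(SchedMode::SM_NONE);
/// ```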
310 pub fn mark_sleep(interruptable: bool) -> Result<(), SystemError> {
311 assert!(
312 !CurrentIrqArch::is_irq_enabled(),
313 "interrupt must be disabled before enter ProcessManager::mark_sleep()"
314 );
315 let pcb = ProcessManager::current_pcb();
316 let mut writer = pcb.sched_info().inner_lock_write_irqsave();
317 if !matches!(writer.state(), ProcessState::Exited(_)) {
318 writer.set_state(ProcessState::Blocked(interruptable));
319 writer.set_sleep();
320 pcb.flags().insert(ProcessFlags::NEED_SCHEDULE);
321 fence(Ordering::SeqCst);
322 drop(writer);
323 return Ok(());
324 }
325 return Err(SystemError::EINTR);
326 }
327
328 /// 标志当前进程为停止状态,但是发起调度的工作,应该由调用者完成
329 ///
330 /// ## 注意
331 ///
332 /// - 进入当前函数之前,不能持有sched_info的锁
333 /// - 进入当前函数之前,必须关闭中断
334 pub fn mark_stop() -> Result<(), SystemError> {
335 assert!(
336 !CurrentIrqArch::is_irq_enabled(),
337 "interrupt must be disabled before enter ProcessManager::mark_stop()"
338 );
339
340 let pcb = ProcessManager::current_pcb();
341 let mut writer = pcb.sched_info().inner_lock_write_irqsave();
342 if !matches!(writer.state(), ProcessState::Exited(_)) {
343 writer.set_state(ProcessState::Stopped);
344 pcb.flags().insert(ProcessFlags::NEED_SCHEDULE);
345 drop(writer);
346
347 return Ok(());
348 }
349 return Err(SystemError::EINTR);
350 }
351 /// 当子进程退出后向父进程发送通知
352 fn exit_notify() {
353 let current = ProcessManager::current_pcb();
354 // 让INIT进程收养所有子进程
355 if current.pid() != Pid(1) {
356 unsafe {
357 current
358 .adopt_childen()
359 .unwrap_or_else(|e| panic!("adopt_childen failed: error: {e:?}"))
360 };
361 let r = current.parent_pcb.read_irqsave().upgrade();
362 if r.is_none() {
363 return;
364 }
365 let parent_pcb = r.unwrap();
366 let r = Syscall::kill(parent_pcb.pid(), Signal::SIGCHLD as i32);
367 if r.is_err() {
368 warn!(
369 "failed to send kill signal to {:?}'s parent pcb {:?}",
370 current.pid(),
371 parent_pcb.pid()
372 );
373 }
374 // todo: 这里需要向父进程发送SIGCHLD信号
375 // todo: 这里还需要根据线程组的信息,决定信号的发送
376 }
377 }
378
379 /// 退出当前进程
380 ///
381 /// ## 参数
382 ///
383 /// - `exit_code` : 进程的退出码
384 pub fn exit(exit_code: usize) -> ! {
385 // 关中断
386 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
387 let pcb = ProcessManager::current_pcb();
388 let pid = pcb.pid();
389 pcb.sched_info
390 .inner_lock_write_irqsave()
391 .set_state(ProcessState::Exited(exit_code));
392 pcb.wait_queue.wakeup(Some(ProcessState::Blocked(true)));
393
394 let rq = cpu_rq(smp_get_processor_id().data() as usize);
395 let (rq, guard) = rq.self_lock();
396 rq.deactivate_task(
397 pcb.clone(),
398 DequeueFlag::DEQUEUE_SLEEP | DequeueFlag::DEQUEUE_NOCLOCK,
399 );
400 drop(guard);
401
402 // 进行进程退出后的工作
403 let thread = pcb.thread.write_irqsave();
404 if let Some(addr) = thread.set_child_tid {
405 unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") };
406 }
407
408 if let Some(addr) = thread.clear_child_tid {
409 if Arc::strong_count(&pcb.basic().user_vm().expect("User VM Not found")) > 1 {
410 let _ =
411 Futex::futex_wake(addr, FutexFlag::FLAGS_MATCH_NONE, 1, FUTEX_BITSET_MATCH_ANY);
412 }
413 unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") };
414 }
415
416 RobustListHead::exit_robust_list(pcb.clone());
417
418 // 如果是vfork出来的进程,则需要处理completion
419 if thread.vfork_done.is_some() {
420 thread.vfork_done.as_ref().unwrap().complete_all();
421 }
422 drop(thread);
423 unsafe { pcb.basic_mut().set_user_vm(None) };
424
425 // TODO 由于未实现进程组,tty记录的前台进程组等于当前进程,故退出前要置空
426 // 后续相关逻辑需要在SYS_EXIT_GROUP系统调用中实现
427 if let Some(tty) = pcb.sig_info_irqsave().tty() {
428 tty.core().contorl_info_irqsave().pgid = None;
429 }
430 pcb.sig_info_mut().set_tty(None);
431
432 drop(pcb);
433 ProcessManager::exit_notify();
434 // unsafe { CurrentIrqArch::interrupt_enable() };
435 __schedule(SchedMode::SM_NONE);
436 error!("pid {pid:?} exited but sched again!");
437 #[allow(clippy::empty_loop)]
438 loop {
439 spin_loop();
440 }
441 }
442
443 pub unsafe fn release(pid: Pid) {
444 let pcb = ProcessManager::find(pid);
445 if pcb.is_some() {
446 // let pcb = pcb.unwrap();
447 // 判断该pcb是否在全局没有任何引用
448 // TODO: 当前,pcb的Arc指针存在泄露问题,引用计数不正确,打算在接下来实现debug专用的Arc,方便调试,然后解决这个bug。
449 // 因此目前暂时注释掉,使得能跑
450 // if Arc::strong_count(&pcb) <= 2 {
451 // drop(pcb);
452 // ALL_PROCESS.lock().as_mut().unwrap().remove(&pid);
453 // } else {
454 // // 如果不为1就panic
455 // let msg = format!("pcb '{:?}' is still referenced, strong count={}",pcb.pid(), Arc::strong_count(&pcb));
456 // error!("{}", msg);
457 // panic!()
458 // }
459
460 ALL_PROCESS.lock_irqsave().as_mut().unwrap().remove(&pid);
461 }
462 }
463
464 /// 上下文切换完成后的钩子函数
465 unsafe fn switch_finish_hook() {
466 // debug!("switch_finish_hook");
467 let prev_pcb = PROCESS_SWITCH_RESULT
468 .as_mut()
469 .unwrap()
470 .get_mut()
471 .prev_pcb
472 .take()
473 .expect("prev_pcb is None");
474 let next_pcb = PROCESS_SWITCH_RESULT
475 .as_mut()
476 .unwrap()
477 .get_mut()
478 .next_pcb
479 .take()
480 .expect("next_pcb is None");
481
482 // 由于进程切换前使用了SpinLockGuard::leak(),所以这里需要手动释放锁
483 fence(Ordering::SeqCst);
484
485 prev_pcb.arch_info.force_unlock();
486 fence(Ordering::SeqCst);
487
488 next_pcb.arch_info.force_unlock();
489 fence(Ordering::SeqCst);
490 }
491
492 /// 如果目标进程正在目标CPU上运行,那么就让这个cpu陷入内核态
493 ///
494 /// ## 参数
495 ///
496 /// - `pcb` : 进程的pcb
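///
/// ## 示例
///
/// 一个使用示意(仅作说明;`pid` 为假设的变量):
///
/// ```rust,ignore
/// if let Some(pcb) = ProcessManager::find(pid) {
///     // 若目标进程正在其他CPU上运行,让该CPU尽快陷入内核态
///     ProcessManager::kick(&pcb);
/// }
/// ```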
497 #[allow(dead_code)]
498 pub fn kick(pcb: &Arc<ProcessControlBlock>) {
499 ProcessManager::current_pcb().preempt_disable();
500 let cpu_id = pcb.sched_info().on_cpu();
501
502 if let Some(cpu_id) = cpu_id {
503 if pcb.pid() == cpu_rq(cpu_id.data() as usize).current().pid() {
504 kick_cpu(cpu_id).expect("ProcessManager::kick(): Failed to kick cpu");
505 }
506 }
507
508 ProcessManager::current_pcb().preempt_enable();
509 }
510 }
511
512 /// 上下文切换的钩子函数,当这个函数return的时候,将会发生上下文切换
513 #[cfg(target_arch = "x86_64")]
514 #[inline(never)]
515 pub unsafe extern "sysv64" fn switch_finish_hook() {
516 ProcessManager::switch_finish_hook();
517 }
518 #[cfg(target_arch = "riscv64")]
519 #[inline(always)]
520 pub unsafe fn switch_finish_hook() {
521 ProcessManager::switch_finish_hook();
522 }
523
524 int_like!(Pid, AtomicPid, usize, AtomicUsize);
525
526 impl fmt::Display for Pid {
527 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
528 write!(f, "{}", self.0)
529 }
530 }
531
532 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
533 pub enum ProcessState {
534 /// The process is running on a CPU or in a run queue.
535 Runnable,
536 /// The process is waiting for an event to occur.
537 /// 其中的bool表示该等待过程是否可以被打断。
538 /// - 如果该bool为true,那么,硬件中断/信号/其他系统事件都可以打断该等待过程,使得该进程重新进入Runnable状态。
539 /// - 如果该bool为false,那么,这个进程必须被显式的唤醒,才能重新进入Runnable状态。
540 Blocked(bool),
541 /// 进程被信号终止
542 Stopped,
543 /// 进程已经退出,usize表示进程的退出码
544 Exited(usize),
545 }
546
547 #[allow(dead_code)]
548 impl ProcessState {
549 #[inline(always)]
550 pub fn is_runnable(&self) -> bool {
551 return matches!(self, ProcessState::Runnable);
552 }
553
554 #[inline(always)]
555 pub fn is_blocked(&self) -> bool {
556 return matches!(self, ProcessState::Blocked(_));
557 }
558
559 #[inline(always)]
560 pub fn is_blocked_interruptable(&self) -> bool {
561 return matches!(self, ProcessState::Blocked(true));
562 }
563
564 /// Returns `true` if the process state is [`Exited`].
565 #[inline(always)]
566 pub fn is_exited(&self) -> bool {
567 return matches!(self, ProcessState::Exited(_));
568 }
569
570 /// Returns `true` if the process state is [`Stopped`].
571 ///
572 /// [`Stopped`]: ProcessState::Stopped
573 #[inline(always)]
574 pub fn is_stopped(&self) -> bool {
575 matches!(self, ProcessState::Stopped)
576 }
577
578 /// Returns exit code if the process state is [`Exited`].
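///
/// 一个简单的示意:
///
/// ```rust,ignore
/// assert_eq!(ProcessState::Exited(0).exit_code(), Some(0));
/// assert_eq!(ProcessState::Runnable.exit_code(), None);
/// ```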
579 #[inline(always)]
580 pub fn exit_code(&self) -> Option<usize> {
581 match self {
582 ProcessState::Exited(code) => Some(*code),
583 _ => None,
584 }
585 }
586 }
587
588 bitflags! {
589 /// pcb的标志位
590 pub struct ProcessFlags: usize {
591 /// 当前pcb表示一个内核线程
592 const KTHREAD = 1 << 0;
593 /// 当前进程需要被调度
594 const NEED_SCHEDULE = 1 << 1;
595 /// 进程由于vfork而与父进程存在资源共享
596 const VFORK = 1 << 2;
597 /// 进程不可被冻结
598 const NOFREEZE = 1 << 3;
599 /// 进程正在退出
600 const EXITING = 1 << 4;
601 /// 进程由于接收到终止信号唤醒
602 const WAKEKILL = 1 << 5;
603 /// 进程由于接收到信号而退出.(Killed by a signal)
604 const SIGNALED = 1 << 6;
605 /// 进程需要迁移到其他cpu上
606 const NEED_MIGRATE = 1 << 7;
607 /// 随机化的虚拟地址空间,主要用于动态链接器的加载
608 const RANDOMIZE = 1 << 8;
609 }
610 }
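// ProcessFlags 的典型用法示意(仅作说明,假设已持有某个pcb的引用):
//
//     let flags = pcb.flags();
//     if flags.contains(ProcessFlags::NEED_SCHEDULE) {
//         // 在下一个调度点重新调度
//     }
//     flags.insert(ProcessFlags::NEED_MIGRATE);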
611 #[derive(Debug)]
612 pub struct ProcessControlBlock {
613 /// 当前进程的pid
614 pid: Pid,
615 /// 当前进程的线程组id(这个值在同一个线程组内永远不变)
616 tgid: Pid,
617 /// 有关Pid的相关的信息
618 thread_pid: Arc<RwLock<PidStrcut>>,
619 basic: RwLock<ProcessBasicInfo>,
620 /// 当前进程的自旋锁持有计数
621 preempt_count: AtomicUsize,
622
623 flags: LockFreeFlags<ProcessFlags>,
624 worker_private: SpinLock<Option<WorkerPrivate>>,
625 /// 进程的内核栈
626 kernel_stack: RwLock<KernelStack>,
627
628 /// 系统调用栈
629 syscall_stack: RwLock<KernelStack>,
630
631 /// 与调度相关的信息
632 sched_info: ProcessSchedulerInfo,
633 /// 与处理器架构相关的信息
634 arch_info: SpinLock<ArchPCBInfo>,
635 /// 与信号处理相关的信息(似乎可以是无锁的)
636 sig_info: RwLock<ProcessSignalInfo>,
637 /// 信号处理结构体
638 sig_struct: SpinLock<SignalStruct>,
639 /// 退出信号
640 exit_signal: AtomicSignal,
641
642 /// 父进程指针
643 parent_pcb: RwLock<Weak<ProcessControlBlock>>,
644 /// 真实父进程指针
645 real_parent_pcb: RwLock<Weak<ProcessControlBlock>>,
646
647 /// 子进程链表
648 children: RwLock<Vec<Pid>>,
649
650 /// 等待队列
651 wait_queue: WaitQueue,
652
653 /// 线程信息
654 thread: RwLock<ThreadInfo>,
655
656 /// 进程文件系统的状态
657 fs: Arc<SpinLock<FsStruct>>,
658
659 ///闹钟定时器
660 alarm_timer: SpinLock<Option<AlarmTimer>>,
661
662 /// 进程的robust lock列表
663 robust_list: RwLock<Option<RobustListHead>>,
664
665 /// namespace的指针
666 nsproxy: Arc<RwLock<NsProxy>>,
667
668 /// 进程作为主体的凭证集
669 cred: SpinLock<Cred>,
670 }
671
672 impl ProcessControlBlock {
673 /// Generate a new pcb.
674 ///
675 /// ## 参数
676 ///
677 /// - `name` : 进程的名字
678 /// - `kstack` : 进程的内核栈
679 ///
680 /// ## 返回值
681 ///
682 /// 返回一个新的pcb
683 pub fn new(name: String, kstack: KernelStack) -> Arc<Self> {
684 return Self::do_create_pcb(name, kstack, false);
685 }
686
687 /// 创建一个新的idle进程
688 ///
689 /// 请注意,这个函数只能在进程管理初始化的时候调用。
690 pub fn new_idle(cpu_id: u32, kstack: KernelStack) -> Arc<Self> {
691 let name = format!("idle-{}", cpu_id);
692 return Self::do_create_pcb(name, kstack, true);
693 }
694
695 /// # 函数的功能
696 ///
697 /// 返回当前进程是否是内核线程
698 ///
699 /// # 返回值
700 ///
701 /// 若当前进程是内核线程则返回true,否则返回false
702 pub fn is_kthread(&self) -> bool {
703 return matches!(self.flags(), &mut ProcessFlags::KTHREAD);
704 }
705
706 #[inline(never)]
707 fn do_create_pcb(name: String, kstack: KernelStack, is_idle: bool) -> Arc<Self> {
708 let (pid, ppid, cwd, cred, tty) = if is_idle {
709 let cred = INIT_CRED.clone();
710 (Pid(0), Pid(0), "/".to_string(), cred, None)
711 } else {
712 let ppid = ProcessManager::current_pcb().pid();
713 let mut cred = ProcessManager::current_pcb().cred();
714 cred.cap_permitted = cred.cap_ambient;
715 cred.cap_effective = cred.cap_ambient;
716 let cwd = ProcessManager::current_pcb().basic().cwd();
717 let tty = ProcessManager::current_pcb().sig_info_irqsave().tty();
718 (Self::generate_pid(), ppid, cwd, cred, tty)
719 };
720
721 let basic_info = ProcessBasicInfo::new(Pid(0), ppid, Pid(0), name, cwd, None);
722 let preempt_count = AtomicUsize::new(0);
723 let flags = unsafe { LockFreeFlags::new(ProcessFlags::empty()) };
724
725 let sched_info = ProcessSchedulerInfo::new(None);
726 let arch_info = SpinLock::new(ArchPCBInfo::new(&kstack));
727
728 let ppcb: Weak<ProcessControlBlock> = ProcessManager::find(ppid)
729 .map(|p| Arc::downgrade(&p))
730 .unwrap_or_default();
731 let pcb = Self {
732 pid,
733 tgid: pid,
734 thread_pid: Arc::new(RwLock::new(PidStrcut::new())),
735 basic: basic_info,
736 preempt_count,
737 flags,
738 kernel_stack: RwLock::new(kstack),
739 syscall_stack: RwLock::new(KernelStack::new().unwrap()),
740 worker_private: SpinLock::new(None),
741 sched_info,
742 arch_info,
743 sig_info: RwLock::new(ProcessSignalInfo::default()),
744 sig_struct: SpinLock::new(SignalStruct::new()),
745 exit_signal: AtomicSignal::new(Signal::SIGCHLD),
746 parent_pcb: RwLock::new(ppcb.clone()),
747 real_parent_pcb: RwLock::new(ppcb),
748 children: RwLock::new(Vec::new()),
749 wait_queue: WaitQueue::default(),
750 thread: RwLock::new(ThreadInfo::new()),
751 fs: Arc::new(SpinLock::new(FsStruct::new())),
752 alarm_timer: SpinLock::new(None),
753 robust_list: RwLock::new(None),
754 nsproxy: Arc::new(RwLock::new(NsProxy::new())),
755 cred: SpinLock::new(cred),
756 };
757
758 pcb.sig_info.write().set_tty(tty);
759
760 // 初始化系统调用栈
761 #[cfg(target_arch = "x86_64")]
762 pcb.arch_info
763 .lock()
764 .init_syscall_stack(&pcb.syscall_stack.read());
765
766 let pcb = Arc::new(pcb);
767
768 pcb.sched_info()
769 .sched_entity()
770 .force_mut()
771 .set_pcb(Arc::downgrade(&pcb));
772 // 设置进程的arc指针到内核栈和系统调用栈的最低地址处
773 unsafe {
774 pcb.kernel_stack
775 .write()
776 .set_pcb(Arc::downgrade(&pcb))
777 .unwrap();
778
779 pcb.syscall_stack
780 .write()
781 .set_pcb(Arc::downgrade(&pcb))
782 .unwrap()
783 };
784
785 // 将当前pcb加入父进程的子进程哈希表中
786 if pcb.pid() > Pid(1) {
787 if let Some(ppcb_arc) = pcb.parent_pcb.read_irqsave().upgrade() {
788 let mut children = ppcb_arc.children.write_irqsave();
789 children.push(pcb.pid());
790 } else {
791 panic!("parent pcb is None");
792 }
793 }
794
795 return pcb;
796 }
797
798 /// 生成一个新的pid
799 #[inline(always)]
800 fn generate_pid() -> Pid {
801 static NEXT_PID: AtomicPid = AtomicPid::new(Pid(1));
802 return NEXT_PID.fetch_add(Pid(1), Ordering::SeqCst);
803 }
804
805 /// 返回当前进程的锁持有计数
806 #[inline(always)]
807 pub fn preempt_count(&self) -> usize {
808 return self.preempt_count.load(Ordering::SeqCst);
809 }
810
811 /// 增加当前进程的锁持有计数
812 #[inline(always)]
813 pub fn preempt_disable(&self) {
814 self.preempt_count.fetch_add(1, Ordering::SeqCst);
815 }
816
817 /// 减少当前进程的锁持有计数
818 #[inline(always)]
819 pub fn preempt_enable(&self) {
820 self.preempt_count.fetch_sub(1, Ordering::SeqCst);
821 }
822
823 #[inline(always)]
824 pub unsafe fn set_preempt_count(&self, count: usize) {
825 self.preempt_count.store(count, Ordering::SeqCst);
826 }
827
828 #[inline(always)]
829 pub fn flags(&self) -> &mut ProcessFlags {
830 return self.flags.get_mut();
831 }
832
833 /// 请注意,这个值能在中断上下文中读取,但不能被中断上下文修改
834 /// 否则会导致死锁
835 #[inline(always)]
836 pub fn basic(&self) -> RwLockReadGuard<ProcessBasicInfo> {
837 return self.basic.read_irqsave();
838 }
839
840 #[inline(always)]
841 pub fn set_name(&self, name: String) {
842 self.basic.write().set_name(name);
843 }
844
845 #[inline(always)]
846 pub fn basic_mut(&self) -> RwLockWriteGuard<ProcessBasicInfo> {
847 return self.basic.write_irqsave();
848 }
849
850 /// # 获取arch info的锁,同时关闭中断
851 #[inline(always)]
852 pub fn arch_info_irqsave(&self) -> SpinLockGuard<ArchPCBInfo> {
853 return self.arch_info.lock_irqsave();
854 }
855
856 /// # 获取arch info的锁,但是不关闭中断
857 ///
858 /// 由于arch info在进程切换的时候会使用到,
859 /// 因此在中断上下文外,获取arch info 而不irqsave是不安全的.
860 ///
861 /// 只能在以下情况下使用这个函数:
862 /// - 在中断上下文中(中断已经禁用),获取arch info的锁。
863 /// - 刚刚创建新的pcb
864 #[inline(always)]
865 pub unsafe fn arch_info(&self) -> SpinLockGuard<ArchPCBInfo> {
866 return self.arch_info.lock();
867 }
868
869 #[inline(always)]
870 pub fn kernel_stack(&self) -> RwLockReadGuard<KernelStack> {
871 return self.kernel_stack.read();
872 }
873
874 pub unsafe fn kernel_stack_force_ref(&self) -> &KernelStack {
875 self.kernel_stack.force_get_ref()
876 }
877
878 #[inline(always)]
879 #[allow(dead_code)]
880 pub fn kernel_stack_mut(&self) -> RwLockWriteGuard<KernelStack> {
881 return self.kernel_stack.write();
882 }
883
884 #[inline(always)]
885 pub fn sched_info(&self) -> &ProcessSchedulerInfo {
886 return &self.sched_info;
887 }
888
889 #[inline(always)]
890 pub fn worker_private(&self) -> SpinLockGuard<Option<WorkerPrivate>> {
891 return self.worker_private.lock();
892 }
893
894 #[inline(always)]
895 pub fn pid(&self) -> Pid {
896 return self.pid;
897 }
898
899 #[inline(always)]
900 pub fn pid_strcut(&self) -> Arc<RwLock<PidStrcut>> {
901 self.thread_pid.clone()
902 }
903
904 #[inline(always)]
905 pub fn tgid(&self) -> Pid {
906 return self.tgid;
907 }
908
909 #[inline(always)]
910 pub fn fs_struct(&self) -> Arc<SpinLock<FsStruct>> {
911 self.fs.clone()
912 }
913
914 /// 获取文件描述符表的Arc指针
915 #[inline(always)]
916 pub fn fd_table(&self) -> Arc<RwLock<FileDescriptorVec>> {
917 return self.basic.read().fd_table().unwrap();
918 }
919
920 #[inline(always)]
921 pub fn cred(&self) -> Cred {
922 self.cred.lock().clone()
923 }
924
925 /// 根据文件描述符序号,获取socket对象的Arc指针
926 ///
927 /// ## 参数
928 ///
929 /// - `fd` 文件描述符序号
930 ///
931 /// ## 返回值
932 ///
933 /// `Option<Arc<SocketInode>>` : socket inode对象的Arc指针. 如果文件描述符不是socket,那么返回None
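///
/// ## 示例
///
/// 一个最小示意(仅作说明;`fd` 为假设的文件描述符变量):
///
/// ```rust,ignore
/// if let Some(socket_inode) = ProcessManager::current_pcb().get_socket(fd) {
///     // fd 确实指向一个socket,可以继续进行网络相关操作
/// }
/// ```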
934 pub fn get_socket(&self, fd: i32) -> Option<Arc<SocketInode>> {
935 let binding = ProcessManager::current_pcb().fd_table();
936 let fd_table_guard = binding.read();
937
938 let f = fd_table_guard.get_file_by_fd(fd)?;
939 drop(fd_table_guard);
940
941 if f.file_type() != FileType::Socket {
942 return None;
943 }
944 let socket: Arc<SocketInode> = f
945 .inode()
946 .downcast_arc::<SocketInode>()
947 .expect("Not a socket inode");
948 return Some(socket);
949 }
950
951 /// 当前进程退出时,让初始进程收养所有子进程
952 unsafe fn adopt_childen(&self) -> Result<(), SystemError> {
953 match ProcessManager::find(Pid(1)) {
954 Some(init_pcb) => {
955 let childen_guard = self.children.write();
956 let mut init_childen_guard = init_pcb.children.write();
957
958 childen_guard.iter().for_each(|pid| {
959 init_childen_guard.push(*pid);
960 });
961
962 return Ok(());
963 }
964 _ => Err(SystemError::ECHILD),
965 }
966 }
967
968 /// 生成进程的名字
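///
/// 行为示意(与下方实现一一对应,仅作说明):
///
/// ```rust,ignore
/// let args = vec![CString::new("-l").unwrap()];
/// assert_eq!(
///     ProcessControlBlock::generate_name("/bin/ls", &args),
///     "/bin/ls -l"
/// );
/// ```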
969 pub fn generate_name(program_path: &str, args: &Vec<CString>) -> String {
970 let mut name = program_path.to_string();
971 for arg in args {
972 name.push(' ');
973 name.push_str(arg.to_string_lossy().as_ref());
974 }
975 return name;
976 }
977
978 pub fn sig_info_irqsave(&self) -> RwLockReadGuard<ProcessSignalInfo> {
979 self.sig_info.read_irqsave()
980 }
981
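/// 尝试获取 sig_info 的读锁(关中断版本)。
///
/// 最多重试 `times` 次;若始终未能获取读锁,则返回 `None`。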
982 pub fn try_siginfo_irqsave(&self, times: u8) -> Option<RwLockReadGuard<ProcessSignalInfo>> {
983 for _ in 0..times {
984 if let Some(r) = self.sig_info.try_read_irqsave() {
985 return Some(r);
986 }
987 }
988
989 return None;
990 }
991
992 pub fn sig_info_mut(&self) -> RwLockWriteGuard<ProcessSignalInfo> {
993 self.sig_info.write_irqsave()
994 }
995
996 pub fn try_siginfo_mut(&self, times: u8) -> Option<RwLockWriteGuard<ProcessSignalInfo>> {
997 for _ in 0..times {
998 if let Some(r) = self.sig_info.try_write_irqsave() {
999 return Some(r);
1000 }
1001 }
1002
1003 return None;
1004 }
1005
1006 /// 判断当前进程是否有未处理的信号
1007 pub fn has_pending_signal(&self) -> bool {
1008 let sig_info = self.sig_info_irqsave();
1009 let has_pending = sig_info.sig_pending().has_pending();
1010 drop(sig_info);
1011 return has_pending;
1012 }
1013
1014 pub fn sig_struct(&self) -> SpinLockGuard<SignalStruct> {
1015 self.sig_struct.lock_irqsave()
1016 }
1017
1018 pub fn try_sig_struct_irqsave(&self, times: u8) -> Option<SpinLockGuard<SignalStruct>> {
1019 for _ in 0..times {
1020 if let Ok(r) = self.sig_struct.try_lock_irqsave() {
1021 return Some(r);
1022 }
1023 }
1024
1025 return None;
1026 }
1027
1028 pub fn sig_struct_irqsave(&self) -> SpinLockGuard<SignalStruct> {
1029 self.sig_struct.lock_irqsave()
1030 }
1031
1032 #[inline(always)]
1033 pub fn get_robust_list(&self) -> RwLockReadGuard<Option<RobustListHead>> {
1034 return self.robust_list.read_irqsave();
1035 }
1036
1037 #[inline(always)]
1038 pub fn set_robust_list(&self, new_robust_list: Option<RobustListHead>) {
1039 *self.robust_list.write_irqsave() = new_robust_list;
1040 }
1041
1042 pub fn alarm_timer_irqsave(&self) -> SpinLockGuard<Option<AlarmTimer>> {
1043 return self.alarm_timer.lock_irqsave();
1044 }
1045
1046 pub fn get_nsproxy(&self) -> Arc<RwLock<NsProxy>> {
1047 self.nsproxy.clone()
1048 }
1049
1050 pub fn set_nsproxy(&self, nsprsy: NsProxy) {
1051 *self.nsproxy.write() = nsprsy;
1052 }
1053 }
1054
1055 impl Drop for ProcessControlBlock {
1056 fn drop(&mut self) {
1057 let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
1058 // 在ProcFS中,解除进程的注册
1059 procfs_unregister_pid(self.pid())
1060 .unwrap_or_else(|e| panic!("procfs_unregister_pid failed: error: {e:?}"));
1061
1062 if let Some(ppcb) = self.parent_pcb.read_irqsave().upgrade() {
1063 ppcb.children
1064 .write_irqsave()
1065 .retain(|pid| *pid != self.pid());
1066 }
1067
1068 drop(irq_guard);
1069 }
1070 }
1071
1072 /// 线程信息
1073 #[derive(Debug)]
1074 pub struct ThreadInfo {
1075 // 来自用户空间记录用户线程id的地址,在该线程结束时将该地址置0以通知父进程
1076 clear_child_tid: Option<VirtAddr>,
1077 set_child_tid: Option<VirtAddr>,
1078
1079 vfork_done: Option<Arc<Completion>>,
1080 /// 线程组的组长
1081 group_leader: Weak<ProcessControlBlock>,
1082 }
1083
1084 impl ThreadInfo {
1085 pub fn new() -> Self {
1086 Self {
1087 clear_child_tid: None,
1088 set_child_tid: None,
1089 vfork_done: None,
1090 group_leader: Weak::default(),
1091 }
1092 }
1093
1094 pub fn group_leader(&self) -> Option<Arc<ProcessControlBlock>> {
1095 return self.group_leader.upgrade();
1096 }
1097 }
1098
1099 /// 进程的基本信息
1100 ///
1101 /// 这个结构体保存进程的基本信息,主要是那些不会随着进程的运行而经常改变的信息。
1102 #[derive(Debug)]
1103 pub struct ProcessBasicInfo {
1104 /// 当前进程的进程组id
1105 pgid: Pid,
1106 /// 当前进程的父进程的pid
1107 ppid: Pid,
1108 /// 当前进程所属会话id
1109 sid: Pid,
1110 /// 进程的名字
1111 name: String,
1112
1113 /// 当前进程的工作目录
1114 cwd: String,
1115
1116 /// 用户地址空间
1117 user_vm: Option<Arc<AddressSpace>>,
1118
1119 /// 文件描述符表
1120 fd_table: Option<Arc<RwLock<FileDescriptorVec>>>,
1121 }
1122
1123 impl ProcessBasicInfo {
1124 #[inline(never)]
1125 pub fn new(
1126 pgid: Pid,
1127 ppid: Pid,
1128 sid: Pid,
1129 name: String,
1130 cwd: String,
1131 user_vm: Option<Arc<AddressSpace>>,
1132 ) -> RwLock<Self> {
1133 let fd_table = Arc::new(RwLock::new(FileDescriptorVec::new()));
1134 return RwLock::new(Self {
1135 pgid,
1136 ppid,
1137 sid,
1138 name,
1139 cwd,
1140 user_vm,
1141 fd_table: Some(fd_table),
1142 });
1143 }
1144
1145 pub fn pgid(&self) -> Pid {
1146 return self.pgid;
1147 }
1148
1149 pub fn ppid(&self) -> Pid {
1150 return self.ppid;
1151 }
1152
1153 pub fn sid(&self) -> Pid {
1154 return self.sid;
1155 }
1156
1157 pub fn name(&self) -> &str {
1158 return &self.name;
1159 }
1160
1161 pub fn set_name(&mut self, name: String) {
1162 self.name = name;
1163 }
1164
1165 pub fn cwd(&self) -> String {
1166 return self.cwd.clone();
1167 }
1168 pub fn set_cwd(&mut self, path: String) {
1169 self.cwd = path;
1170 }
1171
1172 pub fn user_vm(&self) -> Option<Arc<AddressSpace>> {
1173 return self.user_vm.clone();
1174 }
1175
1176 pub unsafe fn set_user_vm(&mut self, user_vm: Option<Arc<AddressSpace>>) {
1177 self.user_vm = user_vm;
1178 }
1179
1180 pub fn fd_table(&self) -> Option<Arc<RwLock<FileDescriptorVec>>> {
1181 return self.fd_table.clone();
1182 }
1183
1184 pub fn set_fd_table(&mut self, fd_table: Option<Arc<RwLock<FileDescriptorVec>>>) {
1185 self.fd_table = fd_table;
1186 }
1187 }
1188
1189 #[derive(Debug)]
1190 pub struct ProcessSchedulerInfo {
1191 /// 当前进程所在的cpu
1192 on_cpu: AtomicProcessorId,
1193 /// 如果当前进程等待被迁移到另一个cpu核心上(也就是flags中的PF_NEED_MIGRATE被置位),
1194 /// 该字段存储要被迁移到的目标处理器核心号
1195 // migrate_to: AtomicProcessorId,
1196 inner_locked: RwLock<InnerSchedInfo>,
1197 /// 进程的调度优先级
1198 // priority: SchedPriority,
1199 /// 当前进程的虚拟运行时间
1200 // virtual_runtime: AtomicIsize,
1201 /// 由实时调度器管理的时间片
1202 // rt_time_slice: AtomicIsize,
1203 pub sched_stat: RwLock<SchedInfo>,
1204 /// 调度策略
1205 pub sched_policy: RwLock<crate::sched::SchedPolicy>,
1206 /// cfs调度实体
1207 pub sched_entity: Arc<FairSchedEntity>,
1208 pub on_rq: SpinLock<OnRq>,
1209
1210 pub prio_data: RwLock<PrioData>,
1211 }
1212
1213 #[derive(Debug, Default)]
1214 #[allow(dead_code)]
1215 pub struct SchedInfo {
1216 /// 记录任务在特定 CPU 上运行的次数
1217 pub pcount: usize,
1218 /// 记录任务等待在运行队列上的时间
1219 pub run_delay: usize,
1220 /// 记录任务上次在 CPU 上运行的时间戳
1221 pub last_arrival: u64,
1222 /// 记录任务上次被加入到运行队列中的时间戳
1223 pub last_queued: u64,
1224 }
1225
1226 #[derive(Debug)]
1227 #[allow(dead_code)]
1228 pub struct PrioData {
1229 pub prio: i32,
1230 pub static_prio: i32,
1231 pub normal_prio: i32,
1232 }
1233
1234 impl Default for PrioData {
1235 fn default() -> Self {
1236 Self {
1237 prio: MAX_PRIO - 20,
1238 static_prio: MAX_PRIO - 20,
1239 normal_prio: MAX_PRIO - 20,
1240 }
1241 }
1242 }
1243
1244 #[derive(Debug)]
1245 pub struct InnerSchedInfo {
1246 /// 当前进程的状态
1247 state: ProcessState,
1248 /// 进程是否被标记为睡眠状态(参见 mark_sleep / set_sleep)
1249 sleep: bool,
1250 }
1251
1252 impl InnerSchedInfo {
1253 pub fn state(&self) -> ProcessState {
1254 return self.state;
1255 }
1256
1257 pub fn set_state(&mut self, state: ProcessState) {
1258 self.state = state;
1259 }
1260
1261 pub fn set_sleep(&mut self) {
1262 self.sleep = true;
1263 }
1264
1265 pub fn set_wakeup(&mut self) {
1266 self.sleep = false;
1267 }
1268
1269 pub fn is_mark_sleep(&self) -> bool {
1270 self.sleep
1271 }
1272 }
1273
1274 impl ProcessSchedulerInfo {
1275 #[inline(never)]
1276 pub fn new(on_cpu: Option<ProcessorId>) -> Self {
1277 let cpu_id = on_cpu.unwrap_or(ProcessorId::INVALID);
1278 return Self {
1279 on_cpu: AtomicProcessorId::new(cpu_id),
1280 // migrate_to: AtomicProcessorId::new(ProcessorId::INVALID),
1281 inner_locked: RwLock::new(InnerSchedInfo {
1282 state: ProcessState::Blocked(false),
1283 sleep: false,
1284 }),
1285 // virtual_runtime: AtomicIsize::new(0),
1286 // rt_time_slice: AtomicIsize::new(0),
1287 // priority: SchedPriority::new(100).unwrap(),
1288 sched_stat: RwLock::new(SchedInfo::default()),
1289 sched_policy: RwLock::new(crate::sched::SchedPolicy::CFS),
1290 sched_entity: FairSchedEntity::new(),
1291 on_rq: SpinLock::new(OnRq::None),
1292 prio_data: RwLock::new(PrioData::default()),
1293 };
1294 }
1295
1296 pub fn sched_entity(&self) -> Arc<FairSchedEntity> {
1297 return self.sched_entity.clone();
1298 }
1299
1300 pub fn on_cpu(&self) -> Option<ProcessorId> {
1301 let on_cpu = self.on_cpu.load(Ordering::SeqCst);
1302 if on_cpu == ProcessorId::INVALID {
1303 return None;
1304 } else {
1305 return Some(on_cpu);
1306 }
1307 }
1308
1309 pub fn set_on_cpu(&self, on_cpu: Option<ProcessorId>) {
1310 if let Some(cpu_id) = on_cpu {
1311 self.on_cpu.store(cpu_id, Ordering::SeqCst);
1312 } else {
1313 self.on_cpu.store(ProcessorId::INVALID, Ordering::SeqCst);
1314 }
1315 }
1316
1317 // pub fn migrate_to(&self) -> Option<ProcessorId> {
1318 // let migrate_to = self.migrate_to.load(Ordering::SeqCst);
1319 // if migrate_to == ProcessorId::INVALID {
1320 // return None;
1321 // } else {
1322 // return Some(migrate_to);
1323 // }
1324 // }
1325
1326 // pub fn set_migrate_to(&self, migrate_to: Option<ProcessorId>) {
1327 // if let Some(data) = migrate_to {
1328 // self.migrate_to.store(data, Ordering::SeqCst);
1329 // } else {
1330 // self.migrate_to
1331 // .store(ProcessorId::INVALID, Ordering::SeqCst)
1332 // }
1333 // }
1334
1335 pub fn inner_lock_write_irqsave(&self) -> RwLockWriteGuard<InnerSchedInfo> {
1336 return self.inner_locked.write_irqsave();
1337 }
1338
1339 pub fn inner_lock_read_irqsave(&self) -> RwLockReadGuard<InnerSchedInfo> {
1340 return self.inner_locked.read_irqsave();
1341 }
1342
1343 // pub fn inner_lock_try_read_irqsave(
1344 // &self,
1345 // times: u8,
1346 // ) -> Option<RwLockReadGuard<InnerSchedInfo>> {
1347 // for _ in 0..times {
1348 // if let Some(r) = self.inner_locked.try_read_irqsave() {
1349 // return Some(r);
1350 // }
1351 // }
1352
1353 // return None;
1354 // }
1355
1356 // pub fn inner_lock_try_upgradable_read_irqsave(
1357 // &self,
1358 // times: u8,
1359 // ) -> Option<RwLockUpgradableGuard<InnerSchedInfo>> {
1360 // for _ in 0..times {
1361 // if let Some(r) = self.inner_locked.try_upgradeable_read_irqsave() {
1362 // return Some(r);
1363 // }
1364 // }
1365
1366 // return None;
1367 // }
1368
1369 // pub fn virtual_runtime(&self) -> isize {
1370 // return self.virtual_runtime.load(Ordering::SeqCst);
1371 // }
1372
1373 // pub fn set_virtual_runtime(&self, virtual_runtime: isize) {
1374 // self.virtual_runtime
1375 // .store(virtual_runtime, Ordering::SeqCst);
1376 // }
1377 // pub fn increase_virtual_runtime(&self, delta: isize) {
1378 // self.virtual_runtime.fetch_add(delta, Ordering::SeqCst);
1379 // }
1380
1381 // pub fn rt_time_slice(&self) -> isize {
1382 // return self.rt_time_slice.load(Ordering::SeqCst);
1383 // }
1384
1385 // pub fn set_rt_time_slice(&self, rt_time_slice: isize) {
1386 // self.rt_time_slice.store(rt_time_slice, Ordering::SeqCst);
1387 // }
1388
1389 // pub fn increase_rt_time_slice(&self, delta: isize) {
1390 // self.rt_time_slice.fetch_add(delta, Ordering::SeqCst);
1391 // }
1392
1393 pub fn policy(&self) -> crate::sched::SchedPolicy {
1394 return *self.sched_policy.read_irqsave();
1395 }
1396 }
1397
1398 #[derive(Debug, Clone)]
1399 pub struct KernelStack {
1400 stack: Option<AlignedBox<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>>,
1401 /// 标记该内核栈是否可以被释放
1402 can_be_freed: bool,
1403 }
1404
1405 impl KernelStack {
1406 pub const SIZE: usize = 0x4000;
1407 pub const ALIGN: usize = 0x4000;
1408
1409 pub fn new() -> Result<Self, SystemError> {
1410 return Ok(Self {
1411 stack: Some(
1412 AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_zeroed()?,
1413 ),
1414 can_be_freed: true,
1415 });
1416 }
1417
1418 /// 根据已有的空间,构造一个内核栈结构体
1419 ///
1420 /// 仅仅用于BSP启动时,为idle进程构造内核栈。其他时候使用这个函数,很可能造成错误!
1421 pub unsafe fn from_existed(base: VirtAddr) -> Result<Self, SystemError> {
1422 if base.is_null() || !base.check_aligned(Self::ALIGN) {
1423 return Err(SystemError::EFAULT);
1424 }
1425
1426 return Ok(Self {
1427 stack: Some(
1428 AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_unchecked(
1429 base.data() as *mut [u8; KernelStack::SIZE],
1430 ),
1431 ),
1432 can_be_freed: false,
1433 });
1434 }
1435
1436 /// 返回内核栈的起始虚拟地址(低地址)
1437 pub fn start_address(&self) -> VirtAddr {
1438 return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize);
1439 }
1440
1441 /// 返回内核栈的结束虚拟地址(高地址)(不包含该地址)
1442 pub fn stack_max_address(&self) -> VirtAddr {
1443 return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize + Self::SIZE);
1444 }
1445
1446 pub unsafe fn set_pcb(&mut self, pcb: Weak<ProcessControlBlock>) -> Result<(), SystemError> {
1447 // 将一个Weak<ProcessControlBlock>放到内核栈的最低地址处
1448 let p: *const ProcessControlBlock = Weak::into_raw(pcb);
1449 let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock;
1450
1451 // 如果内核栈的最低地址处已经有了一个pcb,那么,这里就不再设置,直接返回错误
1452 if unlikely(unsafe { !(*stack_bottom_ptr).is_null() }) {
1453 error!("kernel stack bottom is not null: {:p}", *stack_bottom_ptr);
1454 return Err(SystemError::EPERM);
1455 }
1456 // 将pcb的地址放到内核栈的最低地址处
1457 unsafe {
1458 *stack_bottom_ptr = p;
1459 }
1460
1461 return Ok(());
1462 }
1463
1464 /// 清除内核栈的pcb指针
1465 ///
1466 /// ## 参数
1467 ///
1468 /// - `force` : 如果为true,那么,即使该内核栈的pcb指针不为null,也会被强制清除而不处理Weak指针问题
1469 pub unsafe fn clear_pcb(&mut self, force: bool) {
1470 let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock;
1471 if unlikely(unsafe { (*stack_bottom_ptr).is_null() }) {
1472 return;
1473 }
1474
1475 if !force {
1476 let pcb_ptr: Weak<ProcessControlBlock> = Weak::from_raw(*stack_bottom_ptr);
1477 drop(pcb_ptr);
1478 }
1479
1480 *stack_bottom_ptr = core::ptr::null();
1481 }
1482
1483 /// 返回指向当前内核栈pcb的Arc指针
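///
/// ## 示例
///
/// 一个最小示意(仅作说明;`some_pcb` 为假设的变量名):
///
/// ```rust,ignore
/// let stack = some_pcb.kernel_stack();
/// if let Some(owner) = unsafe { stack.pcb() } {
///     debug_assert_eq!(owner.pid(), some_pcb.pid());
/// }
/// ```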
1484 #[allow(dead_code)]
1485 pub unsafe fn pcb(&self) -> Option<Arc<ProcessControlBlock>> {
1486 // 从内核栈的最低地址处取出pcb的地址
1487 let p = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock;
1488 if unlikely(unsafe { (*p).is_null() }) {
1489 return None;
1490 }
1491
1492 // 为了防止内核栈的pcb指针被释放,这里需要将其包装一下,使得Arc的drop不会被调用
1493 let weak_wrapper: ManuallyDrop<Weak<ProcessControlBlock>> =
1494 ManuallyDrop::new(Weak::from_raw(*p));
1495
1496 let new_arc: Arc<ProcessControlBlock> = weak_wrapper.upgrade()?;
1497 return Some(new_arc);
1498 }
1499 }
1500
1501 impl Drop for KernelStack {
1502 fn drop(&mut self) {
1503 if self.stack.is_some() {
1504 let ptr = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock;
1505 if unsafe { !(*ptr).is_null() } {
1506 let pcb_ptr: Weak<ProcessControlBlock> = unsafe { Weak::from_raw(*ptr) };
1507 drop(pcb_ptr);
1508 }
1509 }
1510 // 如果该内核栈不可以被释放,那么,这里就forget,不调用AlignedBox的drop函数
1511 if !self.can_be_freed {
1512 let bx = self.stack.take();
1513 core::mem::forget(bx);
1514 }
1515 }
1516 }
1517
1518 pub fn process_init() {
1519 ProcessManager::init();
1520 }
1521
1522 #[derive(Debug)]
1523 pub struct ProcessSignalInfo {
1524 // 当前线程被屏蔽(阻塞)的信号集
1525 sig_block: SigSet,
1526 // sig_pending 中存储当前线程要处理的信号
1527 sig_pending: SigPending,
1528 // sig_shared_pending 中存储当前线程所属进程要处理的信号
1529 sig_shared_pending: SigPending,
1530 // 当前进程对应的tty
1531 tty: Option<Arc<TtyCore>>,
1532 }
1533
1534 impl ProcessSignalInfo {
1535 pub fn sig_block(&self) -> &SigSet {
1536 &self.sig_block
1537 }
1538
1539 pub fn sig_pending(&self) -> &SigPending {
1540 &self.sig_pending
1541 }
1542
1543 pub fn sig_pending_mut(&mut self) -> &mut SigPending {
1544 &mut self.sig_pending
1545 }
1546
1547 pub fn sig_block_mut(&mut self) -> &mut SigSet {
1548 &mut self.sig_block
1549 }
1550
1551 pub fn sig_shared_pending_mut(&mut self) -> &mut SigPending {
1552 &mut self.sig_shared_pending
1553 }
1554
1555 pub fn sig_shared_pending(&self) -> &SigPending {
1556 &self.sig_shared_pending
1557 }
1558
1559 pub fn tty(&self) -> Option<Arc<TtyCore>> {
1560 self.tty.clone()
1561 }
1562
1563 pub fn set_tty(&mut self, tty: Option<Arc<TtyCore>>) {
1564 self.tty = tty;
1565 }
1566
1567 /// 从 pcb 的 siginfo中取出下一个要处理的信号,先处理线程信号,再处理进程信号
1568 ///
1569 /// ## 参数
1570 ///
1571 /// - `sig_mask` 被忽略掉的信号
1572 ///
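/// ## 示例
///
/// 一个最小示意(仅作说明;`pcb` 为假设的变量名,通常以被阻塞的信号集作为掩码):
///
/// ```rust,ignore
/// let mut guard = pcb.sig_info_mut();
/// let sig_mask = *guard.sig_block();
/// let (sig, _info) = guard.dequeue_signal(&sig_mask);
/// if sig != Signal::INVALID {
///     // 取到了一个待处理的信号
/// }
/// ```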
1573 pub fn dequeue_signal(&mut self, sig_mask: &SigSet) -> (Signal, Option<SigInfo>) {
1574 let res = self.sig_pending.dequeue_signal(sig_mask);
1575 if res.0 != Signal::INVALID {
1576 return res;
1577 } else {
1578 return self.sig_shared_pending.dequeue_signal(sig_mask);
1579 }
1580 }
1581 }
1582
1583 impl Default for ProcessSignalInfo {
1584 fn default() -> Self {
1585 Self {
1586 sig_block: SigSet::empty(),
1587 sig_pending: SigPending::default(),
1588 sig_shared_pending: SigPending::default(),
1589 tty: None,
1590 }
1591 }
1592 }
1593