1 use core::{
2 fmt,
3 hash::Hash,
4 hint::spin_loop,
5 intrinsics::{likely, unlikely},
6 mem::ManuallyDrop,
7 sync::atomic::{compiler_fence, fence, AtomicBool, AtomicUsize, Ordering},
8 };
9
10 use alloc::{
11 ffi::CString,
12 string::{String, ToString},
13 sync::{Arc, Weak},
14 vec::Vec,
15 };
16 use cred::INIT_CRED;
17 use hashbrown::HashMap;
18 use log::{debug, error, info, warn};
19 use system_error::SystemError;
20
21 use crate::{
22 arch::{
23 cpu::current_cpu_id,
24 ipc::signal::{AtomicSignal, SigSet, Signal},
25 process::ArchPCBInfo,
26 CurrentIrqArch,
27 },
28 driver::tty::tty_core::TtyCore,
29 exception::InterruptArch,
30 filesystem::{
31 procfs::procfs_unregister_pid,
32 vfs::{file::FileDescriptorVec, FileType},
33 },
34 ipc::signal_types::{SigInfo, SigPending, SignalStruct},
35 libs::{
36 align::AlignedBox,
37 casting::DowncastArc,
38 futex::{
39 constant::{FutexFlag, FUTEX_BITSET_MATCH_ANY},
40 futex::{Futex, RobustListHead},
41 },
42 lock_free_flags::LockFreeFlags,
43 rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard},
44 spinlock::{SpinLock, SpinLockGuard},
45 wait_queue::WaitQueue,
46 },
47 mm::{
48 percpu::{PerCpu, PerCpuVar},
49 set_IDLE_PROCESS_ADDRESS_SPACE,
50 ucontext::AddressSpace,
51 VirtAddr,
52 },
53 namespaces::{mnt_namespace::FsStruct, pid_namespace::PidStrcut, NsProxy},
54 net::socket::SocketInode,
55 sched::{
56 completion::Completion, cpu_rq, fair::FairSchedEntity, prio::MAX_PRIO, DequeueFlag,
57 EnqueueFlag, OnRq, SchedMode, WakeupFlags, __schedule,
58 },
59 smp::{
60 core::smp_get_processor_id,
61 cpu::{AtomicProcessorId, ProcessorId},
62 kick_cpu,
63 },
64 syscall::{user_access::clear_user, Syscall},
65 };
66 use timer::AlarmTimer;
67
68 use self::{cred::Cred, kthread::WorkerPrivate};
69
70 pub mod abi;
71 pub mod c_adapter;
72 pub mod cred;
73 pub mod exec;
74 pub mod exit;
75 pub mod fork;
76 pub mod idle;
77 pub mod kthread;
78 pub mod pid;
79 pub mod resource;
80 pub mod stdio;
81 pub mod syscall;
82 pub mod timer;
83 pub mod utils;
84
/// The pcbs of all processes in the system
86 static ALL_PROCESS: SpinLock<Option<HashMap<Pid, Arc<ProcessControlBlock>>>> = SpinLock::new(None);
87
88 pub static mut PROCESS_SWITCH_RESULT: Option<PerCpuVar<SwitchResult>> = None;
89
/// A global flag, changed only once, marking whether the process manager has finished initializing
91 static mut __PROCESS_MANAGEMENT_INIT_DONE: bool = false;
92
93 pub struct SwitchResult {
94 pub prev_pcb: Option<Arc<ProcessControlBlock>>,
95 pub next_pcb: Option<Arc<ProcessControlBlock>>,
96 }
97
98 impl SwitchResult {
    pub fn new() -> Self {
100 Self {
101 prev_pcb: None,
102 next_pcb: None,
103 }
104 }
105 }
106
107 #[derive(Debug)]
108 pub struct ProcessManager;
109 impl ProcessManager {
110 #[inline(never)]
    fn init() {
112 static INIT_FLAG: AtomicBool = AtomicBool::new(false);
113 if INIT_FLAG
114 .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
115 .is_err()
116 {
117 panic!("ProcessManager has been initialized!");
118 }
119
120 unsafe {
121 compiler_fence(Ordering::SeqCst);
122 debug!("To create address space for INIT process.");
123 // test_buddy();
124 set_IDLE_PROCESS_ADDRESS_SPACE(
125 AddressSpace::new(true).expect("Failed to create address space for INIT process."),
126 );
127 debug!("INIT process address space created.");
128 compiler_fence(Ordering::SeqCst);
129 };
130
131 ALL_PROCESS.lock_irqsave().replace(HashMap::new());
132 Self::init_switch_result();
133 Self::arch_init();
134 debug!("process arch init done.");
135 Self::init_idle();
136 debug!("process idle init done.");
137
138 unsafe { __PROCESS_MANAGEMENT_INIT_DONE = true };
139 info!("Process Manager initialized.");
140 }
141
    fn init_switch_result() {
143 let mut switch_res_vec: Vec<SwitchResult> = Vec::new();
144 for _ in 0..PerCpu::MAX_CPU_NUM {
145 switch_res_vec.push(SwitchResult::new());
146 }
147 unsafe {
148 PROCESS_SWITCH_RESULT = Some(PerCpuVar::new(switch_res_vec).unwrap());
149 }
150 }
151
    /// Check whether the process manager has finished initializing
    #[allow(dead_code)]
    pub fn initialized() -> bool {
155 unsafe { __PROCESS_MANAGEMENT_INIT_DONE }
156 }
157
    /// Get the pcb of the current process
    pub fn current_pcb() -> Arc<ProcessControlBlock> {
160 if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) {
            error!("__PROCESS_MANAGEMENT_INIT_DONE == false");
162 loop {
163 spin_loop();
164 }
165 }
166 return ProcessControlBlock::arch_current_pcb();
167 }
168
    /// Get the pid of the current process
    ///
    /// Returns Pid(0) if the process manager has not finished initializing
    pub fn current_pid() -> Pid {
173 if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) {
174 return Pid(0);
175 }
176
177 return ProcessManager::current_pcb().pid();
178 }
179
    /// Increase the current process's preempt (lock-holding) count
    #[inline(always)]
    pub fn preempt_disable() {
183 if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) {
184 ProcessManager::current_pcb().preempt_disable();
185 }
186 }
187
    /// Decrease the current process's preempt (lock-holding) count
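    ///
    /// ## Example (illustrative sketch)
    ///
    /// Disable/enable calls are expected to be paired around a short critical
    /// section (hypothetical usage, not taken from a real caller):
    ///
    /// ```ignore
    /// ProcessManager::preempt_disable();
    /// // ... work that must not be preempted ...
    /// ProcessManager::preempt_enable();
    /// ```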
    #[inline(always)]
    pub fn preempt_enable() {
191 if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) {
192 ProcessManager::current_pcb().preempt_enable();
193 }
194 }
195
    /// Get a process's pcb by pid
    ///
    /// ## Parameters
    ///
    /// - `pid` : the pid of the process
    ///
    /// ## Return value
    ///
    /// The pcb of the process if it exists, otherwise `None`
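    ///
    /// ## Example (illustrative sketch)
    ///
    /// ```ignore
    /// // Look up the pcb of the init process (pid 1), if it exists.
    /// if let Some(init_pcb) = ProcessManager::find(Pid(1)) {
    ///     debug!("init pid = {:?}", init_pcb.pid());
    /// }
    /// ```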
    pub fn find(pid: Pid) -> Option<Arc<ProcessControlBlock>> {
206 return ALL_PROCESS.lock_irqsave().as_ref()?.get(&pid).cloned();
207 }
208
    /// Add a process's pcb to the system
    ///
    /// ## Parameters
    ///
    /// - `pcb` : the pcb of the process
    ///
    /// ## Return value
    ///
    /// None
    pub fn add_pcb(pcb: Arc<ProcessControlBlock>) {
219 ALL_PROCESS
220 .lock_irqsave()
221 .as_mut()
222 .unwrap()
223 .insert(pcb.pid(), pcb.clone());
224 }
225
    /// Wake up a process
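    ///
    /// ## Example (illustrative sketch)
    ///
    /// `target_pid` below is a hypothetical pid; `wakeup` returns `Err(EINVAL)`
    /// if the target process has already exited.
    ///
    /// ```ignore
    /// if let Some(pcb) = ProcessManager::find(target_pid) {
    ///     ProcessManager::wakeup(&pcb).ok();
    /// }
    /// ```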
    pub fn wakeup(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> {
228 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
229 let state = pcb.sched_info().inner_lock_read_irqsave().state();
230 if state.is_blocked() {
231 let mut writer = pcb.sched_info().inner_lock_write_irqsave();
232 let state = writer.state();
233 if state.is_blocked() {
234 writer.set_state(ProcessState::Runnable);
235 writer.set_wakeup();
236
237 // avoid deadlock
238 drop(writer);
239
240 let rq =
241 cpu_rq(pcb.sched_info().on_cpu().unwrap_or(current_cpu_id()).data() as usize);
242
243 let (rq, _guard) = rq.self_lock();
244 rq.update_rq_clock();
245 rq.activate_task(
246 pcb,
247 EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK,
248 );
249
250 rq.check_preempt_currnet(pcb, WakeupFlags::empty());
251
252 // sched_enqueue(pcb.clone(), true);
253 return Ok(());
254 } else if state.is_exited() {
255 return Err(SystemError::EINVAL);
256 } else {
257 return Ok(());
258 }
259 } else if state.is_exited() {
260 return Err(SystemError::EINVAL);
261 } else {
262 return Ok(());
263 }
264 }
265
    /// Wake up a stopped process
    pub fn wakeup_stop(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> {
268 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
269 let state = pcb.sched_info().inner_lock_read_irqsave().state();
270 if let ProcessState::Stopped = state {
271 let mut writer = pcb.sched_info().inner_lock_write_irqsave();
272 let state = writer.state();
273 if let ProcessState::Stopped = state {
274 writer.set_state(ProcessState::Runnable);
275 // avoid deadlock
276 drop(writer);
277
278 let rq = cpu_rq(pcb.sched_info().on_cpu().unwrap().data() as usize);
279
280 let (rq, _guard) = rq.self_lock();
281 rq.update_rq_clock();
282 rq.activate_task(
283 pcb,
284 EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK,
285 );
286
287 rq.check_preempt_currnet(pcb, WakeupFlags::empty());
288
289 // sched_enqueue(pcb.clone(), true);
290 return Ok(());
291 } else if state.is_runnable() {
292 return Ok(());
293 } else {
294 return Err(SystemError::EINVAL);
295 }
296 } else if state.is_runnable() {
297 return Ok(());
298 } else {
299 return Err(SystemError::EINVAL);
300 }
301 }
302
    /// Mark the current process as asleep; the caller is responsible for actually
    /// triggering the reschedule
    ///
    /// ## Notes
    ///
    /// - The sched_info lock must not be held before entering this function
    /// - Interrupts must be disabled before entering this function
    /// - After calling this function, the caller must keep the logic correct and
    ///   make sure the process is not enqueued into the run queue again
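    ///
    /// ## Example (illustrative sketch)
    ///
    /// A typical "block the current process" sequence following the notes above
    /// (interrupts disabled, no sched_info lock held, caller performs the switch):
    ///
    /// ```ignore
    /// let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
    /// ProcessManager::mark_sleep(true).expect("mark_sleep failed");
    /// drop(irq_guard);
    /// __schedule(SchedMode::SM_NONE);
    /// ```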
    pub fn mark_sleep(interruptable: bool) -> Result<(), SystemError> {
311 assert!(
312 !CurrentIrqArch::is_irq_enabled(),
313 "interrupt must be disabled before enter ProcessManager::mark_sleep()"
314 );
315 let pcb = ProcessManager::current_pcb();
316 let mut writer = pcb.sched_info().inner_lock_write_irqsave();
317 if !matches!(writer.state(), ProcessState::Exited(_)) {
318 writer.set_state(ProcessState::Blocked(interruptable));
319 writer.set_sleep();
320 pcb.flags().insert(ProcessFlags::NEED_SCHEDULE);
321 fence(Ordering::SeqCst);
322 drop(writer);
323 return Ok(());
324 }
325 return Err(SystemError::EINTR);
326 }
327
    /// Mark the current process as stopped; the caller is responsible for actually
    /// triggering the reschedule
    ///
    /// ## Notes
    ///
    /// - The sched_info lock must not be held before entering this function
    /// - Interrupts must be disabled before entering this function
    pub fn mark_stop() -> Result<(), SystemError> {
335 assert!(
336 !CurrentIrqArch::is_irq_enabled(),
337 "interrupt must be disabled before enter ProcessManager::mark_stop()"
338 );
339
340 let pcb = ProcessManager::current_pcb();
341 let mut writer = pcb.sched_info().inner_lock_write_irqsave();
342 if !matches!(writer.state(), ProcessState::Exited(_)) {
343 writer.set_state(ProcessState::Stopped);
344 pcb.flags().insert(ProcessFlags::NEED_SCHEDULE);
345 drop(writer);
346
347 return Ok(());
348 }
349 return Err(SystemError::EINTR);
350 }
    /// Notify the parent process after a child process exits
    fn exit_notify() {
353 let current = ProcessManager::current_pcb();
        // Let the INIT process adopt all of this process's children
355 if current.pid() != Pid(1) {
356 unsafe {
357 current
358 .adopt_childen()
                    .unwrap_or_else(|e| panic!("adopt_childen failed: error: {e:?}"))
360 };
361 let r = current.parent_pcb.read_irqsave().upgrade();
362 if r.is_none() {
363 return;
364 }
365 let parent_pcb = r.unwrap();
366 let r = Syscall::kill(parent_pcb.pid(), Signal::SIGCHLD as i32);
367 if r.is_err() {
368 warn!(
369 "failed to send kill signal to {:?}'s parent pcb {:?}",
370 current.pid(),
371 parent_pcb.pid()
372 );
373 }
            // todo: send the SIGCHLD signal to the parent process here
            // todo: signal delivery also needs to take thread-group information into account
376 }
377 }
378
    /// Exit the current process
    ///
    /// ## Parameters
    ///
    /// - `exit_code` : the exit code of the process
    pub fn exit(exit_code: usize) -> ! {
        // Disable interrupts
386 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
387 let pcb = ProcessManager::current_pcb();
388 let pid = pcb.pid();
389 pcb.sched_info
390 .inner_lock_write_irqsave()
391 .set_state(ProcessState::Exited(exit_code));
392 pcb.wait_queue.wakeup(Some(ProcessState::Blocked(true)));
393
394 let rq = cpu_rq(smp_get_processor_id().data() as usize);
395 let (rq, guard) = rq.self_lock();
396 rq.deactivate_task(
397 pcb.clone(),
398 DequeueFlag::DEQUEUE_SLEEP | DequeueFlag::DEQUEUE_NOCLOCK,
399 );
400 drop(guard);
401
        // Perform the post-exit work
403 let thread = pcb.thread.write_irqsave();
404 if let Some(addr) = thread.set_child_tid {
405 unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") };
406 }
407
408 if let Some(addr) = thread.clear_child_tid {
409 if Arc::strong_count(&pcb.basic().user_vm().expect("User VM Not found")) > 1 {
410 let _ =
411 Futex::futex_wake(addr, FutexFlag::FLAGS_MATCH_NONE, 1, FUTEX_BITSET_MATCH_ANY);
412 }
413 unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") };
414 }
415
416 RobustListHead::exit_robust_list(pcb.clone());
417
        // If this process was created by vfork, complete the vfork completion
419 if thread.vfork_done.is_some() {
420 thread.vfork_done.as_ref().unwrap().complete_all();
421 }
422 drop(thread);
423 unsafe { pcb.basic_mut().set_user_vm(None) };
424
        // TODO: since process groups are not implemented yet, the foreground process group
        // recorded by the tty equals the current process, so it must be cleared before exit.
        // The related logic should later be implemented in the SYS_EXIT_GROUP syscall.
427 if let Some(tty) = pcb.sig_info_irqsave().tty() {
428 tty.core().contorl_info_irqsave().pgid = None;
429 }
430 pcb.sig_info_mut().set_tty(None);
431
432 drop(pcb);
433 ProcessManager::exit_notify();
434 // unsafe { CurrentIrqArch::interrupt_enable() };
435 __schedule(SchedMode::SM_NONE);
436 error!("pid {pid:?} exited but sched again!");
437 #[allow(clippy::empty_loop)]
438 loop {
439 spin_loop();
440 }
441 }
442
    pub unsafe fn release(pid: Pid) {
444 let pcb = ProcessManager::find(pid);
445 if pcb.is_some() {
446 // let pcb = pcb.unwrap();
            // Check whether this pcb has no remaining references globally.
            // TODO: the pcb's Arc pointers currently leak and the reference count is wrong. The plan is
            // to implement a debug-only Arc to make this easier to debug, and then fix this bug.
            // The check below is therefore commented out for now so that the kernel can keep running.
450 // if Arc::strong_count(&pcb) <= 2 {
451 // drop(pcb);
452 // ALL_PROCESS.lock().as_mut().unwrap().remove(&pid);
453 // } else {
            // // panic if the strong count is not 1
455 // let msg = format!("pcb '{:?}' is still referenced, strong count={}",pcb.pid(), Arc::strong_count(&pcb));
456 // error!("{}", msg);
457 // panic!()
458 // }
459
460 ALL_PROCESS.lock_irqsave().as_mut().unwrap().remove(&pid);
461 }
462 }
463
    /// Hook function run after a context switch has completed
    unsafe fn switch_finish_hook() {
466 // debug!("switch_finish_hook");
467 let prev_pcb = PROCESS_SWITCH_RESULT
468 .as_mut()
469 .unwrap()
470 .get_mut()
471 .prev_pcb
472 .take()
473 .expect("prev_pcb is None");
474 let next_pcb = PROCESS_SWITCH_RESULT
475 .as_mut()
476 .unwrap()
477 .get_mut()
478 .next_pcb
479 .take()
480 .expect("next_pcb is None");
481
        // SpinLockGuard::leak() was used before the process switch, so the locks must be released manually here
483 fence(Ordering::SeqCst);
484
485 prev_pcb.arch_info.force_unlock();
486 fence(Ordering::SeqCst);
487
488 next_pcb.arch_info.force_unlock();
489 fence(Ordering::SeqCst);
490 }
491
    /// If the target process is currently running on a CPU, force that CPU to trap
    /// into kernel mode
    ///
    /// ## Parameters
    ///
    /// - `pcb` : the pcb of the process
    #[allow(dead_code)]
    pub fn kick(pcb: &Arc<ProcessControlBlock>) {
499 ProcessManager::current_pcb().preempt_disable();
500 let cpu_id = pcb.sched_info().on_cpu();
501
502 if let Some(cpu_id) = cpu_id {
503 if pcb.pid() == cpu_rq(cpu_id.data() as usize).current().pid() {
504 kick_cpu(cpu_id).expect("ProcessManager::kick(): Failed to kick cpu");
505 }
506 }
507
508 ProcessManager::current_pcb().preempt_enable();
509 }
510 }
511
/// Context-switch hook function: when this function returns, the context switch takes place
#[cfg(target_arch = "x86_64")]
#[inline(never)]
pub unsafe extern "sysv64" fn switch_finish_hook() {
516 ProcessManager::switch_finish_hook();
517 }
518 #[cfg(target_arch = "riscv64")]
519 #[inline(always)]
pub unsafe fn switch_finish_hook() {
521 ProcessManager::switch_finish_hook();
522 }
523
524 int_like!(Pid, AtomicPid, usize, AtomicUsize);
525
526 impl fmt::Display for Pid {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
528 write!(f, "{}", self.0)
529 }
530 }
531
532 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
533 pub enum ProcessState {
534 /// The process is running on a CPU or in a run queue.
535 Runnable,
536 /// The process is waiting for an event to occur.
    /// The inner bool indicates whether the wait can be interrupted:
    /// - if true, hardware interrupts / signals / other system events may interrupt the wait and make the process Runnable again;
    /// - if false, the process must be explicitly woken up to become Runnable again.
    Blocked(bool),
    /// The process was stopped by a signal
    Stopped,
    /// The process has exited; the usize is its exit code
    Exited(usize),
545 }
546
547 #[allow(dead_code)]
548 impl ProcessState {
549 #[inline(always)]
    pub fn is_runnable(&self) -> bool {
551 return matches!(self, ProcessState::Runnable);
552 }
553
554 #[inline(always)]
    pub fn is_blocked(&self) -> bool {
556 return matches!(self, ProcessState::Blocked(_));
557 }
558
559 #[inline(always)]
    pub fn is_blocked_interruptable(&self) -> bool {
561 return matches!(self, ProcessState::Blocked(true));
562 }
563
564 /// Returns `true` if the process state is [`Exited`].
565 #[inline(always)]
    pub fn is_exited(&self) -> bool {
567 return matches!(self, ProcessState::Exited(_));
568 }
569
570 /// Returns `true` if the process state is [`Stopped`].
571 ///
572 /// [`Stopped`]: ProcessState::Stopped
573 #[inline(always)]
    pub fn is_stopped(&self) -> bool {
575 matches!(self, ProcessState::Stopped)
576 }
577
    /// Returns exit code if the process state is [`Exited`].
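    ///
    /// ## Example (illustrative sketch)
    ///
    /// ```ignore
    /// assert_eq!(ProcessState::Exited(0).exit_code(), Some(0));
    /// assert_eq!(ProcessState::Runnable.exit_code(), None);
    /// ```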
579 #[inline(always)]
    pub fn exit_code(&self) -> Option<usize> {
581 match self {
582 ProcessState::Exited(code) => Some(*code),
583 _ => None,
584 }
585 }
586 }
587
588 bitflags! {
    /// pcb flag bits
    pub struct ProcessFlags: usize {
        /// This pcb represents a kernel thread
        const KTHREAD = 1 << 0;
        /// The process needs to be rescheduled
        const NEED_SCHEDULE = 1 << 1;
        /// The process shares resources with its parent because of vfork
        const VFORK = 1 << 2;
        /// The process cannot be frozen
        const NOFREEZE = 1 << 3;
        /// The process is exiting
        const EXITING = 1 << 4;
        /// The process was woken up because it received a fatal (kill) signal
        const WAKEKILL = 1 << 5;
        /// The process exited because it received a signal (killed by a signal)
        const SIGNALED = 1 << 6;
        /// The process needs to be migrated to another cpu
        const NEED_MIGRATE = 1 << 7;
        /// Randomize the virtual address space, mainly for loading the dynamic linker
        const RANDOMIZE = 1 << 8;
    }
610 }
611 #[derive(Debug)]
612 pub struct ProcessControlBlock {
    /// The pid of this process
    pid: Pid,
    /// The thread group id of this process (this value never changes within a thread group)
    tgid: Pid,
    /// Pid-related information
    thread_pid: Arc<RwLock<PidStrcut>>,
    basic: RwLock<ProcessBasicInfo>,
    /// The process's spinlock-holding (preempt) count
    preempt_count: AtomicUsize,

    flags: LockFreeFlags<ProcessFlags>,
    worker_private: SpinLock<Option<WorkerPrivate>>,
    /// The process's kernel stack
    kernel_stack: RwLock<KernelStack>,

    /// The syscall stack
    syscall_stack: RwLock<KernelStack>,

    /// Scheduling-related information
    sched_info: ProcessSchedulerInfo,
    /// Architecture-specific information
    arch_info: SpinLock<ArchPCBInfo>,
    /// Signal-handling related information (could probably be lock-free)
    sig_info: RwLock<ProcessSignalInfo>,
    /// Signal handling struct
    sig_struct: SpinLock<SignalStruct>,
    /// Exit signal
    exit_signal: AtomicSignal,

    /// Pointer to the parent process
    parent_pcb: RwLock<Weak<ProcessControlBlock>>,
    /// Pointer to the real parent process
    real_parent_pcb: RwLock<Weak<ProcessControlBlock>>,

    /// List of child processes
    children: RwLock<Vec<Pid>>,

    /// Wait queue
    wait_queue: WaitQueue,

    /// Thread information
    thread: RwLock<ThreadInfo>,

    /// The process's filesystem state
    fs: Arc<SpinLock<FsStruct>>,

    /// Alarm timer
    alarm_timer: SpinLock<Option<AlarmTimer>>,

    /// The process's robust futex list
    robust_list: RwLock<Option<RobustListHead>>,

    /// Pointer to the namespaces
    nsproxy: Arc<RwLock<NsProxy>>,

    /// The credentials of the process as a subject
    cred: SpinLock<Cred>,
670 }
671
672 impl ProcessControlBlock {
673 /// Generate a new pcb.
674 ///
    /// ## Parameters
    ///
    /// - `name` : the name of the process
    /// - `kstack` : the kernel stack of the process
    ///
    /// ## Return value
    ///
    /// A newly created pcb
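    ///
    /// ## Example (illustrative sketch)
    ///
    /// The name below is hypothetical; in practice this is called from the fork /
    /// kthread creation paths:
    ///
    /// ```ignore
    /// let kstack = KernelStack::new().expect("failed to allocate kernel stack");
    /// let pcb = ProcessControlBlock::new("example".to_string(), kstack);
    /// ProcessManager::add_pcb(pcb.clone());
    /// ```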
    pub fn new(name: String, kstack: KernelStack) -> Arc<Self> {
684 return Self::do_create_pcb(name, kstack, false);
685 }
686
    /// Create a new idle process
    ///
    /// Note: this function may only be called while the process manager is being initialized.
    pub fn new_idle(cpu_id: u32, kstack: KernelStack) -> Arc<Self> {
691 let name = format!("idle-{}", cpu_id);
692 return Self::do_create_pcb(name, kstack, true);
693 }
694
    /// # Function
    ///
    /// Returns whether this process is a kernel thread
    ///
    /// # Return value
    ///
    /// true if the process is a kernel thread, false otherwise
    pub fn is_kthread(&self) -> bool {
703 return matches!(self.flags(), &mut ProcessFlags::KTHREAD);
704 }
705
706 #[inline(never)]
    fn do_create_pcb(name: String, kstack: KernelStack, is_idle: bool) -> Arc<Self> {
708 let (pid, ppid, cwd, cred) = if is_idle {
709 let cred = INIT_CRED.clone();
710 (Pid(0), Pid(0), "/".to_string(), cred)
711 } else {
712 let ppid = ProcessManager::current_pcb().pid();
713 let mut cred = ProcessManager::current_pcb().cred();
714 cred.cap_permitted = cred.cap_ambient;
715 cred.cap_effective = cred.cap_ambient;
716 let cwd = ProcessManager::current_pcb().basic().cwd();
717 (Self::generate_pid(), ppid, cwd, cred)
718 };
719
720 let basic_info = ProcessBasicInfo::new(Pid(0), ppid, Pid(0), name, cwd, None);
721 let preempt_count = AtomicUsize::new(0);
722 let flags = unsafe { LockFreeFlags::new(ProcessFlags::empty()) };
723
724 let sched_info = ProcessSchedulerInfo::new(None);
725 let arch_info = SpinLock::new(ArchPCBInfo::new(&kstack));
726
727 let ppcb: Weak<ProcessControlBlock> = ProcessManager::find(ppid)
728 .map(|p| Arc::downgrade(&p))
729 .unwrap_or_default();
730 let pcb = Self {
731 pid,
732 tgid: pid,
733 thread_pid: Arc::new(RwLock::new(PidStrcut::new())),
734 basic: basic_info,
735 preempt_count,
736 flags,
737 kernel_stack: RwLock::new(kstack),
738 syscall_stack: RwLock::new(KernelStack::new().unwrap()),
739 worker_private: SpinLock::new(None),
740 sched_info,
741 arch_info,
742 sig_info: RwLock::new(ProcessSignalInfo::default()),
743 sig_struct: SpinLock::new(SignalStruct::new()),
744 exit_signal: AtomicSignal::new(Signal::SIGCHLD),
745 parent_pcb: RwLock::new(ppcb.clone()),
746 real_parent_pcb: RwLock::new(ppcb),
747 children: RwLock::new(Vec::new()),
748 wait_queue: WaitQueue::default(),
749 thread: RwLock::new(ThreadInfo::new()),
750 fs: Arc::new(SpinLock::new(FsStruct::new())),
751 alarm_timer: SpinLock::new(None),
752 robust_list: RwLock::new(None),
753 nsproxy: Arc::new(RwLock::new(NsProxy::new())),
754 cred: SpinLock::new(cred),
755 };
756
        // Initialize the syscall stack
758 #[cfg(target_arch = "x86_64")]
759 pcb.arch_info
760 .lock()
761 .init_syscall_stack(&pcb.syscall_stack.read());
762
763 let pcb = Arc::new(pcb);
764
765 pcb.sched_info()
766 .sched_entity()
767 .force_mut()
768 .set_pcb(Arc::downgrade(&pcb));
        // Store the process's Arc pointer at the lowest address of the kernel stack and the syscall stack
770 unsafe {
771 pcb.kernel_stack
772 .write()
773 .set_pcb(Arc::downgrade(&pcb))
774 .unwrap();
775
776 pcb.syscall_stack
777 .write()
778 .set_pcb(Arc::downgrade(&pcb))
779 .unwrap()
780 };
781
        // Add the new pcb to its parent's list of children
783 if pcb.pid() > Pid(1) {
784 if let Some(ppcb_arc) = pcb.parent_pcb.read_irqsave().upgrade() {
785 let mut children = ppcb_arc.children.write_irqsave();
786 children.push(pcb.pid());
787 } else {
788 panic!("parent pcb is None");
789 }
790 }
791
792 return pcb;
793 }
794
    /// Generate a new pid
    #[inline(always)]
    fn generate_pid() -> Pid {
798 static NEXT_PID: AtomicPid = AtomicPid::new(Pid(1));
799 return NEXT_PID.fetch_add(Pid(1), Ordering::SeqCst);
800 }
801
    /// Return the process's preempt (lock-holding) count
    #[inline(always)]
    pub fn preempt_count(&self) -> usize {
805 return self.preempt_count.load(Ordering::SeqCst);
806 }
807
    /// Increase the process's preempt (lock-holding) count
    #[inline(always)]
    pub fn preempt_disable(&self) {
811 self.preempt_count.fetch_add(1, Ordering::SeqCst);
812 }
813
    /// Decrease the process's preempt (lock-holding) count
    #[inline(always)]
    pub fn preempt_enable(&self) {
817 self.preempt_count.fetch_sub(1, Ordering::SeqCst);
818 }
819
820 #[inline(always)]
    pub unsafe fn set_preempt_count(&self, count: usize) {
822 self.preempt_count.store(count, Ordering::SeqCst);
823 }
824
825 #[inline(always)]
    pub fn flags(&self) -> &mut ProcessFlags {
827 return self.flags.get_mut();
828 }
829
    /// Note: this value may be read from interrupt context, but must not be modified
    /// in interrupt context, otherwise a deadlock may occur
    #[inline(always)]
    pub fn basic(&self) -> RwLockReadGuard<ProcessBasicInfo> {
834 return self.basic.read_irqsave();
835 }
836
837 #[inline(always)]
    pub fn set_name(&self, name: String) {
839 self.basic.write().set_name(name);
840 }
841
842 #[inline(always)]
    pub fn basic_mut(&self) -> RwLockWriteGuard<ProcessBasicInfo> {
844 return self.basic.write_irqsave();
845 }
846
    /// # Acquire the arch info lock with interrupts disabled (irqsave)
    #[inline(always)]
    pub fn arch_info_irqsave(&self) -> SpinLockGuard<ArchPCBInfo> {
850 return self.arch_info.lock_irqsave();
851 }
852
    /// # Acquire the arch info lock without disabling interrupts
    ///
    /// Since arch info is used during process switches, acquiring it without
    /// irqsave outside of interrupt context is unsafe.
    ///
    /// This function may only be used when:
    /// - taking the arch info lock in interrupt context (interrupts already disabled), or
    /// - the pcb has just been created
    #[inline(always)]
    pub unsafe fn arch_info(&self) -> SpinLockGuard<ArchPCBInfo> {
863 return self.arch_info.lock();
864 }
865
866 #[inline(always)]
    pub fn kernel_stack(&self) -> RwLockReadGuard<KernelStack> {
868 return self.kernel_stack.read();
869 }
870
    pub unsafe fn kernel_stack_force_ref(&self) -> &KernelStack {
872 self.kernel_stack.force_get_ref()
873 }
874
875 #[inline(always)]
876 #[allow(dead_code)]
    pub fn kernel_stack_mut(&self) -> RwLockWriteGuard<KernelStack> {
878 return self.kernel_stack.write();
879 }
880
881 #[inline(always)]
    pub fn sched_info(&self) -> &ProcessSchedulerInfo {
883 return &self.sched_info;
884 }
885
886 #[inline(always)]
    pub fn worker_private(&self) -> SpinLockGuard<Option<WorkerPrivate>> {
888 return self.worker_private.lock();
889 }
890
891 #[inline(always)]
    pub fn pid(&self) -> Pid {
893 return self.pid;
894 }
895
896 #[inline(always)]
    pub fn pid_strcut(&self) -> Arc<RwLock<PidStrcut>> {
898 self.thread_pid.clone()
899 }
900
901 #[inline(always)]
    pub fn tgid(&self) -> Pid {
903 return self.tgid;
904 }
905
906 #[inline(always)]
    pub fn fs_struct(&self) -> Arc<SpinLock<FsStruct>> {
908 self.fs.clone()
909 }
910
    /// Get the Arc pointer to the file descriptor table
    #[inline(always)]
    pub fn fd_table(&self) -> Arc<RwLock<FileDescriptorVec>> {
914 return self.basic.read().fd_table().unwrap();
915 }
916
917 #[inline(always)]
    pub fn cred(&self) -> Cred {
919 self.cred.lock().clone()
920 }
921
    /// Get the Arc pointer to a socket object by file descriptor number
    ///
    /// ## Parameters
    ///
    /// - `fd` : the file descriptor number
    ///
    /// ## Return value
    ///
    /// The socket's `Arc<SocketInode>`; `None` if the file descriptor is not a socket
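    ///
    /// ## Example (illustrative sketch)
    ///
    /// `3` below is a hypothetical descriptor number:
    ///
    /// ```ignore
    /// if let Some(socket_inode) = ProcessManager::current_pcb().get_socket(3) {
    ///     // use the SocketInode ...
    /// }
    /// ```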
    pub fn get_socket(&self, fd: i32) -> Option<Arc<SocketInode>> {
932 let binding = ProcessManager::current_pcb().fd_table();
933 let fd_table_guard = binding.read();
934
935 let f = fd_table_guard.get_file_by_fd(fd)?;
936 drop(fd_table_guard);
937
938 if f.file_type() != FileType::Socket {
939 return None;
940 }
941 let socket: Arc<SocketInode> = f
942 .inode()
943 .downcast_arc::<SocketInode>()
944 .expect("Not a socket inode");
945 return Some(socket);
946 }
947
    /// When the current process exits, let the init process adopt all of its children
    unsafe fn adopt_childen(&self) -> Result<(), SystemError> {
950 match ProcessManager::find(Pid(1)) {
951 Some(init_pcb) => {
952 let childen_guard = self.children.write();
953 let mut init_childen_guard = init_pcb.children.write();
954
955 childen_guard.iter().for_each(|pid| {
956 init_childen_guard.push(*pid);
957 });
958
959 return Ok(());
960 }
961 _ => Err(SystemError::ECHILD),
962 }
963 }
964
    /// Generate the process's name
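    ///
    /// ## Example (illustrative sketch)
    ///
    /// ```ignore
    /// let args = vec![CString::new("-l").unwrap()];
    /// assert_eq!(
    ///     ProcessControlBlock::generate_name("/bin/ls", &args),
    ///     "/bin/ls -l"
    /// );
    /// ```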
    pub fn generate_name(program_path: &str, args: &Vec<CString>) -> String {
967 let mut name = program_path.to_string();
968 for arg in args {
969 name.push(' ');
970 name.push_str(arg.to_string_lossy().as_ref());
971 }
972 return name;
973 }
974
    pub fn sig_info_irqsave(&self) -> RwLockReadGuard<ProcessSignalInfo> {
976 self.sig_info.read_irqsave()
977 }
978
    pub fn try_siginfo_irqsave(&self, times: u8) -> Option<RwLockReadGuard<ProcessSignalInfo>> {
980 for _ in 0..times {
981 if let Some(r) = self.sig_info.try_read_irqsave() {
982 return Some(r);
983 }
984 }
985
986 return None;
987 }
988
    pub fn sig_info_mut(&self) -> RwLockWriteGuard<ProcessSignalInfo> {
990 self.sig_info.write_irqsave()
991 }
992
    pub fn try_siginfo_mut(&self, times: u8) -> Option<RwLockWriteGuard<ProcessSignalInfo>> {
994 for _ in 0..times {
995 if let Some(r) = self.sig_info.try_write_irqsave() {
996 return Some(r);
997 }
998 }
999
1000 return None;
1001 }
1002
    /// Check whether the current process has pending (unhandled) signals
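    ///
    /// ## Example (illustrative sketch)
    ///
    /// Typical use on a return-to-userspace path (hypothetical caller):
    ///
    /// ```ignore
    /// if ProcessManager::current_pcb().has_pending_signal() {
    ///     // go handle the pending signals ...
    /// }
    /// ```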
    pub fn has_pending_signal(&self) -> bool {
1005 let sig_info = self.sig_info_irqsave();
1006 let has_pending = sig_info.sig_pending().has_pending();
1007 drop(sig_info);
1008 return has_pending;
1009 }
1010
    pub fn sig_struct(&self) -> SpinLockGuard<SignalStruct> {
1012 self.sig_struct.lock_irqsave()
1013 }
1014
    pub fn try_sig_struct_irqsave(&self, times: u8) -> Option<SpinLockGuard<SignalStruct>> {
1016 for _ in 0..times {
1017 if let Ok(r) = self.sig_struct.try_lock_irqsave() {
1018 return Some(r);
1019 }
1020 }
1021
1022 return None;
1023 }
1024
    pub fn sig_struct_irqsave(&self) -> SpinLockGuard<SignalStruct> {
1026 self.sig_struct.lock_irqsave()
1027 }
1028
1029 #[inline(always)]
    pub fn get_robust_list(&self) -> RwLockReadGuard<Option<RobustListHead>> {
1031 return self.robust_list.read_irqsave();
1032 }
1033
1034 #[inline(always)]
    pub fn set_robust_list(&self, new_robust_list: Option<RobustListHead>) {
1036 *self.robust_list.write_irqsave() = new_robust_list;
1037 }
1038
    pub fn alarm_timer_irqsave(&self) -> SpinLockGuard<Option<AlarmTimer>> {
1040 return self.alarm_timer.lock_irqsave();
1041 }
1042
    pub fn get_nsproxy(&self) -> Arc<RwLock<NsProxy>> {
1044 self.nsproxy.clone()
1045 }
1046
    pub fn set_nsproxy(&self, nsprsy: NsProxy) {
1048 *self.nsproxy.write() = nsprsy;
1049 }
1050 }
1051
1052 impl Drop for ProcessControlBlock {
    fn drop(&mut self) {
1054 let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        // Unregister the process from ProcFS
1056 procfs_unregister_pid(self.pid())
1057 .unwrap_or_else(|e| panic!("procfs_unregister_pid failed: error: {e:?}"));
1058
1059 if let Some(ppcb) = self.parent_pcb.read_irqsave().upgrade() {
1060 ppcb.children
1061 .write_irqsave()
1062 .retain(|pid| *pid != self.pid());
1063 }
1064
1065 drop(irq_guard);
1066 }
1067 }
1068
/// Thread information
1070 #[derive(Debug)]
1071 pub struct ThreadInfo {
    // User-space address that records the user thread id; it is set to 0 when this thread exits to notify the parent
1073 clear_child_tid: Option<VirtAddr>,
1074 set_child_tid: Option<VirtAddr>,
1075
1076 vfork_done: Option<Arc<Completion>>,
    /// The thread group's leader
1078 group_leader: Weak<ProcessControlBlock>,
1079 }
1080
1081 impl ThreadInfo {
    pub fn new() -> Self {
1083 Self {
1084 clear_child_tid: None,
1085 set_child_tid: None,
1086 vfork_done: None,
1087 group_leader: Weak::default(),
1088 }
1089 }
1090
    pub fn group_leader(&self) -> Option<Arc<ProcessControlBlock>> {
1092 return self.group_leader.upgrade();
1093 }
1094 }
1095
/// Basic information about a process
///
/// This struct stores a process's basic information, mainly data that does not change often while the process runs.
1099 #[derive(Debug)]
1100 pub struct ProcessBasicInfo {
    /// The process group id of this process
    pgid: Pid,
    /// The pid of this process's parent
    ppid: Pid,
    /// The session id this process belongs to
    sid: Pid,
    /// The name of the process
    name: String,

    /// The process's current working directory
    cwd: String,

    /// User address space
    user_vm: Option<Arc<AddressSpace>>,

    /// File descriptor table
    fd_table: Option<Arc<RwLock<FileDescriptorVec>>>,
1118 }
1119
1120 impl ProcessBasicInfo {
1121 #[inline(never)]
    pub fn new(
1123 pgid: Pid,
1124 ppid: Pid,
1125 sid: Pid,
1126 name: String,
1127 cwd: String,
1128 user_vm: Option<Arc<AddressSpace>>,
1129 ) -> RwLock<Self> {
1130 let fd_table = Arc::new(RwLock::new(FileDescriptorVec::new()));
1131 return RwLock::new(Self {
1132 pgid,
1133 ppid,
1134 sid,
1135 name,
1136 cwd,
1137 user_vm,
1138 fd_table: Some(fd_table),
1139 });
1140 }
1141
    pub fn pgid(&self) -> Pid {
1143 return self.pgid;
1144 }
1145
    pub fn ppid(&self) -> Pid {
1147 return self.ppid;
1148 }
1149
    pub fn sid(&self) -> Pid {
1151 return self.sid;
1152 }
1153
    pub fn name(&self) -> &str {
1155 return &self.name;
1156 }
1157
    pub fn set_name(&mut self, name: String) {
1159 self.name = name;
1160 }
1161
    pub fn cwd(&self) -> String {
1163 return self.cwd.clone();
1164 }
    pub fn set_cwd(&mut self, path: String) {
        self.cwd = path;
1167 }
1168
    pub fn user_vm(&self) -> Option<Arc<AddressSpace>> {
1170 return self.user_vm.clone();
1171 }
1172
    pub unsafe fn set_user_vm(&mut self, user_vm: Option<Arc<AddressSpace>>) {
1174 self.user_vm = user_vm;
1175 }
1176
    pub fn fd_table(&self) -> Option<Arc<RwLock<FileDescriptorVec>>> {
1178 return self.fd_table.clone();
1179 }
1180
    pub fn set_fd_table(&mut self, fd_table: Option<Arc<RwLock<FileDescriptorVec>>>) {
1182 self.fd_table = fd_table;
1183 }
1184 }
1185
1186 #[derive(Debug)]
1187 pub struct ProcessSchedulerInfo {
    /// The cpu the process is currently on
    on_cpu: AtomicProcessorId,
    /// If the process is waiting to be migrated to another cpu core (i.e. PF_NEED_MIGRATE is set in flags),
    /// this field stores the id of the target processor core
    // migrate_to: AtomicProcessorId,
    inner_locked: RwLock<InnerSchedInfo>,
    /// The process's scheduling priority
    // priority: SchedPriority,
    /// The process's virtual runtime
    // virtual_runtime: AtomicIsize,
    /// Time slice managed by the realtime scheduler
    // rt_time_slice: AtomicIsize,
    pub sched_stat: RwLock<SchedInfo>,
    /// Scheduling policy
    pub sched_policy: RwLock<crate::sched::SchedPolicy>,
    /// CFS scheduling entity
    pub sched_entity: Arc<FairSchedEntity>,
1205 pub on_rq: SpinLock<OnRq>,
1206
1207 pub prio_data: RwLock<PrioData>,
1208 }
1209
1210 #[derive(Debug, Default)]
1211 #[allow(dead_code)]
1212 pub struct SchedInfo {
    /// Number of times the task has run on a particular cpu
    pub pcount: usize,
    /// Time the task has spent waiting on a run queue
    pub run_delay: usize,
    /// Timestamp of the last time the task ran on a cpu
    pub last_arrival: u64,
    /// Timestamp of the last time the task was enqueued on a run queue
    pub last_queued: u64,
1221 }
1222
1223 #[derive(Debug)]
1224 #[allow(dead_code)]
1225 pub struct PrioData {
1226 pub prio: i32,
1227 pub static_prio: i32,
1228 pub normal_prio: i32,
1229 }
1230
1231 impl Default for PrioData {
    fn default() -> Self {
1233 Self {
1234 prio: MAX_PRIO - 20,
1235 static_prio: MAX_PRIO - 20,
1236 normal_prio: MAX_PRIO - 20,
1237 }
1238 }
1239 }
1240
1241 #[derive(Debug)]
1242 pub struct InnerSchedInfo {
    /// The current state of the process
    state: ProcessState,
    /// Whether the process has been marked for sleep
    sleep: bool,
1247 }
1248
1249 impl InnerSchedInfo {
    pub fn state(&self) -> ProcessState {
1251 return self.state;
1252 }
1253
    pub fn set_state(&mut self, state: ProcessState) {
1255 self.state = state;
1256 }
1257
    pub fn set_sleep(&mut self) {
1259 self.sleep = true;
1260 }
1261
    pub fn set_wakeup(&mut self) {
1263 self.sleep = false;
1264 }
1265
    pub fn is_mark_sleep(&self) -> bool {
1267 self.sleep
1268 }
1269 }
1270
1271 impl ProcessSchedulerInfo {
1272 #[inline(never)]
    pub fn new(on_cpu: Option<ProcessorId>) -> Self {
1274 let cpu_id = on_cpu.unwrap_or(ProcessorId::INVALID);
1275 return Self {
1276 on_cpu: AtomicProcessorId::new(cpu_id),
1277 // migrate_to: AtomicProcessorId::new(ProcessorId::INVALID),
1278 inner_locked: RwLock::new(InnerSchedInfo {
1279 state: ProcessState::Blocked(false),
1280 sleep: false,
1281 }),
1282 // virtual_runtime: AtomicIsize::new(0),
1283 // rt_time_slice: AtomicIsize::new(0),
1284 // priority: SchedPriority::new(100).unwrap(),
1285 sched_stat: RwLock::new(SchedInfo::default()),
1286 sched_policy: RwLock::new(crate::sched::SchedPolicy::CFS),
1287 sched_entity: FairSchedEntity::new(),
1288 on_rq: SpinLock::new(OnRq::None),
1289 prio_data: RwLock::new(PrioData::default()),
1290 };
1291 }
1292
    pub fn sched_entity(&self) -> Arc<FairSchedEntity> {
1294 return self.sched_entity.clone();
1295 }
1296
    pub fn on_cpu(&self) -> Option<ProcessorId> {
1298 let on_cpu = self.on_cpu.load(Ordering::SeqCst);
1299 if on_cpu == ProcessorId::INVALID {
1300 return None;
1301 } else {
1302 return Some(on_cpu);
1303 }
1304 }
1305
    pub fn set_on_cpu(&self, on_cpu: Option<ProcessorId>) {
1307 if let Some(cpu_id) = on_cpu {
1308 self.on_cpu.store(cpu_id, Ordering::SeqCst);
1309 } else {
1310 self.on_cpu.store(ProcessorId::INVALID, Ordering::SeqCst);
1311 }
1312 }
1313
1314 // pub fn migrate_to(&self) -> Option<ProcessorId> {
1315 // let migrate_to = self.migrate_to.load(Ordering::SeqCst);
1316 // if migrate_to == ProcessorId::INVALID {
1317 // return None;
1318 // } else {
1319 // return Some(migrate_to);
1320 // }
1321 // }
1322
1323 // pub fn set_migrate_to(&self, migrate_to: Option<ProcessorId>) {
1324 // if let Some(data) = migrate_to {
1325 // self.migrate_to.store(data, Ordering::SeqCst);
1326 // } else {
1327 // self.migrate_to
1328 // .store(ProcessorId::INVALID, Ordering::SeqCst)
1329 // }
1330 // }
1331
    pub fn inner_lock_write_irqsave(&self) -> RwLockWriteGuard<InnerSchedInfo> {
1333 return self.inner_locked.write_irqsave();
1334 }
1335
    pub fn inner_lock_read_irqsave(&self) -> RwLockReadGuard<InnerSchedInfo> {
1337 return self.inner_locked.read_irqsave();
1338 }
1339
1340 // pub fn inner_lock_try_read_irqsave(
1341 // &self,
1342 // times: u8,
1343 // ) -> Option<RwLockReadGuard<InnerSchedInfo>> {
1344 // for _ in 0..times {
1345 // if let Some(r) = self.inner_locked.try_read_irqsave() {
1346 // return Some(r);
1347 // }
1348 // }
1349
1350 // return None;
1351 // }
1352
1353 // pub fn inner_lock_try_upgradable_read_irqsave(
1354 // &self,
1355 // times: u8,
1356 // ) -> Option<RwLockUpgradableGuard<InnerSchedInfo>> {
1357 // for _ in 0..times {
1358 // if let Some(r) = self.inner_locked.try_upgradeable_read_irqsave() {
1359 // return Some(r);
1360 // }
1361 // }
1362
1363 // return None;
1364 // }
1365
1366 // pub fn virtual_runtime(&self) -> isize {
1367 // return self.virtual_runtime.load(Ordering::SeqCst);
1368 // }
1369
1370 // pub fn set_virtual_runtime(&self, virtual_runtime: isize) {
1371 // self.virtual_runtime
1372 // .store(virtual_runtime, Ordering::SeqCst);
1373 // }
1374 // pub fn increase_virtual_runtime(&self, delta: isize) {
1375 // self.virtual_runtime.fetch_add(delta, Ordering::SeqCst);
1376 // }
1377
1378 // pub fn rt_time_slice(&self) -> isize {
1379 // return self.rt_time_slice.load(Ordering::SeqCst);
1380 // }
1381
1382 // pub fn set_rt_time_slice(&self, rt_time_slice: isize) {
1383 // self.rt_time_slice.store(rt_time_slice, Ordering::SeqCst);
1384 // }
1385
1386 // pub fn increase_rt_time_slice(&self, delta: isize) {
1387 // self.rt_time_slice.fetch_add(delta, Ordering::SeqCst);
1388 // }
1389
    pub fn policy(&self) -> crate::sched::SchedPolicy {
1391 return *self.sched_policy.read_irqsave();
1392 }
1393 }
1394
1395 #[derive(Debug, Clone)]
1396 pub struct KernelStack {
1397 stack: Option<AlignedBox<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>>,
    /// Marks whether this kernel stack may be freed
1399 can_be_freed: bool,
1400 }
1401
1402 impl KernelStack {
1403 pub const SIZE: usize = 0x4000;
1404 pub const ALIGN: usize = 0x4000;
1405
    pub fn new() -> Result<Self, SystemError> {
1407 return Ok(Self {
1408 stack: Some(
1409 AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_zeroed()?,
1410 ),
1411 can_be_freed: true,
1412 });
1413 }
1414
    /// Build a kernel stack struct from existing memory
    ///
    /// Only meant for constructing the idle process's kernel stack during BSP boot.
    /// Using this function anywhere else is very likely to cause errors!
    pub unsafe fn from_existed(base: VirtAddr) -> Result<Self, SystemError> {
1419 if base.is_null() || !base.check_aligned(Self::ALIGN) {
1420 return Err(SystemError::EFAULT);
1421 }
1422
1423 return Ok(Self {
1424 stack: Some(
1425 AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_unchecked(
1426 base.data() as *mut [u8; KernelStack::SIZE],
1427 ),
1428 ),
1429 can_be_freed: false,
1430 });
1431 }
1432
    /// Return the starting (lowest) virtual address of the kernel stack
    pub fn start_address(&self) -> VirtAddr {
1435 return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize);
1436 }
1437
    /// Return the end (highest) virtual address of the kernel stack (exclusive)
    pub fn stack_max_address(&self) -> VirtAddr {
1440 return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize + Self::SIZE);
1441 }
1442
    pub unsafe fn set_pcb(&mut self, pcb: Weak<ProcessControlBlock>) -> Result<(), SystemError> {
        // Place a Weak<ProcessControlBlock> at the lowest address of the kernel stack
1445 let p: *const ProcessControlBlock = Weak::into_raw(pcb);
1446 let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock;
1447
        // If a pcb pointer is already stored at the bottom of the kernel stack, do not overwrite it; return an error
1449 if unlikely(unsafe { !(*stack_bottom_ptr).is_null() }) {
1450 error!("kernel stack bottom is not null: {:p}", *stack_bottom_ptr);
1451 return Err(SystemError::EPERM);
1452 }
        // Store the pcb pointer at the lowest address of the kernel stack
1454 unsafe {
1455 *stack_bottom_ptr = p;
1456 }
1457
1458 return Ok(());
1459 }
1460
    /// Clear the pcb pointer stored in the kernel stack
    ///
    /// ## Parameters
    ///
    /// - `force` : if true, the pointer is cleared unconditionally even if it is non-null,
    ///   without handling the Weak pointer
    pub unsafe fn clear_pcb(&mut self, force: bool) {
1467 let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock;
1468 if unlikely(unsafe { (*stack_bottom_ptr).is_null() }) {
1469 return;
1470 }
1471
1472 if !force {
1473 let pcb_ptr: Weak<ProcessControlBlock> = Weak::from_raw(*stack_bottom_ptr);
1474 drop(pcb_ptr);
1475 }
1476
1477 *stack_bottom_ptr = core::ptr::null();
1478 }
1479
    /// Return an Arc pointer to the pcb recorded in this kernel stack
    #[allow(dead_code)]
    pub unsafe fn pcb(&self) -> Option<Arc<ProcessControlBlock>> {
        // Read the pcb pointer from the lowest address of the kernel stack
1484 let p = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock;
1485 if unlikely(unsafe { (*p).is_null() }) {
1486 return None;
1487 }
1488
        // Wrap the pointer so that drop is never called on it, preventing the pcb pointer stored in the kernel stack from being released
1490 let weak_wrapper: ManuallyDrop<Weak<ProcessControlBlock>> =
1491 ManuallyDrop::new(Weak::from_raw(*p));
1492
1493 let new_arc: Arc<ProcessControlBlock> = weak_wrapper.upgrade()?;
1494 return Some(new_arc);
1495 }
1496 }
1497
1498 impl Drop for KernelStack {
    fn drop(&mut self) {
1500 if self.stack.is_some() {
1501 let ptr = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock;
1502 if unsafe { !(*ptr).is_null() } {
1503 let pcb_ptr: Weak<ProcessControlBlock> = unsafe { Weak::from_raw(*ptr) };
1504 drop(pcb_ptr);
1505 }
1506 }
        // If this kernel stack must not be freed, forget the box here so that AlignedBox's drop is not called
1508 if !self.can_be_freed {
1509 let bx = self.stack.take();
1510 core::mem::forget(bx);
1511 }
1512 }
1513 }
1514
pub fn process_init() {
1516 ProcessManager::init();
1517 }
1518
1519 #[derive(Debug)]
1520 pub struct ProcessSignalInfo {
    // Signals blocked by the current process
    sig_block: SigSet,
    // sig_pending stores the signals to be handled by the current thread
    sig_pending: SigPending,
    // sig_shared_pending stores the signals to be handled by the process this thread belongs to
    sig_shared_pending: SigPending,
    // The tty associated with the current process
    tty: Option<Arc<TtyCore>>,
1529 }
1530
1531 impl ProcessSignalInfo {
    pub fn sig_block(&self) -> &SigSet {
1533 &self.sig_block
1534 }
1535
    pub fn sig_pending(&self) -> &SigPending {
1537 &self.sig_pending
1538 }
1539
    pub fn sig_pending_mut(&mut self) -> &mut SigPending {
1541 &mut self.sig_pending
1542 }
1543
    pub fn sig_block_mut(&mut self) -> &mut SigSet {
1545 &mut self.sig_block
1546 }
1547
    pub fn sig_shared_pending_mut(&mut self) -> &mut SigPending {
1549 &mut self.sig_shared_pending
1550 }
1551
    pub fn sig_shared_pending(&self) -> &SigPending {
1553 &self.sig_shared_pending
1554 }
1555
    pub fn tty(&self) -> Option<Arc<TtyCore>> {
1557 self.tty.clone()
1558 }
1559
    pub fn set_tty(&mut self, tty: Option<Arc<TtyCore>>) {
1561 self.tty = tty;
1562 }
1563
    /// Take the next signal to handle out of the pcb's siginfo; thread-private
    /// signals are handled before process-wide signals
    ///
    /// ## Parameters
    ///
    /// - `sig_mask` : signals to be ignored
    ///
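    /// ## Example (illustrative sketch)
    ///
    /// `pcb` below is a hypothetical `Arc<ProcessControlBlock>`:
    ///
    /// ```ignore
    /// let mut guard = pcb.sig_info_mut();
    /// let blocked = *guard.sig_block();
    /// let (sig, info) = guard.dequeue_signal(&blocked);
    /// if sig != Signal::INVALID {
    ///     // deliver `sig` with `info` ...
    /// }
    /// ```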
    pub fn dequeue_signal(&mut self, sig_mask: &SigSet) -> (Signal, Option<SigInfo>) {
1571 let res = self.sig_pending.dequeue_signal(sig_mask);
1572 if res.0 != Signal::INVALID {
1573 return res;
1574 } else {
1575 return self.sig_shared_pending.dequeue_signal(sig_mask);
1576 }
1577 }
1578 }
1579
1580 impl Default for ProcessSignalInfo {
    fn default() -> Self {
1582 Self {
1583 sig_block: SigSet::empty(),
1584 sig_pending: SigPending::default(),
1585 sig_shared_pending: SigPending::default(),
1586 tty: None,
1587 }
1588 }
1589 }
1590