1 use core::{ 2 hash::Hash, 3 hint::spin_loop, 4 intrinsics::{likely, unlikely}, 5 mem::ManuallyDrop, 6 sync::atomic::{compiler_fence, AtomicBool, AtomicIsize, AtomicUsize, Ordering}, 7 }; 8 9 use alloc::{ 10 string::{String, ToString}, 11 sync::{Arc, Weak}, 12 vec::Vec, 13 }; 14 use hashbrown::HashMap; 15 use system_error::SystemError; 16 17 use crate::{ 18 arch::{ 19 ipc::signal::{AtomicSignal, SigSet, Signal}, 20 process::ArchPCBInfo, 21 sched::sched, 22 CurrentIrqArch, 23 }, 24 driver::tty::tty_core::TtyCore, 25 exception::InterruptArch, 26 filesystem::{ 27 procfs::procfs_unregister_pid, 28 vfs::{file::FileDescriptorVec, FileType}, 29 }, 30 ipc::signal_types::{SigInfo, SigPending, SignalStruct}, 31 kdebug, kinfo, 32 libs::{ 33 align::AlignedBox, 34 casting::DowncastArc, 35 futex::{ 36 constant::{FutexFlag, FUTEX_BITSET_MATCH_ANY}, 37 futex::Futex, 38 }, 39 lock_free_flags::LockFreeFlags, 40 rwlock::{RwLock, RwLockReadGuard, RwLockUpgradableGuard, RwLockWriteGuard}, 41 spinlock::{SpinLock, SpinLockGuard}, 42 wait_queue::WaitQueue, 43 }, 44 mm::{ 45 percpu::{PerCpu, PerCpuVar}, 46 set_IDLE_PROCESS_ADDRESS_SPACE, 47 ucontext::AddressSpace, 48 VirtAddr, 49 }, 50 net::socket::SocketInode, 51 sched::{ 52 completion::Completion, 53 core::{sched_enqueue, CPU_EXECUTING}, 54 SchedPolicy, SchedPriority, 55 }, 56 smp::{ 57 cpu::{AtomicProcessorId, ProcessorId}, 58 kick_cpu, 59 }, 60 syscall::{user_access::clear_user, Syscall}, 61 }; 62 63 use self::kthread::WorkerPrivate; 64 65 pub mod abi; 66 pub mod c_adapter; 67 pub mod exec; 68 pub mod exit; 69 pub mod fork; 70 pub mod idle; 71 pub mod kthread; 72 pub mod pid; 73 pub mod resource; 74 pub mod stdio; 75 pub mod syscall; 76 pub mod utils; 77 78 /// 系统中所有进程的pcb 79 static ALL_PROCESS: SpinLock<Option<HashMap<Pid, Arc<ProcessControlBlock>>>> = SpinLock::new(None); 80 81 pub static mut PROCESS_SWITCH_RESULT: Option<PerCpuVar<SwitchResult>> = None; 82 83 /// 一个只改变1次的全局变量,标志进程管理器是否已经初始化完成 84 static mut __PROCESS_MANAGEMENT_INIT_DONE: bool = false; 85 86 #[derive(Debug)] 87 pub struct SwitchResult { 88 pub prev_pcb: Option<Arc<ProcessControlBlock>>, 89 pub next_pcb: Option<Arc<ProcessControlBlock>>, 90 } 91 92 impl SwitchResult { 93 pub fn new() -> Self { 94 Self { 95 prev_pcb: None, 96 next_pcb: None, 97 } 98 } 99 } 100 101 #[derive(Debug)] 102 pub struct ProcessManager; 103 impl ProcessManager { 104 #[inline(never)] 105 fn init() { 106 static INIT_FLAG: AtomicBool = AtomicBool::new(false); 107 if INIT_FLAG 108 .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) 109 .is_err() 110 { 111 panic!("ProcessManager has been initialized!"); 112 } 113 114 unsafe { 115 compiler_fence(Ordering::SeqCst); 116 kdebug!("To create address space for INIT process."); 117 // test_buddy(); 118 set_IDLE_PROCESS_ADDRESS_SPACE( 119 AddressSpace::new(true).expect("Failed to create address space for INIT process."), 120 ); 121 kdebug!("INIT process address space created."); 122 compiler_fence(Ordering::SeqCst); 123 }; 124 125 ALL_PROCESS.lock_irqsave().replace(HashMap::new()); 126 Self::init_switch_result(); 127 Self::arch_init(); 128 kdebug!("process arch init done."); 129 Self::init_idle(); 130 kdebug!("process idle init done."); 131 132 unsafe { __PROCESS_MANAGEMENT_INIT_DONE = true }; 133 kinfo!("Process Manager initialized."); 134 } 135 136 fn init_switch_result() { 137 let mut switch_res_vec: Vec<SwitchResult> = Vec::new(); 138 for _ in 0..PerCpu::MAX_CPU_NUM { 139 switch_res_vec.push(SwitchResult::new()); 140 } 141 unsafe { 142 PROCESS_SWITCH_RESULT = Some(PerCpuVar::new(switch_res_vec).unwrap()); 143 } 144 } 145 146 /// 判断进程管理器是否已经初始化完成 147 pub fn initialized() -> bool { 148 unsafe { __PROCESS_MANAGEMENT_INIT_DONE } 149 } 150 151 /// 获取当前进程的pcb 152 pub fn current_pcb() -> Arc<ProcessControlBlock> { 153 if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) { 154 kerror!("unsafe__PROCESS_MANAGEMENT_INIT_DONE == false"); 155 loop { 156 spin_loop(); 157 } 158 } 159 return ProcessControlBlock::arch_current_pcb(); 160 } 161 162 /// 获取当前进程的pid 163 /// 164 /// 如果进程管理器未初始化完成,那么返回0 165 pub fn current_pid() -> Pid { 166 if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) { 167 return Pid(0); 168 } 169 170 return ProcessManager::current_pcb().pid(); 171 } 172 173 /// 增加当前进程的锁持有计数 174 #[inline(always)] 175 pub fn preempt_disable() { 176 if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) { 177 ProcessManager::current_pcb().preempt_disable(); 178 } 179 } 180 181 /// 减少当前进程的锁持有计数 182 #[inline(always)] 183 pub fn preempt_enable() { 184 if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) { 185 ProcessManager::current_pcb().preempt_enable(); 186 } 187 } 188 189 /// 根据pid获取进程的pcb 190 /// 191 /// ## 参数 192 /// 193 /// - `pid` : 进程的pid 194 /// 195 /// ## 返回值 196 /// 197 /// 如果找到了对应的进程,那么返回该进程的pcb,否则返回None 198 pub fn find(pid: Pid) -> Option<Arc<ProcessControlBlock>> { 199 return ALL_PROCESS.lock_irqsave().as_ref()?.get(&pid).cloned(); 200 } 201 202 /// 向系统中添加一个进程的pcb 203 /// 204 /// ## 参数 205 /// 206 /// - `pcb` : 进程的pcb 207 /// 208 /// ## 返回值 209 /// 210 /// 无 211 pub fn add_pcb(pcb: Arc<ProcessControlBlock>) { 212 ALL_PROCESS 213 .lock_irqsave() 214 .as_mut() 215 .unwrap() 216 .insert(pcb.pid(), pcb.clone()); 217 } 218 219 /// 唤醒一个进程 220 pub fn wakeup(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> { 221 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 222 let state = pcb.sched_info().inner_lock_read_irqsave().state(); 223 if state.is_blocked() { 224 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 225 let state = writer.state(); 226 if state.is_blocked() { 227 writer.set_state(ProcessState::Runnable); 228 // avoid deadlock 229 drop(writer); 230 231 sched_enqueue(pcb.clone(), true); 232 return Ok(()); 233 } else if state.is_exited() { 234 return Err(SystemError::EINVAL); 235 } else { 236 return Ok(()); 237 } 238 } else if state.is_exited() { 239 return Err(SystemError::EINVAL); 240 } else { 241 return Ok(()); 242 } 243 } 244 245 /// 唤醒暂停的进程 246 pub fn wakeup_stop(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> { 247 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 248 let state = pcb.sched_info().inner_lock_read_irqsave().state(); 249 if let ProcessState::Stopped = state { 250 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 251 let state = writer.state(); 252 if let ProcessState::Stopped = state { 253 writer.set_state(ProcessState::Runnable); 254 // avoid deadlock 255 drop(writer); 256 257 sched_enqueue(pcb.clone(), true); 258 return Ok(()); 259 } else if state.is_runnable() { 260 return Ok(()); 261 } else { 262 return Err(SystemError::EINVAL); 263 } 264 } else if state.is_runnable() { 265 return Ok(()); 266 } else { 267 return Err(SystemError::EINVAL); 268 } 269 } 270 271 /// 标志当前进程永久睡眠,但是发起调度的工作,应该由调用者完成 272 /// 273 /// ## 注意 274 /// 275 /// - 进入当前函数之前,不能持有sched_info的锁 276 /// - 进入当前函数之前,必须关闭中断 277 /// - 进入当前函数之后必须保证逻辑的正确性,避免被重复加入调度队列 278 pub fn mark_sleep(interruptable: bool) -> Result<(), SystemError> { 279 assert!( 280 !CurrentIrqArch::is_irq_enabled(), 281 "interrupt must be disabled before enter ProcessManager::mark_sleep()" 282 ); 283 284 let pcb = ProcessManager::current_pcb(); 285 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 286 if !matches!(writer.state(), ProcessState::Exited(_)) { 287 writer.set_state(ProcessState::Blocked(interruptable)); 288 pcb.flags().insert(ProcessFlags::NEED_SCHEDULE); 289 drop(writer); 290 291 return Ok(()); 292 } 293 return Err(SystemError::EINTR); 294 } 295 296 /// 标志当前进程为停止状态,但是发起调度的工作,应该由调用者完成 297 /// 298 /// ## 注意 299 /// 300 /// - 进入当前函数之前,不能持有sched_info的锁 301 /// - 进入当前函数之前,必须关闭中断 302 pub fn mark_stop() -> Result<(), SystemError> { 303 assert!( 304 !CurrentIrqArch::is_irq_enabled(), 305 "interrupt must be disabled before enter ProcessManager::mark_stop()" 306 ); 307 308 let pcb = ProcessManager::current_pcb(); 309 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 310 if !matches!(writer.state(), ProcessState::Exited(_)) { 311 writer.set_state(ProcessState::Stopped); 312 pcb.flags().insert(ProcessFlags::NEED_SCHEDULE); 313 drop(writer); 314 315 return Ok(()); 316 } 317 return Err(SystemError::EINTR); 318 } 319 /// 当子进程退出后向父进程发送通知 320 fn exit_notify() { 321 let current = ProcessManager::current_pcb(); 322 // 让INIT进程收养所有子进程 323 if current.pid() != Pid(1) { 324 unsafe { 325 current 326 .adopt_childen() 327 .unwrap_or_else(|e| panic!("adopte_childen failed: error: {e:?}")) 328 }; 329 let r = current.parent_pcb.read_irqsave().upgrade(); 330 if r.is_none() { 331 return; 332 } 333 let parent_pcb = r.unwrap(); 334 let r = Syscall::kill(parent_pcb.pid(), Signal::SIGCHLD as i32); 335 if r.is_err() { 336 kwarn!( 337 "failed to send kill signal to {:?}'s parent pcb {:?}", 338 current.pid(), 339 parent_pcb.pid() 340 ); 341 } 342 // todo: 这里需要向父进程发送SIGCHLD信号 343 // todo: 这里还需要根据线程组的信息,决定信号的发送 344 } 345 } 346 347 /// 退出当前进程 348 /// 349 /// ## 参数 350 /// 351 /// - `exit_code` : 进程的退出码 352 pub fn exit(exit_code: usize) -> ! { 353 // 关中断 354 unsafe { CurrentIrqArch::interrupt_disable() }; 355 let pcb = ProcessManager::current_pcb(); 356 let pid = pcb.pid(); 357 pcb.sched_info 358 .inner_lock_write_irqsave() 359 .set_state(ProcessState::Exited(exit_code)); 360 pcb.wait_queue.wakeup(Some(ProcessState::Blocked(true))); 361 362 // 进行进程退出后的工作 363 let thread = pcb.thread.write_irqsave(); 364 if let Some(addr) = thread.set_child_tid { 365 unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") }; 366 } 367 368 if let Some(addr) = thread.clear_child_tid { 369 if Arc::strong_count(&pcb.basic().user_vm().expect("User VM Not found")) > 1 { 370 let _ = 371 Futex::futex_wake(addr, FutexFlag::FLAGS_MATCH_NONE, 1, FUTEX_BITSET_MATCH_ANY); 372 } 373 unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") }; 374 } 375 376 // 如果是vfork出来的进程,则需要处理completion 377 if thread.vfork_done.is_some() { 378 thread.vfork_done.as_ref().unwrap().complete_all(); 379 } 380 drop(thread); 381 unsafe { pcb.basic_mut().set_user_vm(None) }; 382 drop(pcb); 383 ProcessManager::exit_notify(); 384 unsafe { CurrentIrqArch::interrupt_enable() }; 385 386 sched(); 387 kerror!("pid {pid:?} exited but sched again!"); 388 #[allow(clippy::empty_loop)] 389 loop { 390 spin_loop(); 391 } 392 } 393 394 pub unsafe fn release(pid: Pid) { 395 let pcb = ProcessManager::find(pid); 396 if pcb.is_some() { 397 // let pcb = pcb.unwrap(); 398 // 判断该pcb是否在全局没有任何引用 399 // TODO: 当前,pcb的Arc指针存在泄露问题,引用计数不正确,打算在接下来实现debug专用的Arc,方便调试,然后解决这个bug。 400 // 因此目前暂时注释掉,使得能跑 401 // if Arc::strong_count(&pcb) <= 2 { 402 // drop(pcb); 403 // ALL_PROCESS.lock().as_mut().unwrap().remove(&pid); 404 // } else { 405 // // 如果不为1就panic 406 // let msg = format!("pcb '{:?}' is still referenced, strong count={}",pcb.pid(), Arc::strong_count(&pcb)); 407 // kerror!("{}", msg); 408 // panic!() 409 // } 410 411 ALL_PROCESS.lock_irqsave().as_mut().unwrap().remove(&pid); 412 } 413 } 414 415 /// 上下文切换完成后的钩子函数 416 unsafe fn switch_finish_hook() { 417 // kdebug!("switch_finish_hook"); 418 let prev_pcb = PROCESS_SWITCH_RESULT 419 .as_mut() 420 .unwrap() 421 .get_mut() 422 .prev_pcb 423 .take() 424 .expect("prev_pcb is None"); 425 let next_pcb = PROCESS_SWITCH_RESULT 426 .as_mut() 427 .unwrap() 428 .get_mut() 429 .next_pcb 430 .take() 431 .expect("next_pcb is None"); 432 433 // 由于进程切换前使用了SpinLockGuard::leak(),所以这里需要手动释放锁 434 prev_pcb.arch_info.force_unlock(); 435 next_pcb.arch_info.force_unlock(); 436 } 437 438 /// 如果目标进程正在目标CPU上运行,那么就让这个cpu陷入内核态 439 /// 440 /// ## 参数 441 /// 442 /// - `pcb` : 进程的pcb 443 #[allow(dead_code)] 444 pub fn kick(pcb: &Arc<ProcessControlBlock>) { 445 ProcessManager::current_pcb().preempt_disable(); 446 let cpu_id = pcb.sched_info().on_cpu(); 447 448 if let Some(cpu_id) = cpu_id { 449 if pcb.pid() == CPU_EXECUTING.get(cpu_id) { 450 kick_cpu(cpu_id).expect("ProcessManager::kick(): Failed to kick cpu"); 451 } 452 } 453 454 ProcessManager::current_pcb().preempt_enable(); 455 } 456 } 457 458 /// 上下文切换的钩子函数,当这个函数return的时候,将会发生上下文切换 459 #[cfg(target_arch = "x86_64")] 460 #[inline(never)] 461 pub unsafe extern "sysv64" fn switch_finish_hook() { 462 ProcessManager::switch_finish_hook(); 463 } 464 #[cfg(target_arch = "riscv64")] 465 pub unsafe extern "C" fn switch_finish_hook() { 466 ProcessManager::switch_finish_hook(); 467 } 468 469 int_like!(Pid, AtomicPid, usize, AtomicUsize); 470 471 impl ToString for Pid { 472 fn to_string(&self) -> String { 473 self.0.to_string() 474 } 475 } 476 477 #[derive(Debug, Clone, Copy, PartialEq, Eq)] 478 pub enum ProcessState { 479 /// The process is running on a CPU or in a run queue. 480 Runnable, 481 /// The process is waiting for an event to occur. 482 /// 其中的bool表示该等待过程是否可以被打断。 483 /// - 如果该bool为true,那么,硬件中断/信号/其他系统事件都可以打断该等待过程,使得该进程重新进入Runnable状态。 484 /// - 如果该bool为false,那么,这个进程必须被显式的唤醒,才能重新进入Runnable状态。 485 Blocked(bool), 486 /// 进程被信号终止 487 Stopped, 488 /// 进程已经退出,usize表示进程的退出码 489 Exited(usize), 490 } 491 492 #[allow(dead_code)] 493 impl ProcessState { 494 #[inline(always)] 495 pub fn is_runnable(&self) -> bool { 496 return matches!(self, ProcessState::Runnable); 497 } 498 499 #[inline(always)] 500 pub fn is_blocked(&self) -> bool { 501 return matches!(self, ProcessState::Blocked(_)); 502 } 503 504 #[inline(always)] 505 pub fn is_blocked_interruptable(&self) -> bool { 506 return matches!(self, ProcessState::Blocked(true)); 507 } 508 509 /// Returns `true` if the process state is [`Exited`]. 510 #[inline(always)] 511 pub fn is_exited(&self) -> bool { 512 return matches!(self, ProcessState::Exited(_)); 513 } 514 515 /// Returns `true` if the process state is [`Stopped`]. 516 /// 517 /// [`Stopped`]: ProcessState::Stopped 518 #[inline(always)] 519 pub fn is_stopped(&self) -> bool { 520 matches!(self, ProcessState::Stopped) 521 } 522 523 /// Returns exit code if the process state is [`Exited`]. 524 #[inline(always)] 525 pub fn exit_code(&self) -> Option<usize> { 526 match self { 527 ProcessState::Exited(code) => Some(*code), 528 _ => None, 529 } 530 } 531 } 532 533 bitflags! { 534 /// pcb的标志位 535 pub struct ProcessFlags: usize { 536 /// 当前pcb表示一个内核线程 537 const KTHREAD = 1 << 0; 538 /// 当前进程需要被调度 539 const NEED_SCHEDULE = 1 << 1; 540 /// 进程由于vfork而与父进程存在资源共享 541 const VFORK = 1 << 2; 542 /// 进程不可被冻结 543 const NOFREEZE = 1 << 3; 544 /// 进程正在退出 545 const EXITING = 1 << 4; 546 /// 进程由于接收到终止信号唤醒 547 const WAKEKILL = 1 << 5; 548 /// 进程由于接收到信号而退出.(Killed by a signal) 549 const SIGNALED = 1 << 6; 550 /// 进程需要迁移到其他cpu上 551 const NEED_MIGRATE = 1 << 7; 552 /// 随机化的虚拟地址空间,主要用于动态链接器的加载 553 const RANDOMIZE = 1 << 8; 554 } 555 } 556 557 #[derive(Debug)] 558 pub struct ProcessControlBlock { 559 /// 当前进程的pid 560 pid: Pid, 561 /// 当前进程的线程组id(这个值在同一个线程组内永远不变) 562 tgid: Pid, 563 564 basic: RwLock<ProcessBasicInfo>, 565 /// 当前进程的自旋锁持有计数 566 preempt_count: AtomicUsize, 567 568 flags: LockFreeFlags<ProcessFlags>, 569 worker_private: SpinLock<Option<WorkerPrivate>>, 570 /// 进程的内核栈 571 kernel_stack: RwLock<KernelStack>, 572 573 /// 系统调用栈 574 syscall_stack: RwLock<KernelStack>, 575 576 /// 与调度相关的信息 577 sched_info: ProcessSchedulerInfo, 578 /// 与处理器架构相关的信息 579 arch_info: SpinLock<ArchPCBInfo>, 580 /// 与信号处理相关的信息(似乎可以是无锁的) 581 sig_info: RwLock<ProcessSignalInfo>, 582 /// 信号处理结构体 583 sig_struct: SpinLock<SignalStruct>, 584 /// 退出信号S 585 exit_signal: AtomicSignal, 586 587 /// 父进程指针 588 parent_pcb: RwLock<Weak<ProcessControlBlock>>, 589 /// 真实父进程指针 590 real_parent_pcb: RwLock<Weak<ProcessControlBlock>>, 591 592 /// 子进程链表 593 children: RwLock<Vec<Pid>>, 594 595 /// 等待队列 596 wait_queue: WaitQueue, 597 598 /// 线程信息 599 thread: RwLock<ThreadInfo>, 600 } 601 602 impl ProcessControlBlock { 603 /// Generate a new pcb. 604 /// 605 /// ## 参数 606 /// 607 /// - `name` : 进程的名字 608 /// - `kstack` : 进程的内核栈 609 /// 610 /// ## 返回值 611 /// 612 /// 返回一个新的pcb 613 pub fn new(name: String, kstack: KernelStack) -> Arc<Self> { 614 return Self::do_create_pcb(name, kstack, false); 615 } 616 617 /// 创建一个新的idle进程 618 /// 619 /// 请注意,这个函数只能在进程管理初始化的时候调用。 620 pub fn new_idle(cpu_id: u32, kstack: KernelStack) -> Arc<Self> { 621 let name = format!("idle-{}", cpu_id); 622 return Self::do_create_pcb(name, kstack, true); 623 } 624 625 #[inline(never)] 626 fn do_create_pcb(name: String, kstack: KernelStack, is_idle: bool) -> Arc<Self> { 627 let (pid, ppid, cwd) = if is_idle { 628 (Pid(0), Pid(0), "/".to_string()) 629 } else { 630 let ppid = ProcessManager::current_pcb().pid(); 631 let cwd = ProcessManager::current_pcb().basic().cwd(); 632 (Self::generate_pid(), ppid, cwd) 633 }; 634 635 let basic_info = ProcessBasicInfo::new(Pid(0), ppid, name, cwd, None); 636 let preempt_count = AtomicUsize::new(0); 637 let flags = unsafe { LockFreeFlags::new(ProcessFlags::empty()) }; 638 639 let sched_info = ProcessSchedulerInfo::new(None); 640 let arch_info = SpinLock::new(ArchPCBInfo::new(&kstack)); 641 642 let ppcb: Weak<ProcessControlBlock> = ProcessManager::find(ppid) 643 .map(|p| Arc::downgrade(&p)) 644 .unwrap_or_default(); 645 646 let pcb = Self { 647 pid, 648 tgid: pid, 649 basic: basic_info, 650 preempt_count, 651 flags, 652 kernel_stack: RwLock::new(kstack), 653 syscall_stack: RwLock::new(KernelStack::new().unwrap()), 654 worker_private: SpinLock::new(None), 655 sched_info, 656 arch_info, 657 sig_info: RwLock::new(ProcessSignalInfo::default()), 658 sig_struct: SpinLock::new(SignalStruct::new()), 659 exit_signal: AtomicSignal::new(Signal::SIGCHLD), 660 parent_pcb: RwLock::new(ppcb.clone()), 661 real_parent_pcb: RwLock::new(ppcb), 662 children: RwLock::new(Vec::new()), 663 wait_queue: WaitQueue::default(), 664 thread: RwLock::new(ThreadInfo::new()), 665 }; 666 667 // 初始化系统调用栈 668 #[cfg(target_arch = "x86_64")] 669 pcb.arch_info 670 .lock() 671 .init_syscall_stack(&pcb.syscall_stack.read()); 672 673 let pcb = Arc::new(pcb); 674 675 // 设置进程的arc指针到内核栈和系统调用栈的最低地址处 676 unsafe { 677 pcb.kernel_stack 678 .write() 679 .set_pcb(Arc::downgrade(&pcb)) 680 .unwrap(); 681 682 pcb.syscall_stack 683 .write() 684 .set_pcb(Arc::downgrade(&pcb)) 685 .unwrap() 686 }; 687 688 // 将当前pcb加入父进程的子进程哈希表中 689 if pcb.pid() > Pid(1) { 690 if let Some(ppcb_arc) = pcb.parent_pcb.read_irqsave().upgrade() { 691 let mut children = ppcb_arc.children.write_irqsave(); 692 children.push(pcb.pid()); 693 } else { 694 panic!("parent pcb is None"); 695 } 696 } 697 698 return pcb; 699 } 700 701 /// 生成一个新的pid 702 #[inline(always)] 703 fn generate_pid() -> Pid { 704 static NEXT_PID: AtomicPid = AtomicPid::new(Pid(1)); 705 return NEXT_PID.fetch_add(Pid(1), Ordering::SeqCst); 706 } 707 708 /// 返回当前进程的锁持有计数 709 #[inline(always)] 710 pub fn preempt_count(&self) -> usize { 711 return self.preempt_count.load(Ordering::SeqCst); 712 } 713 714 /// 增加当前进程的锁持有计数 715 #[inline(always)] 716 pub fn preempt_disable(&self) { 717 self.preempt_count.fetch_add(1, Ordering::SeqCst); 718 } 719 720 /// 减少当前进程的锁持有计数 721 #[inline(always)] 722 pub fn preempt_enable(&self) { 723 self.preempt_count.fetch_sub(1, Ordering::SeqCst); 724 } 725 726 #[inline(always)] 727 pub unsafe fn set_preempt_count(&self, count: usize) { 728 self.preempt_count.store(count, Ordering::SeqCst); 729 } 730 731 #[inline(always)] 732 pub fn flags(&self) -> &mut ProcessFlags { 733 return self.flags.get_mut(); 734 } 735 736 /// 请注意,这个值能在中断上下文中读取,但不能被中断上下文修改 737 /// 否则会导致死锁 738 #[inline(always)] 739 pub fn basic(&self) -> RwLockReadGuard<ProcessBasicInfo> { 740 return self.basic.read_irqsave(); 741 } 742 743 #[inline(always)] 744 pub fn set_name(&self, name: String) { 745 self.basic.write().set_name(name); 746 } 747 748 #[inline(always)] 749 pub fn basic_mut(&self) -> RwLockWriteGuard<ProcessBasicInfo> { 750 return self.basic.write_irqsave(); 751 } 752 753 /// # 获取arch info的锁,同时关闭中断 754 #[inline(always)] 755 pub fn arch_info_irqsave(&self) -> SpinLockGuard<ArchPCBInfo> { 756 return self.arch_info.lock_irqsave(); 757 } 758 759 /// # 获取arch info的锁,但是不关闭中断 760 /// 761 /// 由于arch info在进程切换的时候会使用到, 762 /// 因此在中断上下文外,获取arch info 而不irqsave是不安全的. 763 /// 764 /// 只能在以下情况下使用这个函数: 765 /// - 在中断上下文中(中断已经禁用),获取arch info的锁。 766 /// - 刚刚创建新的pcb 767 #[inline(always)] 768 pub unsafe fn arch_info(&self) -> SpinLockGuard<ArchPCBInfo> { 769 return self.arch_info.lock(); 770 } 771 772 #[inline(always)] 773 pub fn kernel_stack(&self) -> RwLockReadGuard<KernelStack> { 774 return self.kernel_stack.read(); 775 } 776 777 #[inline(always)] 778 #[allow(dead_code)] 779 pub fn kernel_stack_mut(&self) -> RwLockWriteGuard<KernelStack> { 780 return self.kernel_stack.write(); 781 } 782 783 #[inline(always)] 784 pub fn sched_info(&self) -> &ProcessSchedulerInfo { 785 return &self.sched_info; 786 } 787 788 #[inline(always)] 789 pub fn worker_private(&self) -> SpinLockGuard<Option<WorkerPrivate>> { 790 return self.worker_private.lock(); 791 } 792 793 #[inline(always)] 794 pub fn pid(&self) -> Pid { 795 return self.pid; 796 } 797 798 #[inline(always)] 799 pub fn tgid(&self) -> Pid { 800 return self.tgid; 801 } 802 803 /// 获取文件描述符表的Arc指针 804 #[inline(always)] 805 pub fn fd_table(&self) -> Arc<RwLock<FileDescriptorVec>> { 806 return self.basic.read().fd_table().unwrap(); 807 } 808 809 /// 根据文件描述符序号,获取socket对象的Arc指针 810 /// 811 /// ## 参数 812 /// 813 /// - `fd` 文件描述符序号 814 /// 815 /// ## 返回值 816 /// 817 /// Option(&mut Box<dyn Socket>) socket对象的可变引用. 如果文件描述符不是socket,那么返回None 818 pub fn get_socket(&self, fd: i32) -> Option<Arc<SocketInode>> { 819 let binding = ProcessManager::current_pcb().fd_table(); 820 let fd_table_guard = binding.read(); 821 822 let f = fd_table_guard.get_file_by_fd(fd)?; 823 drop(fd_table_guard); 824 825 let guard = f.lock(); 826 if guard.file_type() != FileType::Socket { 827 return None; 828 } 829 let socket: Arc<SocketInode> = guard 830 .inode() 831 .downcast_arc::<SocketInode>() 832 .expect("Not a socket inode"); 833 return Some(socket); 834 } 835 836 /// 当前进程退出时,让初始进程收养所有子进程 837 unsafe fn adopt_childen(&self) -> Result<(), SystemError> { 838 match ProcessManager::find(Pid(1)) { 839 Some(init_pcb) => { 840 let childen_guard = self.children.write(); 841 let mut init_childen_guard = init_pcb.children.write(); 842 843 childen_guard.iter().for_each(|pid| { 844 init_childen_guard.push(*pid); 845 }); 846 847 return Ok(()); 848 } 849 _ => Err(SystemError::ECHILD), 850 } 851 } 852 853 /// 生成进程的名字 854 pub fn generate_name(program_path: &str, args: &Vec<String>) -> String { 855 let mut name = program_path.to_string(); 856 for arg in args { 857 name.push(' '); 858 name.push_str(arg); 859 } 860 return name; 861 } 862 863 pub fn sig_info_irqsave(&self) -> RwLockReadGuard<ProcessSignalInfo> { 864 self.sig_info.read_irqsave() 865 } 866 867 pub fn try_siginfo_irqsave(&self, times: u8) -> Option<RwLockReadGuard<ProcessSignalInfo>> { 868 for _ in 0..times { 869 if let Some(r) = self.sig_info.try_read_irqsave() { 870 return Some(r); 871 } 872 } 873 874 return None; 875 } 876 877 pub fn sig_info_mut(&self) -> RwLockWriteGuard<ProcessSignalInfo> { 878 self.sig_info.write_irqsave() 879 } 880 881 pub fn try_siginfo_mut(&self, times: u8) -> Option<RwLockWriteGuard<ProcessSignalInfo>> { 882 for _ in 0..times { 883 if let Some(r) = self.sig_info.try_write_irqsave() { 884 return Some(r); 885 } 886 } 887 888 return None; 889 } 890 891 pub fn sig_struct(&self) -> SpinLockGuard<SignalStruct> { 892 self.sig_struct.lock_irqsave() 893 } 894 895 pub fn try_sig_struct_irqsave(&self, times: u8) -> Option<SpinLockGuard<SignalStruct>> { 896 for _ in 0..times { 897 if let Ok(r) = self.sig_struct.try_lock_irqsave() { 898 return Some(r); 899 } 900 } 901 902 return None; 903 } 904 905 pub fn sig_struct_irqsave(&self) -> SpinLockGuard<SignalStruct> { 906 self.sig_struct.lock_irqsave() 907 } 908 } 909 910 impl Drop for ProcessControlBlock { 911 fn drop(&mut self) { 912 let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 913 // 在ProcFS中,解除进程的注册 914 procfs_unregister_pid(self.pid()) 915 .unwrap_or_else(|e| panic!("procfs_unregister_pid failed: error: {e:?}")); 916 917 if let Some(ppcb) = self.parent_pcb.read_irqsave().upgrade() { 918 ppcb.children 919 .write_irqsave() 920 .retain(|pid| *pid != self.pid()); 921 } 922 923 drop(irq_guard); 924 } 925 } 926 927 /// 线程信息 928 #[derive(Debug)] 929 pub struct ThreadInfo { 930 // 来自用户空间记录用户线程id的地址,在该线程结束时将该地址置0以通知父进程 931 clear_child_tid: Option<VirtAddr>, 932 set_child_tid: Option<VirtAddr>, 933 934 vfork_done: Option<Arc<Completion>>, 935 /// 线程组的组长 936 group_leader: Weak<ProcessControlBlock>, 937 } 938 939 impl ThreadInfo { 940 pub fn new() -> Self { 941 Self { 942 clear_child_tid: None, 943 set_child_tid: None, 944 vfork_done: None, 945 group_leader: Weak::default(), 946 } 947 } 948 949 pub fn group_leader(&self) -> Option<Arc<ProcessControlBlock>> { 950 return self.group_leader.upgrade(); 951 } 952 } 953 954 /// 进程的基本信息 955 /// 956 /// 这个结构体保存进程的基本信息,主要是那些不会随着进程的运行而经常改变的信息。 957 #[derive(Debug)] 958 pub struct ProcessBasicInfo { 959 /// 当前进程的进程组id 960 pgid: Pid, 961 /// 当前进程的父进程的pid 962 ppid: Pid, 963 /// 进程的名字 964 name: String, 965 966 /// 当前进程的工作目录 967 cwd: String, 968 969 /// 用户地址空间 970 user_vm: Option<Arc<AddressSpace>>, 971 972 /// 文件描述符表 973 fd_table: Option<Arc<RwLock<FileDescriptorVec>>>, 974 } 975 976 impl ProcessBasicInfo { 977 #[inline(never)] 978 pub fn new( 979 pgid: Pid, 980 ppid: Pid, 981 name: String, 982 cwd: String, 983 user_vm: Option<Arc<AddressSpace>>, 984 ) -> RwLock<Self> { 985 let fd_table = Arc::new(RwLock::new(FileDescriptorVec::new())); 986 return RwLock::new(Self { 987 pgid, 988 ppid, 989 name, 990 cwd, 991 user_vm, 992 fd_table: Some(fd_table), 993 }); 994 } 995 996 pub fn pgid(&self) -> Pid { 997 return self.pgid; 998 } 999 1000 pub fn ppid(&self) -> Pid { 1001 return self.ppid; 1002 } 1003 1004 pub fn name(&self) -> &str { 1005 return &self.name; 1006 } 1007 1008 pub fn set_name(&mut self, name: String) { 1009 self.name = name; 1010 } 1011 1012 pub fn cwd(&self) -> String { 1013 return self.cwd.clone(); 1014 } 1015 pub fn set_cwd(&mut self, path: String) { 1016 return self.cwd = path; 1017 } 1018 1019 pub fn user_vm(&self) -> Option<Arc<AddressSpace>> { 1020 return self.user_vm.clone(); 1021 } 1022 1023 pub unsafe fn set_user_vm(&mut self, user_vm: Option<Arc<AddressSpace>>) { 1024 self.user_vm = user_vm; 1025 } 1026 1027 pub fn fd_table(&self) -> Option<Arc<RwLock<FileDescriptorVec>>> { 1028 return self.fd_table.clone(); 1029 } 1030 1031 pub fn set_fd_table(&mut self, fd_table: Option<Arc<RwLock<FileDescriptorVec>>>) { 1032 self.fd_table = fd_table; 1033 } 1034 } 1035 1036 #[derive(Debug)] 1037 pub struct ProcessSchedulerInfo { 1038 /// 当前进程所在的cpu 1039 on_cpu: AtomicProcessorId, 1040 /// 如果当前进程等待被迁移到另一个cpu核心上(也就是flags中的PF_NEED_MIGRATE被置位), 1041 /// 该字段存储要被迁移到的目标处理器核心号 1042 migrate_to: AtomicProcessorId, 1043 inner_locked: RwLock<InnerSchedInfo>, 1044 /// 进程的调度优先级 1045 priority: SchedPriority, 1046 /// 当前进程的虚拟运行时间 1047 virtual_runtime: AtomicIsize, 1048 /// 由实时调度器管理的时间片 1049 rt_time_slice: AtomicIsize, 1050 } 1051 1052 #[derive(Debug)] 1053 pub struct InnerSchedInfo { 1054 /// 当前进程的状态 1055 state: ProcessState, 1056 /// 进程的调度策略 1057 sched_policy: SchedPolicy, 1058 } 1059 1060 impl InnerSchedInfo { 1061 pub fn state(&self) -> ProcessState { 1062 return self.state; 1063 } 1064 1065 pub fn set_state(&mut self, state: ProcessState) { 1066 self.state = state; 1067 } 1068 1069 pub fn policy(&self) -> SchedPolicy { 1070 return self.sched_policy; 1071 } 1072 } 1073 1074 impl ProcessSchedulerInfo { 1075 #[inline(never)] 1076 pub fn new(on_cpu: Option<ProcessorId>) -> Self { 1077 let cpu_id = on_cpu.unwrap_or(ProcessorId::INVALID); 1078 return Self { 1079 on_cpu: AtomicProcessorId::new(cpu_id), 1080 migrate_to: AtomicProcessorId::new(ProcessorId::INVALID), 1081 inner_locked: RwLock::new(InnerSchedInfo { 1082 state: ProcessState::Blocked(false), 1083 sched_policy: SchedPolicy::CFS, 1084 }), 1085 virtual_runtime: AtomicIsize::new(0), 1086 rt_time_slice: AtomicIsize::new(0), 1087 priority: SchedPriority::new(100).unwrap(), 1088 }; 1089 } 1090 1091 pub fn on_cpu(&self) -> Option<ProcessorId> { 1092 let on_cpu = self.on_cpu.load(Ordering::SeqCst); 1093 if on_cpu == ProcessorId::INVALID { 1094 return None; 1095 } else { 1096 return Some(on_cpu); 1097 } 1098 } 1099 1100 pub fn set_on_cpu(&self, on_cpu: Option<ProcessorId>) { 1101 if let Some(cpu_id) = on_cpu { 1102 self.on_cpu.store(cpu_id, Ordering::SeqCst); 1103 } else { 1104 self.on_cpu.store(ProcessorId::INVALID, Ordering::SeqCst); 1105 } 1106 } 1107 1108 pub fn migrate_to(&self) -> Option<ProcessorId> { 1109 let migrate_to = self.migrate_to.load(Ordering::SeqCst); 1110 if migrate_to == ProcessorId::INVALID { 1111 return None; 1112 } else { 1113 return Some(migrate_to); 1114 } 1115 } 1116 1117 pub fn set_migrate_to(&self, migrate_to: Option<ProcessorId>) { 1118 if let Some(data) = migrate_to { 1119 self.migrate_to.store(data, Ordering::SeqCst); 1120 } else { 1121 self.migrate_to 1122 .store(ProcessorId::INVALID, Ordering::SeqCst) 1123 } 1124 } 1125 1126 pub fn inner_lock_write_irqsave(&self) -> RwLockWriteGuard<InnerSchedInfo> { 1127 return self.inner_locked.write_irqsave(); 1128 } 1129 1130 pub fn inner_lock_read_irqsave(&self) -> RwLockReadGuard<InnerSchedInfo> { 1131 return self.inner_locked.read_irqsave(); 1132 } 1133 1134 pub fn inner_lock_try_read_irqsave( 1135 &self, 1136 times: u8, 1137 ) -> Option<RwLockReadGuard<InnerSchedInfo>> { 1138 for _ in 0..times { 1139 if let Some(r) = self.inner_locked.try_read_irqsave() { 1140 return Some(r); 1141 } 1142 } 1143 1144 return None; 1145 } 1146 1147 pub fn inner_lock_try_upgradable_read_irqsave( 1148 &self, 1149 times: u8, 1150 ) -> Option<RwLockUpgradableGuard<InnerSchedInfo>> { 1151 for _ in 0..times { 1152 if let Some(r) = self.inner_locked.try_upgradeable_read_irqsave() { 1153 return Some(r); 1154 } 1155 } 1156 1157 return None; 1158 } 1159 1160 pub fn virtual_runtime(&self) -> isize { 1161 return self.virtual_runtime.load(Ordering::SeqCst); 1162 } 1163 1164 pub fn set_virtual_runtime(&self, virtual_runtime: isize) { 1165 self.virtual_runtime 1166 .store(virtual_runtime, Ordering::SeqCst); 1167 } 1168 pub fn increase_virtual_runtime(&self, delta: isize) { 1169 self.virtual_runtime.fetch_add(delta, Ordering::SeqCst); 1170 } 1171 1172 pub fn rt_time_slice(&self) -> isize { 1173 return self.rt_time_slice.load(Ordering::SeqCst); 1174 } 1175 1176 pub fn set_rt_time_slice(&self, rt_time_slice: isize) { 1177 self.rt_time_slice.store(rt_time_slice, Ordering::SeqCst); 1178 } 1179 1180 pub fn increase_rt_time_slice(&self, delta: isize) { 1181 self.rt_time_slice.fetch_add(delta, Ordering::SeqCst); 1182 } 1183 1184 pub fn priority(&self) -> SchedPriority { 1185 return self.priority; 1186 } 1187 } 1188 1189 #[derive(Debug, Clone)] 1190 pub struct KernelStack { 1191 stack: Option<AlignedBox<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>>, 1192 /// 标记该内核栈是否可以被释放 1193 can_be_freed: bool, 1194 } 1195 1196 impl KernelStack { 1197 pub const SIZE: usize = 0x4000; 1198 pub const ALIGN: usize = 0x4000; 1199 1200 pub fn new() -> Result<Self, SystemError> { 1201 return Ok(Self { 1202 stack: Some( 1203 AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_zeroed()?, 1204 ), 1205 can_be_freed: true, 1206 }); 1207 } 1208 1209 /// 根据已有的空间,构造一个内核栈结构体 1210 /// 1211 /// 仅仅用于BSP启动时,为idle进程构造内核栈。其他时候使用这个函数,很可能造成错误! 1212 pub unsafe fn from_existed(base: VirtAddr) -> Result<Self, SystemError> { 1213 if base.is_null() || !base.check_aligned(Self::ALIGN) { 1214 return Err(SystemError::EFAULT); 1215 } 1216 1217 return Ok(Self { 1218 stack: Some( 1219 AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_unchecked( 1220 base.data() as *mut [u8; KernelStack::SIZE], 1221 ), 1222 ), 1223 can_be_freed: false, 1224 }); 1225 } 1226 1227 /// 返回内核栈的起始虚拟地址(低地址) 1228 pub fn start_address(&self) -> VirtAddr { 1229 return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize); 1230 } 1231 1232 /// 返回内核栈的结束虚拟地址(高地址)(不包含该地址) 1233 pub fn stack_max_address(&self) -> VirtAddr { 1234 return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize + Self::SIZE); 1235 } 1236 1237 pub unsafe fn set_pcb(&mut self, pcb: Weak<ProcessControlBlock>) -> Result<(), SystemError> { 1238 // 将一个Weak<ProcessControlBlock>放到内核栈的最低地址处 1239 let p: *const ProcessControlBlock = Weak::into_raw(pcb); 1240 let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock; 1241 1242 // 如果内核栈的最低地址处已经有了一个pcb,那么,这里就不再设置,直接返回错误 1243 if unlikely(unsafe { !(*stack_bottom_ptr).is_null() }) { 1244 kerror!("kernel stack bottom is not null: {:p}", *stack_bottom_ptr); 1245 return Err(SystemError::EPERM); 1246 } 1247 // 将pcb的地址放到内核栈的最低地址处 1248 unsafe { 1249 *stack_bottom_ptr = p; 1250 } 1251 1252 return Ok(()); 1253 } 1254 1255 /// 清除内核栈的pcb指针 1256 /// 1257 /// ## 参数 1258 /// 1259 /// - `force` : 如果为true,那么,即使该内核栈的pcb指针不为null,也会被强制清除而不处理Weak指针问题 1260 pub unsafe fn clear_pcb(&mut self, force: bool) { 1261 let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock; 1262 if unlikely(unsafe { (*stack_bottom_ptr).is_null() }) { 1263 return; 1264 } 1265 1266 if !force { 1267 let pcb_ptr: Weak<ProcessControlBlock> = Weak::from_raw(*stack_bottom_ptr); 1268 drop(pcb_ptr); 1269 } 1270 1271 *stack_bottom_ptr = core::ptr::null(); 1272 } 1273 1274 /// 返回指向当前内核栈pcb的Arc指针 1275 #[allow(dead_code)] 1276 pub unsafe fn pcb(&self) -> Option<Arc<ProcessControlBlock>> { 1277 // 从内核栈的最低地址处取出pcb的地址 1278 let p = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock; 1279 if unlikely(unsafe { (*p).is_null() }) { 1280 return None; 1281 } 1282 1283 // 为了防止内核栈的pcb指针被释放,这里需要将其包装一下,使得Arc的drop不会被调用 1284 let weak_wrapper: ManuallyDrop<Weak<ProcessControlBlock>> = 1285 ManuallyDrop::new(Weak::from_raw(*p)); 1286 1287 let new_arc: Arc<ProcessControlBlock> = weak_wrapper.upgrade()?; 1288 return Some(new_arc); 1289 } 1290 } 1291 1292 impl Drop for KernelStack { 1293 fn drop(&mut self) { 1294 if self.stack.is_some() { 1295 let ptr = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock; 1296 if unsafe { !(*ptr).is_null() } { 1297 let pcb_ptr: Weak<ProcessControlBlock> = unsafe { Weak::from_raw(*ptr) }; 1298 drop(pcb_ptr); 1299 } 1300 } 1301 // 如果该内核栈不可以被释放,那么,这里就forget,不调用AlignedBox的drop函数 1302 if !self.can_be_freed { 1303 let bx = self.stack.take(); 1304 core::mem::forget(bx); 1305 } 1306 } 1307 } 1308 1309 pub fn process_init() { 1310 ProcessManager::init(); 1311 } 1312 1313 #[derive(Debug)] 1314 pub struct ProcessSignalInfo { 1315 // 当前进程 1316 sig_block: SigSet, 1317 // sig_pending 中存储当前线程要处理的信号 1318 sig_pending: SigPending, 1319 // sig_shared_pending 中存储当前线程所属进程要处理的信号 1320 sig_shared_pending: SigPending, 1321 // 当前进程对应的tty 1322 tty: Option<Arc<TtyCore>>, 1323 } 1324 1325 impl ProcessSignalInfo { 1326 pub fn sig_block(&self) -> &SigSet { 1327 &self.sig_block 1328 } 1329 1330 pub fn sig_pending(&self) -> &SigPending { 1331 &self.sig_pending 1332 } 1333 1334 pub fn sig_pending_mut(&mut self) -> &mut SigPending { 1335 &mut self.sig_pending 1336 } 1337 1338 pub fn sig_block_mut(&mut self) -> &mut SigSet { 1339 &mut self.sig_block 1340 } 1341 1342 pub fn sig_shared_pending_mut(&mut self) -> &mut SigPending { 1343 &mut self.sig_shared_pending 1344 } 1345 1346 pub fn sig_shared_pending(&self) -> &SigPending { 1347 &self.sig_shared_pending 1348 } 1349 1350 pub fn tty(&self) -> Option<Arc<TtyCore>> { 1351 self.tty.clone() 1352 } 1353 1354 pub fn set_tty(&mut self, tty: Arc<TtyCore>) { 1355 self.tty = Some(tty); 1356 } 1357 1358 /// 从 pcb 的 siginfo中取出下一个要处理的信号,先处理线程信号,再处理进程信号 1359 /// 1360 /// ## 参数 1361 /// 1362 /// - `sig_mask` 被忽略掉的信号 1363 /// 1364 pub fn dequeue_signal(&mut self, sig_mask: &SigSet) -> (Signal, Option<SigInfo>) { 1365 let res = self.sig_pending.dequeue_signal(sig_mask); 1366 if res.0 != Signal::INVALID { 1367 return res; 1368 } else { 1369 return self.sig_shared_pending.dequeue_signal(sig_mask); 1370 } 1371 } 1372 } 1373 1374 impl Default for ProcessSignalInfo { 1375 fn default() -> Self { 1376 Self { 1377 sig_block: SigSet::empty(), 1378 sig_pending: SigPending::default(), 1379 sig_shared_pending: SigPending::default(), 1380 tty: None, 1381 } 1382 } 1383 } 1384