use core::{
    hash::Hash,
    hint::spin_loop,
    intrinsics::{likely, unlikely},
    mem::ManuallyDrop,
    sync::atomic::{compiler_fence, fence, AtomicBool, AtomicUsize, Ordering},
};

use alloc::{
    string::{String, ToString},
    sync::{Arc, Weak},
    vec::Vec,
};
use hashbrown::HashMap;
use system_error::SystemError;

use crate::{
    arch::{
        ipc::signal::{AtomicSignal, SigSet, Signal},
        process::ArchPCBInfo,
        CurrentIrqArch,
    },
    driver::tty::tty_core::TtyCore,
    exception::InterruptArch,
    filesystem::{
        procfs::procfs_unregister_pid,
        vfs::{file::FileDescriptorVec, FileType},
    },
    ipc::signal_types::{SigInfo, SigPending, SignalStruct},
    kdebug, kinfo,
    libs::{
        align::AlignedBox,
        casting::DowncastArc,
        futex::{
            constant::{FutexFlag, FUTEX_BITSET_MATCH_ANY},
            futex::Futex,
        },
        lock_free_flags::LockFreeFlags,
        rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard},
        spinlock::{SpinLock, SpinLockGuard},
        wait_queue::WaitQueue,
    },
    mm::{
        percpu::{PerCpu, PerCpuVar},
        set_IDLE_PROCESS_ADDRESS_SPACE,
        ucontext::AddressSpace,
        VirtAddr,
    },
    net::socket::SocketInode,
    sched::completion::Completion,
    sched::{
        cpu_rq, fair::FairSchedEntity, prio::MAX_PRIO, DequeueFlag, EnqueueFlag, OnRq, SchedMode,
        WakeupFlags, __schedule,
    },
    smp::{
        core::smp_get_processor_id,
        cpu::{AtomicProcessorId, ProcessorId},
        kick_cpu,
    },
    syscall::{user_access::clear_user, Syscall},
};

use self::kthread::WorkerPrivate;

pub mod abi;
pub mod c_adapter;
pub mod exec;
pub mod exit;
pub mod fork;
pub mod idle;
pub mod kthread;
pub mod pid;
pub mod resource;
pub mod stdio;
pub mod syscall;
pub mod utils;

/// PCBs of every process in the system, keyed by pid.
/// `None` until `ProcessManager::init()` installs the map.
static ALL_PROCESS: SpinLock<Option<HashMap<Pid, Arc<ProcessControlBlock>>>> = SpinLock::new(None);

/// Per-CPU storage for the two PCBs involved in the most recent context
/// switch; consumed by `switch_finish_hook` to release their leaked locks.
pub static mut PROCESS_SWITCH_RESULT: Option<PerCpuVar<SwitchResult>> = None;

///
/// A global flag that is written exactly once, marking whether the process
/// manager has finished initialization.
static mut __PROCESS_MANAGEMENT_INIT_DONE: bool = false;

/// Holds the previous/next PCB of a context switch so the post-switch hook
/// can hand back the arch-info locks that were leaked before switching.
#[derive(Debug)]
pub struct SwitchResult {
    pub prev_pcb: Option<Arc<ProcessControlBlock>>,
    pub next_pcb: Option<Arc<ProcessControlBlock>>,
}

impl SwitchResult {
    pub fn new() -> Self {
        Self {
            prev_pcb: None,
            next_pcb: None,
        }
    }
}

#[derive(Debug)]
pub struct ProcessManager;
impl ProcessManager {
    /// One-time initialization of the process manager.
    ///
    /// Creates the idle address space, the global PCB map, the per-CPU
    /// switch-result slots, and the arch/idle process state.
    ///
    /// Panics if called more than once.
    #[inline(never)]
    fn init() {
        static INIT_FLAG: AtomicBool = AtomicBool::new(false);
        // compare_exchange guarantees only the first caller proceeds.
        if INIT_FLAG
            .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
            .is_err()
        {
            panic!("ProcessManager has been initialized!");
        }

        unsafe {
            compiler_fence(Ordering::SeqCst);
            kdebug!("To create address space for INIT process.");
            // test_buddy();
            set_IDLE_PROCESS_ADDRESS_SPACE(
                AddressSpace::new(true).expect("Failed to create address space for INIT process."),
            );
            kdebug!("INIT process address space created.");
            compiler_fence(Ordering::SeqCst);
        };

        ALL_PROCESS.lock_irqsave().replace(HashMap::new());
        Self::init_switch_result();
        Self::arch_init();
        kdebug!("process arch init done.");
        Self::init_idle();
        kdebug!("process idle init done.");

        unsafe { __PROCESS_MANAGEMENT_INIT_DONE = true };
        kinfo!("Process Manager initialized.");
    }

    /// Allocate one `SwitchResult` slot per possible CPU.
    fn init_switch_result() {
        let mut switch_res_vec: Vec<SwitchResult> = Vec::new();
        for _ in 0..PerCpu::MAX_CPU_NUM {
            switch_res_vec.push(SwitchResult::new());
        }
        unsafe {
            PROCESS_SWITCH_RESULT = Some(PerCpuVar::new(switch_res_vec).unwrap());
        }
    }

    /// Whether the process manager has finished initialization.
    pub fn initialized() -> bool {
        unsafe { __PROCESS_MANAGEMENT_INIT_DONE }
    }

    /// Get the PCB of the current process.
    ///
    /// Must not be called before the process manager is initialized: in that
    /// case an error is logged and the CPU spins forever, since there is no
    /// current PCB to return.
    pub fn current_pcb() -> Arc<ProcessControlBlock> {
        if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) {
            kerror!("unsafe__PROCESS_MANAGEMENT_INIT_DONE == false");
            loop {
                spin_loop();
            }
        }
        return ProcessControlBlock::arch_current_pcb();
    }

    /// Get the pid of the current process.
    ///
    /// Returns `Pid(0)` if the process manager is not initialized yet.
    pub fn current_pid() -> Pid {
        if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) {
            return Pid(0);
        }

        return ProcessManager::current_pcb().pid();
    }

    /// Increase the current process's preemption (lock-hold) count.
    /// A no-op before the process manager is initialized.
    #[inline(always)]
    pub fn preempt_disable() {
        if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) {
            ProcessManager::current_pcb().preempt_disable();
        }
    }

    /// Decrease the current process's preemption (lock-hold) count.
    /// A no-op before the process manager is initialized.
    #[inline(always)]
    pub fn preempt_enable() {
        if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) {
            ProcessManager::current_pcb().preempt_enable();
        }
    }

    /// Look up a process's PCB by pid.
    ///
    /// ## Parameters
    ///
    /// - `pid` : pid of the process
    ///
    /// ## Returns
    ///
    /// The PCB if the process exists, `None` otherwise.
    pub fn find(pid: Pid) -> Option<Arc<ProcessControlBlock>> {
        return ALL_PROCESS.lock_irqsave().as_ref()?.get(&pid).cloned();
    }

    /// Register a process's PCB in the global table.
    ///
    /// ## Parameters
    ///
    /// - `pcb` : PCB of the process
    pub fn add_pcb(pcb: Arc<ProcessControlBlock>) {
        ALL_PROCESS
            .lock_irqsave()
            .as_mut()
            .unwrap()
            .insert(pcb.pid(), pcb.clone());
    }

    /// Wake up a blocked process.
    ///
    /// Uses a double-checked pattern: the state is read under a read lock
    /// first, then re-checked under the write lock before being changed,
    /// so a concurrent wakeup only transitions the state once.
    pub fn wakeup(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> {
        let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        let state = pcb.sched_info().inner_lock_read_irqsave().state();
        if state.is_blocked() {
            let mut writer = pcb.sched_info().inner_lock_write_irqsave();
            let state = writer.state();
            if state.is_blocked() {
                writer.set_state(ProcessState::Runnable);
                writer.set_wakeup();

                // avoid deadlock
                drop(writer);

                let rq = cpu_rq(pcb.sched_info().on_cpu().unwrap().data() as usize);

                let (rq, _guard) = rq.self_lock();
                rq.update_rq_clock();
                rq.activate_task(
                    pcb,
                    EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK,
                );

                rq.check_preempt_currnet(pcb, WakeupFlags::empty());

                // sched_enqueue(pcb.clone(), true);
                return Ok(());
            } else if state.is_exited() {
                return Err(SystemError::EINVAL);
            } else {
                // Someone else already made it runnable; nothing to do.
                return Ok(());
            }
        } else if state.is_exited() {
            return Err(SystemError::EINVAL);
        } else {
            return Ok(());
        }
    }

    /// Wake up a stopped process (same double-checked pattern as `wakeup`,
    /// but only the `Stopped` state is eligible).
    pub fn wakeup_stop(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> {
        let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        let state = pcb.sched_info().inner_lock_read_irqsave().state();
        if let ProcessState::Stopped = state {
            let mut writer = pcb.sched_info().inner_lock_write_irqsave();
            let state = writer.state();
            if let ProcessState::Stopped = state {
                writer.set_state(ProcessState::Runnable);
                // avoid deadlock
                drop(writer);

                let rq = cpu_rq(pcb.sched_info().on_cpu().unwrap().data() as usize);

                let (rq, _guard) = rq.self_lock();
                rq.update_rq_clock();
                rq.activate_task(
                    pcb,
                    EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK,
                );

                rq.check_preempt_currnet(pcb, WakeupFlags::empty());

                // sched_enqueue(pcb.clone(), true);
                return Ok(());
            } else if state.is_runnable() {
                return Ok(());
            } else {
                return Err(SystemError::EINVAL);
            }
        } else if state.is_runnable() {
            return Ok(());
        } else {
            return Err(SystemError::EINVAL);
        }
    }

    /// Mark the current process as (permanently) asleep; actually invoking
    /// the scheduler is the caller's responsibility.
    ///
    /// ## Notes
    ///
    /// - The sched_info lock must NOT be held when entering this function.
    /// - Interrupts must be disabled before entering this function.
    /// - After this function returns, the caller must make sure the process
    ///   is not enqueued on a run queue twice.
    pub fn mark_sleep(interruptable: bool) -> Result<(), SystemError> {
        assert!(
            !CurrentIrqArch::is_irq_enabled(),
            "interrupt must be disabled before enter ProcessManager::mark_sleep()"
        );
        let pcb = ProcessManager::current_pcb();
        let mut writer = pcb.sched_info().inner_lock_write_irqsave();
        if !matches!(writer.state(), ProcessState::Exited(_)) {
            writer.set_state(ProcessState::Blocked(interruptable));
            writer.set_sleep();
            pcb.flags().insert(ProcessFlags::NEED_SCHEDULE);
            fence(Ordering::SeqCst);
            drop(writer);
            return Ok(());
        }
        // Already exited: refuse to put it back to sleep.
        return Err(SystemError::EINTR);
    }

    /// Mark the current process as stopped; actually invoking the scheduler
    /// is the caller's responsibility.
    ///
    /// ## Notes
    ///
    /// - The sched_info lock must NOT be held when entering this function.
    /// - Interrupts must be disabled before entering this function.
    pub fn mark_stop() -> Result<(), SystemError> {
        assert!(
            !CurrentIrqArch::is_irq_enabled(),
            "interrupt must be disabled before enter ProcessManager::mark_stop()"
        );

        let pcb = ProcessManager::current_pcb();
        let mut writer = pcb.sched_info().inner_lock_write_irqsave();
        if !matches!(writer.state(), ProcessState::Exited(_)) {
            writer.set_state(ProcessState::Stopped);
            pcb.flags().insert(ProcessFlags::NEED_SCHEDULE);
            drop(writer);

            return Ok(());
        }
        return Err(SystemError::EINTR);
    }
    /// Notify the parent after a child process has exited.
    fn exit_notify() {
        let current = ProcessManager::current_pcb();
        // Have the INIT process adopt all of the exiting process's children.
        if current.pid() != Pid(1) {
            unsafe {
                current
                    .adopt_childen()
                    .unwrap_or_else(|e| panic!("adopte_childen failed: error: {e:?}"))
            };
            let r = current.parent_pcb.read_irqsave().upgrade();
            if r.is_none() {
                return;
            }
            let parent_pcb = r.unwrap();
            let r = Syscall::kill(parent_pcb.pid(), Signal::SIGCHLD as i32);
            if r.is_err() {
                kwarn!(
                    "failed to send kill signal to {:?}'s parent pcb {:?}",
                    current.pid(),
                    parent_pcb.pid()
                );
            }
            // todo: send SIGCHLD to the parent process here
            // todo: decide which signal to send based on thread-group info
        }
    }

    /// Exit the current process. Never returns.
    ///
    /// ## Parameters
    ///
    /// - `exit_code` : the process's exit code
    pub fn exit(exit_code: usize) -> ! {
        // Disable interrupts for the whole teardown.
        let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        let pcb = ProcessManager::current_pcb();
        let pid = pcb.pid();
        pcb.sched_info
            .inner_lock_write_irqsave()
            .set_state(ProcessState::Exited(exit_code));
        pcb.wait_queue.wakeup(Some(ProcessState::Blocked(true)));

        let rq = cpu_rq(smp_get_processor_id().data() as usize);
        let (rq, guard) = rq.self_lock();
        rq.deactivate_task(
            pcb.clone(),
            DequeueFlag::DEQUEUE_SLEEP | DequeueFlag::DEQUEUE_NOCLOCK,
        );
        drop(guard);

        // Post-exit bookkeeping: clear the child-tid words in user memory
        // and wake any futex waiters on clear_child_tid.
        let thread = pcb.thread.write_irqsave();
        if let Some(addr) = thread.set_child_tid {
            unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") };
        }

        if let Some(addr) = thread.clear_child_tid {
            if Arc::strong_count(&pcb.basic().user_vm().expect("User VM Not found")) > 1 {
                let _ =
                    Futex::futex_wake(addr, FutexFlag::FLAGS_MATCH_NONE, 1, FUTEX_BITSET_MATCH_ANY);
            }
            unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") };
        }

        // If this process came from vfork, complete the parent's completion.
        if thread.vfork_done.is_some() {
            thread.vfork_done.as_ref().unwrap().complete_all();
        }
        drop(thread);
        unsafe { pcb.basic_mut().set_user_vm(None) };
        drop(pcb);
        ProcessManager::exit_notify();
        // unsafe { CurrentIrqArch::interrupt_enable() };
        __schedule(SchedMode::SM_NONE);
        kerror!("pid {pid:?} exited but sched again!");
        #[allow(clippy::empty_loop)]
        loop {
            spin_loop();
        }
    }

    /// Remove a process's PCB from the global table.
    pub unsafe fn release(pid: Pid) {
        let pcb = ProcessManager::find(pid);
        if pcb.is_some() {
            // let pcb = pcb.unwrap();
            // Check whether this pcb has no remaining global references.
            // TODO: the pcb's Arc pointer currently leaks and the refcount is
            // wrong; the plan is to implement a debug-only Arc to track it
            // down and then fix this bug. Until then the check stays
            // commented out so the system keeps running.
            // if Arc::strong_count(&pcb) <= 2 {
            //     drop(pcb);
            //     ALL_PROCESS.lock().as_mut().unwrap().remove(&pid);
            // } else {
            //     // panic if the count is not 1
            //     let msg = format!("pcb '{:?}' is still referenced, strong count={}",pcb.pid(), Arc::strong_count(&pcb));
            //     kerror!("{}", msg);
            //     panic!()
            // }

            ALL_PROCESS.lock_irqsave().as_mut().unwrap().remove(&pid);
        }
    }

    /// Hook that runs right after a context switch has completed.
    unsafe fn switch_finish_hook() {
        // kdebug!("switch_finish_hook");
        let prev_pcb = PROCESS_SWITCH_RESULT
            .as_mut()
            .unwrap()
            .get_mut()
            .prev_pcb
            .take()
            .expect("prev_pcb is None");
        let next_pcb = PROCESS_SWITCH_RESULT
            .as_mut()
            .unwrap()
            .get_mut()
            .next_pcb
            .take()
            .expect("next_pcb is None");

        // SpinLockGuard::leak() was used before the switch, so the locks
        // must be released manually here.
        prev_pcb.arch_info.force_unlock();
        next_pcb.arch_info.force_unlock();
    }

    /// If the target process is currently running on some CPU, force that
    /// CPU to trap into kernel mode.
    ///
    /// ## Parameters
    ///
    /// - `pcb` : PCB of the process
    #[allow(dead_code)]
    pub fn kick(pcb: &Arc<ProcessControlBlock>) {
        ProcessManager::current_pcb().preempt_disable();
        let cpu_id = pcb.sched_info().on_cpu();

        if let Some(cpu_id) = cpu_id {
            if pcb.pid() == cpu_rq(cpu_id.data() as usize).current().pid() {
                kick_cpu(cpu_id).expect("ProcessManager::kick(): Failed to kick cpu");
            }
        }

        ProcessManager::current_pcb().preempt_enable();
    }
}

/// Context-switch hook: when this function returns, the switch takes place.
#[cfg(target_arch = "x86_64")]
#[inline(never)]
pub unsafe extern "sysv64" fn switch_finish_hook() {
    ProcessManager::switch_finish_hook();
}
#[cfg(target_arch = "riscv64")]
#[inline(always)]
pub unsafe fn switch_finish_hook() {
    ProcessManager::switch_finish_hook();
}

int_like!(Pid, AtomicPid, usize, AtomicUsize);

// NOTE(review): a manual ToString impl bypasses Display; implementing
// fmt::Display instead would provide to_string() for free — confirm no
// coherence conflict with int_like! before changing.
impl ToString for Pid {
    fn to_string(&self) -> String {
        self.0.to_string()
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ProcessState {
    /// The process is running on a CPU or in a run queue.
    Runnable,
    /// The process is waiting for an event to occur.
    /// The inner bool says whether the wait can be interrupted:
    /// - `true`: hardware interrupts / signals / other system events may
    ///   interrupt the wait and make the process Runnable again.
    /// - `false`: the process must be woken explicitly to become Runnable.
    Blocked(bool),
    /// The process was stopped by a signal.
    Stopped,
    /// The process has exited; the usize is its exit code.
    Exited(usize),
}

#[allow(dead_code)]
impl ProcessState {
    #[inline(always)]
    pub fn is_runnable(&self) -> bool {
        return matches!(self, ProcessState::Runnable);
    }

    #[inline(always)]
    pub fn is_blocked(&self) -> bool {
        return matches!(self, ProcessState::Blocked(_));
    }

    #[inline(always)]
    pub fn is_blocked_interruptable(&self) -> bool {
        return matches!(self, ProcessState::Blocked(true));
    }

    /// Returns `true` if the process state is [`Exited`].
    #[inline(always)]
    pub fn is_exited(&self) -> bool {
        return matches!(self, ProcessState::Exited(_));
    }

    /// Returns `true` if the process state is [`Stopped`].
    ///
    /// [`Stopped`]: ProcessState::Stopped
    #[inline(always)]
    pub fn is_stopped(&self) -> bool {
        matches!(self, ProcessState::Stopped)
    }

    /// Returns exit code if the process state is [`Exited`].
    #[inline(always)]
    pub fn exit_code(&self) -> Option<usize> {
        match self {
            ProcessState::Exited(code) => Some(*code),
            _ => None,
        }
    }
}

bitflags! {
    /// PCB flag bits
    pub struct ProcessFlags: usize {
        /// This pcb represents a kernel thread
        const KTHREAD = 1 << 0;
        /// This process needs to be scheduled
        const NEED_SCHEDULE = 1 << 1;
        /// The process shares resources with its parent due to vfork
        const VFORK = 1 << 2;
        /// The process cannot be frozen
        const NOFREEZE = 1 << 3;
        /// The process is exiting
        const EXITING = 1 << 4;
        /// The process was woken by a fatal signal
        const WAKEKILL = 1 << 5;
        /// The process exited because of a signal (killed by a signal)
        const SIGNALED = 1 << 6;
        /// The process needs to migrate to another cpu
        const NEED_MIGRATE = 1 << 7;
        /// Randomized virtual address space, mainly for loading the
        /// dynamic linker
        const RANDOMIZE = 1 << 8;
    }
}

#[derive(Debug)]
pub struct ProcessControlBlock {
    /// pid of this process
    pid: Pid,
    /// Thread-group id (never changes within the same thread group)
    tgid: Pid,

    basic: RwLock<ProcessBasicInfo>,
    /// Spinlock-hold (preemption) count of this process
    preempt_count: AtomicUsize,

    flags: LockFreeFlags<ProcessFlags>,
    worker_private: SpinLock<Option<WorkerPrivate>>,
    /// Kernel stack of the process
    kernel_stack: RwLock<KernelStack>,

    /// Syscall stack
    syscall_stack: RwLock<KernelStack>,

    /// Scheduling-related information
    sched_info: ProcessSchedulerInfo,
    /// Architecture-specific information
    arch_info: SpinLock<ArchPCBInfo>,
    /// Signal-handling information (could perhaps be lock-free)
    sig_info: RwLock<ProcessSignalInfo>,
    /// Signal handling structure
    sig_struct: SpinLock<SignalStruct>,
    /// Exit signal
    exit_signal: AtomicSignal,

    /// Parent process pointer
    parent_pcb: RwLock<Weak<ProcessControlBlock>>,
    /// Real parent process pointer
    real_parent_pcb: RwLock<Weak<ProcessControlBlock>>,

    /// Child process list (pids)
    children: RwLock<Vec<Pid>>,

    /// Wait queue
    wait_queue: WaitQueue,

    /// Thread information
    thread: RwLock<ThreadInfo>,
}

impl ProcessControlBlock {
    /// Generate a new pcb.
636 /// 637 /// ## 参数 638 /// 639 /// - `name` : 进程的名字 640 /// - `kstack` : 进程的内核栈 641 /// 642 /// ## 返回值 643 /// 644 /// 返回一个新的pcb 645 pub fn new(name: String, kstack: KernelStack) -> Arc<Self> { 646 return Self::do_create_pcb(name, kstack, false); 647 } 648 649 /// 创建一个新的idle进程 650 /// 651 /// 请注意,这个函数只能在进程管理初始化的时候调用。 652 pub fn new_idle(cpu_id: u32, kstack: KernelStack) -> Arc<Self> { 653 let name = format!("idle-{}", cpu_id); 654 return Self::do_create_pcb(name, kstack, true); 655 } 656 657 #[inline(never)] 658 fn do_create_pcb(name: String, kstack: KernelStack, is_idle: bool) -> Arc<Self> { 659 let (pid, ppid, cwd) = if is_idle { 660 (Pid(0), Pid(0), "/".to_string()) 661 } else { 662 let ppid = ProcessManager::current_pcb().pid(); 663 let cwd = ProcessManager::current_pcb().basic().cwd(); 664 (Self::generate_pid(), ppid, cwd) 665 }; 666 667 let basic_info = ProcessBasicInfo::new(Pid(0), ppid, name, cwd, None); 668 let preempt_count = AtomicUsize::new(0); 669 let flags = unsafe { LockFreeFlags::new(ProcessFlags::empty()) }; 670 671 let sched_info = ProcessSchedulerInfo::new(None); 672 let arch_info = SpinLock::new(ArchPCBInfo::new(&kstack)); 673 674 let ppcb: Weak<ProcessControlBlock> = ProcessManager::find(ppid) 675 .map(|p| Arc::downgrade(&p)) 676 .unwrap_or_default(); 677 678 let pcb = Self { 679 pid, 680 tgid: pid, 681 basic: basic_info, 682 preempt_count, 683 flags, 684 kernel_stack: RwLock::new(kstack), 685 syscall_stack: RwLock::new(KernelStack::new().unwrap()), 686 worker_private: SpinLock::new(None), 687 sched_info, 688 arch_info, 689 sig_info: RwLock::new(ProcessSignalInfo::default()), 690 sig_struct: SpinLock::new(SignalStruct::new()), 691 exit_signal: AtomicSignal::new(Signal::SIGCHLD), 692 parent_pcb: RwLock::new(ppcb.clone()), 693 real_parent_pcb: RwLock::new(ppcb), 694 children: RwLock::new(Vec::new()), 695 wait_queue: WaitQueue::default(), 696 thread: RwLock::new(ThreadInfo::new()), 697 }; 698 699 // 初始化系统调用栈 700 #[cfg(target_arch = "x86_64")] 
701 pcb.arch_info 702 .lock() 703 .init_syscall_stack(&pcb.syscall_stack.read()); 704 705 let pcb = Arc::new(pcb); 706 707 pcb.sched_info() 708 .sched_entity() 709 .force_mut() 710 .set_pcb(Arc::downgrade(&pcb)); 711 // 设置进程的arc指针到内核栈和系统调用栈的最低地址处 712 unsafe { 713 pcb.kernel_stack 714 .write() 715 .set_pcb(Arc::downgrade(&pcb)) 716 .unwrap(); 717 718 pcb.syscall_stack 719 .write() 720 .set_pcb(Arc::downgrade(&pcb)) 721 .unwrap() 722 }; 723 724 // 将当前pcb加入父进程的子进程哈希表中 725 if pcb.pid() > Pid(1) { 726 if let Some(ppcb_arc) = pcb.parent_pcb.read_irqsave().upgrade() { 727 let mut children = ppcb_arc.children.write_irqsave(); 728 children.push(pcb.pid()); 729 } else { 730 panic!("parent pcb is None"); 731 } 732 } 733 734 return pcb; 735 } 736 737 /// 生成一个新的pid 738 #[inline(always)] 739 fn generate_pid() -> Pid { 740 static NEXT_PID: AtomicPid = AtomicPid::new(Pid(1)); 741 return NEXT_PID.fetch_add(Pid(1), Ordering::SeqCst); 742 } 743 744 /// 返回当前进程的锁持有计数 745 #[inline(always)] 746 pub fn preempt_count(&self) -> usize { 747 return self.preempt_count.load(Ordering::SeqCst); 748 } 749 750 /// 增加当前进程的锁持有计数 751 #[inline(always)] 752 pub fn preempt_disable(&self) { 753 self.preempt_count.fetch_add(1, Ordering::SeqCst); 754 } 755 756 /// 减少当前进程的锁持有计数 757 #[inline(always)] 758 pub fn preempt_enable(&self) { 759 self.preempt_count.fetch_sub(1, Ordering::SeqCst); 760 } 761 762 #[inline(always)] 763 pub unsafe fn set_preempt_count(&self, count: usize) { 764 self.preempt_count.store(count, Ordering::SeqCst); 765 } 766 767 #[inline(always)] 768 pub fn flags(&self) -> &mut ProcessFlags { 769 return self.flags.get_mut(); 770 } 771 772 /// 请注意,这个值能在中断上下文中读取,但不能被中断上下文修改 773 /// 否则会导致死锁 774 #[inline(always)] 775 pub fn basic(&self) -> RwLockReadGuard<ProcessBasicInfo> { 776 return self.basic.read_irqsave(); 777 } 778 779 #[inline(always)] 780 pub fn set_name(&self, name: String) { 781 self.basic.write().set_name(name); 782 } 783 784 #[inline(always)] 785 pub fn basic_mut(&self) -> 
RwLockWriteGuard<ProcessBasicInfo> { 786 return self.basic.write_irqsave(); 787 } 788 789 /// # 获取arch info的锁,同时关闭中断 790 #[inline(always)] 791 pub fn arch_info_irqsave(&self) -> SpinLockGuard<ArchPCBInfo> { 792 return self.arch_info.lock_irqsave(); 793 } 794 795 /// # 获取arch info的锁,但是不关闭中断 796 /// 797 /// 由于arch info在进程切换的时候会使用到, 798 /// 因此在中断上下文外,获取arch info 而不irqsave是不安全的. 799 /// 800 /// 只能在以下情况下使用这个函数: 801 /// - 在中断上下文中(中断已经禁用),获取arch info的锁。 802 /// - 刚刚创建新的pcb 803 #[inline(always)] 804 pub unsafe fn arch_info(&self) -> SpinLockGuard<ArchPCBInfo> { 805 return self.arch_info.lock(); 806 } 807 808 #[inline(always)] 809 pub fn kernel_stack(&self) -> RwLockReadGuard<KernelStack> { 810 return self.kernel_stack.read(); 811 } 812 813 #[inline(always)] 814 #[allow(dead_code)] 815 pub fn kernel_stack_mut(&self) -> RwLockWriteGuard<KernelStack> { 816 return self.kernel_stack.write(); 817 } 818 819 #[inline(always)] 820 pub fn sched_info(&self) -> &ProcessSchedulerInfo { 821 return &self.sched_info; 822 } 823 824 #[inline(always)] 825 pub fn worker_private(&self) -> SpinLockGuard<Option<WorkerPrivate>> { 826 return self.worker_private.lock(); 827 } 828 829 #[inline(always)] 830 pub fn pid(&self) -> Pid { 831 return self.pid; 832 } 833 834 #[inline(always)] 835 pub fn tgid(&self) -> Pid { 836 return self.tgid; 837 } 838 839 /// 获取文件描述符表的Arc指针 840 #[inline(always)] 841 pub fn fd_table(&self) -> Arc<RwLock<FileDescriptorVec>> { 842 return self.basic.read().fd_table().unwrap(); 843 } 844 845 /// 根据文件描述符序号,获取socket对象的Arc指针 846 /// 847 /// ## 参数 848 /// 849 /// - `fd` 文件描述符序号 850 /// 851 /// ## 返回值 852 /// 853 /// Option(&mut Box<dyn Socket>) socket对象的可变引用. 
如果文件描述符不是socket,那么返回None 854 pub fn get_socket(&self, fd: i32) -> Option<Arc<SocketInode>> { 855 let binding = ProcessManager::current_pcb().fd_table(); 856 let fd_table_guard = binding.read(); 857 858 let f = fd_table_guard.get_file_by_fd(fd)?; 859 drop(fd_table_guard); 860 861 if f.file_type() != FileType::Socket { 862 return None; 863 } 864 let socket: Arc<SocketInode> = f 865 .inode() 866 .downcast_arc::<SocketInode>() 867 .expect("Not a socket inode"); 868 return Some(socket); 869 } 870 871 /// 当前进程退出时,让初始进程收养所有子进程 872 unsafe fn adopt_childen(&self) -> Result<(), SystemError> { 873 match ProcessManager::find(Pid(1)) { 874 Some(init_pcb) => { 875 let childen_guard = self.children.write(); 876 let mut init_childen_guard = init_pcb.children.write(); 877 878 childen_guard.iter().for_each(|pid| { 879 init_childen_guard.push(*pid); 880 }); 881 882 return Ok(()); 883 } 884 _ => Err(SystemError::ECHILD), 885 } 886 } 887 888 /// 生成进程的名字 889 pub fn generate_name(program_path: &str, args: &Vec<String>) -> String { 890 let mut name = program_path.to_string(); 891 for arg in args { 892 name.push(' '); 893 name.push_str(arg); 894 } 895 return name; 896 } 897 898 pub fn sig_info_irqsave(&self) -> RwLockReadGuard<ProcessSignalInfo> { 899 self.sig_info.read_irqsave() 900 } 901 902 pub fn try_siginfo_irqsave(&self, times: u8) -> Option<RwLockReadGuard<ProcessSignalInfo>> { 903 for _ in 0..times { 904 if let Some(r) = self.sig_info.try_read_irqsave() { 905 return Some(r); 906 } 907 } 908 909 return None; 910 } 911 912 pub fn sig_info_mut(&self) -> RwLockWriteGuard<ProcessSignalInfo> { 913 self.sig_info.write_irqsave() 914 } 915 916 pub fn try_siginfo_mut(&self, times: u8) -> Option<RwLockWriteGuard<ProcessSignalInfo>> { 917 for _ in 0..times { 918 if let Some(r) = self.sig_info.try_write_irqsave() { 919 return Some(r); 920 } 921 } 922 923 return None; 924 } 925 926 pub fn sig_struct(&self) -> SpinLockGuard<SignalStruct> { 927 self.sig_struct.lock_irqsave() 928 } 929 930 pub fn 
try_sig_struct_irqsave(&self, times: u8) -> Option<SpinLockGuard<SignalStruct>> { 931 for _ in 0..times { 932 if let Ok(r) = self.sig_struct.try_lock_irqsave() { 933 return Some(r); 934 } 935 } 936 937 return None; 938 } 939 940 pub fn sig_struct_irqsave(&self) -> SpinLockGuard<SignalStruct> { 941 self.sig_struct.lock_irqsave() 942 } 943 } 944 945 impl Drop for ProcessControlBlock { 946 fn drop(&mut self) { 947 let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 948 // 在ProcFS中,解除进程的注册 949 procfs_unregister_pid(self.pid()) 950 .unwrap_or_else(|e| panic!("procfs_unregister_pid failed: error: {e:?}")); 951 952 if let Some(ppcb) = self.parent_pcb.read_irqsave().upgrade() { 953 ppcb.children 954 .write_irqsave() 955 .retain(|pid| *pid != self.pid()); 956 } 957 958 drop(irq_guard); 959 } 960 } 961 962 /// 线程信息 963 #[derive(Debug)] 964 pub struct ThreadInfo { 965 // 来自用户空间记录用户线程id的地址,在该线程结束时将该地址置0以通知父进程 966 clear_child_tid: Option<VirtAddr>, 967 set_child_tid: Option<VirtAddr>, 968 969 vfork_done: Option<Arc<Completion>>, 970 /// 线程组的组长 971 group_leader: Weak<ProcessControlBlock>, 972 } 973 974 impl ThreadInfo { 975 pub fn new() -> Self { 976 Self { 977 clear_child_tid: None, 978 set_child_tid: None, 979 vfork_done: None, 980 group_leader: Weak::default(), 981 } 982 } 983 984 pub fn group_leader(&self) -> Option<Arc<ProcessControlBlock>> { 985 return self.group_leader.upgrade(); 986 } 987 } 988 989 /// 进程的基本信息 990 /// 991 /// 这个结构体保存进程的基本信息,主要是那些不会随着进程的运行而经常改变的信息。 992 #[derive(Debug)] 993 pub struct ProcessBasicInfo { 994 /// 当前进程的进程组id 995 pgid: Pid, 996 /// 当前进程的父进程的pid 997 ppid: Pid, 998 /// 进程的名字 999 name: String, 1000 1001 /// 当前进程的工作目录 1002 cwd: String, 1003 1004 /// 用户地址空间 1005 user_vm: Option<Arc<AddressSpace>>, 1006 1007 /// 文件描述符表 1008 fd_table: Option<Arc<RwLock<FileDescriptorVec>>>, 1009 } 1010 1011 impl ProcessBasicInfo { 1012 #[inline(never)] 1013 pub fn new( 1014 pgid: Pid, 1015 ppid: Pid, 1016 name: String, 1017 cwd: String, 1018 user_vm: 
        Option<Arc<AddressSpace>>,
    ) -> RwLock<Self> {
        // Every process starts with a fresh, empty file-descriptor table.
        let fd_table = Arc::new(RwLock::new(FileDescriptorVec::new()));
        return RwLock::new(Self {
            pgid,
            ppid,
            name,
            cwd,
            user_vm,
            fd_table: Some(fd_table),
        });
    }

    pub fn pgid(&self) -> Pid {
        return self.pgid;
    }

    pub fn ppid(&self) -> Pid {
        return self.ppid;
    }

    pub fn name(&self) -> &str {
        return &self.name;
    }

    pub fn set_name(&mut self, name: String) {
        self.name = name;
    }

    pub fn cwd(&self) -> String {
        return self.cwd.clone();
    }
    pub fn set_cwd(&mut self, path: String) {
        return self.cwd = path;
    }

    pub fn user_vm(&self) -> Option<Arc<AddressSpace>> {
        return self.user_vm.clone();
    }

    pub unsafe fn set_user_vm(&mut self, user_vm: Option<Arc<AddressSpace>>) {
        self.user_vm = user_vm;
    }

    pub fn fd_table(&self) -> Option<Arc<RwLock<FileDescriptorVec>>> {
        return self.fd_table.clone();
    }

    pub fn set_fd_table(&mut self, fd_table: Option<Arc<RwLock<FileDescriptorVec>>>) {
        self.fd_table = fd_table;
    }
}

#[derive(Debug)]
pub struct ProcessSchedulerInfo {
    /// CPU this process is currently on
    on_cpu: AtomicProcessorId,
    /// If the process is waiting to be migrated to another CPU core (i.e.
    /// PF_NEED_MIGRATE is set in flags), this field stores the target core.
    // migrate_to: AtomicProcessorId,
    inner_locked: RwLock<InnerSchedInfo>,
    /// Scheduling priority of the process
    // priority: SchedPriority,
    /// Current virtual runtime of the process
    // virtual_runtime: AtomicIsize,
    /// Time slice managed by the realtime scheduler
    // rt_time_slice: AtomicIsize,
    pub sched_stat: RwLock<SchedInfo>,
    /// Scheduling policy
    pub sched_policy: RwLock<crate::sched::SchedPolicy>,
    /// CFS scheduling entity
    pub sched_entity: Arc<FairSchedEntity>,
    pub on_rq: SpinLock<OnRq>,

    pub prio_data: RwLock<PrioData>,
}

#[derive(Debug, Default)]
pub struct SchedInfo {
    /// Number of times the task has run on a CPU
    pub pcount: usize,
    /// Time the task has spent waiting on a run queue
    pub run_delay: usize,
    /// Timestamp of the task's last arrival on a CPU
    pub last_arrival: u64,
    /// Timestamp of the last time the task was enqueued on a run queue
    pub last_queued: u64,
}

#[derive(Debug)]
pub struct PrioData {
    pub prio: i32,
    pub static_prio: i32,
    pub normal_prio: i32,
}

impl Default for PrioData {
    fn default() -> Self {
        Self {
            prio: MAX_PRIO - 20,
            static_prio: MAX_PRIO - 20,
            normal_prio: MAX_PRIO - 20,
        }
    }
}

#[derive(Debug)]
pub struct InnerSchedInfo {
    /// Current state of the process
    state: ProcessState,
    /// Whether the process has been marked as sleeping
    sleep: bool,
}

impl InnerSchedInfo {
    pub fn state(&self) -> ProcessState {
        return self.state;
    }

    pub fn set_state(&mut self, state: ProcessState) {
        self.state = state;
    }

    pub fn set_sleep(&mut self) {
        self.sleep = true;
    }

    pub fn set_wakeup(&mut self) {
        self.sleep = false;
    }

    pub fn is_mark_sleep(&self) -> bool {
        self.sleep
    }
}

impl ProcessSchedulerInfo {
    #[inline(never)]
    pub fn new(on_cpu: Option<ProcessorId>) -> Self {
        let cpu_id = on_cpu.unwrap_or(ProcessorId::INVALID);
        return Self {
            on_cpu: AtomicProcessorId::new(cpu_id),
            // migrate_to: AtomicProcessorId::new(ProcessorId::INVALID),
            inner_locked: RwLock::new(InnerSchedInfo {
                state: ProcessState::Blocked(false),
                sleep: false,
            }),
            // virtual_runtime: AtomicIsize::new(0),
            // rt_time_slice: AtomicIsize::new(0),
            // priority: SchedPriority::new(100).unwrap(),
            sched_stat: RwLock::new(SchedInfo::default()),
            sched_policy: RwLock::new(crate::sched::SchedPolicy::CFS),
            sched_entity: FairSchedEntity::new(),
            on_rq: SpinLock::new(OnRq::None),
            prio_data: RwLock::new(PrioData::default()),
        };
    }

    pub fn sched_entity(&self) -> Arc<FairSchedEntity> {
        return self.sched_entity.clone();
    }

    /// CPU this process is on; `None` if it is not on any CPU.
    pub fn on_cpu(&self) -> Option<ProcessorId> {
        let on_cpu = self.on_cpu.load(Ordering::SeqCst);
        if on_cpu == ProcessorId::INVALID {
            return None;
        } else {
            return Some(on_cpu);
        }
    }

    /// `None` stores the INVALID sentinel, mirroring `on_cpu()`.
    pub fn set_on_cpu(&self, on_cpu: Option<ProcessorId>) {
        if let Some(cpu_id) = on_cpu {
            self.on_cpu.store(cpu_id, Ordering::SeqCst);
        } else {
            self.on_cpu.store(ProcessorId::INVALID, Ordering::SeqCst);
        }
    }

    // pub fn migrate_to(&self) -> Option<ProcessorId> {
    //     let migrate_to = self.migrate_to.load(Ordering::SeqCst);
    //     if migrate_to == ProcessorId::INVALID {
    //         return None;
    //     } else {
    //         return Some(migrate_to);
    //     }
    // }

    // pub fn set_migrate_to(&self, migrate_to: Option<ProcessorId>) {
    //     if let Some(data) = migrate_to {
    //         self.migrate_to.store(data, Ordering::SeqCst);
    //     } else {
    //         self.migrate_to
    //             .store(ProcessorId::INVALID, Ordering::SeqCst)
    //     }
    // }

    pub fn inner_lock_write_irqsave(&self) -> RwLockWriteGuard<InnerSchedInfo> {
        return self.inner_locked.write_irqsave();
    }

    pub fn inner_lock_read_irqsave(&self) -> RwLockReadGuard<InnerSchedInfo> {
        return self.inner_locked.read_irqsave();
    }

    // pub fn inner_lock_try_read_irqsave(
    //     &self,
    //     times: u8,
    // ) -> Option<RwLockReadGuard<InnerSchedInfo>> {
    //     for _ in 0..times {
    //         if let Some(r) = self.inner_locked.try_read_irqsave() {
    //             return Some(r);
    //         }
    //     }

    //     return None;
    // }

    // pub fn inner_lock_try_upgradable_read_irqsave(
    //     &self,
    //     times: u8,
    // ) -> Option<RwLockUpgradableGuard<InnerSchedInfo>> {
    //     for _ in 0..times {
    //         if let Some(r) = self.inner_locked.try_upgradeable_read_irqsave() {
    //             return Some(r);
    //         }
    //     }

    //     return None;
    // }

    // pub fn virtual_runtime(&self) -> isize {
    //     return self.virtual_runtime.load(Ordering::SeqCst);
    // }

    // pub fn set_virtual_runtime(&self, virtual_runtime: isize) {
    //     self.virtual_runtime
    //         .store(virtual_runtime, Ordering::SeqCst);
    // }
    // pub fn increase_virtual_runtime(&self, delta: isize) {
    //     self.virtual_runtime.fetch_add(delta, Ordering::SeqCst);
    // }

    // pub fn rt_time_slice(&self) -> isize {
    //     return self.rt_time_slice.load(Ordering::SeqCst);
    // }

    // pub fn set_rt_time_slice(&self, rt_time_slice: isize) {
    //     self.rt_time_slice.store(rt_time_slice, Ordering::SeqCst);
    // }

    // pub fn increase_rt_time_slice(&self, delta: isize) {
    //     self.rt_time_slice.fetch_add(delta, Ordering::SeqCst);
    // }

    pub fn policy(&self) -> crate::sched::SchedPolicy {
        return *self.sched_policy.read_irqsave();
    }
}

#[derive(Debug, Clone)]
pub struct KernelStack {
    stack: Option<AlignedBox<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>>,
    /// Whether this kernel stack may be freed
    can_be_freed: bool,
}

impl KernelStack {
    pub const SIZE: usize = 0x4000;
    pub const ALIGN: usize = 0x4000;

    pub fn new() -> Result<Self, SystemError> {
        // Zeroed allocation matters: set_pcb() relies on the stack bottom
        // word being null on a fresh stack.
        return Ok(Self {
            stack: Some(
                AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_zeroed()?,
            ),
            can_be_freed: true,
        });
    }

    /// Construct a kernel stack from already-allocated memory.
    ///
    /// Only intended for BSP boot, to build the idle process's kernel stack.
    /// Using it anywhere else is very likely an error!
1301 pub unsafe fn from_existed(base: VirtAddr) -> Result<Self, SystemError> { 1302 if base.is_null() || !base.check_aligned(Self::ALIGN) { 1303 return Err(SystemError::EFAULT); 1304 } 1305 1306 return Ok(Self { 1307 stack: Some( 1308 AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_unchecked( 1309 base.data() as *mut [u8; KernelStack::SIZE], 1310 ), 1311 ), 1312 can_be_freed: false, 1313 }); 1314 } 1315 1316 /// 返回内核栈的起始虚拟地址(低地址) 1317 pub fn start_address(&self) -> VirtAddr { 1318 return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize); 1319 } 1320 1321 /// 返回内核栈的结束虚拟地址(高地址)(不包含该地址) 1322 pub fn stack_max_address(&self) -> VirtAddr { 1323 return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize + Self::SIZE); 1324 } 1325 1326 pub unsafe fn set_pcb(&mut self, pcb: Weak<ProcessControlBlock>) -> Result<(), SystemError> { 1327 // 将一个Weak<ProcessControlBlock>放到内核栈的最低地址处 1328 let p: *const ProcessControlBlock = Weak::into_raw(pcb); 1329 let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock; 1330 1331 // 如果内核栈的最低地址处已经有了一个pcb,那么,这里就不再设置,直接返回错误 1332 if unlikely(unsafe { !(*stack_bottom_ptr).is_null() }) { 1333 kerror!("kernel stack bottom is not null: {:p}", *stack_bottom_ptr); 1334 return Err(SystemError::EPERM); 1335 } 1336 // 将pcb的地址放到内核栈的最低地址处 1337 unsafe { 1338 *stack_bottom_ptr = p; 1339 } 1340 1341 return Ok(()); 1342 } 1343 1344 /// 清除内核栈的pcb指针 1345 /// 1346 /// ## 参数 1347 /// 1348 /// - `force` : 如果为true,那么,即使该内核栈的pcb指针不为null,也会被强制清除而不处理Weak指针问题 1349 pub unsafe fn clear_pcb(&mut self, force: bool) { 1350 let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock; 1351 if unlikely(unsafe { (*stack_bottom_ptr).is_null() }) { 1352 return; 1353 } 1354 1355 if !force { 1356 let pcb_ptr: Weak<ProcessControlBlock> = Weak::from_raw(*stack_bottom_ptr); 1357 drop(pcb_ptr); 1358 } 1359 1360 *stack_bottom_ptr = core::ptr::null(); 1361 } 1362 1363 /// 返回指向当前内核栈pcb的Arc指针 1364 
#[allow(dead_code)]
    /// Recover an `Arc<ProcessControlBlock>` from the raw weak pointer stored
    /// at the lowest address of this kernel stack.
    ///
    /// Returns `None` if no pointer is installed or the PCB has already been
    /// dropped (the weak reference cannot be upgraded).
    pub unsafe fn pcb(&self) -> Option<Arc<ProcessControlBlock>> {
        // Read the raw pcb pointer from the lowest address of the stack.
        let p = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock;
        if unlikely(unsafe { (*p).is_null() }) {
            return None;
        }

        // Wrap the reconstructed Weak in ManuallyDrop so its Drop never runs:
        // the weak count represented by the pointer in the stack must remain
        // untouched.
        let weak_wrapper: ManuallyDrop<Weak<ProcessControlBlock>> =
            ManuallyDrop::new(Weak::from_raw(*p));

        let new_arc: Arc<ProcessControlBlock> = weak_wrapper.upgrade()?;
        return Some(new_arc);
    }
}

impl Drop for KernelStack {
    fn drop(&mut self) {
        // Release the weak PCB reference stored at the stack bottom, if any.
        if self.stack.is_some() {
            let ptr = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock;
            if unsafe { !(*ptr).is_null() } {
                let pcb_ptr: Weak<ProcessControlBlock> = unsafe { Weak::from_raw(*ptr) };
                drop(pcb_ptr);
            }
        }
        // If this stack must not be freed (memory we do not own), forget the
        // AlignedBox so its Drop — and the deallocation — never runs.
        if !self.can_be_freed {
            let bx = self.stack.take();
            core::mem::forget(bx);
        }
    }
}

/// Kernel bring-up entry point: initialize the process manager.
pub fn process_init() {
    ProcessManager::init();
}

/// Per-process signal bookkeeping: the blocked-signal mask, the
/// thread-private and process-shared pending queues, and the controlling tty.
#[derive(Debug)]
pub struct ProcessSignalInfo {
    // Signals currently blocked for the current thread.
    sig_block: SigSet,
    // sig_pending holds signals to be handled by the current thread.
    sig_pending: SigPending,
    // sig_shared_pending holds signals to be handled by the whole process
    // the current thread belongs to.
    sig_shared_pending: SigPending,
    // The tty associated with the current process.
    tty: Option<Arc<TtyCore>>,
}

impl ProcessSignalInfo {
    /// Borrow the set of blocked signals.
    pub fn sig_block(&self) -> &SigSet {
        &self.sig_block
    }

    /// Borrow the thread-private pending-signal queue.
    pub fn sig_pending(&self) -> &SigPending {
        &self.sig_pending
    }

    /// Mutably borrow the thread-private pending-signal queue.
    pub fn sig_pending_mut(&mut self) -> &mut SigPending {
        &mut self.sig_pending
    }

    /// Mutably borrow the set of blocked signals.
    pub fn sig_block_mut(&mut self) -> &mut SigSet {
        &mut self.sig_block
    }

    /// Mutably borrow the process-shared pending-signal queue.
    pub fn sig_shared_pending_mut(&mut self) -> &mut SigPending {
        &mut self.sig_shared_pending
    }

    /// Borrow the process-shared pending-signal queue.
    pub fn sig_shared_pending(&self) -> &SigPending {
&self.sig_shared_pending
    }

    /// Return the tty associated with this process, if any.
    pub fn tty(&self) -> Option<Arc<TtyCore>> {
        self.tty.clone()
    }

    /// Associate a tty with this process.
    pub fn set_tty(&mut self, tty: Arc<TtyCore>) {
        self.tty = Some(tty);
    }

    /// Take the next signal to handle out of the pcb's siginfo:
    /// thread-private signals are handled first, then process-shared ones.
    ///
    /// ## Arguments
    ///
    /// - `sig_mask`: signals to be ignored (skipped) during the dequeue
    ///
    /// A first element of `Signal::INVALID` in the result indicates that no
    /// deliverable signal was pending in either queue.
    pub fn dequeue_signal(&mut self, sig_mask: &SigSet) -> (Signal, Option<SigInfo>) {
        let res = self.sig_pending.dequeue_signal(sig_mask);
        if res.0 != Signal::INVALID {
            return res;
        } else {
            // Thread queue was empty — fall back to the process-shared queue.
            return self.sig_shared_pending.dequeue_signal(sig_mask);
        }
    }
}

impl Default for ProcessSignalInfo {
    fn default() -> Self {
        Self {
            sig_block: SigSet::empty(),
            sig_pending: SigPending::default(),
            sig_shared_pending: SigPending::default(),
            tty: None,
        }
    }
}