1 use core::{ 2 hash::Hash, 3 hint::spin_loop, 4 intrinsics::{likely, unlikely}, 5 mem::ManuallyDrop, 6 sync::atomic::{compiler_fence, fence, AtomicBool, AtomicUsize, Ordering}, 7 }; 8 9 use alloc::{ 10 string::{String, ToString}, 11 sync::{Arc, Weak}, 12 vec::Vec, 13 }; 14 use hashbrown::HashMap; 15 use system_error::SystemError; 16 17 use crate::{ 18 arch::{ 19 ipc::signal::{AtomicSignal, SigSet, Signal}, 20 process::ArchPCBInfo, 21 CurrentIrqArch, 22 }, 23 driver::tty::tty_core::TtyCore, 24 exception::InterruptArch, 25 filesystem::{ 26 procfs::procfs_unregister_pid, 27 vfs::{file::FileDescriptorVec, FileType}, 28 }, 29 ipc::signal_types::{SigInfo, SigPending, SignalStruct}, 30 kdebug, kinfo, 31 libs::{ 32 align::AlignedBox, 33 casting::DowncastArc, 34 futex::{ 35 constant::{FutexFlag, FUTEX_BITSET_MATCH_ANY}, 36 futex::Futex, 37 }, 38 lock_free_flags::LockFreeFlags, 39 rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard}, 40 spinlock::{SpinLock, SpinLockGuard}, 41 wait_queue::WaitQueue, 42 }, 43 mm::{ 44 percpu::{PerCpu, PerCpuVar}, 45 set_IDLE_PROCESS_ADDRESS_SPACE, 46 ucontext::AddressSpace, 47 VirtAddr, 48 }, 49 net::socket::SocketInode, 50 sched::completion::Completion, 51 sched::{ 52 cpu_rq, fair::FairSchedEntity, prio::MAX_PRIO, DequeueFlag, EnqueueFlag, OnRq, SchedMode, 53 WakeupFlags, __schedule, 54 }, 55 smp::{ 56 core::smp_get_processor_id, 57 cpu::{AtomicProcessorId, ProcessorId}, 58 kick_cpu, 59 }, 60 syscall::{user_access::clear_user, Syscall}, 61 }; 62 63 use self::kthread::WorkerPrivate; 64 65 pub mod abi; 66 pub mod c_adapter; 67 pub mod exec; 68 pub mod exit; 69 pub mod fork; 70 pub mod idle; 71 pub mod kthread; 72 pub mod pid; 73 pub mod resource; 74 pub mod stdio; 75 pub mod syscall; 76 pub mod utils; 77 78 /// 系统中所有进程的pcb 79 static ALL_PROCESS: SpinLock<Option<HashMap<Pid, Arc<ProcessControlBlock>>>> = SpinLock::new(None); 80 81 pub static mut PROCESS_SWITCH_RESULT: Option<PerCpuVar<SwitchResult>> = None; 82 83 /// 
/// Global flag, written exactly once, that marks whether the process manager
/// has finished initializing.
static mut __PROCESS_MANAGEMENT_INIT_DONE: bool = false;

/// Per-CPU scratch holding the PCBs involved in the most recent context
/// switch, consumed by `switch_finish_hook`.
#[derive(Debug)]
pub struct SwitchResult {
    pub prev_pcb: Option<Arc<ProcessControlBlock>>,
    pub next_pcb: Option<Arc<ProcessControlBlock>>,
}

impl SwitchResult {
    pub fn new() -> Self {
        Self {
            prev_pcb: None,
            next_pcb: None,
        }
    }
}

impl Default for SwitchResult {
    fn default() -> Self {
        Self::new()
    }
}

#[derive(Debug)]
pub struct ProcessManager;
impl ProcessManager {
    /// One-shot initialization of the process manager.
    ///
    /// Panics if called twice (guarded by an atomic CAS on `INIT_FLAG`).
    #[inline(never)]
    fn init() {
        static INIT_FLAG: AtomicBool = AtomicBool::new(false);
        if INIT_FLAG
            .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
            .is_err()
        {
            panic!("ProcessManager has been initialized!");
        }

        unsafe {
            compiler_fence(Ordering::SeqCst);
            kdebug!("To create address space for INIT process.");
            // test_buddy();
            set_IDLE_PROCESS_ADDRESS_SPACE(
                AddressSpace::new(true).expect("Failed to create address space for INIT process."),
            );
            kdebug!("INIT process address space created.");
            compiler_fence(Ordering::SeqCst);
        };

        ALL_PROCESS.lock_irqsave().replace(HashMap::new());
        Self::init_switch_result();
        Self::arch_init();
        kdebug!("process arch init done.");
        Self::init_idle();
        kdebug!("process idle init done.");

        unsafe { __PROCESS_MANAGEMENT_INIT_DONE = true };
        kinfo!("Process Manager initialized.");
    }

    /// Allocate one `SwitchResult` slot per possible CPU.
    fn init_switch_result() {
        // Reserve the full capacity up front: the final size is known.
        let mut switch_res_vec: Vec<SwitchResult> =
            Vec::with_capacity(PerCpu::MAX_CPU_NUM as usize);
        for _ in 0..PerCpu::MAX_CPU_NUM {
            switch_res_vec.push(SwitchResult::new());
        }
        unsafe {
            PROCESS_SWITCH_RESULT = Some(PerCpuVar::new(switch_res_vec).unwrap());
        }
    }

    /// Whether the process manager has finished initializing.
    pub fn initialized() -> bool {
        unsafe { __PROCESS_MANAGEMENT_INIT_DONE }
    }

    /// Get the PCB of the currently running process.
    ///
    /// Must not be called before `ProcessManager::init()` completes; if it is,
    /// log an error and halt this CPU in a spin loop.
    pub fn current_pcb() -> Arc<ProcessControlBlock> {
        if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) {
            // Fixed message: previously read "unsafe__PROCESS_MANAGEMENT_INIT_DONE"
            // with the word "unsafe" fused into the flag name.
            kerror!("__PROCESS_MANAGEMENT_INIT_DONE == false");
            loop {
                spin_loop();
            }
        }
        return ProcessControlBlock::arch_current_pcb();
    }

    /// Get the pid of the current process.
    ///
    /// Returns 0 if the process manager has not finished initializing.
    pub fn current_pid() -> Pid {
        if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) {
            return Pid(0);
        }

        return ProcessManager::current_pcb().pid();
    }

    /// Increase the current process's preemption (lock-hold) counter.
    #[inline(always)]
    pub fn preempt_disable() {
        if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) {
            ProcessManager::current_pcb().preempt_disable();
        }
    }

    /// Decrease the current process's preemption (lock-hold) counter.
    #[inline(always)]
    pub fn preempt_enable() {
        if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) {
            ProcessManager::current_pcb().preempt_enable();
        }
    }

    /// Look up a process's PCB by pid.
    ///
    /// ## Parameters
    ///
    /// - `pid` : the process id
    ///
    /// ## Returns
    ///
    /// The PCB if a matching process exists, otherwise `None`.
    pub fn find(pid: Pid) -> Option<Arc<ProcessControlBlock>> {
        return ALL_PROCESS.lock_irqsave().as_ref()?.get(&pid).cloned();
    }

    /// Register a PCB in the global process table.
    ///
    /// ## Parameters
    ///
    /// - `pcb` : the process control block to add
    pub fn add_pcb(pcb: Arc<ProcessControlBlock>) {
        ALL_PROCESS
            .lock_irqsave()
            .as_mut()
            .unwrap()
            .insert(pcb.pid(), pcb.clone());
    }

    /// Wake up a blocked process.
    ///
    /// The blocked state is re-checked under the write lock because it may
    /// change between the unlocked read and acquiring the writer.
    pub fn wakeup(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> {
        let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        let state = pcb.sched_info().inner_lock_read_irqsave().state();
        if state.is_blocked() {
            let mut writer = pcb.sched_info().inner_lock_write_irqsave();
            let state = writer.state();
            if state.is_blocked() {
                writer.set_state(ProcessState::Runnable);
                writer.set_wakeup();

                // avoid deadlock: release the sched_info lock before taking the runqueue lock
                drop(writer);

                let rq = cpu_rq(pcb.sched_info().on_cpu().unwrap().data() as usize);

                let (rq, _guard) = rq.self_lock();
                rq.update_rq_clock();
                rq.activate_task(
                    pcb,
                    EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK,
                );

                rq.check_preempt_currnet(pcb, WakeupFlags::empty());

                // sched_enqueue(pcb.clone(), true);
                return Ok(());
            } else if state.is_exited() {
                return Err(SystemError::EINVAL);
            } else {
                return Ok(());
            }
        } else if state.is_exited() {
            return Err(SystemError::EINVAL);
        } else {
            return Ok(());
        }
    }

    /// Wake up a stopped process (mirrors `wakeup`, but for `Stopped` state).
    pub fn wakeup_stop(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> {
        let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        let state = pcb.sched_info().inner_lock_read_irqsave().state();
        if let ProcessState::Stopped = state {
            let mut writer = pcb.sched_info().inner_lock_write_irqsave();
            let state = writer.state();
            // Re-check under the write lock: the state may have changed in between.
            if let ProcessState::Stopped = state {
                writer.set_state(ProcessState::Runnable);
                // avoid deadlock: release sched_info lock before taking the runqueue lock
                drop(writer);

                let rq = cpu_rq(pcb.sched_info().on_cpu().unwrap().data() as usize);

                let (rq, _guard) = rq.self_lock();
                rq.update_rq_clock();
                rq.activate_task(
                    pcb,
                    EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK,
                );

                rq.check_preempt_currnet(pcb, WakeupFlags::empty());

                // sched_enqueue(pcb.clone(), true);
                return Ok(());
            } else if state.is_runnable() {
                return Ok(());
            } else {
                return Err(SystemError::EINVAL);
            }
        } else if state.is_runnable() {
            return Ok(());
        } else {
            return Err(SystemError::EINVAL);
        }
    }

    /// Mark the current process as permanently sleeping; the actual reschedule
    /// is the caller's responsibility.
    ///
    /// ## Notes
    ///
    /// - The sched_info lock must NOT be held when entering this function.
    /// - Interrupts must be disabled before entering this function.
    /// - After this function returns, the caller must ensure the process is not
    ///   re-enqueued on the run queue by mistake.
    pub fn mark_sleep(interruptable: bool) -> Result<(), SystemError> {
        assert!(
            !CurrentIrqArch::is_irq_enabled(),
            "interrupt must be disabled before enter ProcessManager::mark_sleep()"
        );
        let pcb = ProcessManager::current_pcb();
        let mut writer =
pcb.sched_info().inner_lock_write_irqsave();
        if !matches!(writer.state(), ProcessState::Exited(_)) {
            writer.set_state(ProcessState::Blocked(interruptable));
            writer.set_sleep();
            pcb.flags().insert(ProcessFlags::NEED_SCHEDULE);
            fence(Ordering::SeqCst);
            drop(writer);
            return Ok(());
        }
        return Err(SystemError::EINTR);
    }

    /// Mark the current process as stopped; the actual reschedule is the
    /// caller's responsibility.
    ///
    /// ## Notes
    ///
    /// - The sched_info lock must NOT be held when entering this function.
    /// - Interrupts must be disabled before entering this function.
    pub fn mark_stop() -> Result<(), SystemError> {
        assert!(
            !CurrentIrqArch::is_irq_enabled(),
            "interrupt must be disabled before enter ProcessManager::mark_stop()"
        );

        let pcb = ProcessManager::current_pcb();
        let mut writer = pcb.sched_info().inner_lock_write_irqsave();
        if !matches!(writer.state(), ProcessState::Exited(_)) {
            writer.set_state(ProcessState::Stopped);
            pcb.flags().insert(ProcessFlags::NEED_SCHEDULE);
            drop(writer);

            return Ok(());
        }
        return Err(SystemError::EINTR);
    }
    /// Notify the parent process after a child exits.
    fn exit_notify() {
        let current = ProcessManager::current_pcb();
        // Let the INIT process adopt all children of the exiting process.
        if current.pid() != Pid(1) {
            unsafe {
                current
                    .adopt_childen()
                    .unwrap_or_else(|e| panic!("adopte_childen failed: error: {e:?}"))
            };
            let r = current.parent_pcb.read_irqsave().upgrade();
            if r.is_none() {
                return;
            }
            let parent_pcb = r.unwrap();
            let r = Syscall::kill(parent_pcb.pid(), Signal::SIGCHLD as i32);
            if r.is_err() {
                kwarn!(
                    "failed to send kill signal to {:?}'s parent pcb {:?}",
                    current.pid(),
                    parent_pcb.pid()
                );
            }
            // todo: send SIGCHLD to the parent process here
            // todo: also decide signal delivery based on thread-group information
        }
    }

    /// Exit the current process. Never returns.
    ///
    /// ## Parameters
    ///
    /// - `exit_code` : the process's exit code
    pub fn exit(exit_code: usize) -> !
    {
        // Disable interrupts for the whole teardown sequence.
        let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        let pcb = ProcessManager::current_pcb();
        let pid = pcb.pid();
        pcb.sched_info
            .inner_lock_write_irqsave()
            .set_state(ProcessState::Exited(exit_code));
        // Wake any interruptible waiters on this process's wait queue.
        pcb.wait_queue.wakeup(Some(ProcessState::Blocked(true)));

        let rq = cpu_rq(smp_get_processor_id().data() as usize);
        let (rq, guard) = rq.self_lock();
        rq.deactivate_task(
            pcb.clone(),
            DequeueFlag::DEQUEUE_SLEEP | DequeueFlag::DEQUEUE_NOCLOCK,
        );
        drop(guard);

        // Post-exit bookkeeping: clear the child-tid userspace addresses.
        let thread = pcb.thread.write_irqsave();
        if let Some(addr) = thread.set_child_tid {
            unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") };
        }

        if let Some(addr) = thread.clear_child_tid {
            // Only wake futex waiters if the address space is still shared with others.
            if Arc::strong_count(&pcb.basic().user_vm().expect("User VM Not found")) > 1 {
                let _ =
                    Futex::futex_wake(addr, FutexFlag::FLAGS_MATCH_NONE, 1, FUTEX_BITSET_MATCH_ANY);
            }
            unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") };
        }

        // If this process was created via vfork, complete the parent's completion.
        if thread.vfork_done.is_some() {
            thread.vfork_done.as_ref().unwrap().complete_all();
        }
        drop(thread);
        unsafe { pcb.basic_mut().set_user_vm(None) };
        drop(pcb);
        ProcessManager::exit_notify();
        // unsafe { CurrentIrqArch::interrupt_enable() };
        __schedule(SchedMode::SM_NONE);
        kerror!("pid {pid:?} exited but sched again!");
        #[allow(clippy::empty_loop)]
        loop {
            spin_loop();
        }
    }

    /// Remove a process from the global process table.
    pub unsafe fn release(pid: Pid) {
        let pcb = ProcessManager::find(pid);
        if pcb.is_some() {
            // let pcb = pcb.unwrap();
            // Check whether the pcb has no remaining global references.
            // TODO: pcb Arc pointers currently leak (refcount is wrong); a
            // debug-only Arc is planned to track this down. The check below is
            // therefore commented out for now so the system keeps running.
            // if Arc::strong_count(&pcb) <= 2 {
            //     drop(pcb);
            //     ALL_PROCESS.lock().as_mut().unwrap().remove(&pid);
            // } else {
            //     // panic if the count is not 1
            //     let msg =
format!("pcb '{:?}' is still referenced, strong count={}",pcb.pid(), Arc::strong_count(&pcb));
            //     kerror!("{}", msg);
            //     panic!()
            // }

            ALL_PROCESS.lock_irqsave().as_mut().unwrap().remove(&pid);
        }
    }

    /// Hook executed after a context switch completes.
    unsafe fn switch_finish_hook() {
        // kdebug!("switch_finish_hook");
        let prev_pcb = PROCESS_SWITCH_RESULT
            .as_mut()
            .unwrap()
            .get_mut()
            .prev_pcb
            .take()
            .expect("prev_pcb is None");
        let next_pcb = PROCESS_SWITCH_RESULT
            .as_mut()
            .unwrap()
            .get_mut()
            .next_pcb
            .take()
            .expect("next_pcb is None");

        // SpinLockGuard::leak() was used before the switch, so the locks must
        // be released manually here.
        prev_pcb.arch_info.force_unlock();
        next_pcb.arch_info.force_unlock();
    }

    /// If the target process is currently running on some CPU, force that CPU
    /// into kernel mode.
    ///
    /// ## Parameters
    ///
    /// - `pcb` : the target process's pcb
    #[allow(dead_code)]
    pub fn kick(pcb: &Arc<ProcessControlBlock>) {
        ProcessManager::current_pcb().preempt_disable();
        let cpu_id = pcb.sched_info().on_cpu();

        if let Some(cpu_id) = cpu_id {
            if pcb.pid() == cpu_rq(cpu_id.data() as usize).current().pid() {
                kick_cpu(cpu_id).expect("ProcessManager::kick(): Failed to kick cpu");
            }
        }

        ProcessManager::current_pcb().preempt_enable();
    }
}

/// Context-switch hook: when this function returns, the context switch happens.
#[cfg(target_arch = "x86_64")]
#[inline(never)]
pub unsafe extern "sysv64" fn switch_finish_hook() {
    ProcessManager::switch_finish_hook();
}
#[cfg(target_arch = "riscv64")]
pub unsafe extern "C" fn switch_finish_hook() {
    ProcessManager::switch_finish_hook();
}

int_like!(Pid, AtomicPid, usize, AtomicUsize);

// NOTE(review): implementing `ToString` directly is normally discouraged
// (implementing `Display` provides it via a blanket impl), but changing this
// could conflict with a `Display` impl elsewhere — confirm before refactoring.
impl ToString for Pid {
    fn to_string(&self) -> String {
        self.0.to_string()
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ProcessState {
    /// The process is running on a CPU or in a run queue.
    Runnable,
    /// The process is waiting for an event to occur.
    /// The inner bool indicates whether the wait can be interrupted:
    /// - `true`: hardware interrupts, signals, or other system events may break
    ///   the wait and make the process `Runnable` again.
    /// - `false`: the process must be explicitly woken to become `Runnable`.
    Blocked(bool),
    /// The process was stopped by a signal.
    Stopped,
    /// The process has exited; the usize is the exit code.
    Exited(usize),
}

#[allow(dead_code)]
impl ProcessState {
    #[inline(always)]
    pub fn is_runnable(&self) -> bool {
        return matches!(self, ProcessState::Runnable);
    }

    #[inline(always)]
    pub fn is_blocked(&self) -> bool {
        return matches!(self, ProcessState::Blocked(_));
    }

    #[inline(always)]
    pub fn is_blocked_interruptable(&self) -> bool {
        return matches!(self, ProcessState::Blocked(true));
    }

    /// Returns `true` if the process state is [`Exited`].
    #[inline(always)]
    pub fn is_exited(&self) -> bool {
        return matches!(self, ProcessState::Exited(_));
    }

    /// Returns `true` if the process state is [`Stopped`].
    ///
    /// [`Stopped`]: ProcessState::Stopped
    #[inline(always)]
    pub fn is_stopped(&self) -> bool {
        matches!(self, ProcessState::Stopped)
    }

    /// Returns exit code if the process state is [`Exited`].
    #[inline(always)]
    pub fn exit_code(&self) -> Option<usize> {
        match self {
            ProcessState::Exited(code) => Some(*code),
            _ => None,
        }
    }
}

bitflags!
{
    /// PCB flag bits.
    pub struct ProcessFlags: usize {
        /// This pcb represents a kernel thread.
        const KTHREAD = 1 << 0;
        /// The process needs to be rescheduled.
        const NEED_SCHEDULE = 1 << 1;
        /// The process shares resources with its parent due to vfork.
        const VFORK = 1 << 2;
        /// The process cannot be frozen.
        const NOFREEZE = 1 << 3;
        /// The process is exiting.
        const EXITING = 1 << 4;
        /// The process was woken by a fatal (kill) signal.
        const WAKEKILL = 1 << 5;
        /// The process exited because of a signal (killed by a signal).
        const SIGNALED = 1 << 6;
        /// The process needs to be migrated to another cpu.
        const NEED_MIGRATE = 1 << 7;
        /// Randomized virtual address space, mainly for dynamic-linker loading.
        const RANDOMIZE = 1 << 8;
    }
}

#[derive(Debug)]
pub struct ProcessControlBlock {
    /// pid of this process
    pid: Pid,
    /// Thread-group id (never changes within the same thread group).
    tgid: Pid,

    basic: RwLock<ProcessBasicInfo>,
    /// Spinlock-hold (preemption) counter of this process.
    preempt_count: AtomicUsize,

    flags: LockFreeFlags<ProcessFlags>,
    worker_private: SpinLock<Option<WorkerPrivate>>,
    /// Kernel stack of the process.
    kernel_stack: RwLock<KernelStack>,

    /// Syscall stack.
    syscall_stack: RwLock<KernelStack>,

    /// Scheduling-related information.
    sched_info: ProcessSchedulerInfo,
    /// Architecture-specific information.
    arch_info: SpinLock<ArchPCBInfo>,
    /// Signal-handling information (could perhaps be lock-free).
    sig_info: RwLock<ProcessSignalInfo>,
    /// Signal handler structure.
    sig_struct: SpinLock<SignalStruct>,
    /// Exit signal.
    exit_signal: AtomicSignal,

    /// Parent process pointer.
    parent_pcb: RwLock<Weak<ProcessControlBlock>>,
    /// Real parent process pointer.
    real_parent_pcb: RwLock<Weak<ProcessControlBlock>>,

    /// List of child process pids.
    children: RwLock<Vec<Pid>>,

    /// Wait queue.
    wait_queue: WaitQueue,

    /// Thread information.
    thread: RwLock<ThreadInfo>,
}

impl ProcessControlBlock {
    /// Generate a new pcb.
    ///
    /// ## Parameters
    ///
    /// - `name` : the process's name
    /// - `kstack` : the process's kernel stack
    ///
    /// ## Returns
    ///
    /// A new pcb.
    pub fn new(name: String, kstack: KernelStack) -> Arc<Self> {
        return Self::do_create_pcb(name, kstack, false);
    }

    /// Create a new idle process.
    ///
    /// Note: this must only be called during process-manager initialization.
    pub fn new_idle(cpu_id: u32, kstack: KernelStack) -> Arc<Self> {
        let name = format!("idle-{}", cpu_id);
        return Self::do_create_pcb(name, kstack, true);
    }

    #[inline(never)]
    fn do_create_pcb(name: String, kstack: KernelStack, is_idle: bool) -> Arc<Self> {
        // idle gets pid 0 and root cwd; all other processes inherit from the caller.
        let (pid, ppid, cwd) = if is_idle {
            (Pid(0), Pid(0), "/".to_string())
        } else {
            let ppid = ProcessManager::current_pcb().pid();
            let cwd = ProcessManager::current_pcb().basic().cwd();
            (Self::generate_pid(), ppid, cwd)
        };

        let basic_info = ProcessBasicInfo::new(Pid(0), ppid, name, cwd, None);
        let preempt_count = AtomicUsize::new(0);
        let flags = unsafe { LockFreeFlags::new(ProcessFlags::empty()) };

        let sched_info = ProcessSchedulerInfo::new(None);
        let arch_info = SpinLock::new(ArchPCBInfo::new(&kstack));

        let ppcb: Weak<ProcessControlBlock> = ProcessManager::find(ppid)
            .map(|p| Arc::downgrade(&p))
            .unwrap_or_default();

        let pcb = Self {
            pid,
            tgid: pid,
            basic: basic_info,
            preempt_count,
            flags,
            kernel_stack: RwLock::new(kstack),
            syscall_stack: RwLock::new(KernelStack::new().unwrap()),
            worker_private: SpinLock::new(None),
            sched_info,
            arch_info,
            sig_info: RwLock::new(ProcessSignalInfo::default()),
            sig_struct: SpinLock::new(SignalStruct::new()),
            exit_signal: AtomicSignal::new(Signal::SIGCHLD),
            parent_pcb: RwLock::new(ppcb.clone()),
            real_parent_pcb: RwLock::new(ppcb),
            children: RwLock::new(Vec::new()),
            wait_queue: WaitQueue::default(),
            thread: RwLock::new(ThreadInfo::new()),
        };

        // Initialize the syscall stack.
        #[cfg(target_arch = "x86_64")]
        pcb.arch_info
            .lock()
            .init_syscall_stack(&pcb.syscall_stack.read());

        let pcb = Arc::new(pcb);

        pcb.sched_info()
            .sched_entity()
            .force_mut()
            .set_pcb(Arc::downgrade(&pcb));
        // Store a Weak pointer to the pcb at the lowest address of both the
        // kernel stack and the syscall stack.
        unsafe {
            pcb.kernel_stack
                .write()
                .set_pcb(Arc::downgrade(&pcb))
                .unwrap();

            pcb.syscall_stack
                .write()
                .set_pcb(Arc::downgrade(&pcb))
                .unwrap()
        };

        // Register this pcb in its parent's children list.
        if pcb.pid() > Pid(1) {
            if let Some(ppcb_arc) = pcb.parent_pcb.read_irqsave().upgrade() {
                let mut children = ppcb_arc.children.write_irqsave();
                children.push(pcb.pid());
            } else {
                panic!("parent pcb is None");
            }
        }

        return pcb;
    }

    /// Generate a fresh pid (monotonically increasing).
    #[inline(always)]
    fn generate_pid() -> Pid {
        static NEXT_PID: AtomicPid = AtomicPid::new(Pid(1));
        return NEXT_PID.fetch_add(Pid(1), Ordering::SeqCst);
    }

    /// Current preemption (lock-hold) count of this process.
    #[inline(always)]
    pub fn preempt_count(&self) -> usize {
        return self.preempt_count.load(Ordering::SeqCst);
    }

    /// Increase this process's preemption (lock-hold) count.
    #[inline(always)]
    pub fn preempt_disable(&self) {
        self.preempt_count.fetch_add(1, Ordering::SeqCst);
    }

    /// Decrease this process's preemption (lock-hold) count.
    #[inline(always)]
    pub fn preempt_enable(&self) {
        self.preempt_count.fetch_sub(1, Ordering::SeqCst);
    }

    #[inline(always)]
    pub unsafe fn set_preempt_count(&self, count: usize) {
        self.preempt_count.store(count, Ordering::SeqCst);
    }

    #[inline(always)]
    pub fn flags(&self) -> &mut ProcessFlags {
        return self.flags.get_mut();
    }

    /// Note: this value may be read from interrupt context, but must not be
    /// modified there, otherwise a deadlock can occur.
    #[inline(always)]
    pub fn basic(&self) -> RwLockReadGuard<ProcessBasicInfo> {
        return self.basic.read_irqsave();
    }

    #[inline(always)]
    pub fn set_name(&self, name: String) {
        self.basic.write().set_name(name);
    }

    #[inline(always)]
    pub fn basic_mut(&self) ->
RwLockWriteGuard<ProcessBasicInfo> {
        return self.basic.write_irqsave();
    }

    /// # Acquire the arch-info lock with interrupts disabled.
    #[inline(always)]
    pub fn arch_info_irqsave(&self) -> SpinLockGuard<ArchPCBInfo> {
        return self.arch_info.lock_irqsave();
    }

    /// # Acquire the arch-info lock WITHOUT disabling interrupts.
    ///
    /// Because arch info is used during context switches, taking this lock
    /// outside interrupt context without irqsave is unsafe.
    ///
    /// Only use this function when:
    /// - in interrupt context (interrupts already disabled), or
    /// - the pcb has just been created.
    #[inline(always)]
    pub unsafe fn arch_info(&self) -> SpinLockGuard<ArchPCBInfo> {
        return self.arch_info.lock();
    }

    #[inline(always)]
    pub fn kernel_stack(&self) -> RwLockReadGuard<KernelStack> {
        return self.kernel_stack.read();
    }

    #[inline(always)]
    #[allow(dead_code)]
    pub fn kernel_stack_mut(&self) -> RwLockWriteGuard<KernelStack> {
        return self.kernel_stack.write();
    }

    #[inline(always)]
    pub fn sched_info(&self) -> &ProcessSchedulerInfo {
        return &self.sched_info;
    }

    #[inline(always)]
    pub fn worker_private(&self) -> SpinLockGuard<Option<WorkerPrivate>> {
        return self.worker_private.lock();
    }

    #[inline(always)]
    pub fn pid(&self) -> Pid {
        return self.pid;
    }

    #[inline(always)]
    pub fn tgid(&self) -> Pid {
        return self.tgid;
    }

    /// Get the Arc pointer to the file descriptor table.
    #[inline(always)]
    pub fn fd_table(&self) -> Arc<RwLock<FileDescriptorVec>> {
        return self.basic.read().fd_table().unwrap();
    }

    /// Get the socket object for a file descriptor number.
    ///
    /// ## Parameters
    ///
    /// - `fd` : the file descriptor number
    ///
    /// ## Returns
    ///
    /// The socket object's Arc pointer.
如果文件描述符不是socket,那么返回None 853 pub fn get_socket(&self, fd: i32) -> Option<Arc<SocketInode>> { 854 let binding = ProcessManager::current_pcb().fd_table(); 855 let fd_table_guard = binding.read(); 856 857 let f = fd_table_guard.get_file_by_fd(fd)?; 858 drop(fd_table_guard); 859 860 if f.file_type() != FileType::Socket { 861 return None; 862 } 863 let socket: Arc<SocketInode> = f 864 .inode() 865 .downcast_arc::<SocketInode>() 866 .expect("Not a socket inode"); 867 return Some(socket); 868 } 869 870 /// 当前进程退出时,让初始进程收养所有子进程 871 unsafe fn adopt_childen(&self) -> Result<(), SystemError> { 872 match ProcessManager::find(Pid(1)) { 873 Some(init_pcb) => { 874 let childen_guard = self.children.write(); 875 let mut init_childen_guard = init_pcb.children.write(); 876 877 childen_guard.iter().for_each(|pid| { 878 init_childen_guard.push(*pid); 879 }); 880 881 return Ok(()); 882 } 883 _ => Err(SystemError::ECHILD), 884 } 885 } 886 887 /// 生成进程的名字 888 pub fn generate_name(program_path: &str, args: &Vec<String>) -> String { 889 let mut name = program_path.to_string(); 890 for arg in args { 891 name.push(' '); 892 name.push_str(arg); 893 } 894 return name; 895 } 896 897 pub fn sig_info_irqsave(&self) -> RwLockReadGuard<ProcessSignalInfo> { 898 self.sig_info.read_irqsave() 899 } 900 901 pub fn try_siginfo_irqsave(&self, times: u8) -> Option<RwLockReadGuard<ProcessSignalInfo>> { 902 for _ in 0..times { 903 if let Some(r) = self.sig_info.try_read_irqsave() { 904 return Some(r); 905 } 906 } 907 908 return None; 909 } 910 911 pub fn sig_info_mut(&self) -> RwLockWriteGuard<ProcessSignalInfo> { 912 self.sig_info.write_irqsave() 913 } 914 915 pub fn try_siginfo_mut(&self, times: u8) -> Option<RwLockWriteGuard<ProcessSignalInfo>> { 916 for _ in 0..times { 917 if let Some(r) = self.sig_info.try_write_irqsave() { 918 return Some(r); 919 } 920 } 921 922 return None; 923 } 924 925 pub fn sig_struct(&self) -> SpinLockGuard<SignalStruct> { 926 self.sig_struct.lock_irqsave() 927 } 928 929 pub fn 
try_sig_struct_irqsave(&self, times: u8) -> Option<SpinLockGuard<SignalStruct>> {
        // Try up to `times` times to take the sig_struct lock without blocking.
        for _ in 0..times {
            if let Ok(r) = self.sig_struct.try_lock_irqsave() {
                return Some(r);
            }
        }

        return None;
    }

    pub fn sig_struct_irqsave(&self) -> SpinLockGuard<SignalStruct> {
        self.sig_struct.lock_irqsave()
    }
}

impl Drop for ProcessControlBlock {
    fn drop(&mut self) {
        let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        // Unregister the process from ProcFS.
        procfs_unregister_pid(self.pid())
            .unwrap_or_else(|e| panic!("procfs_unregister_pid failed: error: {e:?}"));

        // Remove this pid from the parent's children list, if the parent still exists.
        if let Some(ppcb) = self.parent_pcb.read_irqsave().upgrade() {
            ppcb.children
                .write_irqsave()
                .retain(|pid| *pid != self.pid());
        }

        drop(irq_guard);
    }
}

/// Thread information.
#[derive(Debug)]
pub struct ThreadInfo {
    // Userspace address recording the user thread id; set to 0 when this
    // thread ends, to notify the parent process.
    clear_child_tid: Option<VirtAddr>,
    set_child_tid: Option<VirtAddr>,

    vfork_done: Option<Arc<Completion>>,
    /// Leader of the thread group.
    group_leader: Weak<ProcessControlBlock>,
}

impl ThreadInfo {
    pub fn new() -> Self {
        Self {
            clear_child_tid: None,
            set_child_tid: None,
            vfork_done: None,
            group_leader: Weak::default(),
        }
    }

    pub fn group_leader(&self) -> Option<Arc<ProcessControlBlock>> {
        return self.group_leader.upgrade();
    }
}

/// Basic information of a process.
///
/// This structure holds basic process information, mainly things that do not
/// change frequently while the process runs.
#[derive(Debug)]
pub struct ProcessBasicInfo {
    /// Process-group id of this process.
    pgid: Pid,
    /// pid of this process's parent.
    ppid: Pid,
    /// Name of the process.
    name: String,

    /// Current working directory of the process.
    cwd: String,

    /// User address space.
    user_vm: Option<Arc<AddressSpace>>,

    /// File descriptor table.
    fd_table: Option<Arc<RwLock<FileDescriptorVec>>>,
}

impl ProcessBasicInfo {
    #[inline(never)]
    pub fn new(
        pgid: Pid,
        ppid: Pid,
        name: String,
        cwd: String,
        user_vm:
Option<Arc<AddressSpace>>,
    ) -> RwLock<Self> {
        let fd_table = Arc::new(RwLock::new(FileDescriptorVec::new()));
        return RwLock::new(Self {
            pgid,
            ppid,
            name,
            cwd,
            user_vm,
            fd_table: Some(fd_table),
        });
    }

    pub fn pgid(&self) -> Pid {
        return self.pgid;
    }

    pub fn ppid(&self) -> Pid {
        return self.ppid;
    }

    pub fn name(&self) -> &str {
        return &self.name;
    }

    pub fn set_name(&mut self, name: String) {
        self.name = name;
    }

    pub fn cwd(&self) -> String {
        return self.cwd.clone();
    }

    pub fn set_cwd(&mut self, path: String) {
        // FIX: previously written as `return self.cwd = path;`, which returned
        // the unit value of the assignment — a confusing non-idiom.
        self.cwd = path;
    }

    pub fn user_vm(&self) -> Option<Arc<AddressSpace>> {
        return self.user_vm.clone();
    }

    pub unsafe fn set_user_vm(&mut self, user_vm: Option<Arc<AddressSpace>>) {
        self.user_vm = user_vm;
    }

    pub fn fd_table(&self) -> Option<Arc<RwLock<FileDescriptorVec>>> {
        return self.fd_table.clone();
    }

    pub fn set_fd_table(&mut self, fd_table: Option<Arc<RwLock<FileDescriptorVec>>>) {
        self.fd_table = fd_table;
    }
}

#[derive(Debug)]
pub struct ProcessSchedulerInfo {
    /// CPU this process is currently on.
    on_cpu: AtomicProcessorId,
    /// If the process is waiting to be migrated to another cpu core (i.e.
    /// PF_NEED_MIGRATE is set in flags), this stores the target core number.
    // migrate_to: AtomicProcessorId,
    inner_locked: RwLock<InnerSchedInfo>,
    /// Scheduling priority of the process.
    // priority: SchedPriority,
    /// Virtual runtime of the process.
    // virtual_runtime: AtomicIsize,
    /// Time slice managed by the realtime scheduler.
    // rt_time_slice: AtomicIsize,
    pub sched_stat: RwLock<SchedInfo>,
    /// Scheduling policy.
    pub sched_policy: RwLock<crate::sched::SchedPolicy>,
    /// CFS scheduling entity.
    pub sched_entity: Arc<FairSchedEntity>,
    pub on_rq: SpinLock<OnRq>,

    pub prio_data: RwLock<PrioData>,
}

#[derive(Debug, Default)]
pub struct SchedInfo {
    /// Number of times the task has run on a given CPU.
    pub pcount: usize,
    /// Time the task has spent waiting on a run queue.
    pub run_delay: usize,
    /// Timestamp of the task's last arrival on a CPU.
    pub last_arrival: u64,
    /// Timestamp of the task's last enqueue onto a run queue.
    pub last_queued: u64,
}

#[derive(Debug)]
pub struct PrioData {
    pub prio: i32,
    pub static_prio: i32,
    pub normal_prio: i32,
}

impl Default for PrioData {
    fn default() -> Self {
        Self {
            prio: MAX_PRIO - 20,
            static_prio: MAX_PRIO - 20,
            normal_prio: MAX_PRIO - 20,
        }
    }
}

#[derive(Debug)]
pub struct InnerSchedInfo {
    /// Current state of the process.
    state: ProcessState,
    /// Whether the process has been marked as sleeping.
    sleep: bool,
}

impl InnerSchedInfo {
    pub fn state(&self) -> ProcessState {
        return self.state;
    }

    pub fn set_state(&mut self, state: ProcessState) {
        self.state = state;
    }

    pub fn set_sleep(&mut self) {
        self.sleep = true;
    }

    pub fn set_wakeup(&mut self) {
        self.sleep = false;
    }

    pub fn is_mark_sleep(&self) -> bool {
        self.sleep
    }
}

impl ProcessSchedulerInfo {
    #[inline(never)]
    pub fn new(on_cpu: Option<ProcessorId>) -> Self {
        let cpu_id = on_cpu.unwrap_or(ProcessorId::INVALID);
        return Self {
            on_cpu: AtomicProcessorId::new(cpu_id),
            // migrate_to: AtomicProcessorId::new(ProcessorId::INVALID),
            inner_locked: RwLock::new(InnerSchedInfo {
                state: ProcessState::Blocked(false),
                sleep: false,
            }),
            // virtual_runtime: AtomicIsize::new(0),
            // rt_time_slice: AtomicIsize::new(0),
            // priority: SchedPriority::new(100).unwrap(),
            sched_stat: RwLock::new(SchedInfo::default()),
            sched_policy: RwLock::new(crate::sched::SchedPolicy::CFS),
            sched_entity: FairSchedEntity::new(),
            on_rq: SpinLock::new(OnRq::None),
            prio_data: RwLock::new(PrioData::default()),
        };
    }

    pub fn sched_entity(&self) -> Arc<FairSchedEntity> {
        return
self.sched_entity.clone();
    }

    /// CPU the process is currently on; `None` when the stored id is INVALID.
    pub fn on_cpu(&self) -> Option<ProcessorId> {
        let on_cpu = self.on_cpu.load(Ordering::SeqCst);
        if on_cpu == ProcessorId::INVALID {
            return None;
        } else {
            return Some(on_cpu);
        }
    }

    /// Record the CPU the process is on; `None` stores the INVALID sentinel.
    pub fn set_on_cpu(&self, on_cpu: Option<ProcessorId>) {
        if let Some(cpu_id) = on_cpu {
            self.on_cpu.store(cpu_id, Ordering::SeqCst);
        } else {
            self.on_cpu.store(ProcessorId::INVALID, Ordering::SeqCst);
        }
    }

    // pub fn migrate_to(&self) -> Option<ProcessorId> {
    //     let migrate_to = self.migrate_to.load(Ordering::SeqCst);
    //     if migrate_to == ProcessorId::INVALID {
    //         return None;
    //     } else {
    //         return Some(migrate_to);
    //     }
    // }

    // pub fn set_migrate_to(&self, migrate_to: Option<ProcessorId>) {
    //     if let Some(data) = migrate_to {
    //         self.migrate_to.store(data, Ordering::SeqCst);
    //     } else {
    //         self.migrate_to
    //             .store(ProcessorId::INVALID, Ordering::SeqCst)
    //     }
    // }

    pub fn inner_lock_write_irqsave(&self) -> RwLockWriteGuard<InnerSchedInfo> {
        return self.inner_locked.write_irqsave();
    }

    pub fn inner_lock_read_irqsave(&self) -> RwLockReadGuard<InnerSchedInfo> {
        return self.inner_locked.read_irqsave();
    }

    // pub fn inner_lock_try_read_irqsave(
    //     &self,
    //     times: u8,
    // ) -> Option<RwLockReadGuard<InnerSchedInfo>> {
    //     for _ in 0..times {
    //         if let Some(r) = self.inner_locked.try_read_irqsave() {
    //             return Some(r);
    //         }
    //     }

    //     return None;
    // }

    // pub fn inner_lock_try_upgradable_read_irqsave(
    //     &self,
    //     times: u8,
    // ) -> Option<RwLockUpgradableGuard<InnerSchedInfo>> {
    //     for _ in 0..times {
    //         if let Some(r) = self.inner_locked.try_upgradeable_read_irqsave() {
    //             return Some(r);
    //         }
    //     }

    //     return None;
    // }
// pub fn virtual_runtime(&self) -> isize {
    //     return self.virtual_runtime.load(Ordering::SeqCst);
    // }

    // pub fn set_virtual_runtime(&self, virtual_runtime: isize) {
    //     self.virtual_runtime
    //         .store(virtual_runtime, Ordering::SeqCst);
    // }
    // pub fn increase_virtual_runtime(&self, delta: isize) {
    //     self.virtual_runtime.fetch_add(delta, Ordering::SeqCst);
    // }

    // pub fn rt_time_slice(&self) -> isize {
    //     return self.rt_time_slice.load(Ordering::SeqCst);
    // }

    // pub fn set_rt_time_slice(&self, rt_time_slice: isize) {
    //     self.rt_time_slice.store(rt_time_slice, Ordering::SeqCst);
    // }

    // pub fn increase_rt_time_slice(&self, delta: isize) {
    //     self.rt_time_slice.fetch_add(delta, Ordering::SeqCst);
    // }

    /// Returns the scheduling policy of the process (read with IRQs saved).
    pub fn policy(&self) -> crate::sched::SchedPolicy {
        return *self.sched_policy.read_irqsave();
    }
}

/// A process's kernel stack.
#[derive(Debug, Clone)]
pub struct KernelStack {
    /// Backing storage; held in an `Option` so that `Drop` can take it out and
    /// forget it when the memory must not be released.
    stack: Option<AlignedBox<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>>,
    /// Whether this kernel stack's memory may be freed on drop.
    can_be_freed: bool,
}

impl KernelStack {
    /// Size of a kernel stack in bytes (0x4000 = 16 KiB).
    pub const SIZE: usize = 0x4000;
    /// Required alignment of a kernel stack in bytes.
    pub const ALIGN: usize = 0x4000;

    /// Allocates a new, zero-initialized kernel stack that may be freed on drop.
    ///
    /// Returns an error if the aligned allocation fails.
    pub fn new() -> Result<Self, SystemError> {
        return Ok(Self {
            stack: Some(
                AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_zeroed()?,
            ),
            can_be_freed: true,
        });
    }

    /// Builds a kernel-stack structure on top of pre-existing memory.
    ///
    /// Only intended for BSP boot, to construct the idle process's kernel
    /// stack. Using this function anywhere else is very likely to cause errors!
1300 pub unsafe fn from_existed(base: VirtAddr) -> Result<Self, SystemError> { 1301 if base.is_null() || !base.check_aligned(Self::ALIGN) { 1302 return Err(SystemError::EFAULT); 1303 } 1304 1305 return Ok(Self { 1306 stack: Some( 1307 AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_unchecked( 1308 base.data() as *mut [u8; KernelStack::SIZE], 1309 ), 1310 ), 1311 can_be_freed: false, 1312 }); 1313 } 1314 1315 /// 返回内核栈的起始虚拟地址(低地址) 1316 pub fn start_address(&self) -> VirtAddr { 1317 return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize); 1318 } 1319 1320 /// 返回内核栈的结束虚拟地址(高地址)(不包含该地址) 1321 pub fn stack_max_address(&self) -> VirtAddr { 1322 return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize + Self::SIZE); 1323 } 1324 1325 pub unsafe fn set_pcb(&mut self, pcb: Weak<ProcessControlBlock>) -> Result<(), SystemError> { 1326 // 将一个Weak<ProcessControlBlock>放到内核栈的最低地址处 1327 let p: *const ProcessControlBlock = Weak::into_raw(pcb); 1328 let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock; 1329 1330 // 如果内核栈的最低地址处已经有了一个pcb,那么,这里就不再设置,直接返回错误 1331 if unlikely(unsafe { !(*stack_bottom_ptr).is_null() }) { 1332 kerror!("kernel stack bottom is not null: {:p}", *stack_bottom_ptr); 1333 return Err(SystemError::EPERM); 1334 } 1335 // 将pcb的地址放到内核栈的最低地址处 1336 unsafe { 1337 *stack_bottom_ptr = p; 1338 } 1339 1340 return Ok(()); 1341 } 1342 1343 /// 清除内核栈的pcb指针 1344 /// 1345 /// ## 参数 1346 /// 1347 /// - `force` : 如果为true,那么,即使该内核栈的pcb指针不为null,也会被强制清除而不处理Weak指针问题 1348 pub unsafe fn clear_pcb(&mut self, force: bool) { 1349 let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock; 1350 if unlikely(unsafe { (*stack_bottom_ptr).is_null() }) { 1351 return; 1352 } 1353 1354 if !force { 1355 let pcb_ptr: Weak<ProcessControlBlock> = Weak::from_raw(*stack_bottom_ptr); 1356 drop(pcb_ptr); 1357 } 1358 1359 *stack_bottom_ptr = core::ptr::null(); 1360 } 1361 1362 /// 返回指向当前内核栈pcb的Arc指针 1363 
    #[allow(dead_code)]
    /// Returns an `Arc` to the PCB whose pointer is stored at the bottom of
    /// this kernel stack, or `None` if no pointer is installed or the PCB can
    /// no longer be upgraded.
    pub unsafe fn pcb(&self) -> Option<Arc<ProcessControlBlock>> {
        // Fetch the PCB pointer from the lowest address of the kernel stack.
        let p = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock;
        if unlikely(unsafe { (*p).is_null() }) {
            return None;
        }

        // Wrap the reconstructed Weak in ManuallyDrop so its destructor does
        // not run and release a weak reference this function does not own.
        let weak_wrapper: ManuallyDrop<Weak<ProcessControlBlock>> =
            ManuallyDrop::new(Weak::from_raw(*p));

        let new_arc: Arc<ProcessControlBlock> = weak_wrapper.upgrade()?;
        return Some(new_arc);
    }
}

impl Drop for KernelStack {
    fn drop(&mut self) {
        // If a PCB pointer is stored at the stack bottom, rebuild the Weak and
        // drop it so the weak reference count is released.
        if self.stack.is_some() {
            let ptr = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock;
            if unsafe { !(*ptr).is_null() } {
                let pcb_ptr: Weak<ProcessControlBlock> = unsafe { Weak::from_raw(*ptr) };
                drop(pcb_ptr);
            }
        }
        // If this kernel stack must not be freed (it was built from existing
        // memory), take the box out and forget it so AlignedBox's drop never runs.
        if !self.can_be_freed {
            let bx = self.stack.take();
            core::mem::forget(bx);
        }
    }
}

/// Boot-time entry point that initializes the process manager.
pub fn process_init() {
    ProcessManager::init();
}

/// Per-process signal bookkeeping.
#[derive(Debug)]
pub struct ProcessSignalInfo {
    // Signals blocked by the current process.
    sig_block: SigSet,
    // sig_pending stores the signals to be handled by the current thread.
    sig_pending: SigPending,
    // sig_shared_pending stores the signals addressed to the whole process
    // this thread belongs to.
    sig_shared_pending: SigPending,
    // The tty associated with the current process, if any.
    tty: Option<Arc<TtyCore>>,
}

impl ProcessSignalInfo {
    /// Borrows the set of blocked signals.
    pub fn sig_block(&self) -> &SigSet {
        &self.sig_block
    }

    /// Borrows the thread-private pending-signal queue.
    pub fn sig_pending(&self) -> &SigPending {
        &self.sig_pending
    }

    /// Mutably borrows the thread-private pending-signal queue.
    pub fn sig_pending_mut(&mut self) -> &mut SigPending {
        &mut self.sig_pending
    }

    /// Mutably borrows the set of blocked signals.
    pub fn sig_block_mut(&mut self) -> &mut SigSet {
        &mut self.sig_block
    }

    /// Mutably borrows the process-wide (shared) pending-signal queue.
    pub fn sig_shared_pending_mut(&mut self) -> &mut SigPending {
        &mut self.sig_shared_pending
    }

    /// Borrows the process-wide (shared) pending-signal queue.
    pub fn sig_shared_pending(&self) -> &SigPending {
&self.sig_shared_pending 1436 } 1437 1438 pub fn tty(&self) -> Option<Arc<TtyCore>> { 1439 self.tty.clone() 1440 } 1441 1442 pub fn set_tty(&mut self, tty: Arc<TtyCore>) { 1443 self.tty = Some(tty); 1444 } 1445 1446 /// 从 pcb 的 siginfo中取出下一个要处理的信号,先处理线程信号,再处理进程信号 1447 /// 1448 /// ## 参数 1449 /// 1450 /// - `sig_mask` 被忽略掉的信号 1451 /// 1452 pub fn dequeue_signal(&mut self, sig_mask: &SigSet) -> (Signal, Option<SigInfo>) { 1453 let res = self.sig_pending.dequeue_signal(sig_mask); 1454 if res.0 != Signal::INVALID { 1455 return res; 1456 } else { 1457 return self.sig_shared_pending.dequeue_signal(sig_mask); 1458 } 1459 } 1460 } 1461 1462 impl Default for ProcessSignalInfo { 1463 fn default() -> Self { 1464 Self { 1465 sig_block: SigSet::empty(), 1466 sig_pending: SigPending::default(), 1467 sig_shared_pending: SigPending::default(), 1468 tty: None, 1469 } 1470 } 1471 } 1472