1 use core::{ 2 hash::Hash, 3 hint::spin_loop, 4 intrinsics::{likely, unlikely}, 5 mem::ManuallyDrop, 6 sync::atomic::{compiler_fence, fence, AtomicBool, AtomicUsize, Ordering}, 7 }; 8 9 use alloc::{ 10 string::{String, ToString}, 11 sync::{Arc, Weak}, 12 vec::Vec, 13 }; 14 use hashbrown::HashMap; 15 use system_error::SystemError; 16 17 use crate::{ 18 arch::{ 19 cpu::current_cpu_id, 20 ipc::signal::{AtomicSignal, SigSet, Signal}, 21 process::ArchPCBInfo, 22 CurrentIrqArch, 23 }, 24 driver::tty::tty_core::TtyCore, 25 exception::InterruptArch, 26 filesystem::{ 27 procfs::procfs_unregister_pid, 28 vfs::{file::FileDescriptorVec, FileType}, 29 }, 30 ipc::signal_types::{SigInfo, SigPending, SignalStruct}, 31 kdebug, kinfo, 32 libs::{ 33 align::AlignedBox, 34 casting::DowncastArc, 35 futex::{ 36 constant::{FutexFlag, FUTEX_BITSET_MATCH_ANY}, 37 futex::{Futex, RobustListHead}, 38 }, 39 lock_free_flags::LockFreeFlags, 40 rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard}, 41 spinlock::{SpinLock, SpinLockGuard}, 42 wait_queue::WaitQueue, 43 }, 44 mm::{ 45 percpu::{PerCpu, PerCpuVar}, 46 set_IDLE_PROCESS_ADDRESS_SPACE, 47 ucontext::AddressSpace, 48 VirtAddr, 49 }, 50 net::socket::SocketInode, 51 sched::completion::Completion, 52 sched::{ 53 cpu_rq, fair::FairSchedEntity, prio::MAX_PRIO, DequeueFlag, EnqueueFlag, OnRq, SchedMode, 54 WakeupFlags, __schedule, 55 }, 56 smp::{ 57 core::smp_get_processor_id, 58 cpu::{AtomicProcessorId, ProcessorId}, 59 kick_cpu, 60 }, 61 syscall::{user_access::clear_user, Syscall}, 62 }; 63 64 use self::kthread::WorkerPrivate; 65 66 pub mod abi; 67 pub mod c_adapter; 68 pub mod exec; 69 pub mod exit; 70 pub mod fork; 71 pub mod idle; 72 pub mod kthread; 73 pub mod pid; 74 pub mod resource; 75 pub mod stdio; 76 pub mod syscall; 77 pub mod utils; 78 79 /// 系统中所有进程的pcb 80 static ALL_PROCESS: SpinLock<Option<HashMap<Pid, Arc<ProcessControlBlock>>>> = SpinLock::new(None); 81 82 pub static mut PROCESS_SWITCH_RESULT: Option<PerCpuVar<SwitchResult>> = None; 83 84 /// 一个只改变1次的全局变量,标志进程管理器是否已经初始化完成 85 static mut __PROCESS_MANAGEMENT_INIT_DONE: bool = false; 86 87 #[derive(Debug)] 88 pub struct SwitchResult { 89 pub prev_pcb: Option<Arc<ProcessControlBlock>>, 90 pub next_pcb: Option<Arc<ProcessControlBlock>>, 91 } 92 93 impl SwitchResult { 94 pub fn new() -> Self { 95 Self { 96 prev_pcb: None, 97 next_pcb: None, 98 } 99 } 100 } 101 102 #[derive(Debug)] 103 pub struct ProcessManager; 104 impl ProcessManager { 105 #[inline(never)] 106 fn init() { 107 static INIT_FLAG: AtomicBool = AtomicBool::new(false); 108 if INIT_FLAG 109 .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) 110 .is_err() 111 { 112 panic!("ProcessManager has been initialized!"); 113 } 114 115 unsafe { 116 compiler_fence(Ordering::SeqCst); 117 kdebug!("To create address space for INIT process."); 118 // test_buddy(); 119 set_IDLE_PROCESS_ADDRESS_SPACE( 120 AddressSpace::new(true).expect("Failed to create address space for INIT process."), 121 ); 122 kdebug!("INIT process address space created."); 123 compiler_fence(Ordering::SeqCst); 124 }; 125 126 ALL_PROCESS.lock_irqsave().replace(HashMap::new()); 127 Self::init_switch_result(); 128 Self::arch_init(); 129 kdebug!("process arch init done."); 130 Self::init_idle(); 131 kdebug!("process idle init done."); 132 133 unsafe { __PROCESS_MANAGEMENT_INIT_DONE = true }; 134 kinfo!("Process Manager initialized."); 135 } 136 137 fn init_switch_result() { 138 let mut switch_res_vec: Vec<SwitchResult> = Vec::new(); 139 for _ in 0..PerCpu::MAX_CPU_NUM { 140 switch_res_vec.push(SwitchResult::new()); 141 } 142 unsafe { 143 PROCESS_SWITCH_RESULT = Some(PerCpuVar::new(switch_res_vec).unwrap()); 144 } 145 } 146 147 /// 判断进程管理器是否已经初始化完成 148 pub fn initialized() -> bool { 149 unsafe { __PROCESS_MANAGEMENT_INIT_DONE } 150 } 151 152 /// 获取当前进程的pcb 153 pub fn current_pcb() -> Arc<ProcessControlBlock> { 154 if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) { 155 kerror!("unsafe__PROCESS_MANAGEMENT_INIT_DONE == false"); 156 loop { 157 spin_loop(); 158 } 159 } 160 return ProcessControlBlock::arch_current_pcb(); 161 } 162 163 /// 获取当前进程的pid 164 /// 165 /// 如果进程管理器未初始化完成,那么返回0 166 pub fn current_pid() -> Pid { 167 if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) { 168 return Pid(0); 169 } 170 171 return ProcessManager::current_pcb().pid(); 172 } 173 174 /// 增加当前进程的锁持有计数 175 #[inline(always)] 176 pub fn preempt_disable() { 177 if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) { 178 ProcessManager::current_pcb().preempt_disable(); 179 } 180 } 181 182 /// 减少当前进程的锁持有计数 183 #[inline(always)] 184 pub fn preempt_enable() { 185 if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) { 186 ProcessManager::current_pcb().preempt_enable(); 187 } 188 } 189 190 /// 根据pid获取进程的pcb 191 /// 192 /// ## 参数 193 /// 194 /// - `pid` : 进程的pid 195 /// 196 /// ## 返回值 197 /// 198 /// 如果找到了对应的进程,那么返回该进程的pcb,否则返回None 199 pub fn find(pid: Pid) -> Option<Arc<ProcessControlBlock>> { 200 return ALL_PROCESS.lock_irqsave().as_ref()?.get(&pid).cloned(); 201 } 202 203 /// 向系统中添加一个进程的pcb 204 /// 205 /// ## 参数 206 /// 207 /// - `pcb` : 进程的pcb 208 /// 209 /// ## 返回值 210 /// 211 /// 无 212 pub fn add_pcb(pcb: Arc<ProcessControlBlock>) { 213 ALL_PROCESS 214 .lock_irqsave() 215 .as_mut() 216 .unwrap() 217 .insert(pcb.pid(), pcb.clone()); 218 } 219 220 /// 唤醒一个进程 221 pub fn wakeup(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> { 222 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 223 let state = pcb.sched_info().inner_lock_read_irqsave().state(); 224 if state.is_blocked() { 225 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 226 let state = writer.state(); 227 if state.is_blocked() { 228 writer.set_state(ProcessState::Runnable); 229 writer.set_wakeup(); 230 231 // avoid deadlock 232 drop(writer); 233 234 let rq = 235 cpu_rq(pcb.sched_info().on_cpu().unwrap_or(current_cpu_id()).data() as usize); 236 237 let (rq, _guard) = rq.self_lock(); 238 rq.update_rq_clock(); 239 rq.activate_task( 240 pcb, 241 EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK, 242 ); 243 244 rq.check_preempt_currnet(pcb, WakeupFlags::empty()); 245 246 // sched_enqueue(pcb.clone(), true); 247 return Ok(()); 248 } else if state.is_exited() { 249 return Err(SystemError::EINVAL); 250 } else { 251 return Ok(()); 252 } 253 } else if state.is_exited() { 254 return Err(SystemError::EINVAL); 255 } else { 256 return Ok(()); 257 } 258 } 259 260 /// 唤醒暂停的进程 261 pub fn wakeup_stop(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> { 262 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 263 let state = pcb.sched_info().inner_lock_read_irqsave().state(); 264 if let ProcessState::Stopped = state { 265 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 266 let state = writer.state(); 267 if let ProcessState::Stopped = state { 268 writer.set_state(ProcessState::Runnable); 269 // avoid deadlock 270 drop(writer); 271 272 let rq = cpu_rq(pcb.sched_info().on_cpu().unwrap().data() as usize); 273 274 let (rq, _guard) = rq.self_lock(); 275 rq.update_rq_clock(); 276 rq.activate_task( 277 pcb, 278 EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK, 279 ); 280 281 rq.check_preempt_currnet(pcb, WakeupFlags::empty()); 282 283 // sched_enqueue(pcb.clone(), true); 284 return Ok(()); 285 } else if state.is_runnable() { 286 return Ok(()); 287 } else { 288 return Err(SystemError::EINVAL); 289 } 290 } else if state.is_runnable() { 291 return Ok(()); 292 } else { 293 return Err(SystemError::EINVAL); 294 } 295 } 296 297 /// 标志当前进程永久睡眠,但是发起调度的工作,应该由调用者完成 298 /// 299 /// ## 注意 300 /// 301 /// - 进入当前函数之前,不能持有sched_info的锁 302 /// - 进入当前函数之前,必须关闭中断 303 /// - 进入当前函数之后必须保证逻辑的正确性,避免被重复加入调度队列 304 pub fn mark_sleep(interruptable: bool) -> Result<(), SystemError> { 305 assert!( 306 !CurrentIrqArch::is_irq_enabled(), 307 "interrupt must be disabled before enter ProcessManager::mark_sleep()" 308 ); 309 let pcb = ProcessManager::current_pcb(); 310 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 311 if !matches!(writer.state(), ProcessState::Exited(_)) { 312 writer.set_state(ProcessState::Blocked(interruptable)); 313 writer.set_sleep(); 314 pcb.flags().insert(ProcessFlags::NEED_SCHEDULE); 315 fence(Ordering::SeqCst); 316 drop(writer); 317 return Ok(()); 318 } 319 return Err(SystemError::EINTR); 320 } 321 322 /// 标志当前进程为停止状态,但是发起调度的工作,应该由调用者完成 323 /// 324 /// ## 注意 325 /// 326 /// - 进入当前函数之前,不能持有sched_info的锁 327 /// - 进入当前函数之前,必须关闭中断 328 pub fn mark_stop() -> Result<(), SystemError> { 329 assert!( 330 !CurrentIrqArch::is_irq_enabled(), 331 "interrupt must be disabled before enter ProcessManager::mark_stop()" 332 ); 333 334 let pcb = ProcessManager::current_pcb(); 335 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 336 if !matches!(writer.state(), ProcessState::Exited(_)) { 337 writer.set_state(ProcessState::Stopped); 338 pcb.flags().insert(ProcessFlags::NEED_SCHEDULE); 339 drop(writer); 340 341 return Ok(()); 342 } 343 return Err(SystemError::EINTR); 344 } 345 /// 当子进程退出后向父进程发送通知 346 fn exit_notify() { 347 let current = ProcessManager::current_pcb(); 348 // 让INIT进程收养所有子进程 349 if current.pid() != Pid(1) { 350 unsafe { 351 current 352 .adopt_childen() 353 .unwrap_or_else(|e| panic!("adopte_childen failed: error: {e:?}")) 354 }; 355 let r = current.parent_pcb.read_irqsave().upgrade(); 356 if r.is_none() { 357 return; 358 } 359 let parent_pcb = r.unwrap(); 360 let r = Syscall::kill(parent_pcb.pid(), Signal::SIGCHLD as i32); 361 if r.is_err() { 362 kwarn!( 363 "failed to send kill signal to {:?}'s parent pcb {:?}", 364 current.pid(), 365 parent_pcb.pid() 366 ); 367 } 368 // todo: 这里需要向父进程发送SIGCHLD信号 369 // todo: 这里还需要根据线程组的信息,决定信号的发送 370 } 371 } 372 373 /// 退出当前进程 374 /// 375 /// ## 参数 376 /// 377 /// - `exit_code` : 进程的退出码 378 pub fn exit(exit_code: usize) -> ! { 379 // 关中断 380 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 381 let pcb = ProcessManager::current_pcb(); 382 let pid = pcb.pid(); 383 pcb.sched_info 384 .inner_lock_write_irqsave() 385 .set_state(ProcessState::Exited(exit_code)); 386 pcb.wait_queue.wakeup(Some(ProcessState::Blocked(true))); 387 388 let rq = cpu_rq(smp_get_processor_id().data() as usize); 389 let (rq, guard) = rq.self_lock(); 390 rq.deactivate_task( 391 pcb.clone(), 392 DequeueFlag::DEQUEUE_SLEEP | DequeueFlag::DEQUEUE_NOCLOCK, 393 ); 394 drop(guard); 395 396 // 进行进程退出后的工作 397 let thread = pcb.thread.write_irqsave(); 398 if let Some(addr) = thread.set_child_tid { 399 unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") }; 400 } 401 402 if let Some(addr) = thread.clear_child_tid { 403 if Arc::strong_count(&pcb.basic().user_vm().expect("User VM Not found")) > 1 { 404 let _ = 405 Futex::futex_wake(addr, FutexFlag::FLAGS_MATCH_NONE, 1, FUTEX_BITSET_MATCH_ANY); 406 } 407 unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") }; 408 } 409 410 RobustListHead::exit_robust_list(pcb.clone()); 411 412 // 如果是vfork出来的进程,则需要处理completion 413 if thread.vfork_done.is_some() { 414 thread.vfork_done.as_ref().unwrap().complete_all(); 415 } 416 drop(thread); 417 unsafe { pcb.basic_mut().set_user_vm(None) }; 418 drop(pcb); 419 ProcessManager::exit_notify(); 420 // unsafe { CurrentIrqArch::interrupt_enable() }; 421 __schedule(SchedMode::SM_NONE); 422 kerror!("pid {pid:?} exited but sched again!"); 423 #[allow(clippy::empty_loop)] 424 loop { 425 spin_loop(); 426 } 427 } 428 429 pub unsafe fn release(pid: Pid) { 430 let pcb = ProcessManager::find(pid); 431 if pcb.is_some() { 432 // let pcb = pcb.unwrap(); 433 // 判断该pcb是否在全局没有任何引用 434 // TODO: 当前,pcb的Arc指针存在泄露问题,引用计数不正确,打算在接下来实现debug专用的Arc,方便调试,然后解决这个bug。 435 // 因此目前暂时注释掉,使得能跑 436 // if Arc::strong_count(&pcb) <= 2 { 437 // drop(pcb); 438 // ALL_PROCESS.lock().as_mut().unwrap().remove(&pid); 439 // } else { 440 // // 如果不为1就panic 441 // let msg = format!("pcb '{:?}' is still referenced, strong count={}",pcb.pid(), Arc::strong_count(&pcb)); 442 // kerror!("{}", msg); 443 // panic!() 444 // } 445 446 ALL_PROCESS.lock_irqsave().as_mut().unwrap().remove(&pid); 447 } 448 } 449 450 /// 上下文切换完成后的钩子函数 451 unsafe fn switch_finish_hook() { 452 // kdebug!("switch_finish_hook"); 453 let prev_pcb = PROCESS_SWITCH_RESULT 454 .as_mut() 455 .unwrap() 456 .get_mut() 457 .prev_pcb 458 .take() 459 .expect("prev_pcb is None"); 460 let next_pcb = PROCESS_SWITCH_RESULT 461 .as_mut() 462 .unwrap() 463 .get_mut() 464 .next_pcb 465 .take() 466 .expect("next_pcb is None"); 467 468 // 由于进程切换前使用了SpinLockGuard::leak(),所以这里需要手动释放锁 469 fence(Ordering::SeqCst); 470 471 prev_pcb.arch_info.force_unlock(); 472 fence(Ordering::SeqCst); 473 474 next_pcb.arch_info.force_unlock(); 475 fence(Ordering::SeqCst); 476 } 477 478 /// 如果目标进程正在目标CPU上运行,那么就让这个cpu陷入内核态 479 /// 480 /// ## 参数 481 /// 482 /// - `pcb` : 进程的pcb 483 #[allow(dead_code)] 484 pub fn kick(pcb: &Arc<ProcessControlBlock>) { 485 ProcessManager::current_pcb().preempt_disable(); 486 let cpu_id = pcb.sched_info().on_cpu(); 487 488 if let Some(cpu_id) = cpu_id { 489 if pcb.pid() == cpu_rq(cpu_id.data() as usize).current().pid() { 490 kick_cpu(cpu_id).expect("ProcessManager::kick(): Failed to kick cpu"); 491 } 492 } 493 494 ProcessManager::current_pcb().preempt_enable(); 495 } 496 } 497 498 /// 上下文切换的钩子函数,当这个函数return的时候,将会发生上下文切换 499 #[cfg(target_arch = "x86_64")] 500 #[inline(never)] 501 pub unsafe extern "sysv64" fn switch_finish_hook() { 502 ProcessManager::switch_finish_hook(); 503 } 504 #[cfg(target_arch = "riscv64")] 505 #[inline(always)] 506 pub unsafe fn switch_finish_hook() { 507 ProcessManager::switch_finish_hook(); 508 } 509 510 int_like!(Pid, AtomicPid, usize, AtomicUsize); 511 512 impl ToString for Pid { 513 fn to_string(&self) -> String { 514 self.0.to_string() 515 } 516 } 517 518 #[derive(Debug, Clone, Copy, PartialEq, Eq)] 519 pub enum ProcessState { 520 /// The process is running on a CPU or in a run queue. 521 Runnable, 522 /// The process is waiting for an event to occur. 523 /// 其中的bool表示该等待过程是否可以被打断。 524 /// - 如果该bool为true,那么,硬件中断/信号/其他系统事件都可以打断该等待过程,使得该进程重新进入Runnable状态。 525 /// - 如果该bool为false,那么,这个进程必须被显式的唤醒,才能重新进入Runnable状态。 526 Blocked(bool), 527 /// 进程被信号终止 528 Stopped, 529 /// 进程已经退出,usize表示进程的退出码 530 Exited(usize), 531 } 532 533 #[allow(dead_code)] 534 impl ProcessState { 535 #[inline(always)] 536 pub fn is_runnable(&self) -> bool { 537 return matches!(self, ProcessState::Runnable); 538 } 539 540 #[inline(always)] 541 pub fn is_blocked(&self) -> bool { 542 return matches!(self, ProcessState::Blocked(_)); 543 } 544 545 #[inline(always)] 546 pub fn is_blocked_interruptable(&self) -> bool { 547 return matches!(self, ProcessState::Blocked(true)); 548 } 549 550 /// Returns `true` if the process state is [`Exited`]. 551 #[inline(always)] 552 pub fn is_exited(&self) -> bool { 553 return matches!(self, ProcessState::Exited(_)); 554 } 555 556 /// Returns `true` if the process state is [`Stopped`]. 557 /// 558 /// [`Stopped`]: ProcessState::Stopped 559 #[inline(always)] 560 pub fn is_stopped(&self) -> bool { 561 matches!(self, ProcessState::Stopped) 562 } 563 564 /// Returns exit code if the process state is [`Exited`]. 565 #[inline(always)] 566 pub fn exit_code(&self) -> Option<usize> { 567 match self { 568 ProcessState::Exited(code) => Some(*code), 569 _ => None, 570 } 571 } 572 } 573 574 bitflags! { 575 /// pcb的标志位 576 pub struct ProcessFlags: usize { 577 /// 当前pcb表示一个内核线程 578 const KTHREAD = 1 << 0; 579 /// 当前进程需要被调度 580 const NEED_SCHEDULE = 1 << 1; 581 /// 进程由于vfork而与父进程存在资源共享 582 const VFORK = 1 << 2; 583 /// 进程不可被冻结 584 const NOFREEZE = 1 << 3; 585 /// 进程正在退出 586 const EXITING = 1 << 4; 587 /// 进程由于接收到终止信号唤醒 588 const WAKEKILL = 1 << 5; 589 /// 进程由于接收到信号而退出.(Killed by a signal) 590 const SIGNALED = 1 << 6; 591 /// 进程需要迁移到其他cpu上 592 const NEED_MIGRATE = 1 << 7; 593 /// 随机化的虚拟地址空间,主要用于动态链接器的加载 594 const RANDOMIZE = 1 << 8; 595 } 596 } 597 598 #[derive(Debug)] 599 pub struct ProcessControlBlock { 600 /// 当前进程的pid 601 pid: Pid, 602 /// 当前进程的线程组id(这个值在同一个线程组内永远不变) 603 tgid: Pid, 604 605 basic: RwLock<ProcessBasicInfo>, 606 /// 当前进程的自旋锁持有计数 607 preempt_count: AtomicUsize, 608 609 flags: LockFreeFlags<ProcessFlags>, 610 worker_private: SpinLock<Option<WorkerPrivate>>, 611 /// 进程的内核栈 612 kernel_stack: RwLock<KernelStack>, 613 614 /// 系统调用栈 615 syscall_stack: RwLock<KernelStack>, 616 617 /// 与调度相关的信息 618 sched_info: ProcessSchedulerInfo, 619 /// 与处理器架构相关的信息 620 arch_info: SpinLock<ArchPCBInfo>, 621 /// 与信号处理相关的信息(似乎可以是无锁的) 622 sig_info: RwLock<ProcessSignalInfo>, 623 /// 信号处理结构体 624 sig_struct: SpinLock<SignalStruct>, 625 /// 退出信号S 626 exit_signal: AtomicSignal, 627 628 /// 父进程指针 629 parent_pcb: RwLock<Weak<ProcessControlBlock>>, 630 /// 真实父进程指针 631 real_parent_pcb: RwLock<Weak<ProcessControlBlock>>, 632 633 /// 子进程链表 634 children: RwLock<Vec<Pid>>, 635 636 /// 等待队列 637 wait_queue: WaitQueue, 638 639 /// 线程信息 640 thread: RwLock<ThreadInfo>, 641 642 /// 进程的robust lock列表 643 robust_list: RwLock<Option<RobustListHead>>, 644 } 645 646 impl ProcessControlBlock { 647 /// Generate a new pcb. 648 /// 649 /// ## 参数 650 /// 651 /// - `name` : 进程的名字 652 /// - `kstack` : 进程的内核栈 653 /// 654 /// ## 返回值 655 /// 656 /// 返回一个新的pcb 657 pub fn new(name: String, kstack: KernelStack) -> Arc<Self> { 658 return Self::do_create_pcb(name, kstack, false); 659 } 660 661 /// 创建一个新的idle进程 662 /// 663 /// 请注意,这个函数只能在进程管理初始化的时候调用。 664 pub fn new_idle(cpu_id: u32, kstack: KernelStack) -> Arc<Self> { 665 let name = format!("idle-{}", cpu_id); 666 return Self::do_create_pcb(name, kstack, true); 667 } 668 669 #[inline(never)] 670 fn do_create_pcb(name: String, kstack: KernelStack, is_idle: bool) -> Arc<Self> { 671 let (pid, ppid, cwd) = if is_idle { 672 (Pid(0), Pid(0), "/".to_string()) 673 } else { 674 let ppid = ProcessManager::current_pcb().pid(); 675 let cwd = ProcessManager::current_pcb().basic().cwd(); 676 (Self::generate_pid(), ppid, cwd) 677 }; 678 679 let basic_info = ProcessBasicInfo::new(Pid(0), ppid, name, cwd, None); 680 let preempt_count = AtomicUsize::new(0); 681 let flags = unsafe { LockFreeFlags::new(ProcessFlags::empty()) }; 682 683 let sched_info = ProcessSchedulerInfo::new(None); 684 let arch_info = SpinLock::new(ArchPCBInfo::new(&kstack)); 685 686 let ppcb: Weak<ProcessControlBlock> = ProcessManager::find(ppid) 687 .map(|p| Arc::downgrade(&p)) 688 .unwrap_or_default(); 689 690 let pcb = Self { 691 pid, 692 tgid: pid, 693 basic: basic_info, 694 preempt_count, 695 flags, 696 kernel_stack: RwLock::new(kstack), 697 syscall_stack: RwLock::new(KernelStack::new().unwrap()), 698 worker_private: SpinLock::new(None), 699 sched_info, 700 arch_info, 701 sig_info: RwLock::new(ProcessSignalInfo::default()), 702 sig_struct: SpinLock::new(SignalStruct::new()), 703 exit_signal: AtomicSignal::new(Signal::SIGCHLD), 704 parent_pcb: RwLock::new(ppcb.clone()), 705 real_parent_pcb: RwLock::new(ppcb), 706 children: RwLock::new(Vec::new()), 707 wait_queue: WaitQueue::default(), 708 thread: RwLock::new(ThreadInfo::new()), 709 robust_list: RwLock::new(None), 710 }; 711 712 // 初始化系统调用栈 713 #[cfg(target_arch = "x86_64")] 714 pcb.arch_info 715 .lock() 716 .init_syscall_stack(&pcb.syscall_stack.read()); 717 718 let pcb = Arc::new(pcb); 719 720 pcb.sched_info() 721 .sched_entity() 722 .force_mut() 723 .set_pcb(Arc::downgrade(&pcb)); 724 // 设置进程的arc指针到内核栈和系统调用栈的最低地址处 725 unsafe { 726 pcb.kernel_stack 727 .write() 728 .set_pcb(Arc::downgrade(&pcb)) 729 .unwrap(); 730 731 pcb.syscall_stack 732 .write() 733 .set_pcb(Arc::downgrade(&pcb)) 734 .unwrap() 735 }; 736 737 // 将当前pcb加入父进程的子进程哈希表中 738 if pcb.pid() > Pid(1) { 739 if let Some(ppcb_arc) = pcb.parent_pcb.read_irqsave().upgrade() { 740 let mut children = ppcb_arc.children.write_irqsave(); 741 children.push(pcb.pid()); 742 } else { 743 panic!("parent pcb is None"); 744 } 745 } 746 747 return pcb; 748 } 749 750 /// 生成一个新的pid 751 #[inline(always)] 752 fn generate_pid() -> Pid { 753 static NEXT_PID: AtomicPid = AtomicPid::new(Pid(1)); 754 return NEXT_PID.fetch_add(Pid(1), Ordering::SeqCst); 755 } 756 757 /// 返回当前进程的锁持有计数 758 #[inline(always)] 759 pub fn preempt_count(&self) -> usize { 760 return self.preempt_count.load(Ordering::SeqCst); 761 } 762 763 /// 增加当前进程的锁持有计数 764 #[inline(always)] 765 pub fn preempt_disable(&self) { 766 self.preempt_count.fetch_add(1, Ordering::SeqCst); 767 } 768 769 /// 减少当前进程的锁持有计数 770 #[inline(always)] 771 pub fn preempt_enable(&self) { 772 self.preempt_count.fetch_sub(1, Ordering::SeqCst); 773 } 774 775 #[inline(always)] 776 pub unsafe fn set_preempt_count(&self, count: usize) { 777 self.preempt_count.store(count, Ordering::SeqCst); 778 } 779 780 #[inline(always)] 781 pub fn flags(&self) -> &mut ProcessFlags { 782 return self.flags.get_mut(); 783 } 784 785 /// 请注意,这个值能在中断上下文中读取,但不能被中断上下文修改 786 /// 否则会导致死锁 787 #[inline(always)] 788 pub fn basic(&self) -> RwLockReadGuard<ProcessBasicInfo> { 789 return self.basic.read_irqsave(); 790 } 791 792 #[inline(always)] 793 pub fn set_name(&self, name: String) { 794 self.basic.write().set_name(name); 795 } 796 797 #[inline(always)] 798 pub fn basic_mut(&self) -> RwLockWriteGuard<ProcessBasicInfo> { 799 return self.basic.write_irqsave(); 800 } 801 802 /// # 获取arch info的锁,同时关闭中断 803 #[inline(always)] 804 pub fn arch_info_irqsave(&self) -> SpinLockGuard<ArchPCBInfo> { 805 return self.arch_info.lock_irqsave(); 806 } 807 808 /// # 获取arch info的锁,但是不关闭中断 809 /// 810 /// 由于arch info在进程切换的时候会使用到, 811 /// 因此在中断上下文外,获取arch info 而不irqsave是不安全的. 812 /// 813 /// 只能在以下情况下使用这个函数: 814 /// - 在中断上下文中(中断已经禁用),获取arch info的锁。 815 /// - 刚刚创建新的pcb 816 #[inline(always)] 817 pub unsafe fn arch_info(&self) -> SpinLockGuard<ArchPCBInfo> { 818 return self.arch_info.lock(); 819 } 820 821 #[inline(always)] 822 pub fn kernel_stack(&self) -> RwLockReadGuard<KernelStack> { 823 return self.kernel_stack.read(); 824 } 825 826 pub unsafe fn kernel_stack_force_ref(&self) -> &KernelStack { 827 self.kernel_stack.force_get_ref() 828 } 829 830 #[inline(always)] 831 #[allow(dead_code)] 832 pub fn kernel_stack_mut(&self) -> RwLockWriteGuard<KernelStack> { 833 return self.kernel_stack.write(); 834 } 835 836 #[inline(always)] 837 pub fn sched_info(&self) -> &ProcessSchedulerInfo { 838 return &self.sched_info; 839 } 840 841 #[inline(always)] 842 pub fn worker_private(&self) -> SpinLockGuard<Option<WorkerPrivate>> { 843 return self.worker_private.lock(); 844 } 845 846 #[inline(always)] 847 pub fn pid(&self) -> Pid { 848 return self.pid; 849 } 850 851 #[inline(always)] 852 pub fn tgid(&self) -> Pid { 853 return self.tgid; 854 } 855 856 /// 获取文件描述符表的Arc指针 857 #[inline(always)] 858 pub fn fd_table(&self) -> Arc<RwLock<FileDescriptorVec>> { 859 return self.basic.read().fd_table().unwrap(); 860 } 861 862 /// 根据文件描述符序号,获取socket对象的Arc指针 863 /// 864 /// ## 参数 865 /// 866 /// - `fd` 文件描述符序号 867 /// 868 /// ## 返回值 869 /// 870 /// Option(&mut Box<dyn Socket>) socket对象的可变引用. 如果文件描述符不是socket,那么返回None 871 pub fn get_socket(&self, fd: i32) -> Option<Arc<SocketInode>> { 872 let binding = ProcessManager::current_pcb().fd_table(); 873 let fd_table_guard = binding.read(); 874 875 let f = fd_table_guard.get_file_by_fd(fd)?; 876 drop(fd_table_guard); 877 878 if f.file_type() != FileType::Socket { 879 return None; 880 } 881 let socket: Arc<SocketInode> = f 882 .inode() 883 .downcast_arc::<SocketInode>() 884 .expect("Not a socket inode"); 885 return Some(socket); 886 } 887 888 /// 当前进程退出时,让初始进程收养所有子进程 889 unsafe fn adopt_childen(&self) -> Result<(), SystemError> { 890 match ProcessManager::find(Pid(1)) { 891 Some(init_pcb) => { 892 let childen_guard = self.children.write(); 893 let mut init_childen_guard = init_pcb.children.write(); 894 895 childen_guard.iter().for_each(|pid| { 896 init_childen_guard.push(*pid); 897 }); 898 899 return Ok(()); 900 } 901 _ => Err(SystemError::ECHILD), 902 } 903 } 904 905 /// 生成进程的名字 906 pub fn generate_name(program_path: &str, args: &Vec<String>) -> String { 907 let mut name = program_path.to_string(); 908 for arg in args { 909 name.push(' '); 910 name.push_str(arg); 911 } 912 return name; 913 } 914 915 pub fn sig_info_irqsave(&self) -> RwLockReadGuard<ProcessSignalInfo> { 916 self.sig_info.read_irqsave() 917 } 918 919 pub fn try_siginfo_irqsave(&self, times: u8) -> Option<RwLockReadGuard<ProcessSignalInfo>> { 920 for _ in 0..times { 921 if let Some(r) = self.sig_info.try_read_irqsave() { 922 return Some(r); 923 } 924 } 925 926 return None; 927 } 928 929 pub fn sig_info_mut(&self) -> RwLockWriteGuard<ProcessSignalInfo> { 930 self.sig_info.write_irqsave() 931 } 932 933 pub fn try_siginfo_mut(&self, times: u8) -> Option<RwLockWriteGuard<ProcessSignalInfo>> { 934 for _ in 0..times { 935 if let Some(r) = self.sig_info.try_write_irqsave() { 936 return Some(r); 937 } 938 } 939 940 return None; 941 } 942 943 pub fn sig_struct(&self) -> SpinLockGuard<SignalStruct> { 944 self.sig_struct.lock_irqsave() 945 } 946 947 pub fn try_sig_struct_irqsave(&self, times: u8) -> Option<SpinLockGuard<SignalStruct>> { 948 for _ in 0..times { 949 if let Ok(r) = self.sig_struct.try_lock_irqsave() { 950 return Some(r); 951 } 952 } 953 954 return None; 955 } 956 957 pub fn sig_struct_irqsave(&self) -> SpinLockGuard<SignalStruct> { 958 self.sig_struct.lock_irqsave() 959 } 960 961 #[inline(always)] 962 pub fn get_robust_list(&self) -> RwLockReadGuard<Option<RobustListHead>> { 963 return self.robust_list.read_irqsave(); 964 } 965 966 #[inline(always)] 967 pub fn set_robust_list(&self, new_robust_list: Option<RobustListHead>) { 968 *self.robust_list.write_irqsave() = new_robust_list; 969 } 970 } 971 972 impl Drop for ProcessControlBlock { 973 fn drop(&mut self) { 974 let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 975 // 在ProcFS中,解除进程的注册 976 procfs_unregister_pid(self.pid()) 977 .unwrap_or_else(|e| panic!("procfs_unregister_pid failed: error: {e:?}")); 978 979 if let Some(ppcb) = self.parent_pcb.read_irqsave().upgrade() { 980 ppcb.children 981 .write_irqsave() 982 .retain(|pid| *pid != self.pid()); 983 } 984 985 drop(irq_guard); 986 } 987 } 988 989 /// 线程信息 990 #[derive(Debug)] 991 pub struct ThreadInfo { 992 // 来自用户空间记录用户线程id的地址,在该线程结束时将该地址置0以通知父进程 993 clear_child_tid: Option<VirtAddr>, 994 set_child_tid: Option<VirtAddr>, 995 996 vfork_done: Option<Arc<Completion>>, 997 /// 线程组的组长 998 group_leader: Weak<ProcessControlBlock>, 999 } 1000 1001 impl ThreadInfo { 1002 pub fn new() -> Self { 1003 Self { 1004 clear_child_tid: None, 1005 set_child_tid: None, 1006 vfork_done: None, 1007 group_leader: Weak::default(), 1008 } 1009 } 1010 1011 pub fn group_leader(&self) -> Option<Arc<ProcessControlBlock>> { 1012 return self.group_leader.upgrade(); 1013 } 1014 } 1015 1016 /// 进程的基本信息 1017 /// 1018 /// 这个结构体保存进程的基本信息,主要是那些不会随着进程的运行而经常改变的信息。 1019 #[derive(Debug)] 1020 pub struct ProcessBasicInfo { 1021 /// 当前进程的进程组id 1022 pgid: Pid, 1023 /// 当前进程的父进程的pid 1024 ppid: Pid, 1025 /// 进程的名字 1026 name: String, 1027 1028 /// 当前进程的工作目录 1029 cwd: String, 1030 1031 /// 用户地址空间 1032 user_vm: Option<Arc<AddressSpace>>, 1033 1034 /// 文件描述符表 1035 fd_table: Option<Arc<RwLock<FileDescriptorVec>>>, 1036 } 1037 1038 impl ProcessBasicInfo { 1039 #[inline(never)] 1040 pub fn new( 1041 pgid: Pid, 1042 ppid: Pid, 1043 name: String, 1044 cwd: String, 1045 user_vm: Option<Arc<AddressSpace>>, 1046 ) -> RwLock<Self> { 1047 let fd_table = Arc::new(RwLock::new(FileDescriptorVec::new())); 1048 return RwLock::new(Self { 1049 pgid, 1050 ppid, 1051 name, 1052 cwd, 1053 user_vm, 1054 fd_table: Some(fd_table), 1055 }); 1056 } 1057 1058 pub fn pgid(&self) -> Pid { 1059 return self.pgid; 1060 } 1061 1062 pub fn ppid(&self) -> Pid { 1063 return self.ppid; 1064 } 1065 1066 pub fn name(&self) -> &str { 1067 return &self.name; 1068 } 1069 1070 pub fn set_name(&mut self, name: String) { 1071 self.name = name; 1072 } 1073 1074 pub fn cwd(&self) -> String { 1075 return self.cwd.clone(); 1076 } 1077 pub fn set_cwd(&mut self, path: String) { 1078 return self.cwd = path; 1079 } 1080 1081 pub fn user_vm(&self) -> Option<Arc<AddressSpace>> { 1082 return self.user_vm.clone(); 1083 } 1084 1085 pub unsafe fn set_user_vm(&mut self, user_vm: Option<Arc<AddressSpace>>) { 1086 self.user_vm = user_vm; 1087 } 1088 1089 pub fn fd_table(&self) -> Option<Arc<RwLock<FileDescriptorVec>>> { 1090 return self.fd_table.clone(); 1091 } 1092 1093 pub fn set_fd_table(&mut self, fd_table: Option<Arc<RwLock<FileDescriptorVec>>>) { 1094 self.fd_table = fd_table; 1095 } 1096 } 1097 1098 #[derive(Debug)] 1099 pub struct ProcessSchedulerInfo { 1100 /// 当前进程所在的cpu 1101 on_cpu: AtomicProcessorId, 1102 /// 如果当前进程等待被迁移到另一个cpu核心上(也就是flags中的PF_NEED_MIGRATE被置位), 1103 /// 该字段存储要被迁移到的目标处理器核心号 1104 // migrate_to: AtomicProcessorId, 1105 inner_locked: RwLock<InnerSchedInfo>, 1106 /// 进程的调度优先级 1107 // priority: SchedPriority, 1108 /// 当前进程的虚拟运行时间 1109 // virtual_runtime: AtomicIsize, 1110 /// 由实时调度器管理的时间片 1111 // rt_time_slice: AtomicIsize, 1112 pub sched_stat: RwLock<SchedInfo>, 1113 /// 调度策略 1114 pub sched_policy: RwLock<crate::sched::SchedPolicy>, 1115 /// cfs调度实体 1116 pub sched_entity: Arc<FairSchedEntity>, 1117 pub on_rq: SpinLock<OnRq>, 1118 1119 pub prio_data: RwLock<PrioData>, 1120 } 1121 1122 #[derive(Debug, Default)] 1123 pub struct SchedInfo { 1124 /// 记录任务在特定 CPU 上运行的次数 1125 pub pcount: usize, 1126 /// 记录任务等待在运行队列上的时间 1127 pub run_delay: usize, 1128 /// 记录任务上次在 CPU 上运行的时间戳 1129 pub last_arrival: u64, 1130 /// 记录任务上次被加入到运行队列中的时间戳 1131 pub last_queued: u64, 1132 } 1133 1134 #[derive(Debug)] 1135 pub struct PrioData { 1136 pub prio: i32, 1137 pub static_prio: i32, 1138 pub normal_prio: i32, 1139 } 1140 1141 impl Default for PrioData { 1142 fn default() -> Self { 1143 Self { 1144 prio: MAX_PRIO - 20, 1145 static_prio: MAX_PRIO - 20, 1146 normal_prio: MAX_PRIO - 20, 1147 } 1148 } 1149 } 1150 1151 #[derive(Debug)] 1152 pub struct InnerSchedInfo { 1153 /// 当前进程的状态 1154 state: ProcessState, 1155 /// 进程的调度策略 1156 sleep: bool, 1157 } 1158 1159 impl InnerSchedInfo { 1160 pub fn state(&self) -> ProcessState { 1161 return self.state; 1162 } 1163 1164 pub fn set_state(&mut self, state: ProcessState) { 1165 self.state = state; 1166 } 1167 1168 pub fn set_sleep(&mut self) { 1169 self.sleep = true; 1170 } 1171 1172 pub fn set_wakeup(&mut self) { 1173 self.sleep = false; 1174 } 1175 1176 pub fn is_mark_sleep(&self) -> bool { 1177 self.sleep 1178 } 1179 } 1180 1181 impl ProcessSchedulerInfo { 1182 #[inline(never)] 1183 pub fn new(on_cpu: Option<ProcessorId>) -> Self { 1184 let cpu_id = on_cpu.unwrap_or(ProcessorId::INVALID); 1185 return Self { 1186 on_cpu: AtomicProcessorId::new(cpu_id), 1187 // migrate_to: AtomicProcessorId::new(ProcessorId::INVALID), 1188 inner_locked: RwLock::new(InnerSchedInfo { 1189 state: ProcessState::Blocked(false), 1190 sleep: false, 1191 }), 1192 // virtual_runtime: AtomicIsize::new(0), 1193 // rt_time_slice: AtomicIsize::new(0), 1194 // priority: SchedPriority::new(100).unwrap(), 1195 sched_stat: RwLock::new(SchedInfo::default()), 1196 sched_policy: RwLock::new(crate::sched::SchedPolicy::CFS), 1197 sched_entity: FairSchedEntity::new(), 1198 on_rq: SpinLock::new(OnRq::None), 1199 prio_data: RwLock::new(PrioData::default()), 1200 }; 1201 } 1202 1203 pub fn sched_entity(&self) -> Arc<FairSchedEntity> { 1204 return self.sched_entity.clone(); 1205 } 1206 1207 pub fn on_cpu(&self) -> Option<ProcessorId> { 1208 let on_cpu = self.on_cpu.load(Ordering::SeqCst); 1209 if on_cpu == ProcessorId::INVALID { 1210 return None; 1211 } else { 1212 return Some(on_cpu); 1213 } 1214 } 1215 1216 pub fn set_on_cpu(&self, on_cpu: Option<ProcessorId>) { 1217 if let Some(cpu_id) = on_cpu { 1218 self.on_cpu.store(cpu_id, Ordering::SeqCst); 1219 } else { 1220 self.on_cpu.store(ProcessorId::INVALID, Ordering::SeqCst); 1221 } 1222 } 1223 1224 // pub fn migrate_to(&self) -> Option<ProcessorId> { 1225 // let migrate_to = self.migrate_to.load(Ordering::SeqCst); 1226 // if migrate_to == ProcessorId::INVALID { 1227 // return None; 1228 // } else { 1229 // return Some(migrate_to); 1230 // } 1231 // } 1232 1233 // pub fn set_migrate_to(&self, migrate_to: Option<ProcessorId>) { 1234 // if let Some(data) = migrate_to { 1235 // self.migrate_to.store(data, Ordering::SeqCst); 1236 // } else { 1237 // self.migrate_to 1238 // .store(ProcessorId::INVALID, Ordering::SeqCst) 1239 // } 1240 // } 1241 1242 pub fn inner_lock_write_irqsave(&self) -> RwLockWriteGuard<InnerSchedInfo> { 1243 return self.inner_locked.write_irqsave(); 1244 } 1245 1246 pub fn inner_lock_read_irqsave(&self) -> RwLockReadGuard<InnerSchedInfo> { 1247 return self.inner_locked.read_irqsave(); 1248 } 1249 1250 // pub fn inner_lock_try_read_irqsave( 1251 // &self, 1252 // times: u8, 1253 // ) -> Option<RwLockReadGuard<InnerSchedInfo>> { 1254 // for _ in 0..times { 1255 // if let Some(r) = self.inner_locked.try_read_irqsave() { 1256 // return Some(r); 1257 // } 1258 // } 1259 1260 // return None; 1261 // } 1262 1263 // pub fn inner_lock_try_upgradable_read_irqsave( 1264 // &self, 1265 // times: u8, 1266 // ) -> Option<RwLockUpgradableGuard<InnerSchedInfo>> { 1267 // for _ in 0..times { 1268 // if let Some(r) = self.inner_locked.try_upgradeable_read_irqsave() { 1269 // return Some(r); 1270 // } 1271 // } 1272 1273 // return None; 1274 // } 1275 1276 // pub fn virtual_runtime(&self) -> isize { 1277 // return self.virtual_runtime.load(Ordering::SeqCst); 1278 // } 1279 1280 // pub fn set_virtual_runtime(&self, virtual_runtime: isize) { 1281 // self.virtual_runtime 1282 // .store(virtual_runtime, Ordering::SeqCst); 1283 // } 1284 // pub fn increase_virtual_runtime(&self, delta: isize) { 1285 // self.virtual_runtime.fetch_add(delta, Ordering::SeqCst); 1286 // } 1287 1288 // pub fn rt_time_slice(&self) -> isize { 1289 // return self.rt_time_slice.load(Ordering::SeqCst); 1290 // } 1291 1292 // pub fn set_rt_time_slice(&self, rt_time_slice: isize) { 1293 // self.rt_time_slice.store(rt_time_slice, Ordering::SeqCst); 1294 // } 1295 1296 // pub fn increase_rt_time_slice(&self, delta: isize) { 1297 // self.rt_time_slice.fetch_add(delta, Ordering::SeqCst); 1298 // } 1299 1300 pub fn policy(&self) -> crate::sched::SchedPolicy { 1301 return *self.sched_policy.read_irqsave(); 1302 } 1303 } 1304 1305 #[derive(Debug, Clone)] 1306 pub struct KernelStack { 1307 stack: Option<AlignedBox<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>>, 1308 /// 标记该内核栈是否可以被释放 1309 can_be_freed: bool, 1310 } 1311 1312 impl KernelStack { 1313 pub const SIZE: usize = 0x4000; 1314 pub const ALIGN: usize = 0x4000; 1315 1316 pub fn new() -> Result<Self, SystemError> { 1317 return Ok(Self { 1318 stack: Some( 1319 AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_zeroed()?, 1320 ), 1321 can_be_freed: true, 1322 }); 1323 } 1324 1325 /// 根据已有的空间,构造一个内核栈结构体 1326 /// 1327 /// 仅仅用于BSP启动时,为idle进程构造内核栈。其他时候使用这个函数,很可能造成错误! 1328 pub unsafe fn from_existed(base: VirtAddr) -> Result<Self, SystemError> { 1329 if base.is_null() || !base.check_aligned(Self::ALIGN) { 1330 return Err(SystemError::EFAULT); 1331 } 1332 1333 return Ok(Self { 1334 stack: Some( 1335 AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_unchecked( 1336 base.data() as *mut [u8; KernelStack::SIZE], 1337 ), 1338 ), 1339 can_be_freed: false, 1340 }); 1341 } 1342 1343 /// 返回内核栈的起始虚拟地址(低地址) 1344 pub fn start_address(&self) -> VirtAddr { 1345 return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize); 1346 } 1347 1348 /// 返回内核栈的结束虚拟地址(高地址)(不包含该地址) 1349 pub fn stack_max_address(&self) -> VirtAddr { 1350 return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize + Self::SIZE); 1351 } 1352 1353 pub unsafe fn set_pcb(&mut self, pcb: Weak<ProcessControlBlock>) -> Result<(), SystemError> { 1354 // 将一个Weak<ProcessControlBlock>放到内核栈的最低地址处 1355 let p: *const ProcessControlBlock = Weak::into_raw(pcb); 1356 let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock; 1357 1358 // 如果内核栈的最低地址处已经有了一个pcb,那么,这里就不再设置,直接返回错误 1359 if unlikely(unsafe { !(*stack_bottom_ptr).is_null() }) { 1360 kerror!("kernel stack bottom is not null: {:p}", *stack_bottom_ptr); 1361 return Err(SystemError::EPERM); 1362 } 1363 // 将pcb的地址放到内核栈的最低地址处 1364 unsafe { 1365 *stack_bottom_ptr = p; 1366 } 1367 1368 return Ok(()); 1369 } 1370 1371 /// 清除内核栈的pcb指针 1372 /// 1373 /// ## 参数 1374 /// 1375 /// - `force` : 如果为true,那么,即使该内核栈的pcb指针不为null,也会被强制清除而不处理Weak指针问题 1376 pub unsafe fn clear_pcb(&mut self, force: bool) { 1377 let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock; 1378 if unlikely(unsafe { (*stack_bottom_ptr).is_null() }) { 1379 return; 1380 } 1381 1382 if !force { 1383 let pcb_ptr: Weak<ProcessControlBlock> = Weak::from_raw(*stack_bottom_ptr); 1384 drop(pcb_ptr); 1385 } 1386 1387 *stack_bottom_ptr = core::ptr::null(); 1388 } 1389 1390 /// 返回指向当前内核栈pcb的Arc指针 1391 #[allow(dead_code)] 1392 pub unsafe fn pcb(&self) -> Option<Arc<ProcessControlBlock>> { 1393 // 从内核栈的最低地址处取出pcb的地址 1394 let p = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock; 1395 if unlikely(unsafe { (*p).is_null() }) { 1396 return None; 1397 } 1398 1399 // 为了防止内核栈的pcb指针被释放,这里需要将其包装一下,使得Arc的drop不会被调用 1400 let weak_wrapper: ManuallyDrop<Weak<ProcessControlBlock>> = 1401 ManuallyDrop::new(Weak::from_raw(*p)); 1402 1403 let new_arc: Arc<ProcessControlBlock> = weak_wrapper.upgrade()?; 1404 return Some(new_arc); 1405 } 1406 } 1407 1408 impl Drop for KernelStack { 1409 fn drop(&mut self) { 1410 if self.stack.is_some() { 1411 let ptr = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock; 1412 if unsafe { !(*ptr).is_null() } { 1413 let pcb_ptr: Weak<ProcessControlBlock> = unsafe { Weak::from_raw(*ptr) }; 1414 drop(pcb_ptr); 1415 } 1416 } 1417 // 如果该内核栈不可以被释放,那么,这里就forget,不调用AlignedBox的drop函数 1418 if !self.can_be_freed { 1419 let bx = self.stack.take(); 1420 core::mem::forget(bx); 1421 } 1422 } 1423 } 1424 1425 pub fn process_init() { 1426 ProcessManager::init(); 1427 } 1428 1429 #[derive(Debug)] 1430 pub struct ProcessSignalInfo { 1431 // 当前进程 1432 sig_block: SigSet, 1433 // sig_pending 中存储当前线程要处理的信号 1434 sig_pending: SigPending, 1435 // sig_shared_pending 中存储当前线程所属进程要处理的信号 1436 sig_shared_pending: SigPending, 1437 // 当前进程对应的tty 1438 tty: Option<Arc<TtyCore>>, 1439 } 1440 1441 impl ProcessSignalInfo { 1442 pub fn sig_block(&self) -> &SigSet { 1443 &self.sig_block 1444 } 1445 1446 pub fn sig_pending(&self) -> &SigPending { 1447 &self.sig_pending 1448 } 1449 1450 pub fn sig_pending_mut(&mut self) -> &mut SigPending { 1451 &mut self.sig_pending 1452 } 1453 1454 pub fn sig_block_mut(&mut self) -> &mut SigSet { 1455 &mut self.sig_block 1456 } 1457 1458 pub fn sig_shared_pending_mut(&mut self) -> &mut SigPending { 1459 &mut self.sig_shared_pending 1460 } 1461 1462 pub fn sig_shared_pending(&self) -> &SigPending { 1463 &self.sig_shared_pending 1464 } 1465 1466 pub fn tty(&self) -> Option<Arc<TtyCore>> { 1467 self.tty.clone() 1468 } 1469 1470 pub fn set_tty(&mut self, tty: Arc<TtyCore>) { 1471 self.tty = Some(tty); 1472 } 1473 1474 /// 从 pcb 的 siginfo中取出下一个要处理的信号,先处理线程信号,再处理进程信号 1475 /// 1476 /// ## 参数 1477 /// 1478 /// - `sig_mask` 被忽略掉的信号 1479 /// 1480 pub fn dequeue_signal(&mut self, sig_mask: &SigSet) -> (Signal, Option<SigInfo>) { 1481 let res = self.sig_pending.dequeue_signal(sig_mask); 1482 if res.0 != Signal::INVALID { 1483 return res; 1484 } else { 1485 return self.sig_shared_pending.dequeue_signal(sig_mask); 1486 } 1487 } 1488 } 1489 1490 impl Default for ProcessSignalInfo { 1491 fn default() -> Self { 1492 Self { 1493 sig_block: SigSet::empty(), 1494 sig_pending: SigPending::default(), 1495 sig_shared_pending: SigPending::default(), 1496 tty: None, 1497 } 1498 } 1499 } 1500