use core::{
    hash::Hash,
    hint::spin_loop,
    intrinsics::{likely, unlikely},
    mem::ManuallyDrop,
    sync::atomic::{compiler_fence, fence, AtomicBool, AtomicUsize, Ordering},
};

use alloc::{
    string::{String, ToString},
    sync::{Arc, Weak},
    vec::Vec,
};
use hashbrown::HashMap;
use system_error::SystemError;

use crate::{
    arch::{
        cpu::current_cpu_id,
        ipc::signal::{AtomicSignal, SigSet, Signal},
        process::ArchPCBInfo,
        CurrentIrqArch,
    },
    driver::tty::tty_core::TtyCore,
    exception::InterruptArch,
    filesystem::{
        procfs::procfs_unregister_pid,
        vfs::{file::FileDescriptorVec, FileType},
    },
    ipc::signal_types::{SigInfo, SigPending, SignalStruct},
    kdebug, kerror, kinfo, kwarn,
    libs::{
        align::AlignedBox,
        casting::DowncastArc,
        futex::{
            constant::{FutexFlag, FUTEX_BITSET_MATCH_ANY},
            futex::{Futex, RobustListHead},
        },
        lock_free_flags::LockFreeFlags,
        rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard},
        spinlock::{SpinLock, SpinLockGuard},
        wait_queue::WaitQueue,
    },
    mm::{
        percpu::{PerCpu, PerCpuVar},
        set_IDLE_PROCESS_ADDRESS_SPACE,
        ucontext::AddressSpace,
        VirtAddr,
    },
    net::socket::SocketInode,
    sched::completion::Completion,
    sched::{
        cpu_rq, fair::FairSchedEntity, prio::MAX_PRIO, DequeueFlag, EnqueueFlag, OnRq, SchedMode,
        WakeupFlags, __schedule,
    },
    smp::{
        core::smp_get_processor_id,
        cpu::{AtomicProcessorId, ProcessorId},
        kick_cpu,
    },
    syscall::{user_access::clear_user, Syscall},
};
use timer::AlarmTimer;

use self::kthread::WorkerPrivate;

pub mod abi;
pub mod c_adapter;
pub mod exec;
pub mod exit;
pub mod fork;
pub mod idle;
pub mod kthread;
pub mod pid;
pub mod resource;
pub mod stdio;
pub mod syscall;
pub mod timer;
pub mod utils;

/// PCBs of every process in the system
static ALL_PROCESS: SpinLock<Option<HashMap<Pid, Arc<ProcessControlBlock>>>> = SpinLock::new(None);

pub static mut PROCESS_SWITCH_RESULT: Option<PerCpuVar<SwitchResult>> = None;

/// A global flag that changes only once, marking whether the process manager has finished initializing
static mut __PROCESS_MANAGEMENT_INIT_DONE: bool = false;

#[derive(Debug)]
pub struct SwitchResult {
    pub prev_pcb: Option<Arc<ProcessControlBlock>>,
    pub next_pcb: Option<Arc<ProcessControlBlock>>,
}

impl SwitchResult {
    pub fn new() -> Self {
        Self {
            prev_pcb: None,
            next_pcb: None,
        }
    }
}

#[derive(Debug)]
pub struct ProcessManager;
impl ProcessManager {
    #[inline(never)]
    fn init() {
        static INIT_FLAG: AtomicBool = AtomicBool::new(false);
        if INIT_FLAG
            .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
            .is_err()
        {
            panic!("ProcessManager has been initialized!");
        }

        unsafe {
            compiler_fence(Ordering::SeqCst);
            kdebug!("To create address space for INIT process.");
            // test_buddy();
            set_IDLE_PROCESS_ADDRESS_SPACE(
                AddressSpace::new(true).expect("Failed to create address space for INIT process."),
            );
            kdebug!("INIT process address space created.");
            compiler_fence(Ordering::SeqCst);
        };

        ALL_PROCESS.lock_irqsave().replace(HashMap::new());
        Self::init_switch_result();
        Self::arch_init();
        kdebug!("process arch init done.");
        Self::init_idle();
        kdebug!("process idle init done.");

        unsafe { __PROCESS_MANAGEMENT_INIT_DONE = true };
        kinfo!("Process Manager initialized.");
    }

    fn init_switch_result() {
        let mut switch_res_vec: Vec<SwitchResult> = Vec::new();
        for _ in 0..PerCpu::MAX_CPU_NUM {
            switch_res_vec.push(SwitchResult::new());
        }
        unsafe {
            PROCESS_SWITCH_RESULT = Some(PerCpuVar::new(switch_res_vec).unwrap());
        }
    }

    /// Check whether the process manager has finished initializing
    #[allow(dead_code)]
    pub fn initialized() -> bool {
        unsafe { __PROCESS_MANAGEMENT_INIT_DONE }
    }

    /// Get the PCB of the current process
    pub fn current_pcb() -> Arc<ProcessControlBlock> {
        if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) {
            kerror!("__PROCESS_MANAGEMENT_INIT_DONE == false");
            loop {
                spin_loop();
            }
        }
        return ProcessControlBlock::arch_current_pcb();
    }

    /// Get the pid of the current process
    ///
    /// Returns 0 if the process manager has not finished initializing
    pub fn current_pid() -> Pid {
        if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) {
            return Pid(0);
        }

        return ProcessManager::current_pcb().pid();
    }

    /// Increase the lock-holding (preemption) count of the current process
    #[inline(always)]
    pub fn preempt_disable() {
        if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) {
            ProcessManager::current_pcb().preempt_disable();
        }
    }

    /// Decrease the lock-holding (preemption) count of the current process
    #[inline(always)]
    pub fn preempt_enable() {
        if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) {
            ProcessManager::current_pcb().preempt_enable();
        }
    }

    /// Look up a process's PCB by pid
    ///
    /// ## Parameters
    ///
    /// - `pid` : pid of the process
    ///
    /// ## Return value
    ///
    /// The PCB of the process if it was found, otherwise `None`
    pub fn find(pid: Pid) -> Option<Arc<ProcessControlBlock>> {
        return ALL_PROCESS.lock_irqsave().as_ref()?.get(&pid).cloned();
    }

    /// Add a process's PCB to the system
    ///
    /// ## Parameters
    ///
    /// - `pcb` : PCB of the process
    ///
    /// ## Return value
    ///
    /// None
    pub fn add_pcb(pcb: Arc<ProcessControlBlock>) {
        ALL_PROCESS
            .lock_irqsave()
            .as_mut()
            .unwrap()
            .insert(pcb.pid(), pcb.clone());
    }

    /// Wake up a process
    pub fn wakeup(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> {
        let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        let state = pcb.sched_info().inner_lock_read_irqsave().state();
        if state.is_blocked() {
            let mut writer = pcb.sched_info().inner_lock_write_irqsave();
            let state = writer.state();
            if state.is_blocked() {
                writer.set_state(ProcessState::Runnable);
                writer.set_wakeup();

                // avoid deadlock
                drop(writer);

                let rq =
                    cpu_rq(pcb.sched_info().on_cpu().unwrap_or(current_cpu_id()).data() as usize);

                let (rq, _guard) = rq.self_lock();
                rq.update_rq_clock();
                rq.activate_task(
                    pcb,
                    EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK,
                );

                rq.check_preempt_currnet(pcb, WakeupFlags::empty());

                // sched_enqueue(pcb.clone(), true);
                return Ok(());
            } else if state.is_exited() {
                return Err(SystemError::EINVAL);
            } else {
                return Ok(());
            }
        } else if state.is_exited() {
            return Err(SystemError::EINVAL);
        } else {
            return Ok(());
        }
    }
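
    // Note on the locking pattern in `wakeup()` (descriptive comment): the state is first
    // sampled through the cheaper read lock and, only if it looks blocked, re-checked under
    // the write lock before being changed. The write guard is dropped before the run queue
    // lock is taken, so `inner_locked` is never held across `rq.self_lock()`.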

    /// Wake up a stopped process
    pub fn wakeup_stop(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> {
        let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        let state = pcb.sched_info().inner_lock_read_irqsave().state();
        if let ProcessState::Stopped = state {
            let mut writer = pcb.sched_info().inner_lock_write_irqsave();
            let state = writer.state();
            if let ProcessState::Stopped = state {
                writer.set_state(ProcessState::Runnable);
                // avoid deadlock
                drop(writer);

                let rq = cpu_rq(pcb.sched_info().on_cpu().unwrap().data() as usize);

                let (rq, _guard) = rq.self_lock();
                rq.update_rq_clock();
                rq.activate_task(
                    pcb,
                    EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK,
                );

                rq.check_preempt_currnet(pcb, WakeupFlags::empty());

                // sched_enqueue(pcb.clone(), true);
                return Ok(());
            } else if state.is_runnable() {
                return Ok(());
            } else {
                return Err(SystemError::EINVAL);
            }
        } else if state.is_runnable() {
            return Ok(());
        } else {
            return Err(SystemError::EINVAL);
        }
    }

    /// Mark the current process as permanently sleeping; issuing the actual reschedule is the caller's job
    ///
    /// ## Notes
    ///
    /// - The sched_info lock must not be held when entering this function
    /// - Interrupts must be disabled before entering this function
    /// - After this function returns, the caller must keep the logic correct so that the process is not enqueued twice
    pub fn mark_sleep(interruptable: bool) -> Result<(), SystemError> {
        assert!(
            !CurrentIrqArch::is_irq_enabled(),
            "interrupt must be disabled before enter ProcessManager::mark_sleep()"
        );
        let pcb = ProcessManager::current_pcb();
        let mut writer = pcb.sched_info().inner_lock_write_irqsave();
        if !matches!(writer.state(), ProcessState::Exited(_)) {
            writer.set_state(ProcessState::Blocked(interruptable));
            writer.set_sleep();
            pcb.flags().insert(ProcessFlags::NEED_SCHEDULE);
            fence(Ordering::SeqCst);
            drop(writer);
            return Ok(());
        }
        return Err(SystemError::EINTR);
    }

    /// Mark the current process as stopped; issuing the actual reschedule is the caller's job
    ///
    /// ## Notes
    ///
    /// - The sched_info lock must not be held when entering this function
    /// - Interrupts must be disabled before entering this function
    pub fn mark_stop() -> Result<(), SystemError> {
        assert!(
            !CurrentIrqArch::is_irq_enabled(),
            "interrupt must be disabled before enter ProcessManager::mark_stop()"
        );

        let pcb = ProcessManager::current_pcb();
        let mut writer = pcb.sched_info().inner_lock_write_irqsave();
        if !matches!(writer.state(), ProcessState::Exited(_)) {
            writer.set_state(ProcessState::Stopped);
            pcb.flags().insert(ProcessFlags::NEED_SCHEDULE);
            drop(writer);

            return Ok(());
        }
        return Err(SystemError::EINTR);
    }
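
    // Typical sleep/wakeup sequence (illustrative sketch in comment form; the caller code
    // shown here is hypothetical):
    //
    //     let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
    //     ProcessManager::mark_sleep(true)?; // mark the current task as interruptibly blocked
    //     drop(irq_guard);
    //     __schedule(SchedMode::SM_NONE);    // the caller, not mark_sleep(), triggers the switch
    //
    // Some other context later calls `ProcessManager::wakeup(&pcb)` to put the task back on a
    // run queue.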

    /// Notify the parent process after a child process exits
    fn exit_notify() {
        let current = ProcessManager::current_pcb();
        // Let the INIT process adopt all of the children
        if current.pid() != Pid(1) {
            unsafe {
                current
                    .adopt_childen()
                    .unwrap_or_else(|e| panic!("adopt_childen failed: error: {e:?}"))
            };
            let r = current.parent_pcb.read_irqsave().upgrade();
            if r.is_none() {
                return;
            }
            let parent_pcb = r.unwrap();
            let r = Syscall::kill(parent_pcb.pid(), Signal::SIGCHLD as i32);
            if r.is_err() {
                kwarn!(
                    "failed to send kill signal to {:?}'s parent pcb {:?}",
                    current.pid(),
                    parent_pcb.pid()
                );
            }
            // todo: send SIGCHLD to the parent process here
            // todo: also decide how the signal is delivered based on the thread-group information
        }
    }

    /// Exit the current process
    ///
    /// ## Parameters
    ///
    /// - `exit_code` : exit code of the process
    pub fn exit(exit_code: usize) -> ! {
        // Disable interrupts
        let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        let pcb = ProcessManager::current_pcb();
        let pid = pcb.pid();
        pcb.sched_info
            .inner_lock_write_irqsave()
            .set_state(ProcessState::Exited(exit_code));
        pcb.wait_queue.wakeup(Some(ProcessState::Blocked(true)));

        let rq = cpu_rq(smp_get_processor_id().data() as usize);
        let (rq, guard) = rq.self_lock();
        rq.deactivate_task(
            pcb.clone(),
            DequeueFlag::DEQUEUE_SLEEP | DequeueFlag::DEQUEUE_NOCLOCK,
        );
        drop(guard);

        // Perform the post-exit cleanup work
        let thread = pcb.thread.write_irqsave();
        if let Some(addr) = thread.set_child_tid {
            unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") };
        }

        if let Some(addr) = thread.clear_child_tid {
            if Arc::strong_count(&pcb.basic().user_vm().expect("User VM Not found")) > 1 {
                let _ =
                    Futex::futex_wake(addr, FutexFlag::FLAGS_MATCH_NONE, 1, FUTEX_BITSET_MATCH_ANY);
            }
            unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") };
        }

        RobustListHead::exit_robust_list(pcb.clone());

        // If the process was created by vfork, its completion needs to be handled
        if thread.vfork_done.is_some() {
            thread.vfork_done.as_ref().unwrap().complete_all();
        }
        drop(thread);
        unsafe { pcb.basic_mut().set_user_vm(None) };
        drop(pcb);
        ProcessManager::exit_notify();
        // unsafe { CurrentIrqArch::interrupt_enable() };
        __schedule(SchedMode::SM_NONE);
        kerror!("pid {pid:?} exited but sched again!");
        #[allow(clippy::empty_loop)]
        loop {
            spin_loop();
        }
    }

    pub unsafe fn release(pid: Pid) {
        let pcb = ProcessManager::find(pid);
        if pcb.is_some() {
            // let pcb = pcb.unwrap();
            // Check that the pcb is no longer referenced anywhere in the system.
            // TODO: currently the pcb's Arc pointer leaks and the reference count is wrong.
            // The plan is to implement a debug-only Arc to make this easier to track down and then fix the bug.
            // Until then the check is commented out so that the kernel keeps running.
            // if Arc::strong_count(&pcb) <= 2 {
            //     drop(pcb);
            //     ALL_PROCESS.lock().as_mut().unwrap().remove(&pid);
            // } else {
            //     // panic if the count is not 1
            //     let msg = format!("pcb '{:?}' is still referenced, strong count={}",pcb.pid(), Arc::strong_count(&pcb));
            //     kerror!("{}", msg);
            //     panic!()
            // }

            ALL_PROCESS.lock_irqsave().as_mut().unwrap().remove(&pid);
        }
    }

    /// Hook function that runs after a context switch has completed
    unsafe fn switch_finish_hook() {
        // kdebug!("switch_finish_hook");
        let prev_pcb = PROCESS_SWITCH_RESULT
            .as_mut()
            .unwrap()
            .get_mut()
            .prev_pcb
            .take()
            .expect("prev_pcb is None");
        let next_pcb = PROCESS_SWITCH_RESULT
            .as_mut()
            .unwrap()
            .get_mut()
            .next_pcb
            .take()
            .expect("next_pcb is None");

        // SpinLockGuard::leak() was used before the process switch, so the locks must be released manually here
        fence(Ordering::SeqCst);

        prev_pcb.arch_info.force_unlock();
        fence(Ordering::SeqCst);

        next_pcb.arch_info.force_unlock();
        fence(Ordering::SeqCst);
    }

    /// If the target process is currently running on the target CPU, make that CPU trap into kernel mode
    ///
    /// ## Parameters
    ///
    /// - `pcb` : PCB of the process
    #[allow(dead_code)]
    pub fn kick(pcb: &Arc<ProcessControlBlock>) {
        ProcessManager::current_pcb().preempt_disable();
        let cpu_id = pcb.sched_info().on_cpu();

        if let Some(cpu_id) = cpu_id {
            if pcb.pid() == cpu_rq(cpu_id.data() as usize).current().pid() {
                kick_cpu(cpu_id).expect("ProcessManager::kick(): Failed to kick cpu");
            }
        }

        ProcessManager::current_pcb().preempt_enable();
    }
}
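
// Note on the switch protocol (descriptive comment): before a context switch the switch path
// leaks the `arch_info` spinlock guards of both tasks (`SpinLockGuard::leak()`) and records the
// two PCBs in the per-CPU `PROCESS_SWITCH_RESULT`; the hooks below then run after the switch
// and call `ProcessManager::switch_finish_hook()` to force-unlock both guards again.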
"sysv64" fn switch_finish_hook() { 505 ProcessManager::switch_finish_hook(); 506 } 507 #[cfg(target_arch = "riscv64")] 508 #[inline(always)] 509 pub unsafe fn switch_finish_hook() { 510 ProcessManager::switch_finish_hook(); 511 } 512 513 int_like!(Pid, AtomicPid, usize, AtomicUsize); 514 515 impl ToString for Pid { 516 fn to_string(&self) -> String { 517 self.0.to_string() 518 } 519 } 520 521 #[derive(Debug, Clone, Copy, PartialEq, Eq)] 522 pub enum ProcessState { 523 /// The process is running on a CPU or in a run queue. 524 Runnable, 525 /// The process is waiting for an event to occur. 526 /// 其中的bool表示该等待过程是否可以被打断。 527 /// - 如果该bool为true,那么,硬件中断/信号/其他系统事件都可以打断该等待过程,使得该进程重新进入Runnable状态。 528 /// - 如果该bool为false,那么,这个进程必须被显式的唤醒,才能重新进入Runnable状态。 529 Blocked(bool), 530 /// 进程被信号终止 531 Stopped, 532 /// 进程已经退出,usize表示进程的退出码 533 Exited(usize), 534 } 535 536 #[allow(dead_code)] 537 impl ProcessState { 538 #[inline(always)] 539 pub fn is_runnable(&self) -> bool { 540 return matches!(self, ProcessState::Runnable); 541 } 542 543 #[inline(always)] 544 pub fn is_blocked(&self) -> bool { 545 return matches!(self, ProcessState::Blocked(_)); 546 } 547 548 #[inline(always)] 549 pub fn is_blocked_interruptable(&self) -> bool { 550 return matches!(self, ProcessState::Blocked(true)); 551 } 552 553 /// Returns `true` if the process state is [`Exited`]. 554 #[inline(always)] 555 pub fn is_exited(&self) -> bool { 556 return matches!(self, ProcessState::Exited(_)); 557 } 558 559 /// Returns `true` if the process state is [`Stopped`]. 560 /// 561 /// [`Stopped`]: ProcessState::Stopped 562 #[inline(always)] 563 pub fn is_stopped(&self) -> bool { 564 matches!(self, ProcessState::Stopped) 565 } 566 567 /// Returns exit code if the process state is [`Exited`]. 568 #[inline(always)] 569 pub fn exit_code(&self) -> Option<usize> { 570 match self { 571 ProcessState::Exited(code) => Some(*code), 572 _ => None, 573 } 574 } 575 } 576 577 bitflags! 

bitflags! {
    /// PCB flags
    pub struct ProcessFlags: usize {
        /// This PCB represents a kernel thread
        const KTHREAD = 1 << 0;
        /// The process needs to be scheduled
        const NEED_SCHEDULE = 1 << 1;
        /// The process shares resources with its parent because of vfork
        const VFORK = 1 << 2;
        /// The process cannot be frozen
        const NOFREEZE = 1 << 3;
        /// The process is exiting
        const EXITING = 1 << 4;
        /// The process was woken up because it received a fatal signal
        const WAKEKILL = 1 << 5;
        /// The process exited because it received a signal (killed by a signal)
        const SIGNALED = 1 << 6;
        /// The process needs to be migrated to another CPU
        const NEED_MIGRATE = 1 << 7;
        /// Randomize the virtual address space, mainly used when loading the dynamic linker
        const RANDOMIZE = 1 << 8;
    }
}

#[derive(Debug)]
pub struct ProcessControlBlock {
    /// pid of the process
    pid: Pid,
    /// Thread group id of the process (this value never changes within the same thread group)
    tgid: Pid,

    basic: RwLock<ProcessBasicInfo>,
    /// Spinlock-holding (preemption) count of the process
    preempt_count: AtomicUsize,

    flags: LockFreeFlags<ProcessFlags>,
    worker_private: SpinLock<Option<WorkerPrivate>>,
    /// Kernel stack of the process
    kernel_stack: RwLock<KernelStack>,

    /// System call stack
    syscall_stack: RwLock<KernelStack>,

    /// Scheduling-related information
    sched_info: ProcessSchedulerInfo,
    /// Architecture-specific information
    arch_info: SpinLock<ArchPCBInfo>,
    /// Signal-handling related information (could probably be lock-free)
    sig_info: RwLock<ProcessSignalInfo>,
    /// Signal handling structure
    sig_struct: SpinLock<SignalStruct>,
    /// Exit signal
    exit_signal: AtomicSignal,

    /// Pointer to the parent process
    parent_pcb: RwLock<Weak<ProcessControlBlock>>,
    /// Pointer to the real parent process
    real_parent_pcb: RwLock<Weak<ProcessControlBlock>>,

    /// List of child processes
    children: RwLock<Vec<Pid>>,

    /// Wait queue
    wait_queue: WaitQueue,

    /// Thread information
    thread: RwLock<ThreadInfo>,

    /// Alarm timer
    alarm_timer: SpinLock<Option<AlarmTimer>>,

    /// Robust futex list of the process
    robust_list: RwLock<Option<RobustListHead>>,
}
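
// Locking summary for the fields above (descriptive comment): `basic`, `sig_info`, `thread`
// and `robust_list` are `RwLock`s accessed mostly through the *_irqsave accessors; `arch_info`
// and `sig_struct` are `SpinLock`s, with `arch_info` force-unlocked in `switch_finish_hook()`;
// `flags` is a `LockFreeFlags` handed out as a bare mutable reference by `flags()`.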

impl ProcessControlBlock {
    /// Generate a new pcb.
    ///
    /// ## Parameters
    ///
    /// - `name` : name of the process
    /// - `kstack` : kernel stack of the process
    ///
    /// ## Return value
    ///
    /// A new pcb
    pub fn new(name: String, kstack: KernelStack) -> Arc<Self> {
        return Self::do_create_pcb(name, kstack, false);
    }

    /// Create a new idle process
    ///
    /// Note that this function may only be called while the process manager is being initialized.
    pub fn new_idle(cpu_id: u32, kstack: KernelStack) -> Arc<Self> {
        let name = format!("idle-{}", cpu_id);
        return Self::do_create_pcb(name, kstack, true);
    }

    #[inline(never)]
    fn do_create_pcb(name: String, kstack: KernelStack, is_idle: bool) -> Arc<Self> {
        let (pid, ppid, cwd) = if is_idle {
            (Pid(0), Pid(0), "/".to_string())
        } else {
            let ppid = ProcessManager::current_pcb().pid();
            let cwd = ProcessManager::current_pcb().basic().cwd();
            (Self::generate_pid(), ppid, cwd)
        };

        let basic_info = ProcessBasicInfo::new(Pid(0), ppid, name, cwd, None);
        let preempt_count = AtomicUsize::new(0);
        let flags = unsafe { LockFreeFlags::new(ProcessFlags::empty()) };

        let sched_info = ProcessSchedulerInfo::new(None);
        let arch_info = SpinLock::new(ArchPCBInfo::new(&kstack));

        let ppcb: Weak<ProcessControlBlock> = ProcessManager::find(ppid)
            .map(|p| Arc::downgrade(&p))
            .unwrap_or_default();

        let pcb = Self {
            pid,
            tgid: pid,
            basic: basic_info,
            preempt_count,
            flags,
            kernel_stack: RwLock::new(kstack),
            syscall_stack: RwLock::new(KernelStack::new().unwrap()),
            worker_private: SpinLock::new(None),
            sched_info,
            arch_info,
            sig_info: RwLock::new(ProcessSignalInfo::default()),
            sig_struct: SpinLock::new(SignalStruct::new()),
            exit_signal: AtomicSignal::new(Signal::SIGCHLD),
            parent_pcb: RwLock::new(ppcb.clone()),
            real_parent_pcb: RwLock::new(ppcb),
            children: RwLock::new(Vec::new()),
            wait_queue: WaitQueue::default(),
            thread: RwLock::new(ThreadInfo::new()),
            alarm_timer: SpinLock::new(None),
            robust_list: RwLock::new(None),
        };

        // Initialize the system call stack
        #[cfg(target_arch = "x86_64")]
        pcb.arch_info
            .lock()
            .init_syscall_stack(&pcb.syscall_stack.read());

        let pcb = Arc::new(pcb);

        pcb.sched_info()
            .sched_entity()
            .force_mut()
            .set_pcb(Arc::downgrade(&pcb));
        // Store the process's Arc pointer at the lowest address of the kernel stack and the system call stack
        unsafe {
            pcb.kernel_stack
                .write()
                .set_pcb(Arc::downgrade(&pcb))
                .unwrap();

            pcb.syscall_stack
                .write()
                .set_pcb(Arc::downgrade(&pcb))
                .unwrap()
        };

        // Add the current pcb to its parent's list of children
        if pcb.pid() > Pid(1) {
            if let Some(ppcb_arc) = pcb.parent_pcb.read_irqsave().upgrade() {
                let mut children = ppcb_arc.children.write_irqsave();
                children.push(pcb.pid());
            } else {
                panic!("parent pcb is None");
            }
        }

        return pcb;
    }

    /// Generate a new pid
    #[inline(always)]
    fn generate_pid() -> Pid {
        static NEXT_PID: AtomicPid = AtomicPid::new(Pid(1));
        return NEXT_PID.fetch_add(Pid(1), Ordering::SeqCst);
    }

    /// Return the lock-holding (preemption) count of the process
    #[inline(always)]
    pub fn preempt_count(&self) -> usize {
        return self.preempt_count.load(Ordering::SeqCst);
    }

    /// Increase the lock-holding (preemption) count of the process
    #[inline(always)]
    pub fn preempt_disable(&self) {
        self.preempt_count.fetch_add(1, Ordering::SeqCst);
    }

    /// Decrease the lock-holding (preemption) count of the process
    #[inline(always)]
    pub fn preempt_enable(&self) {
        self.preempt_count.fetch_sub(1, Ordering::SeqCst);
    }
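
    // Usage sketch (illustrative comment; the caller shown is hypothetical): the two counters
    // above are expected to stay balanced, e.g.
    //
    //     pcb.preempt_disable();
    //     // ... section that must not be preempted ...
    //     pcb.preempt_enable();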

    #[inline(always)]
    pub unsafe fn set_preempt_count(&self, count: usize) {
        self.preempt_count.store(count, Ordering::SeqCst);
    }

    #[inline(always)]
    pub fn flags(&self) -> &mut ProcessFlags {
        return self.flags.get_mut();
    }

    /// Note that this value may be read in interrupt context, but it must not be modified there,
    /// otherwise a deadlock can occur
    #[inline(always)]
    pub fn basic(&self) -> RwLockReadGuard<ProcessBasicInfo> {
        return self.basic.read_irqsave();
    }

    #[inline(always)]
    pub fn set_name(&self, name: String) {
        self.basic.write().set_name(name);
    }

    #[inline(always)]
    pub fn basic_mut(&self) -> RwLockWriteGuard<ProcessBasicInfo> {
        return self.basic.write_irqsave();
    }

    /// # Acquire the arch-info lock and disable interrupts
    #[inline(always)]
    pub fn arch_info_irqsave(&self) -> SpinLockGuard<ArchPCBInfo> {
        return self.arch_info.lock_irqsave();
    }

    /// # Acquire the arch-info lock without disabling interrupts
    ///
    /// Because the arch info is used during a process switch,
    /// acquiring it outside interrupt context without irqsave is unsafe.
    ///
    /// This function may only be used in the following cases:
    /// - acquiring the arch-info lock in interrupt context (interrupts already disabled);
    /// - right after a new pcb has been created.
    #[inline(always)]
    pub unsafe fn arch_info(&self) -> SpinLockGuard<ArchPCBInfo> {
        return self.arch_info.lock();
    }

    #[inline(always)]
    pub fn kernel_stack(&self) -> RwLockReadGuard<KernelStack> {
        return self.kernel_stack.read();
    }

    pub unsafe fn kernel_stack_force_ref(&self) -> &KernelStack {
        self.kernel_stack.force_get_ref()
    }

    #[inline(always)]
    #[allow(dead_code)]
    pub fn kernel_stack_mut(&self) -> RwLockWriteGuard<KernelStack> {
        return self.kernel_stack.write();
    }

    #[inline(always)]
    pub fn sched_info(&self) -> &ProcessSchedulerInfo {
        return &self.sched_info;
    }

    #[inline(always)]
    pub fn worker_private(&self) -> SpinLockGuard<Option<WorkerPrivate>> {
        return self.worker_private.lock();
    }

    #[inline(always)]
    pub fn pid(&self) -> Pid {
        return self.pid;
    }

    #[inline(always)]
    pub fn tgid(&self) -> Pid {
        return self.tgid;
    }

    /// Get the Arc pointer of the file descriptor table
    #[inline(always)]
    pub fn fd_table(&self) -> Arc<RwLock<FileDescriptorVec>> {
        return self.basic.read().fd_table().unwrap();
    }

    /// Get the Arc pointer of a socket object by file descriptor number
    ///
    /// ## Parameters
    ///
    /// - `fd` : file descriptor number
    ///
    /// ## Return value
    ///
    /// `Option<Arc<SocketInode>>` — the socket object, or `None` if the file descriptor is not a socket
    pub fn get_socket(&self, fd: i32) -> Option<Arc<SocketInode>> {
        let binding = ProcessManager::current_pcb().fd_table();
        let fd_table_guard = binding.read();

        let f = fd_table_guard.get_file_by_fd(fd)?;
        drop(fd_table_guard);

        if f.file_type() != FileType::Socket {
            return None;
        }
        let socket: Arc<SocketInode> = f
            .inode()
            .downcast_arc::<SocketInode>()
            .expect("Not a socket inode");
        return Some(socket);
    }

    /// When the current process exits, let the init process adopt all of its children
    unsafe fn adopt_childen(&self) -> Result<(), SystemError> {
        match ProcessManager::find(Pid(1)) {
            Some(init_pcb) => {
                let childen_guard = self.children.write();
                let mut init_childen_guard = init_pcb.children.write();

                childen_guard.iter().for_each(|pid| {
                    init_childen_guard.push(*pid);
                });

                return Ok(());
            }
            _ => Err(SystemError::ECHILD),
        }
    }

    /// Generate the name of a process
    pub fn generate_name(program_path: &str, args: &Vec<String>) -> String {
        let mut name = program_path.to_string();
        for arg in args {
            name.push(' ');
            name.push_str(arg);
        }
        return name;
    }

    pub fn sig_info_irqsave(&self) -> RwLockReadGuard<ProcessSignalInfo> {
        self.sig_info.read_irqsave()
    }

    pub fn try_siginfo_irqsave(&self, times: u8) -> Option<RwLockReadGuard<ProcessSignalInfo>> {
        for _ in 0..times {
            if let Some(r) = self.sig_info.try_read_irqsave() {
                return Some(r);
            }
        }

        return None;
    }

    pub fn sig_info_mut(&self) -> RwLockWriteGuard<ProcessSignalInfo> {
        self.sig_info.write_irqsave()
    }

    pub fn try_siginfo_mut(&self, times: u8) -> Option<RwLockWriteGuard<ProcessSignalInfo>> {
        for _ in 0..times {
            if let Some(r) = self.sig_info.try_write_irqsave() {
                return Some(r);
            }
        }

        return None;
    }

    pub fn sig_struct(&self) -> SpinLockGuard<SignalStruct> {
        self.sig_struct.lock_irqsave()
    }

    pub fn try_sig_struct_irqsave(&self, times: u8) -> Option<SpinLockGuard<SignalStruct>> {
        for _ in 0..times {
            if let Ok(r) = self.sig_struct.try_lock_irqsave() {
                return Some(r);
            }
        }

        return None;
    }

    pub fn sig_struct_irqsave(&self) -> SpinLockGuard<SignalStruct> {
        self.sig_struct.lock_irqsave()
    }

    #[inline(always)]
    pub fn get_robust_list(&self) -> RwLockReadGuard<Option<RobustListHead>> {
        return self.robust_list.read_irqsave();
    }

    #[inline(always)]
    pub fn set_robust_list(&self, new_robust_list: Option<RobustListHead>) {
        *self.robust_list.write_irqsave() = new_robust_list;
    }

    pub fn alarm_timer_irqsave(&self) -> SpinLockGuard<Option<AlarmTimer>> {
        return self.alarm_timer.lock_irqsave();
    }
}

impl Drop for ProcessControlBlock {
    fn drop(&mut self) {
        let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        // Unregister the process from ProcFS
        procfs_unregister_pid(self.pid())
            .unwrap_or_else(|e| panic!("procfs_unregister_pid failed: error: {e:?}"));

        if let Some(ppcb) = self.parent_pcb.read_irqsave().upgrade() {
            ppcb.children
                .write_irqsave()
                .retain(|pid| *pid != self.pid());
        }

        drop(irq_guard);
    }
}

/// Thread information
#[derive(Debug)]
pub struct ThreadInfo {
    // User-space address that records the user thread id; it is cleared to 0 when this thread exits, to notify the parent process
    clear_child_tid: Option<VirtAddr>,
    set_child_tid: Option<VirtAddr>,

    vfork_done: Option<Arc<Completion>>,
    /// Leader of the thread group
    group_leader: Weak<ProcessControlBlock>,
}

impl ThreadInfo {
    pub fn new() -> Self {
        Self {
            clear_child_tid: None,
            set_child_tid: None,
            vfork_done: None,
            group_leader: Weak::default(),
        }
    }

    pub fn group_leader(&self) -> Option<Arc<ProcessControlBlock>> {
        return self.group_leader.upgrade();
    }
}

/// Basic information of a process
///
/// This struct stores a process's basic information, mainly the data that does not change frequently while the process runs.
#[derive(Debug)]
pub struct ProcessBasicInfo {
    /// Process group id of the process
    pgid: Pid,
    /// pid of the process's parent
    ppid: Pid,
    /// Name of the process
    name: String,

    /// Current working directory of the process
    cwd: String,

    /// User address space
    user_vm: Option<Arc<AddressSpace>>,

    /// File descriptor table
    fd_table: Option<Arc<RwLock<FileDescriptorVec>>>,
}

impl ProcessBasicInfo {
    #[inline(never)]
    pub fn new(
        pgid: Pid,
        ppid: Pid,
        name: String,
        cwd: String,
        user_vm: Option<Arc<AddressSpace>>,
    ) -> RwLock<Self> {
        let fd_table = Arc::new(RwLock::new(FileDescriptorVec::new()));
        return RwLock::new(Self {
            pgid,
            ppid,
            name,
            cwd,
            user_vm,
            fd_table: Some(fd_table),
        });
    }

    pub fn pgid(&self) -> Pid {
        return self.pgid;
    }

    pub fn ppid(&self) -> Pid {
        return self.ppid;
    }

    pub fn name(&self) -> &str {
        return &self.name;
    }

    pub fn set_name(&mut self, name: String) {
        self.name = name;
    }

    pub fn cwd(&self) -> String {
        return self.cwd.clone();
    }
    pub fn set_cwd(&mut self, path: String) {
        self.cwd = path;
    }

    pub fn user_vm(&self) -> Option<Arc<AddressSpace>> {
        return self.user_vm.clone();
    }

    pub unsafe fn set_user_vm(&mut self, user_vm: Option<Arc<AddressSpace>>) {
        self.user_vm = user_vm;
    }

    pub fn fd_table(&self) -> Option<Arc<RwLock<FileDescriptorVec>>> {
        return self.fd_table.clone();
    }

    pub fn set_fd_table(&mut self, fd_table: Option<Arc<RwLock<FileDescriptorVec>>>) {
        self.fd_table = fd_table;
    }
}

#[derive(Debug)]
pub struct ProcessSchedulerInfo {
    /// CPU the process is currently on
    on_cpu: AtomicProcessorId,
    /// If the process is waiting to be migrated to another CPU core (i.e. PF_NEED_MIGRATE is set in flags),
    /// this field stores the id of the target processor core
    // migrate_to: AtomicProcessorId,
    inner_locked: RwLock<InnerSchedInfo>,
    /// Scheduling priority of the process
    // priority: SchedPriority,
    /// Virtual runtime of the process
    // virtual_runtime: AtomicIsize,
    /// Time slice managed by the realtime scheduler
    // rt_time_slice: AtomicIsize,
    pub sched_stat: RwLock<SchedInfo>,
    /// Scheduling policy
    pub sched_policy: RwLock<crate::sched::SchedPolicy>,
    /// CFS scheduling entity
    pub sched_entity: Arc<FairSchedEntity>,
    pub on_rq: SpinLock<OnRq>,

    pub prio_data: RwLock<PrioData>,
}

#[derive(Debug, Default)]
pub struct SchedInfo {
    /// Number of times the task has run on a particular CPU
    pub pcount: usize,
    /// Time the task has spent waiting on a run queue
    pub run_delay: usize,
    /// Timestamp of the last time the task ran on a CPU
    pub last_arrival: u64,
    /// Timestamp of the last time the task was enqueued on a run queue
    pub last_queued: u64,
}

#[derive(Debug)]
pub struct PrioData {
    pub prio: i32,
    pub static_prio: i32,
    pub normal_prio: i32,
}

impl Default for PrioData {
    fn default() -> Self {
        Self {
            prio: MAX_PRIO - 20,
            static_prio: MAX_PRIO - 20,
            normal_prio: MAX_PRIO - 20,
        }
    }
}

#[derive(Debug)]
pub struct InnerSchedInfo {
    /// Current state of the process
    state: ProcessState,
    /// Whether the process has been marked as sleeping
    sleep: bool,
}

impl InnerSchedInfo {
    pub fn state(&self) -> ProcessState {
        return self.state;
    }

    pub fn set_state(&mut self, state: ProcessState) {
        self.state = state;
    }

    pub fn set_sleep(&mut self) {
        self.sleep = true;
    }

    pub fn set_wakeup(&mut self) {
        self.sleep = false;
    }

    pub fn is_mark_sleep(&self) -> bool {
        self.sleep
    }
}

impl ProcessSchedulerInfo {
    #[inline(never)]
    pub fn new(on_cpu: Option<ProcessorId>) -> Self {
        let cpu_id = on_cpu.unwrap_or(ProcessorId::INVALID);
        return Self {
            on_cpu: AtomicProcessorId::new(cpu_id),
            // migrate_to: AtomicProcessorId::new(ProcessorId::INVALID),
            inner_locked: RwLock::new(InnerSchedInfo {
                state: ProcessState::Blocked(false),
                sleep: false,
            }),
            // virtual_runtime: AtomicIsize::new(0),
            // rt_time_slice: AtomicIsize::new(0),
            // priority: SchedPriority::new(100).unwrap(),
            sched_stat: RwLock::new(SchedInfo::default()),
            sched_policy: RwLock::new(crate::sched::SchedPolicy::CFS),
            sched_entity: FairSchedEntity::new(),
            on_rq: SpinLock::new(OnRq::None),
            prio_data: RwLock::new(PrioData::default()),
        };
    }

    pub fn sched_entity(&self) -> Arc<FairSchedEntity> {
        return self.sched_entity.clone();
    }

    pub fn on_cpu(&self) -> Option<ProcessorId> {
        let on_cpu = self.on_cpu.load(Ordering::SeqCst);
        if on_cpu == ProcessorId::INVALID {
            return None;
        } else {
            return Some(on_cpu);
        }
    }

    pub fn set_on_cpu(&self, on_cpu: Option<ProcessorId>) {
        if let Some(cpu_id) = on_cpu {
            self.on_cpu.store(cpu_id, Ordering::SeqCst);
        } else {
            self.on_cpu.store(ProcessorId::INVALID, Ordering::SeqCst);
        }
    }

    // pub fn migrate_to(&self) -> Option<ProcessorId> {
    //     let migrate_to = self.migrate_to.load(Ordering::SeqCst);
    //     if migrate_to == ProcessorId::INVALID {
    //         return None;
    //     } else {
    //         return Some(migrate_to);
    //     }
    // }

    // pub fn set_migrate_to(&self, migrate_to: Option<ProcessorId>) {
    //     if let Some(data) = migrate_to {
    //         self.migrate_to.store(data, Ordering::SeqCst);
    //     } else {
    //         self.migrate_to
    //             .store(ProcessorId::INVALID, Ordering::SeqCst)
    //     }
    // }

    pub fn inner_lock_write_irqsave(&self) -> RwLockWriteGuard<InnerSchedInfo> {
        return self.inner_locked.write_irqsave();
    }

    pub fn inner_lock_read_irqsave(&self) -> RwLockReadGuard<InnerSchedInfo> {
        return self.inner_locked.read_irqsave();
    }

    // pub fn inner_lock_try_read_irqsave(
    //     &self,
    //     times: u8,
    // ) -> Option<RwLockReadGuard<InnerSchedInfo>> {
    //     for _ in 0..times {
    //         if let Some(r) = self.inner_locked.try_read_irqsave() {
    //             return Some(r);
    //         }
    //     }

    //     return None;
    // }

    // pub fn inner_lock_try_upgradable_read_irqsave(
    //     &self,
    //     times: u8,
    // ) -> Option<RwLockUpgradableGuard<InnerSchedInfo>> {
    //     for _ in 0..times {
    //         if let Some(r) = self.inner_locked.try_upgradeable_read_irqsave() {
    //             return Some(r);
    //         }
    //     }

    //     return None;
    // }

    // pub fn virtual_runtime(&self) -> isize {
    //     return self.virtual_runtime.load(Ordering::SeqCst);
    // }

    // pub fn set_virtual_runtime(&self, virtual_runtime: isize) {
    //     self.virtual_runtime
    //         .store(virtual_runtime, Ordering::SeqCst);
    // }
    // pub fn increase_virtual_runtime(&self, delta: isize) {
    //     self.virtual_runtime.fetch_add(delta, Ordering::SeqCst);
    // }

    // pub fn rt_time_slice(&self) -> isize {
    //     return self.rt_time_slice.load(Ordering::SeqCst);
    // }

    // pub fn set_rt_time_slice(&self, rt_time_slice: isize) {
    //     self.rt_time_slice.store(rt_time_slice, Ordering::SeqCst);
    // }

    // pub fn increase_rt_time_slice(&self, delta: isize) {
    //     self.rt_time_slice.fetch_add(delta, Ordering::SeqCst);
    // }

    pub fn policy(&self) -> crate::sched::SchedPolicy {
        return *self.sched_policy.read_irqsave();
    }
}

#[derive(Debug, Clone)]
pub struct KernelStack {
    stack: Option<AlignedBox<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>>,
    /// Marks whether this kernel stack may be freed
    can_be_freed: bool,
}

impl KernelStack {
    pub const SIZE: usize = 0x4000;
    pub const ALIGN: usize = 0x4000;

    pub fn new() -> Result<Self, SystemError> {
        return Ok(Self {
            stack: Some(
                AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_zeroed()?,
            ),
            can_be_freed: true,
        });
    }

    /// Construct a kernel stack struct from existing memory
    ///
    /// Only used while the BSP is booting, to build the kernel stack of the idle process. Using this function at any other time is very likely to cause errors!
    pub unsafe fn from_existed(base: VirtAddr) -> Result<Self, SystemError> {
        if base.is_null() || !base.check_aligned(Self::ALIGN) {
            return Err(SystemError::EFAULT);
        }

        return Ok(Self {
            stack: Some(
                AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_unchecked(
                    base.data() as *mut [u8; KernelStack::SIZE],
                ),
            ),
            can_be_freed: false,
        });
    }

    /// Return the starting virtual address of the kernel stack (low address)
    pub fn start_address(&self) -> VirtAddr {
        return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize);
    }

    /// Return the ending virtual address of the kernel stack (high address, exclusive)
    pub fn stack_max_address(&self) -> VirtAddr {
        return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize + Self::SIZE);
    }
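
    // Layout note (descriptive comment): `set_pcb()` below stores a raw `Weak<ProcessControlBlock>`
    // pointer in the first pointer-sized slot at `start_address()`, and `pcb()` reads it back.
    // Since the stack's SIZE and ALIGN are both 0x4000, this slot presumably lets the architecture
    // layer (`ProcessControlBlock::arch_current_pcb()`) recover the current PCB from the stack
    // pointer alone; that lookup is implemented in the arch code, not here.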

    pub unsafe fn set_pcb(&mut self, pcb: Weak<ProcessControlBlock>) -> Result<(), SystemError> {
        // Place a Weak<ProcessControlBlock> at the lowest address of the kernel stack
        let p: *const ProcessControlBlock = Weak::into_raw(pcb);
        let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock;

        // If the lowest address of the kernel stack already holds a pcb, do not overwrite it; return an error
        if unlikely(unsafe { !(*stack_bottom_ptr).is_null() }) {
            kerror!("kernel stack bottom is not null: {:p}", *stack_bottom_ptr);
            return Err(SystemError::EPERM);
        }
        // Store the pcb's address at the lowest address of the kernel stack
        unsafe {
            *stack_bottom_ptr = p;
        }

        return Ok(());
    }

    /// Clear the kernel stack's pcb pointer
    ///
    /// ## Parameters
    ///
    /// - `force` : if true, the pcb pointer is cleared forcibly even when it is not null, without taking care of the Weak pointer
    pub unsafe fn clear_pcb(&mut self, force: bool) {
        let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock;
        if unlikely(unsafe { (*stack_bottom_ptr).is_null() }) {
            return;
        }

        if !force {
            let pcb_ptr: Weak<ProcessControlBlock> = Weak::from_raw(*stack_bottom_ptr);
            drop(pcb_ptr);
        }

        *stack_bottom_ptr = core::ptr::null();
    }

    /// Return an Arc pointer to the pcb stored in this kernel stack
    #[allow(dead_code)]
    pub unsafe fn pcb(&self) -> Option<Arc<ProcessControlBlock>> {
        // Read the pcb's address from the lowest address of the kernel stack
        let p = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock;
        if unlikely(unsafe { (*p).is_null() }) {
            return None;
        }

        // To keep the pcb pointer stored on the kernel stack from being freed, wrap it so that its drop is not called
        let weak_wrapper: ManuallyDrop<Weak<ProcessControlBlock>> =
            ManuallyDrop::new(Weak::from_raw(*p));

        let new_arc: Arc<ProcessControlBlock> = weak_wrapper.upgrade()?;
        return Some(new_arc);
    }
}

impl Drop for KernelStack {
    fn drop(&mut self) {
        if self.stack.is_some() {
            let ptr = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock;
            if unsafe { !(*ptr).is_null() } {
                let pcb_ptr: Weak<ProcessControlBlock> = unsafe { Weak::from_raw(*ptr) };
                drop(pcb_ptr);
            }
        }
        // If this kernel stack must not be freed, forget it here so that AlignedBox's drop is not called
        if !self.can_be_freed {
            let bx = self.stack.take();
            core::mem::forget(bx);
        }
    }
}

pub fn process_init() {
    ProcessManager::init();
}

#[derive(Debug)]
pub struct ProcessSignalInfo {
    // Signals blocked by the current process
    sig_block: SigSet,
    // sig_pending stores the signals to be handled by the current thread
    sig_pending: SigPending,
    // sig_shared_pending stores the signals to be handled by the process this thread belongs to
    sig_shared_pending: SigPending,
    // The tty associated with the current process
    tty: Option<Arc<TtyCore>>,
}

impl ProcessSignalInfo {
    pub fn sig_block(&self) -> &SigSet {
        &self.sig_block
    }

    pub fn sig_pending(&self) -> &SigPending {
        &self.sig_pending
    }

    pub fn sig_pending_mut(&mut self) -> &mut SigPending {
        &mut self.sig_pending
    }

    pub fn sig_block_mut(&mut self) -> &mut SigSet {
        &mut self.sig_block
    }

    pub fn sig_shared_pending_mut(&mut self) -> &mut SigPending {
        &mut self.sig_shared_pending
    }

    pub fn sig_shared_pending(&self) -> &SigPending {
        &self.sig_shared_pending
    }

    pub fn tty(&self) -> Option<Arc<TtyCore>> {
        self.tty.clone()
    }

    pub fn set_tty(&mut self, tty: Arc<TtyCore>) {
        self.tty = Some(tty);
    }

    /// Take the next signal to handle out of the pcb's siginfo; thread-private signals are handled before process-wide signals
    ///
    /// ## Parameters
    ///
    /// - `sig_mask` : signals to be ignored
    ///
    pub fn dequeue_signal(&mut self, sig_mask: &SigSet) -> (Signal, Option<SigInfo>) {
        let res = self.sig_pending.dequeue_signal(sig_mask);
        if res.0 != Signal::INVALID {
            return res;
        } else {
            return self.sig_shared_pending.dequeue_signal(sig_mask);
        }
    }
}

impl Default for ProcessSignalInfo {
    fn default() -> Self {
        Self {
            sig_block: SigSet::empty(),
            sig_pending: SigPending::default(),
            sig_shared_pending: SigPending::default(),
            tty: None,
        }
    }
}