1 use core::{ 2 fmt, 3 hash::Hash, 4 hint::spin_loop, 5 intrinsics::{likely, unlikely}, 6 mem::ManuallyDrop, 7 sync::atomic::{compiler_fence, fence, AtomicBool, AtomicUsize, Ordering}, 8 }; 9 10 use alloc::{ 11 ffi::CString, 12 string::{String, ToString}, 13 sync::{Arc, Weak}, 14 vec::Vec, 15 }; 16 use hashbrown::HashMap; 17 use log::{debug, error, info, warn}; 18 use system_error::SystemError; 19 20 use crate::{ 21 arch::{ 22 cpu::current_cpu_id, 23 ipc::signal::{AtomicSignal, SigSet, Signal}, 24 process::ArchPCBInfo, 25 CurrentIrqArch, 26 }, 27 driver::tty::tty_core::TtyCore, 28 exception::InterruptArch, 29 filesystem::{ 30 procfs::procfs_unregister_pid, 31 vfs::{file::FileDescriptorVec, FileType}, 32 }, 33 ipc::signal_types::{SigInfo, SigPending, SignalStruct}, 34 libs::{ 35 align::AlignedBox, 36 casting::DowncastArc, 37 futex::{ 38 constant::{FutexFlag, FUTEX_BITSET_MATCH_ANY}, 39 futex::{Futex, RobustListHead}, 40 }, 41 lock_free_flags::LockFreeFlags, 42 rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard}, 43 spinlock::{SpinLock, SpinLockGuard}, 44 wait_queue::WaitQueue, 45 }, 46 mm::{ 47 percpu::{PerCpu, PerCpuVar}, 48 set_IDLE_PROCESS_ADDRESS_SPACE, 49 ucontext::AddressSpace, 50 VirtAddr, 51 }, 52 net::socket::SocketInode, 53 sched::completion::Completion, 54 sched::{ 55 cpu_rq, fair::FairSchedEntity, prio::MAX_PRIO, DequeueFlag, EnqueueFlag, OnRq, SchedMode, 56 WakeupFlags, __schedule, 57 }, 58 smp::{ 59 core::smp_get_processor_id, 60 cpu::{AtomicProcessorId, ProcessorId}, 61 kick_cpu, 62 }, 63 syscall::{user_access::clear_user, Syscall}, 64 }; 65 use timer::AlarmTimer; 66 67 use self::kthread::WorkerPrivate; 68 69 pub mod abi; 70 pub mod c_adapter; 71 pub mod exec; 72 pub mod exit; 73 pub mod fork; 74 pub mod idle; 75 pub mod kthread; 76 pub mod pid; 77 pub mod resource; 78 pub mod stdio; 79 pub mod syscall; 80 pub mod timer; 81 pub mod utils; 82 83 /// 系统中所有进程的pcb 84 static ALL_PROCESS: SpinLock<Option<HashMap<Pid, Arc<ProcessControlBlock>>>> = 
SpinLock::new(None); 85 86 pub static mut PROCESS_SWITCH_RESULT: Option<PerCpuVar<SwitchResult>> = None; 87 88 /// 一个只改变1次的全局变量,标志进程管理器是否已经初始化完成 89 static mut __PROCESS_MANAGEMENT_INIT_DONE: bool = false; 90 91 #[derive(Debug)] 92 pub struct SwitchResult { 93 pub prev_pcb: Option<Arc<ProcessControlBlock>>, 94 pub next_pcb: Option<Arc<ProcessControlBlock>>, 95 } 96 97 impl SwitchResult { 98 pub fn new() -> Self { 99 Self { 100 prev_pcb: None, 101 next_pcb: None, 102 } 103 } 104 } 105 106 #[derive(Debug)] 107 pub struct ProcessManager; 108 impl ProcessManager { 109 #[inline(never)] 110 fn init() { 111 static INIT_FLAG: AtomicBool = AtomicBool::new(false); 112 if INIT_FLAG 113 .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) 114 .is_err() 115 { 116 panic!("ProcessManager has been initialized!"); 117 } 118 119 unsafe { 120 compiler_fence(Ordering::SeqCst); 121 debug!("To create address space for INIT process."); 122 // test_buddy(); 123 set_IDLE_PROCESS_ADDRESS_SPACE( 124 AddressSpace::new(true).expect("Failed to create address space for INIT process."), 125 ); 126 debug!("INIT process address space created."); 127 compiler_fence(Ordering::SeqCst); 128 }; 129 130 ALL_PROCESS.lock_irqsave().replace(HashMap::new()); 131 Self::init_switch_result(); 132 Self::arch_init(); 133 debug!("process arch init done."); 134 Self::init_idle(); 135 debug!("process idle init done."); 136 137 unsafe { __PROCESS_MANAGEMENT_INIT_DONE = true }; 138 info!("Process Manager initialized."); 139 } 140 141 fn init_switch_result() { 142 let mut switch_res_vec: Vec<SwitchResult> = Vec::new(); 143 for _ in 0..PerCpu::MAX_CPU_NUM { 144 switch_res_vec.push(SwitchResult::new()); 145 } 146 unsafe { 147 PROCESS_SWITCH_RESULT = Some(PerCpuVar::new(switch_res_vec).unwrap()); 148 } 149 } 150 151 /// 判断进程管理器是否已经初始化完成 152 #[allow(dead_code)] 153 pub fn initialized() -> bool { 154 unsafe { __PROCESS_MANAGEMENT_INIT_DONE } 155 } 156 157 /// 获取当前进程的pcb 158 pub fn current_pcb() -> 
Arc<ProcessControlBlock> { 159 if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) { 160 error!("unsafe__PROCESS_MANAGEMENT_INIT_DONE == false"); 161 loop { 162 spin_loop(); 163 } 164 } 165 return ProcessControlBlock::arch_current_pcb(); 166 } 167 168 /// 获取当前进程的pid 169 /// 170 /// 如果进程管理器未初始化完成,那么返回0 171 pub fn current_pid() -> Pid { 172 if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) { 173 return Pid(0); 174 } 175 176 return ProcessManager::current_pcb().pid(); 177 } 178 179 /// 增加当前进程的锁持有计数 180 #[inline(always)] 181 pub fn preempt_disable() { 182 if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) { 183 ProcessManager::current_pcb().preempt_disable(); 184 } 185 } 186 187 /// 减少当前进程的锁持有计数 188 #[inline(always)] 189 pub fn preempt_enable() { 190 if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) { 191 ProcessManager::current_pcb().preempt_enable(); 192 } 193 } 194 195 /// 根据pid获取进程的pcb 196 /// 197 /// ## 参数 198 /// 199 /// - `pid` : 进程的pid 200 /// 201 /// ## 返回值 202 /// 203 /// 如果找到了对应的进程,那么返回该进程的pcb,否则返回None 204 pub fn find(pid: Pid) -> Option<Arc<ProcessControlBlock>> { 205 return ALL_PROCESS.lock_irqsave().as_ref()?.get(&pid).cloned(); 206 } 207 208 /// 向系统中添加一个进程的pcb 209 /// 210 /// ## 参数 211 /// 212 /// - `pcb` : 进程的pcb 213 /// 214 /// ## 返回值 215 /// 216 /// 无 217 pub fn add_pcb(pcb: Arc<ProcessControlBlock>) { 218 ALL_PROCESS 219 .lock_irqsave() 220 .as_mut() 221 .unwrap() 222 .insert(pcb.pid(), pcb.clone()); 223 } 224 225 /// 唤醒一个进程 226 pub fn wakeup(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> { 227 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 228 let state = pcb.sched_info().inner_lock_read_irqsave().state(); 229 if state.is_blocked() { 230 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 231 let state = writer.state(); 232 if state.is_blocked() { 233 writer.set_state(ProcessState::Runnable); 234 writer.set_wakeup(); 235 236 // avoid deadlock 237 drop(writer); 238 239 let rq = 240 
cpu_rq(pcb.sched_info().on_cpu().unwrap_or(current_cpu_id()).data() as usize); 241 242 let (rq, _guard) = rq.self_lock(); 243 rq.update_rq_clock(); 244 rq.activate_task( 245 pcb, 246 EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK, 247 ); 248 249 rq.check_preempt_currnet(pcb, WakeupFlags::empty()); 250 251 // sched_enqueue(pcb.clone(), true); 252 return Ok(()); 253 } else if state.is_exited() { 254 return Err(SystemError::EINVAL); 255 } else { 256 return Ok(()); 257 } 258 } else if state.is_exited() { 259 return Err(SystemError::EINVAL); 260 } else { 261 return Ok(()); 262 } 263 } 264 265 /// 唤醒暂停的进程 266 pub fn wakeup_stop(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> { 267 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 268 let state = pcb.sched_info().inner_lock_read_irqsave().state(); 269 if let ProcessState::Stopped = state { 270 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 271 let state = writer.state(); 272 if let ProcessState::Stopped = state { 273 writer.set_state(ProcessState::Runnable); 274 // avoid deadlock 275 drop(writer); 276 277 let rq = cpu_rq(pcb.sched_info().on_cpu().unwrap().data() as usize); 278 279 let (rq, _guard) = rq.self_lock(); 280 rq.update_rq_clock(); 281 rq.activate_task( 282 pcb, 283 EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK, 284 ); 285 286 rq.check_preempt_currnet(pcb, WakeupFlags::empty()); 287 288 // sched_enqueue(pcb.clone(), true); 289 return Ok(()); 290 } else if state.is_runnable() { 291 return Ok(()); 292 } else { 293 return Err(SystemError::EINVAL); 294 } 295 } else if state.is_runnable() { 296 return Ok(()); 297 } else { 298 return Err(SystemError::EINVAL); 299 } 300 } 301 302 /// 标志当前进程永久睡眠,但是发起调度的工作,应该由调用者完成 303 /// 304 /// ## 注意 305 /// 306 /// - 进入当前函数之前,不能持有sched_info的锁 307 /// - 进入当前函数之前,必须关闭中断 308 /// - 进入当前函数之后必须保证逻辑的正确性,避免被重复加入调度队列 309 pub fn mark_sleep(interruptable: bool) -> Result<(), SystemError> { 310 assert!( 311 
!CurrentIrqArch::is_irq_enabled(), 312 "interrupt must be disabled before enter ProcessManager::mark_sleep()" 313 ); 314 let pcb = ProcessManager::current_pcb(); 315 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 316 if !matches!(writer.state(), ProcessState::Exited(_)) { 317 writer.set_state(ProcessState::Blocked(interruptable)); 318 writer.set_sleep(); 319 pcb.flags().insert(ProcessFlags::NEED_SCHEDULE); 320 fence(Ordering::SeqCst); 321 drop(writer); 322 return Ok(()); 323 } 324 return Err(SystemError::EINTR); 325 } 326 327 /// 标志当前进程为停止状态,但是发起调度的工作,应该由调用者完成 328 /// 329 /// ## 注意 330 /// 331 /// - 进入当前函数之前,不能持有sched_info的锁 332 /// - 进入当前函数之前,必须关闭中断 333 pub fn mark_stop() -> Result<(), SystemError> { 334 assert!( 335 !CurrentIrqArch::is_irq_enabled(), 336 "interrupt must be disabled before enter ProcessManager::mark_stop()" 337 ); 338 339 let pcb = ProcessManager::current_pcb(); 340 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 341 if !matches!(writer.state(), ProcessState::Exited(_)) { 342 writer.set_state(ProcessState::Stopped); 343 pcb.flags().insert(ProcessFlags::NEED_SCHEDULE); 344 drop(writer); 345 346 return Ok(()); 347 } 348 return Err(SystemError::EINTR); 349 } 350 /// 当子进程退出后向父进程发送通知 351 fn exit_notify() { 352 let current = ProcessManager::current_pcb(); 353 // 让INIT进程收养所有子进程 354 if current.pid() != Pid(1) { 355 unsafe { 356 current 357 .adopt_childen() 358 .unwrap_or_else(|e| panic!("adopte_childen failed: error: {e:?}")) 359 }; 360 let r = current.parent_pcb.read_irqsave().upgrade(); 361 if r.is_none() { 362 return; 363 } 364 let parent_pcb = r.unwrap(); 365 let r = Syscall::kill(parent_pcb.pid(), Signal::SIGCHLD as i32); 366 if r.is_err() { 367 warn!( 368 "failed to send kill signal to {:?}'s parent pcb {:?}", 369 current.pid(), 370 parent_pcb.pid() 371 ); 372 } 373 // todo: 这里需要向父进程发送SIGCHLD信号 374 // todo: 这里还需要根据线程组的信息,决定信号的发送 375 } 376 } 377 378 /// 退出当前进程 379 /// 380 /// ## 参数 381 /// 382 /// - `exit_code` : 进程的退出码 
383 pub fn exit(exit_code: usize) -> ! { 384 // 关中断 385 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 386 let pcb = ProcessManager::current_pcb(); 387 let pid = pcb.pid(); 388 pcb.sched_info 389 .inner_lock_write_irqsave() 390 .set_state(ProcessState::Exited(exit_code)); 391 pcb.wait_queue.wakeup(Some(ProcessState::Blocked(true))); 392 393 let rq = cpu_rq(smp_get_processor_id().data() as usize); 394 let (rq, guard) = rq.self_lock(); 395 rq.deactivate_task( 396 pcb.clone(), 397 DequeueFlag::DEQUEUE_SLEEP | DequeueFlag::DEQUEUE_NOCLOCK, 398 ); 399 drop(guard); 400 401 // 进行进程退出后的工作 402 let thread = pcb.thread.write_irqsave(); 403 if let Some(addr) = thread.set_child_tid { 404 unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") }; 405 } 406 407 if let Some(addr) = thread.clear_child_tid { 408 if Arc::strong_count(&pcb.basic().user_vm().expect("User VM Not found")) > 1 { 409 let _ = 410 Futex::futex_wake(addr, FutexFlag::FLAGS_MATCH_NONE, 1, FUTEX_BITSET_MATCH_ANY); 411 } 412 unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") }; 413 } 414 415 RobustListHead::exit_robust_list(pcb.clone()); 416 417 // 如果是vfork出来的进程,则需要处理completion 418 if thread.vfork_done.is_some() { 419 thread.vfork_done.as_ref().unwrap().complete_all(); 420 } 421 drop(thread); 422 unsafe { pcb.basic_mut().set_user_vm(None) }; 423 drop(pcb); 424 ProcessManager::exit_notify(); 425 // unsafe { CurrentIrqArch::interrupt_enable() }; 426 __schedule(SchedMode::SM_NONE); 427 error!("pid {pid:?} exited but sched again!"); 428 #[allow(clippy::empty_loop)] 429 loop { 430 spin_loop(); 431 } 432 } 433 434 pub unsafe fn release(pid: Pid) { 435 let pcb = ProcessManager::find(pid); 436 if pcb.is_some() { 437 // let pcb = pcb.unwrap(); 438 // 判断该pcb是否在全局没有任何引用 439 // TODO: 当前,pcb的Arc指针存在泄露问题,引用计数不正确,打算在接下来实现debug专用的Arc,方便调试,然后解决这个bug。 440 // 因此目前暂时注释掉,使得能跑 441 // if Arc::strong_count(&pcb) <= 2 { 442 // drop(pcb); 443 // 
ALL_PROCESS.lock().as_mut().unwrap().remove(&pid); 444 // } else { 445 // // 如果不为1就panic 446 // let msg = format!("pcb '{:?}' is still referenced, strong count={}",pcb.pid(), Arc::strong_count(&pcb)); 447 // error!("{}", msg); 448 // panic!() 449 // } 450 451 ALL_PROCESS.lock_irqsave().as_mut().unwrap().remove(&pid); 452 } 453 } 454 455 /// 上下文切换完成后的钩子函数 456 unsafe fn switch_finish_hook() { 457 // debug!("switch_finish_hook"); 458 let prev_pcb = PROCESS_SWITCH_RESULT 459 .as_mut() 460 .unwrap() 461 .get_mut() 462 .prev_pcb 463 .take() 464 .expect("prev_pcb is None"); 465 let next_pcb = PROCESS_SWITCH_RESULT 466 .as_mut() 467 .unwrap() 468 .get_mut() 469 .next_pcb 470 .take() 471 .expect("next_pcb is None"); 472 473 // 由于进程切换前使用了SpinLockGuard::leak(),所以这里需要手动释放锁 474 fence(Ordering::SeqCst); 475 476 prev_pcb.arch_info.force_unlock(); 477 fence(Ordering::SeqCst); 478 479 next_pcb.arch_info.force_unlock(); 480 fence(Ordering::SeqCst); 481 } 482 483 /// 如果目标进程正在目标CPU上运行,那么就让这个cpu陷入内核态 484 /// 485 /// ## 参数 486 /// 487 /// - `pcb` : 进程的pcb 488 #[allow(dead_code)] 489 pub fn kick(pcb: &Arc<ProcessControlBlock>) { 490 ProcessManager::current_pcb().preempt_disable(); 491 let cpu_id = pcb.sched_info().on_cpu(); 492 493 if let Some(cpu_id) = cpu_id { 494 if pcb.pid() == cpu_rq(cpu_id.data() as usize).current().pid() { 495 kick_cpu(cpu_id).expect("ProcessManager::kick(): Failed to kick cpu"); 496 } 497 } 498 499 ProcessManager::current_pcb().preempt_enable(); 500 } 501 } 502 503 /// 上下文切换的钩子函数,当这个函数return的时候,将会发生上下文切换 504 #[cfg(target_arch = "x86_64")] 505 #[inline(never)] 506 pub unsafe extern "sysv64" fn switch_finish_hook() { 507 ProcessManager::switch_finish_hook(); 508 } 509 #[cfg(target_arch = "riscv64")] 510 #[inline(always)] 511 pub unsafe fn switch_finish_hook() { 512 ProcessManager::switch_finish_hook(); 513 } 514 515 int_like!(Pid, AtomicPid, usize, AtomicUsize); 516 517 impl fmt::Display for Pid { 518 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 519 
        write!(f, "{}", self.0)
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ProcessState {
    /// The process is running on a CPU or in a run queue.
    Runnable,
    /// The process is waiting for an event to occur.
    /// The inner bool indicates whether the wait may be interrupted:
    /// - `true`: hardware interrupts / signals / other system events may
    ///   interrupt the wait and make the process Runnable again.
    /// - `false`: the process must be explicitly woken to become Runnable.
    Blocked(bool),
    /// The process was stopped by a signal.
    Stopped,
    /// The process has exited; the usize is its exit code.
    Exited(usize),
}

#[allow(dead_code)]
impl ProcessState {
    /// Returns `true` if the process state is [`ProcessState::Runnable`].
    #[inline(always)]
    pub fn is_runnable(&self) -> bool {
        return matches!(self, ProcessState::Runnable);
    }

    /// Returns `true` if the process state is [`ProcessState::Blocked`],
    /// interruptible or not.
    #[inline(always)]
    pub fn is_blocked(&self) -> bool {
        return matches!(self, ProcessState::Blocked(_));
    }

    /// Returns `true` if the process is blocked AND the wait is interruptible.
    #[inline(always)]
    pub fn is_blocked_interruptable(&self) -> bool {
        return matches!(self, ProcessState::Blocked(true));
    }

    /// Returns `true` if the process state is [`Exited`].
    #[inline(always)]
    pub fn is_exited(&self) -> bool {
        return matches!(self, ProcessState::Exited(_));
    }

    /// Returns `true` if the process state is [`Stopped`].
    ///
    /// [`Stopped`]: ProcessState::Stopped
    #[inline(always)]
    pub fn is_stopped(&self) -> bool {
        matches!(self, ProcessState::Stopped)
    }

    /// Returns the exit code if the process state is [`Exited`], else `None`.
    ///
    /// [`Exited`]: ProcessState::Exited
    #[inline(always)]
    pub fn exit_code(&self) -> Option<usize> {
        match self {
            ProcessState::Exited(code) => Some(*code),
            _ => None,
        }
    }
}

bitflags! {
    /// PCB flag bits
    pub struct ProcessFlags: usize {
        /// This pcb represents a kernel thread
        const KTHREAD = 1 << 0;
        /// The process needs to be rescheduled
        const NEED_SCHEDULE = 1 << 1;
        /// The process shares resources with its parent because of vfork
        const VFORK = 1 << 2;
        /// The process cannot be frozen
        const NOFREEZE = 1 << 3;
        /// The process is exiting
        const EXITING = 1 << 4;
        /// The process was woken by a termination signal
        const WAKEKILL = 1 << 5;
        /// The process exited because of a signal (killed by a signal)
        const SIGNALED = 1 << 6;
        /// The process needs to migrate to another cpu
        const NEED_MIGRATE = 1 << 7;
        /// Randomized virtual address space, mainly for dynamic-linker loading
        const RANDOMIZE = 1 << 8;
    }
}

#[derive(Debug)]
pub struct ProcessControlBlock {
    /// pid of this process
    pid: Pid,
    /// thread-group id of this process (never changes within a thread group)
    tgid: Pid,

    basic: RwLock<ProcessBasicInfo>,
    /// spinlock-hold (preemption-disable) count of this process
    preempt_count: AtomicUsize,

    flags: LockFreeFlags<ProcessFlags>,
    worker_private: SpinLock<Option<WorkerPrivate>>,
    /// kernel stack of the process
    kernel_stack: RwLock<KernelStack>,

    /// syscall stack
    syscall_stack: RwLock<KernelStack>,

    /// scheduling-related information
    sched_info: ProcessSchedulerInfo,
    /// architecture-specific information
    arch_info: SpinLock<ArchPCBInfo>,
    /// signal-handling information (could probably be made lock-free)
    sig_info: RwLock<ProcessSignalInfo>,
    /// signal-handling struct
    sig_struct: SpinLock<SignalStruct>,
    /// exit signal
    exit_signal: AtomicSignal,

    /// pointer to the parent process
    parent_pcb: RwLock<Weak<ProcessControlBlock>>,
    /// pointer to the real parent process
    real_parent_pcb: RwLock<Weak<ProcessControlBlock>>,

    /// list of child processes
    children: RwLock<Vec<Pid>>,

    /// wait queue
    wait_queue: WaitQueue,

    /// thread information
    thread: RwLock<ThreadInfo>,

    /// alarm timer
    alarm_timer: SpinLock<Option<AlarmTimer>>,

    /// robust futex list of the process
    robust_list: RwLock<Option<RobustListHead>>,
}

impl ProcessControlBlock {
    /// Generate a new pcb.
656 /// 657 /// ## 参数 658 /// 659 /// - `name` : 进程的名字 660 /// - `kstack` : 进程的内核栈 661 /// 662 /// ## 返回值 663 /// 664 /// 返回一个新的pcb 665 pub fn new(name: String, kstack: KernelStack) -> Arc<Self> { 666 return Self::do_create_pcb(name, kstack, false); 667 } 668 669 /// 创建一个新的idle进程 670 /// 671 /// 请注意,这个函数只能在进程管理初始化的时候调用。 672 pub fn new_idle(cpu_id: u32, kstack: KernelStack) -> Arc<Self> { 673 let name = format!("idle-{}", cpu_id); 674 return Self::do_create_pcb(name, kstack, true); 675 } 676 677 /// # 函数的功能 678 /// 679 /// 返回此函数是否是内核进程 680 /// 681 /// # 返回值 682 /// 683 /// 若进程是内核进程则返回true 否则返回false 684 pub fn is_kthread(&self) -> bool { 685 return matches!(self.flags(), &mut ProcessFlags::KTHREAD); 686 } 687 688 #[inline(never)] 689 fn do_create_pcb(name: String, kstack: KernelStack, is_idle: bool) -> Arc<Self> { 690 let (pid, ppid, cwd) = if is_idle { 691 (Pid(0), Pid(0), "/".to_string()) 692 } else { 693 let ppid = ProcessManager::current_pcb().pid(); 694 let cwd = ProcessManager::current_pcb().basic().cwd(); 695 (Self::generate_pid(), ppid, cwd) 696 }; 697 698 let basic_info = ProcessBasicInfo::new(Pid(0), ppid, name, cwd, None); 699 let preempt_count = AtomicUsize::new(0); 700 let flags = unsafe { LockFreeFlags::new(ProcessFlags::empty()) }; 701 702 let sched_info = ProcessSchedulerInfo::new(None); 703 let arch_info = SpinLock::new(ArchPCBInfo::new(&kstack)); 704 705 let ppcb: Weak<ProcessControlBlock> = ProcessManager::find(ppid) 706 .map(|p| Arc::downgrade(&p)) 707 .unwrap_or_default(); 708 709 let pcb = Self { 710 pid, 711 tgid: pid, 712 basic: basic_info, 713 preempt_count, 714 flags, 715 kernel_stack: RwLock::new(kstack), 716 syscall_stack: RwLock::new(KernelStack::new().unwrap()), 717 worker_private: SpinLock::new(None), 718 sched_info, 719 arch_info, 720 sig_info: RwLock::new(ProcessSignalInfo::default()), 721 sig_struct: SpinLock::new(SignalStruct::new()), 722 exit_signal: AtomicSignal::new(Signal::SIGCHLD), 723 parent_pcb: RwLock::new(ppcb.clone()), 724 
real_parent_pcb: RwLock::new(ppcb), 725 children: RwLock::new(Vec::new()), 726 wait_queue: WaitQueue::default(), 727 thread: RwLock::new(ThreadInfo::new()), 728 alarm_timer: SpinLock::new(None), 729 robust_list: RwLock::new(None), 730 }; 731 732 // 初始化系统调用栈 733 #[cfg(target_arch = "x86_64")] 734 pcb.arch_info 735 .lock() 736 .init_syscall_stack(&pcb.syscall_stack.read()); 737 738 let pcb = Arc::new(pcb); 739 740 pcb.sched_info() 741 .sched_entity() 742 .force_mut() 743 .set_pcb(Arc::downgrade(&pcb)); 744 // 设置进程的arc指针到内核栈和系统调用栈的最低地址处 745 unsafe { 746 pcb.kernel_stack 747 .write() 748 .set_pcb(Arc::downgrade(&pcb)) 749 .unwrap(); 750 751 pcb.syscall_stack 752 .write() 753 .set_pcb(Arc::downgrade(&pcb)) 754 .unwrap() 755 }; 756 757 // 将当前pcb加入父进程的子进程哈希表中 758 if pcb.pid() > Pid(1) { 759 if let Some(ppcb_arc) = pcb.parent_pcb.read_irqsave().upgrade() { 760 let mut children = ppcb_arc.children.write_irqsave(); 761 children.push(pcb.pid()); 762 } else { 763 panic!("parent pcb is None"); 764 } 765 } 766 767 return pcb; 768 } 769 770 /// 生成一个新的pid 771 #[inline(always)] 772 fn generate_pid() -> Pid { 773 static NEXT_PID: AtomicPid = AtomicPid::new(Pid(1)); 774 return NEXT_PID.fetch_add(Pid(1), Ordering::SeqCst); 775 } 776 777 /// 返回当前进程的锁持有计数 778 #[inline(always)] 779 pub fn preempt_count(&self) -> usize { 780 return self.preempt_count.load(Ordering::SeqCst); 781 } 782 783 /// 增加当前进程的锁持有计数 784 #[inline(always)] 785 pub fn preempt_disable(&self) { 786 self.preempt_count.fetch_add(1, Ordering::SeqCst); 787 } 788 789 /// 减少当前进程的锁持有计数 790 #[inline(always)] 791 pub fn preempt_enable(&self) { 792 self.preempt_count.fetch_sub(1, Ordering::SeqCst); 793 } 794 795 #[inline(always)] 796 pub unsafe fn set_preempt_count(&self, count: usize) { 797 self.preempt_count.store(count, Ordering::SeqCst); 798 } 799 800 #[inline(always)] 801 pub fn flags(&self) -> &mut ProcessFlags { 802 return self.flags.get_mut(); 803 } 804 805 /// 请注意,这个值能在中断上下文中读取,但不能被中断上下文修改 806 /// 否则会导致死锁 807 
#[inline(always)] 808 pub fn basic(&self) -> RwLockReadGuard<ProcessBasicInfo> { 809 return self.basic.read_irqsave(); 810 } 811 812 #[inline(always)] 813 pub fn set_name(&self, name: String) { 814 self.basic.write().set_name(name); 815 } 816 817 #[inline(always)] 818 pub fn basic_mut(&self) -> RwLockWriteGuard<ProcessBasicInfo> { 819 return self.basic.write_irqsave(); 820 } 821 822 /// # 获取arch info的锁,同时关闭中断 823 #[inline(always)] 824 pub fn arch_info_irqsave(&self) -> SpinLockGuard<ArchPCBInfo> { 825 return self.arch_info.lock_irqsave(); 826 } 827 828 /// # 获取arch info的锁,但是不关闭中断 829 /// 830 /// 由于arch info在进程切换的时候会使用到, 831 /// 因此在中断上下文外,获取arch info 而不irqsave是不安全的. 832 /// 833 /// 只能在以下情况下使用这个函数: 834 /// - 在中断上下文中(中断已经禁用),获取arch info的锁。 835 /// - 刚刚创建新的pcb 836 #[inline(always)] 837 pub unsafe fn arch_info(&self) -> SpinLockGuard<ArchPCBInfo> { 838 return self.arch_info.lock(); 839 } 840 841 #[inline(always)] 842 pub fn kernel_stack(&self) -> RwLockReadGuard<KernelStack> { 843 return self.kernel_stack.read(); 844 } 845 846 pub unsafe fn kernel_stack_force_ref(&self) -> &KernelStack { 847 self.kernel_stack.force_get_ref() 848 } 849 850 #[inline(always)] 851 #[allow(dead_code)] 852 pub fn kernel_stack_mut(&self) -> RwLockWriteGuard<KernelStack> { 853 return self.kernel_stack.write(); 854 } 855 856 #[inline(always)] 857 pub fn sched_info(&self) -> &ProcessSchedulerInfo { 858 return &self.sched_info; 859 } 860 861 #[inline(always)] 862 pub fn worker_private(&self) -> SpinLockGuard<Option<WorkerPrivate>> { 863 return self.worker_private.lock(); 864 } 865 866 #[inline(always)] 867 pub fn pid(&self) -> Pid { 868 return self.pid; 869 } 870 871 #[inline(always)] 872 pub fn tgid(&self) -> Pid { 873 return self.tgid; 874 } 875 876 /// 获取文件描述符表的Arc指针 877 #[inline(always)] 878 pub fn fd_table(&self) -> Arc<RwLock<FileDescriptorVec>> { 879 return self.basic.read().fd_table().unwrap(); 880 } 881 882 /// 根据文件描述符序号,获取socket对象的Arc指针 883 /// 884 /// ## 参数 885 /// 886 /// - `fd` 
文件描述符序号 887 /// 888 /// ## 返回值 889 /// 890 /// Option(&mut Box<dyn Socket>) socket对象的可变引用. 如果文件描述符不是socket,那么返回None 891 pub fn get_socket(&self, fd: i32) -> Option<Arc<SocketInode>> { 892 let binding = ProcessManager::current_pcb().fd_table(); 893 let fd_table_guard = binding.read(); 894 895 let f = fd_table_guard.get_file_by_fd(fd)?; 896 drop(fd_table_guard); 897 898 if f.file_type() != FileType::Socket { 899 return None; 900 } 901 let socket: Arc<SocketInode> = f 902 .inode() 903 .downcast_arc::<SocketInode>() 904 .expect("Not a socket inode"); 905 return Some(socket); 906 } 907 908 /// 当前进程退出时,让初始进程收养所有子进程 909 unsafe fn adopt_childen(&self) -> Result<(), SystemError> { 910 match ProcessManager::find(Pid(1)) { 911 Some(init_pcb) => { 912 let childen_guard = self.children.write(); 913 let mut init_childen_guard = init_pcb.children.write(); 914 915 childen_guard.iter().for_each(|pid| { 916 init_childen_guard.push(*pid); 917 }); 918 919 return Ok(()); 920 } 921 _ => Err(SystemError::ECHILD), 922 } 923 } 924 925 /// 生成进程的名字 926 pub fn generate_name(program_path: &str, args: &Vec<CString>) -> String { 927 let mut name = program_path.to_string(); 928 for arg in args { 929 name.push(' '); 930 name.push_str(arg.to_string_lossy().as_ref()); 931 } 932 return name; 933 } 934 935 pub fn sig_info_irqsave(&self) -> RwLockReadGuard<ProcessSignalInfo> { 936 self.sig_info.read_irqsave() 937 } 938 939 pub fn try_siginfo_irqsave(&self, times: u8) -> Option<RwLockReadGuard<ProcessSignalInfo>> { 940 for _ in 0..times { 941 if let Some(r) = self.sig_info.try_read_irqsave() { 942 return Some(r); 943 } 944 } 945 946 return None; 947 } 948 949 pub fn sig_info_mut(&self) -> RwLockWriteGuard<ProcessSignalInfo> { 950 self.sig_info.write_irqsave() 951 } 952 953 pub fn try_siginfo_mut(&self, times: u8) -> Option<RwLockWriteGuard<ProcessSignalInfo>> { 954 for _ in 0..times { 955 if let Some(r) = self.sig_info.try_write_irqsave() { 956 return Some(r); 957 } 958 } 959 960 return None; 961 } 962 
963 pub fn sig_struct(&self) -> SpinLockGuard<SignalStruct> { 964 self.sig_struct.lock_irqsave() 965 } 966 967 pub fn try_sig_struct_irqsave(&self, times: u8) -> Option<SpinLockGuard<SignalStruct>> { 968 for _ in 0..times { 969 if let Ok(r) = self.sig_struct.try_lock_irqsave() { 970 return Some(r); 971 } 972 } 973 974 return None; 975 } 976 977 pub fn sig_struct_irqsave(&self) -> SpinLockGuard<SignalStruct> { 978 self.sig_struct.lock_irqsave() 979 } 980 981 #[inline(always)] 982 pub fn get_robust_list(&self) -> RwLockReadGuard<Option<RobustListHead>> { 983 return self.robust_list.read_irqsave(); 984 } 985 986 #[inline(always)] 987 pub fn set_robust_list(&self, new_robust_list: Option<RobustListHead>) { 988 *self.robust_list.write_irqsave() = new_robust_list; 989 } 990 991 pub fn alarm_timer_irqsave(&self) -> SpinLockGuard<Option<AlarmTimer>> { 992 return self.alarm_timer.lock_irqsave(); 993 } 994 } 995 996 impl Drop for ProcessControlBlock { 997 fn drop(&mut self) { 998 let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 999 // 在ProcFS中,解除进程的注册 1000 procfs_unregister_pid(self.pid()) 1001 .unwrap_or_else(|e| panic!("procfs_unregister_pid failed: error: {e:?}")); 1002 1003 if let Some(ppcb) = self.parent_pcb.read_irqsave().upgrade() { 1004 ppcb.children 1005 .write_irqsave() 1006 .retain(|pid| *pid != self.pid()); 1007 } 1008 1009 drop(irq_guard); 1010 } 1011 } 1012 1013 /// 线程信息 1014 #[derive(Debug)] 1015 pub struct ThreadInfo { 1016 // 来自用户空间记录用户线程id的地址,在该线程结束时将该地址置0以通知父进程 1017 clear_child_tid: Option<VirtAddr>, 1018 set_child_tid: Option<VirtAddr>, 1019 1020 vfork_done: Option<Arc<Completion>>, 1021 /// 线程组的组长 1022 group_leader: Weak<ProcessControlBlock>, 1023 } 1024 1025 impl ThreadInfo { 1026 pub fn new() -> Self { 1027 Self { 1028 clear_child_tid: None, 1029 set_child_tid: None, 1030 vfork_done: None, 1031 group_leader: Weak::default(), 1032 } 1033 } 1034 1035 pub fn group_leader(&self) -> Option<Arc<ProcessControlBlock>> { 1036 return 
self.group_leader.upgrade(); 1037 } 1038 } 1039 1040 /// 进程的基本信息 1041 /// 1042 /// 这个结构体保存进程的基本信息,主要是那些不会随着进程的运行而经常改变的信息。 1043 #[derive(Debug)] 1044 pub struct ProcessBasicInfo { 1045 /// 当前进程的进程组id 1046 pgid: Pid, 1047 /// 当前进程的父进程的pid 1048 ppid: Pid, 1049 /// 进程的名字 1050 name: String, 1051 1052 /// 当前进程的工作目录 1053 cwd: String, 1054 1055 /// 用户地址空间 1056 user_vm: Option<Arc<AddressSpace>>, 1057 1058 /// 文件描述符表 1059 fd_table: Option<Arc<RwLock<FileDescriptorVec>>>, 1060 } 1061 1062 impl ProcessBasicInfo { 1063 #[inline(never)] 1064 pub fn new( 1065 pgid: Pid, 1066 ppid: Pid, 1067 name: String, 1068 cwd: String, 1069 user_vm: Option<Arc<AddressSpace>>, 1070 ) -> RwLock<Self> { 1071 let fd_table = Arc::new(RwLock::new(FileDescriptorVec::new())); 1072 return RwLock::new(Self { 1073 pgid, 1074 ppid, 1075 name, 1076 cwd, 1077 user_vm, 1078 fd_table: Some(fd_table), 1079 }); 1080 } 1081 1082 pub fn pgid(&self) -> Pid { 1083 return self.pgid; 1084 } 1085 1086 pub fn ppid(&self) -> Pid { 1087 return self.ppid; 1088 } 1089 1090 pub fn name(&self) -> &str { 1091 return &self.name; 1092 } 1093 1094 pub fn set_name(&mut self, name: String) { 1095 self.name = name; 1096 } 1097 1098 pub fn cwd(&self) -> String { 1099 return self.cwd.clone(); 1100 } 1101 pub fn set_cwd(&mut self, path: String) { 1102 return self.cwd = path; 1103 } 1104 1105 pub fn user_vm(&self) -> Option<Arc<AddressSpace>> { 1106 return self.user_vm.clone(); 1107 } 1108 1109 pub unsafe fn set_user_vm(&mut self, user_vm: Option<Arc<AddressSpace>>) { 1110 self.user_vm = user_vm; 1111 } 1112 1113 pub fn fd_table(&self) -> Option<Arc<RwLock<FileDescriptorVec>>> { 1114 return self.fd_table.clone(); 1115 } 1116 1117 pub fn set_fd_table(&mut self, fd_table: Option<Arc<RwLock<FileDescriptorVec>>>) { 1118 self.fd_table = fd_table; 1119 } 1120 } 1121 1122 #[derive(Debug)] 1123 pub struct ProcessSchedulerInfo { 1124 /// 当前进程所在的cpu 1125 on_cpu: AtomicProcessorId, 1126 /// 如果当前进程等待被迁移到另一个cpu核心上(也就是flags中的PF_NEED_MIGRATE被置位), 
1127 /// 该字段存储要被迁移到的目标处理器核心号 1128 // migrate_to: AtomicProcessorId, 1129 inner_locked: RwLock<InnerSchedInfo>, 1130 /// 进程的调度优先级 1131 // priority: SchedPriority, 1132 /// 当前进程的虚拟运行时间 1133 // virtual_runtime: AtomicIsize, 1134 /// 由实时调度器管理的时间片 1135 // rt_time_slice: AtomicIsize, 1136 pub sched_stat: RwLock<SchedInfo>, 1137 /// 调度策略 1138 pub sched_policy: RwLock<crate::sched::SchedPolicy>, 1139 /// cfs调度实体 1140 pub sched_entity: Arc<FairSchedEntity>, 1141 pub on_rq: SpinLock<OnRq>, 1142 1143 pub prio_data: RwLock<PrioData>, 1144 } 1145 1146 #[derive(Debug, Default)] 1147 #[allow(dead_code)] 1148 pub struct SchedInfo { 1149 /// 记录任务在特定 CPU 上运行的次数 1150 pub pcount: usize, 1151 /// 记录任务等待在运行队列上的时间 1152 pub run_delay: usize, 1153 /// 记录任务上次在 CPU 上运行的时间戳 1154 pub last_arrival: u64, 1155 /// 记录任务上次被加入到运行队列中的时间戳 1156 pub last_queued: u64, 1157 } 1158 1159 #[derive(Debug)] 1160 #[allow(dead_code)] 1161 pub struct PrioData { 1162 pub prio: i32, 1163 pub static_prio: i32, 1164 pub normal_prio: i32, 1165 } 1166 1167 impl Default for PrioData { 1168 fn default() -> Self { 1169 Self { 1170 prio: MAX_PRIO - 20, 1171 static_prio: MAX_PRIO - 20, 1172 normal_prio: MAX_PRIO - 20, 1173 } 1174 } 1175 } 1176 1177 #[derive(Debug)] 1178 pub struct InnerSchedInfo { 1179 /// 当前进程的状态 1180 state: ProcessState, 1181 /// 进程的调度策略 1182 sleep: bool, 1183 } 1184 1185 impl InnerSchedInfo { 1186 pub fn state(&self) -> ProcessState { 1187 return self.state; 1188 } 1189 1190 pub fn set_state(&mut self, state: ProcessState) { 1191 self.state = state; 1192 } 1193 1194 pub fn set_sleep(&mut self) { 1195 self.sleep = true; 1196 } 1197 1198 pub fn set_wakeup(&mut self) { 1199 self.sleep = false; 1200 } 1201 1202 pub fn is_mark_sleep(&self) -> bool { 1203 self.sleep 1204 } 1205 } 1206 1207 impl ProcessSchedulerInfo { 1208 #[inline(never)] 1209 pub fn new(on_cpu: Option<ProcessorId>) -> Self { 1210 let cpu_id = on_cpu.unwrap_or(ProcessorId::INVALID); 1211 return Self { 1212 on_cpu: 
AtomicProcessorId::new(cpu_id), 1213 // migrate_to: AtomicProcessorId::new(ProcessorId::INVALID), 1214 inner_locked: RwLock::new(InnerSchedInfo { 1215 state: ProcessState::Blocked(false), 1216 sleep: false, 1217 }), 1218 // virtual_runtime: AtomicIsize::new(0), 1219 // rt_time_slice: AtomicIsize::new(0), 1220 // priority: SchedPriority::new(100).unwrap(), 1221 sched_stat: RwLock::new(SchedInfo::default()), 1222 sched_policy: RwLock::new(crate::sched::SchedPolicy::CFS), 1223 sched_entity: FairSchedEntity::new(), 1224 on_rq: SpinLock::new(OnRq::None), 1225 prio_data: RwLock::new(PrioData::default()), 1226 }; 1227 } 1228 1229 pub fn sched_entity(&self) -> Arc<FairSchedEntity> { 1230 return self.sched_entity.clone(); 1231 } 1232 1233 pub fn on_cpu(&self) -> Option<ProcessorId> { 1234 let on_cpu = self.on_cpu.load(Ordering::SeqCst); 1235 if on_cpu == ProcessorId::INVALID { 1236 return None; 1237 } else { 1238 return Some(on_cpu); 1239 } 1240 } 1241 1242 pub fn set_on_cpu(&self, on_cpu: Option<ProcessorId>) { 1243 if let Some(cpu_id) = on_cpu { 1244 self.on_cpu.store(cpu_id, Ordering::SeqCst); 1245 } else { 1246 self.on_cpu.store(ProcessorId::INVALID, Ordering::SeqCst); 1247 } 1248 } 1249 1250 // pub fn migrate_to(&self) -> Option<ProcessorId> { 1251 // let migrate_to = self.migrate_to.load(Ordering::SeqCst); 1252 // if migrate_to == ProcessorId::INVALID { 1253 // return None; 1254 // } else { 1255 // return Some(migrate_to); 1256 // } 1257 // } 1258 1259 // pub fn set_migrate_to(&self, migrate_to: Option<ProcessorId>) { 1260 // if let Some(data) = migrate_to { 1261 // self.migrate_to.store(data, Ordering::SeqCst); 1262 // } else { 1263 // self.migrate_to 1264 // .store(ProcessorId::INVALID, Ordering::SeqCst) 1265 // } 1266 // } 1267 1268 pub fn inner_lock_write_irqsave(&self) -> RwLockWriteGuard<InnerSchedInfo> { 1269 return self.inner_locked.write_irqsave(); 1270 } 1271 1272 pub fn inner_lock_read_irqsave(&self) -> RwLockReadGuard<InnerSchedInfo> { 1273 return 
            self.inner_locked.read_irqsave();
    }

    // pub fn inner_lock_try_read_irqsave(
    //     &self,
    //     times: u8,
    // ) -> Option<RwLockReadGuard<InnerSchedInfo>> {
    //     for _ in 0..times {
    //         if let Some(r) = self.inner_locked.try_read_irqsave() {
    //             return Some(r);
    //         }
    //     }

    //     return None;
    // }

    // pub fn inner_lock_try_upgradable_read_irqsave(
    //     &self,
    //     times: u8,
    // ) -> Option<RwLockUpgradableGuard<InnerSchedInfo>> {
    //     for _ in 0..times {
    //         if let Some(r) = self.inner_locked.try_upgradeable_read_irqsave() {
    //             return Some(r);
    //         }
    //     }

    //     return None;
    // }

    // pub fn virtual_runtime(&self) -> isize {
    //     return self.virtual_runtime.load(Ordering::SeqCst);
    // }

    // pub fn set_virtual_runtime(&self, virtual_runtime: isize) {
    //     self.virtual_runtime
    //         .store(virtual_runtime, Ordering::SeqCst);
    // }
    // pub fn increase_virtual_runtime(&self, delta: isize) {
    //     self.virtual_runtime.fetch_add(delta, Ordering::SeqCst);
    // }

    // pub fn rt_time_slice(&self) -> isize {
    //     return self.rt_time_slice.load(Ordering::SeqCst);
    // }

    // pub fn set_rt_time_slice(&self, rt_time_slice: isize) {
    //     self.rt_time_slice.store(rt_time_slice, Ordering::SeqCst);
    // }

    // pub fn increase_rt_time_slice(&self, delta: isize) {
    //     self.rt_time_slice.fetch_add(delta, Ordering::SeqCst);
    // }

    /// Current scheduling policy (copied out under the IRQ-saving read lock).
    pub fn policy(&self) -> crate::sched::SchedPolicy {
        return *self.sched_policy.read_irqsave();
    }
}

/// A kernel stack: an aligned, fixed-size buffer whose lowest address slot
/// holds a raw `Weak<ProcessControlBlock>` pointer back to the owning PCB.
#[derive(Debug, Clone)]
pub struct KernelStack {
    /// Backing storage of the stack; `None` only transiently during drop.
    stack: Option<AlignedBox<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>>,
    /// Whether this kernel stack's memory may be freed on drop.
    can_be_freed: bool,
}

impl KernelStack {
    /// Stack size in bytes (16 KiB).
    pub const SIZE: usize = 0x4000;
    /// Required alignment of the stack (16 KiB).
    pub const ALIGN: usize = 0x4000;

    /// Allocate a new zero-filled kernel stack.
    pub fn new() -> Result<Self, SystemError>
    {
        return Ok(Self {
            stack: Some(
                AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_zeroed()?,
            ),
            can_be_freed: true,
        });
    }

    /// Construct a kernel stack structure over pre-existing memory.
    ///
    /// Only intended for BSP startup, to build the idle process's kernel stack.
    /// Using this function anywhere else is very likely an error!
    ///
    /// # Safety
    ///
    /// `base` must point to valid memory of at least `Self::SIZE` bytes that
    /// lives for the lifetime of the returned stack.
    pub unsafe fn from_existed(base: VirtAddr) -> Result<Self, SystemError> {
        if base.is_null() || !base.check_aligned(Self::ALIGN) {
            return Err(SystemError::EFAULT);
        }

        return Ok(Self {
            stack: Some(
                AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_unchecked(
                    base.data() as *mut [u8; KernelStack::SIZE],
                ),
            ),
            // The memory was not allocated by us, so it must never be freed here.
            can_be_freed: false,
        });
    }

    /// Starting (lowest) virtual address of the kernel stack.
    pub fn start_address(&self) -> VirtAddr {
        return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize);
    }

    /// One-past-the-end (highest, exclusive) virtual address of the kernel stack.
    pub fn stack_max_address(&self) -> VirtAddr {
        return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize + Self::SIZE);
    }

    /// Store a `Weak<ProcessControlBlock>` at the lowest address of the stack.
    ///
    /// Returns `EPERM` if a PCB pointer is already installed there.
    ///
    /// NOTE(review): on the `EPERM` path the `Weak` consumed by `into_raw` is
    /// never reconstructed, so one weak reference is leaked — confirm intended.
    ///
    /// # Safety
    ///
    /// The bottom slot of the stack must be reserved for this pointer and must
    /// not be in use as ordinary stack memory.
    pub unsafe fn set_pcb(&mut self, pcb: Weak<ProcessControlBlock>) -> Result<(), SystemError> {
        // Place the raw Weak<ProcessControlBlock> pointer at the lowest address.
        let p: *const ProcessControlBlock = Weak::into_raw(pcb);
        let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock;

        // If a PCB pointer is already present, refuse to overwrite it.
        if unlikely(unsafe { !(*stack_bottom_ptr).is_null() }) {
            error!("kernel stack bottom is not null: {:p}", *stack_bottom_ptr);
            return Err(SystemError::EPERM);
        }
        // Write the PCB pointer into the lowest address of the kernel stack.
        unsafe {
            *stack_bottom_ptr = p;
        }

        return Ok(());
    }

    /// Clear the PCB pointer stored at the bottom of the kernel stack.
    ///
    /// ## Parameters
    ///
    /// - `force`: if true, the pointer is cleared unconditionally without
    ///   reconstructing/dropping the `Weak` reference it represents.
    pub unsafe fn clear_pcb(&mut self, force: bool) {
        let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock;
        // Nothing to clear if no PCB pointer is installed.
        if unlikely(unsafe {
            (*stack_bottom_ptr).is_null() }) {
            return;
        }

        if !force {
            // Reconstruct the Weak so its reference-count bookkeeping is released.
            let pcb_ptr: Weak<ProcessControlBlock> = Weak::from_raw(*stack_bottom_ptr);
            drop(pcb_ptr);
        }

        *stack_bottom_ptr = core::ptr::null();
    }

    /// Return an `Arc` to the PCB stored at the bottom of this kernel stack,
    /// or `None` if no PCB pointer is installed or it can no longer be upgraded.
    #[allow(dead_code)]
    pub unsafe fn pcb(&self) -> Option<Arc<ProcessControlBlock>> {
        // Read the PCB pointer from the lowest address of the kernel stack.
        let p = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock;
        if unlikely(unsafe { (*p).is_null() }) {
            return None;
        }

        // Wrap in ManuallyDrop so the weak count stored in the stack slot is
        // not decremented when this temporary Weak goes out of scope.
        let weak_wrapper: ManuallyDrop<Weak<ProcessControlBlock>> =
            ManuallyDrop::new(Weak::from_raw(*p));

        let new_arc: Arc<ProcessControlBlock> = weak_wrapper.upgrade()?;
        return Some(new_arc);
    }
}

impl Drop for KernelStack {
    fn drop(&mut self) {
        // Release the Weak reference stored at the stack bottom, if any.
        if self.stack.is_some() {
            let ptr = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock;
            if unsafe { !(*ptr).is_null() } {
                let pcb_ptr: Weak<ProcessControlBlock> = unsafe { Weak::from_raw(*ptr) };
                drop(pcb_ptr);
            }
        }
        // If this kernel stack must not be freed, forget the box so that
        // AlignedBox's drop (which would deallocate) never runs.
        if !self.can_be_freed {
            let bx = self.stack.take();
            core::mem::forget(bx);
        }
    }
}

/// Initialize the process-management subsystem.
pub fn process_init() {
    ProcessManager::init();
}

/// Signal-related state of a process.
#[derive(Debug)]
pub struct ProcessSignalInfo {
    // Signals currently blocked by this process.
    sig_block: SigSet,
    // sig_pending holds the signals to be handled by the current thread.
    sig_pending: SigPending,
    // sig_shared_pending holds the signals to be handled by the process
    // the current thread belongs to.
    sig_shared_pending: SigPending,
    // The tty associated with the current process, if any.
    tty: Option<Arc<TtyCore>>,
}

impl ProcessSignalInfo {
    /// Blocked signal set of this process.
    pub fn sig_block(&self) -> &SigSet {
        &self.sig_block
    }

    /// Thread-private pending signals.
    pub fn sig_pending(&self) -> &SigPending {
        &self.sig_pending
    }

    /// Mutable access to the thread-private pending signals.
    pub fn sig_pending_mut(&mut self) -> &mut SigPending {
        &mut self.sig_pending
    }

    /// Mutable access to the blocked signal set.
    pub fn sig_block_mut(&mut self) -> &mut SigSet {
        &mut self.sig_block
    }

    /// Mutable access to the process-shared pending signals.
    pub fn sig_shared_pending_mut(&mut self) -> &mut SigPending {
        &mut self.sig_shared_pending
    }

    /// Process-shared pending signals.
    pub fn sig_shared_pending(&self) -> &SigPending {
        &self.sig_shared_pending
    }

    /// The tty associated with this process, if any.
    pub fn tty(&self) -> Option<Arc<TtyCore>> {
        self.tty.clone()
    }

    /// Associate a tty with this process.
    pub fn set_tty(&mut self, tty: Arc<TtyCore>) {
        self.tty = Some(tty);
    }

    /// Dequeue the next signal to handle from this PCB's siginfo.
    /// Thread-private signals are handled first, then process-shared ones.
    ///
    /// ## Parameters
    ///
    /// - `sig_mask`: signals to be ignored during dequeueing
    ///
    pub fn dequeue_signal(&mut self, sig_mask: &SigSet) -> (Signal, Option<SigInfo>) {
        let res = self.sig_pending.dequeue_signal(sig_mask);
        if res.0 != Signal::INVALID {
            return res;
        } else {
            return self.sig_shared_pending.dequeue_signal(sig_mask);
        }
    }
}

impl Default for ProcessSignalInfo {
    fn default() -> Self {
        Self {
            sig_block: SigSet::empty(),
            sig_pending: SigPending::default(),
            sig_shared_pending: SigPending::default(),
            tty: None,
        }
    }
}