use core::{
    hash::Hash,
    hint::spin_loop,
    intrinsics::{likely, unlikely},
    mem::ManuallyDrop,
    sync::atomic::{compiler_fence, fence, AtomicBool, AtomicUsize, Ordering},
};

use alloc::{
    string::{String, ToString},
    sync::{Arc, Weak},
    vec::Vec,
};
use hashbrown::HashMap;
use system_error::SystemError;

use crate::{
    arch::{
        cpu::current_cpu_id,
        ipc::signal::{AtomicSignal, SigSet, Signal},
        process::ArchPCBInfo,
        CurrentIrqArch,
    },
    driver::tty::tty_core::TtyCore,
    exception::InterruptArch,
    filesystem::{
        procfs::procfs_unregister_pid,
        vfs::{file::FileDescriptorVec, FileType},
    },
    ipc::signal_types::{SigInfo, SigPending, SignalStruct},
    kdebug, kinfo,
    libs::{
        align::AlignedBox,
        casting::DowncastArc,
        futex::{
            constant::{FutexFlag, FUTEX_BITSET_MATCH_ANY},
            futex::{Futex, RobustListHead},
        },
        lock_free_flags::LockFreeFlags,
        rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard},
        spinlock::{SpinLock, SpinLockGuard},
        wait_queue::WaitQueue,
    },
    mm::{
        percpu::{PerCpu, PerCpuVar},
        set_IDLE_PROCESS_ADDRESS_SPACE,
        ucontext::AddressSpace,
        VirtAddr,
    },
    net::socket::SocketInode,
    sched::completion::Completion,
    sched::{
        cpu_rq, fair::FairSchedEntity, prio::MAX_PRIO, DequeueFlag, EnqueueFlag, OnRq, SchedMode,
        WakeupFlags, __schedule,
    },
    smp::{
        core::smp_get_processor_id,
        cpu::{AtomicProcessorId, ProcessorId},
        kick_cpu,
    },
    syscall::{user_access::clear_user, Syscall},
};
use timer::AlarmTimer;

use self::kthread::WorkerPrivate;

pub mod abi;
pub mod c_adapter;
pub mod exec;
pub mod exit;
pub mod fork;
pub mod idle;
pub mod kthread;
pub mod pid;
pub mod resource;
pub mod stdio;
pub mod syscall;
pub mod timer;
pub mod utils;

/// PCBs of every process in the system, keyed by pid.
static ALL_PROCESS: SpinLock<Option<HashMap<Pid, Arc<ProcessControlBlock>>>> = SpinLock::new(None);

/// Per-CPU storage holding the two PCBs involved in the most recent context
/// switch on each CPU; consumed by `switch_finish_hook` after the switch.
pub static mut PROCESS_SWITCH_RESULT: Option<PerCpuVar<SwitchResult>> = None;

/// A global flag that changes exactly once: marks whether the process
/// manager has finished initializing.
static mut __PROCESS_MANAGEMENT_INIT_DONE: bool = false;

/// The previous/next PCB pair recorded across one context switch.
#[derive(Debug)]
pub struct SwitchResult {
    pub prev_pcb: Option<Arc<ProcessControlBlock>>,
    pub next_pcb: Option<Arc<ProcessControlBlock>>,
}

impl SwitchResult {
    pub fn new() -> Self {
        Self {
            prev_pcb: None,
            next_pcb: None,
        }
    }
}

#[derive(Debug)]
pub struct ProcessManager;
impl ProcessManager {
    /// One-shot initialization of the process manager; panics if called twice.
    #[inline(never)]
    fn init() {
        static INIT_FLAG: AtomicBool = AtomicBool::new(false);
        // compare_exchange guarantees only the first caller proceeds.
        if INIT_FLAG
            .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
            .is_err()
        {
            panic!("ProcessManager has been initialized!");
        }

        unsafe {
            compiler_fence(Ordering::SeqCst);
            kdebug!("To create address space for INIT process.");
            // test_buddy();
            set_IDLE_PROCESS_ADDRESS_SPACE(
                AddressSpace::new(true).expect("Failed to create address space for INIT process."),
            );
            kdebug!("INIT process address space created.");
            compiler_fence(Ordering::SeqCst);
        };

        ALL_PROCESS.lock_irqsave().replace(HashMap::new());
        Self::init_switch_result();
        Self::arch_init();
        kdebug!("process arch init done.");
        Self::init_idle();
        kdebug!("process idle init done.");

        unsafe { __PROCESS_MANAGEMENT_INIT_DONE = true };
        kinfo!("Process Manager initialized.");
    }

    /// Allocates one `SwitchResult` slot per possible CPU.
    fn init_switch_result() {
        let mut switch_res_vec: Vec<SwitchResult> = Vec::new();
        for _ in 0..PerCpu::MAX_CPU_NUM {
            switch_res_vec.push(SwitchResult::new());
        }
        unsafe {
            PROCESS_SWITCH_RESULT = Some(PerCpuVar::new(switch_res_vec).unwrap());
        }
    }

    /// Returns whether the process manager has finished initializing.
    pub fn initialized() -> bool {
        unsafe { __PROCESS_MANAGEMENT_INIT_DONE }
    }

    /// Gets the PCB of the current process.
    ///
    /// Spins forever if called before the process manager is initialized,
    /// since there is no valid current PCB yet (fatal misuse).
    pub fn current_pcb() -> Arc<ProcessControlBlock> {
        if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) {
            kerror!("unsafe__PROCESS_MANAGEMENT_INIT_DONE == false");
            loop {
                spin_loop();
            }
        }
        return ProcessControlBlock::arch_current_pcb();
    }

    /// Gets the pid of the current process.
    ///
    /// Returns 0 if the process manager has not finished initializing.
    pub fn current_pid() -> Pid {
        if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) {
            return Pid(0);
        }

        return ProcessManager::current_pcb().pid();
    }

    /// Increases the current process's lock-holding (preempt) count.
    #[inline(always)]
    pub fn preempt_disable() {
        if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) {
            ProcessManager::current_pcb().preempt_disable();
        }
    }

    /// Decreases the current process's lock-holding (preempt) count.
    #[inline(always)]
    pub fn preempt_enable() {
        if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) {
            ProcessManager::current_pcb().preempt_enable();
        }
    }

    /// Looks up a process's PCB by pid.
    ///
    /// ## Parameters
    ///
    /// - `pid` : the pid of the process
    ///
    /// ## Returns
    ///
    /// The PCB of the process if found, otherwise `None`.
    pub fn find(pid: Pid) -> Option<Arc<ProcessControlBlock>> {
        return ALL_PROCESS.lock_irqsave().as_ref()?.get(&pid).cloned();
    }

    /// Registers a process's PCB in the global table.
    ///
    /// ## Parameters
    ///
    /// - `pcb` : the PCB of the process
    ///
    /// ## Returns
    ///
    /// None
    pub fn add_pcb(pcb: Arc<ProcessControlBlock>) {
        ALL_PROCESS
            .lock_irqsave()
            .as_mut()
            .unwrap()
            .insert(pcb.pid(), pcb.clone());
    }

    /// Wakes up a blocked process and enqueues it on a run queue.
    pub fn wakeup(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> {
        let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        let state = pcb.sched_info().inner_lock_read_irqsave().state();
        if state.is_blocked() {
            let mut writer = pcb.sched_info().inner_lock_write_irqsave();
            // Re-check the state under the write lock: it may have changed
            // between the read above and acquiring the writer.
            let state = writer.state();
            if state.is_blocked() {
                writer.set_state(ProcessState::Runnable);
                writer.set_wakeup();

                // avoid deadlock
                drop(writer);

                // Prefer the CPU the task last ran on; fall back to this CPU.
                let rq =
                    cpu_rq(pcb.sched_info().on_cpu().unwrap_or(current_cpu_id()).data() as usize);

                let (rq, _guard) = rq.self_lock();
                rq.update_rq_clock();
                rq.activate_task(
                    pcb,
                    EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK,
                );

                rq.check_preempt_currnet(pcb, WakeupFlags::empty());

                // sched_enqueue(pcb.clone(), true);
                return Ok(());
            } else if state.is_exited() {
                return Err(SystemError::EINVAL);
            } else {
                // Already runnable: waking is a no-op.
                return Ok(());
            }
        } else if state.is_exited() {
            return Err(SystemError::EINVAL);
        } else {
            return Ok(());
        }
    }

    /// Wakes up a stopped process.
    pub fn wakeup_stop(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> {
        let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        let state = pcb.sched_info().inner_lock_read_irqsave().state();
        if let ProcessState::Stopped = state {
            let mut writer = pcb.sched_info().inner_lock_write_irqsave();
            // Re-check under the write lock (same double-check as `wakeup`).
            let state = writer.state();
            if let ProcessState::Stopped = state {
                writer.set_state(ProcessState::Runnable);
                // avoid deadlock
                drop(writer);

                let rq = cpu_rq(pcb.sched_info().on_cpu().unwrap().data() as usize);

                let (rq, _guard) = rq.self_lock();
                rq.update_rq_clock();
                rq.activate_task(
                    pcb,
                    EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK,
                );

                rq.check_preempt_currnet(pcb, WakeupFlags::empty());

                // sched_enqueue(pcb.clone(), true);
                return Ok(());
            } else if state.is_runnable() {
                return Ok(());
            } else {
                return Err(SystemError::EINVAL);
            }
        } else if state.is_runnable() {
            return Ok(());
        } else {
            return Err(SystemError::EINVAL);
        }
    }

    /// Marks the current process as (permanently) sleeping; the actual
    /// reschedule is the caller's responsibility.
    ///
    /// ## Notes
    ///
    /// - The sched_info lock must NOT be held when entering this function.
    /// - Interrupts must be disabled before entering this function.
    /// - After this function returns, the caller must guarantee correctness,
    ///   i.e. the process must not be enqueued on a run queue twice.
    pub fn mark_sleep(interruptable: bool) -> Result<(), SystemError> {
        assert!(
            !CurrentIrqArch::is_irq_enabled(),
            "interrupt must be disabled before enter ProcessManager::mark_sleep()"
        );
        let pcb = ProcessManager::current_pcb();
        let mut writer = pcb.sched_info().inner_lock_write_irqsave();
        if !matches!(writer.state(), ProcessState::Exited(_)) {
            writer.set_state(ProcessState::Blocked(interruptable));
            writer.set_sleep();
            pcb.flags().insert(ProcessFlags::NEED_SCHEDULE);
            fence(Ordering::SeqCst);
            drop(writer);
            return Ok(());
        }
        return Err(SystemError::EINTR);
    }

    /// Marks the current process as stopped; the actual reschedule is the
    /// caller's responsibility.
    ///
    /// ## Notes
    ///
    /// - The sched_info lock must NOT be held when entering this function.
    /// - Interrupts must be disabled before entering this function.
    pub fn mark_stop() -> Result<(), SystemError> {
        assert!(
            !CurrentIrqArch::is_irq_enabled(),
            "interrupt must be disabled before enter ProcessManager::mark_stop()"
        );

        let pcb = ProcessManager::current_pcb();
        let mut writer = pcb.sched_info().inner_lock_write_irqsave();
        if !matches!(writer.state(), ProcessState::Exited(_)) {
            writer.set_state(ProcessState::Stopped);
            pcb.flags().insert(ProcessFlags::NEED_SCHEDULE);
            drop(writer);

            return Ok(());
        }
        return Err(SystemError::EINTR);
    }
    /// Notifies the parent process after a child process has exited.
    fn exit_notify() {
        let current = ProcessManager::current_pcb();
        // Let the INIT process adopt all children of the exiting process.
        if current.pid() != Pid(1) {
            unsafe {
                current
                    .adopt_childen()
                    .unwrap_or_else(|e| panic!("adopte_childen failed: error: {e:?}"))
            };
            let r = current.parent_pcb.read_irqsave().upgrade();
            if r.is_none() {
                return;
            }
            let parent_pcb = r.unwrap();
            let r = Syscall::kill(parent_pcb.pid(), Signal::SIGCHLD as i32);
            if r.is_err() {
                kwarn!(
                    "failed to send kill signal to {:?}'s parent pcb {:?}",
                    current.pid(),
                    parent_pcb.pid()
                );
            }
            // todo: send SIGCHLD to the parent process here
            // todo: also decide signal delivery based on thread-group info
        }
    }

    /// Exits the current process.
    ///
    /// ## Parameters
    ///
    /// - `exit_code` : the exit code of the process
    pub fn exit(exit_code: usize) -> ! {
        // Disable interrupts for the whole teardown.
        let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        let pcb = ProcessManager::current_pcb();
        let pid = pcb.pid();
        pcb.sched_info
            .inner_lock_write_irqsave()
            .set_state(ProcessState::Exited(exit_code));
        // Wake interruptible waiters (e.g. a parent in wait()).
        pcb.wait_queue.wakeup(Some(ProcessState::Blocked(true)));

        let rq = cpu_rq(smp_get_processor_id().data() as usize);
        let (rq, guard) = rq.self_lock();
        rq.deactivate_task(
            pcb.clone(),
            DequeueFlag::DEQUEUE_SLEEP | DequeueFlag::DEQUEUE_NOCLOCK,
        );
        drop(guard);

        // Post-exit cleanup work.
        let thread = pcb.thread.write_irqsave();
        if let Some(addr) = thread.set_child_tid {
            unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") };
        }

        if let Some(addr) = thread.clear_child_tid {
            // Only futex-wake if the address space is still shared with others.
            if Arc::strong_count(&pcb.basic().user_vm().expect("User VM Not found")) > 1 {
                let _ =
                    Futex::futex_wake(addr, FutexFlag::FLAGS_MATCH_NONE, 1, FUTEX_BITSET_MATCH_ANY);
            }
            unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") };
        }

        RobustListHead::exit_robust_list(pcb.clone());

        // If this process was created via vfork, complete the completion so
        // the parent can resume.
        if thread.vfork_done.is_some() {
            thread.vfork_done.as_ref().unwrap().complete_all();
        }
        drop(thread);
        unsafe { pcb.basic_mut().set_user_vm(None) };
        drop(pcb);
        ProcessManager::exit_notify();
        // unsafe { CurrentIrqArch::interrupt_enable() };
        __schedule(SchedMode::SM_NONE);
        // Control must never come back here after the final schedule.
        kerror!("pid {pid:?} exited but sched again!");
        #[allow(clippy::empty_loop)]
        loop {
            spin_loop();
        }
    }

    /// Removes a process's PCB from the global table by pid.
    pub unsafe fn release(pid: Pid) {
        let pcb = ProcessManager::find(pid);
        if pcb.is_some() {
            // let pcb = pcb.unwrap();
            // Check whether this pcb has no remaining references globally.
            // TODO: currently the pcb's Arc pointer leaks and the refcount is
            // incorrect; the plan is to implement a debug-only Arc to help
            // debugging and then fix this bug.
            // The check is therefore commented out for now so things can run.
            // if Arc::strong_count(&pcb) <= 2 {
            // drop(pcb);
            // ALL_PROCESS.lock().as_mut().unwrap().remove(&pid);
            // } else {
            // // panic if the count is not 1
            // let msg = format!("pcb '{:?}' is still referenced, strong count={}",pcb.pid(), Arc::strong_count(&pcb));
            // kerror!("{}", msg);
            // panic!()
            // }

            ALL_PROCESS.lock_irqsave().as_mut().unwrap().remove(&pid);
        }
    }

    /// Hook function that runs after a context switch has completed.
    unsafe fn switch_finish_hook() {
        // kdebug!("switch_finish_hook");
        let prev_pcb = PROCESS_SWITCH_RESULT
            .as_mut()
            .unwrap()
            .get_mut()
            .prev_pcb
            .take()
            .expect("prev_pcb is None");
        let next_pcb = PROCESS_SWITCH_RESULT
            .as_mut()
            .unwrap()
            .get_mut()
            .next_pcb
            .take()
            .expect("next_pcb is None");

        // SpinLockGuard::leak() was used before the switch, so the locks must
        // be released manually here.
        fence(Ordering::SeqCst);

        prev_pcb.arch_info.force_unlock();
        fence(Ordering::SeqCst);

        next_pcb.arch_info.force_unlock();
        fence(Ordering::SeqCst);
    }

    /// If the target process is currently running on the target CPU, force
    /// that CPU to trap into kernel mode.
    ///
    /// ## Parameters
    ///
    /// - `pcb` : the PCB of the process
    #[allow(dead_code)]
    pub fn kick(pcb: &Arc<ProcessControlBlock>) {
        ProcessManager::current_pcb().preempt_disable();
        let cpu_id = pcb.sched_info().on_cpu();

        if let Some(cpu_id) = cpu_id {
            if pcb.pid() == cpu_rq(cpu_id.data() as usize).current().pid() {
                kick_cpu(cpu_id).expect("ProcessManager::kick(): Failed to kick cpu");
            }
        }

        ProcessManager::current_pcb().preempt_enable();
    }
}

/// Context-switch hook: when this function returns, the context switch takes
/// place.
#[cfg(target_arch = "x86_64")]
#[inline(never)]
pub unsafe extern "sysv64" fn switch_finish_hook() {
    ProcessManager::switch_finish_hook();
}
#[cfg(target_arch = "riscv64")]
#[inline(always)]
pub unsafe fn switch_finish_hook() {
    ProcessManager::switch_finish_hook();
}

int_like!(Pid, AtomicPid, usize, AtomicUsize);

impl ToString for Pid {
    fn to_string(&self) -> String {
        self.0.to_string()
    }
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ProcessState {
    /// The process is running on a CPU or in a run queue.
    Runnable,
    /// The process is waiting for an event to occur.
    /// The inner bool indicates whether the wait can be interrupted:
    /// - if true, hardware interrupts / signals / other system events may
    ///   interrupt the wait and return the process to `Runnable`;
    /// - if false, the process must be woken explicitly to become `Runnable`.
    Blocked(bool),
    /// The process was stopped by a signal.
    Stopped,
    /// The process has exited; the usize is the exit code.
    Exited(usize),
}

#[allow(dead_code)]
impl ProcessState {
    #[inline(always)]
    pub fn is_runnable(&self) -> bool {
        return matches!(self, ProcessState::Runnable);
    }

    #[inline(always)]
    pub fn is_blocked(&self) -> bool {
        return matches!(self, ProcessState::Blocked(_));
    }

    #[inline(always)]
    pub fn is_blocked_interruptable(&self) -> bool {
        return matches!(self, ProcessState::Blocked(true));
    }

    /// Returns `true` if the process state is [`Exited`].
    #[inline(always)]
    pub fn is_exited(&self) -> bool {
        return matches!(self, ProcessState::Exited(_));
    }

    /// Returns `true` if the process state is [`Stopped`].
    ///
    /// [`Stopped`]: ProcessState::Stopped
    #[inline(always)]
    pub fn is_stopped(&self) -> bool {
        matches!(self, ProcessState::Stopped)
    }

    /// Returns exit code if the process state is [`Exited`].
    #[inline(always)]
    pub fn exit_code(&self) -> Option<usize> {
        match self {
            ProcessState::Exited(code) => Some(*code),
            _ => None,
        }
    }
}

bitflags! {
    /// PCB flag bits
    pub struct ProcessFlags: usize {
        /// This PCB represents a kernel thread
        const KTHREAD = 1 << 0;
        /// This process needs to be scheduled
        const NEED_SCHEDULE = 1 << 1;
        /// This process shares resources with its parent due to vfork
        const VFORK = 1 << 2;
        /// This process cannot be frozen
        const NOFREEZE = 1 << 3;
        /// This process is exiting
        const EXITING = 1 << 4;
        /// This process was woken by receiving a fatal signal
        const WAKEKILL = 1 << 5;
        /// This process exited due to a signal (killed by a signal)
        const SIGNALED = 1 << 6;
        /// This process needs to be migrated to another CPU
        const NEED_MIGRATE = 1 << 7;
        /// Randomized virtual address space, mainly used when loading the
        /// dynamic linker
        const RANDOMIZE = 1 << 8;
    }
}

/// Process control block: the kernel's per-process bookkeeping structure.
#[derive(Debug)]
pub struct ProcessControlBlock {
    /// The pid of this process
    pid: Pid,
    /// The thread-group id of this process (never changes within a thread group)
    tgid: Pid,

    basic: RwLock<ProcessBasicInfo>,
    /// The spinlock-holding (preempt) count of this process
    preempt_count: AtomicUsize,

    flags: LockFreeFlags<ProcessFlags>,
    worker_private: SpinLock<Option<WorkerPrivate>>,
    /// The kernel stack of this process
    kernel_stack: RwLock<KernelStack>,

    /// The syscall stack
    syscall_stack: RwLock<KernelStack>,

    /// Scheduling-related information
    sched_info: ProcessSchedulerInfo,
    /// Architecture-specific information
    arch_info: SpinLock<ArchPCBInfo>,
    /// Signal-handling-related information (could perhaps be lock-free)
    sig_info: RwLock<ProcessSignalInfo>,
    /// Signal handling structure
    sig_struct: SpinLock<SignalStruct>,
    /// Exit signal
    exit_signal: AtomicSignal,

    /// Parent process pointer
    parent_pcb: RwLock<Weak<ProcessControlBlock>>,
    /// Real parent process pointer
    real_parent_pcb: RwLock<Weak<ProcessControlBlock>>,

    /// Child process list
    children: RwLock<Vec<Pid>>,

    /// Wait queue
    wait_queue: WaitQueue,

    /// Thread information
    thread: RwLock<ThreadInfo>,

    /// Alarm timer
    alarm_timer: SpinLock<Option<AlarmTimer>>,

    /// The robust lock list of this process
    robust_list: RwLock<Option<RobustListHead>>,
}

impl ProcessControlBlock {
    /// Generate a new pcb.
    ///
    /// ## Parameters
    ///
    /// - `name` : the name of the process
    /// - `kstack` : the kernel stack of the process
    ///
    /// ## Returns
    ///
    /// A new pcb
    pub fn new(name: String, kstack: KernelStack) -> Arc<Self> {
        return Self::do_create_pcb(name, kstack, false);
    }

    /// Creates a new idle process.
    ///
    /// Note: this function must only be called during process-manager
    /// initialization.
    pub fn new_idle(cpu_id: u32, kstack: KernelStack) -> Arc<Self> {
        let name = format!("idle-{}", cpu_id);
        return Self::do_create_pcb(name, kstack, true);
    }

    /// Shared construction path for both normal and idle PCBs.
    #[inline(never)]
    fn do_create_pcb(name: String, kstack: KernelStack, is_idle: bool) -> Arc<Self> {
        // Idle processes are pid 0, parented to pid 0, rooted at "/";
        // everything else inherits ppid and cwd from the current process.
        let (pid, ppid, cwd) = if is_idle {
            (Pid(0), Pid(0), "/".to_string())
        } else {
            let ppid = ProcessManager::current_pcb().pid();
            let cwd = ProcessManager::current_pcb().basic().cwd();
            (Self::generate_pid(), ppid, cwd)
        };

        let basic_info = ProcessBasicInfo::new(Pid(0), ppid, name, cwd, None);
        let preempt_count = AtomicUsize::new(0);
        let flags = unsafe { LockFreeFlags::new(ProcessFlags::empty()) };

        let sched_info = ProcessSchedulerInfo::new(None);
        let arch_info = SpinLock::new(ArchPCBInfo::new(&kstack));

        let ppcb: Weak<ProcessControlBlock> = ProcessManager::find(ppid)
            .map(|p| Arc::downgrade(&p))
            .unwrap_or_default();

        let pcb = Self {
            pid,
            tgid: pid,
            basic: basic_info,
            preempt_count,
            flags,
            kernel_stack: RwLock::new(kstack),
            syscall_stack: RwLock::new(KernelStack::new().unwrap()),
            worker_private: SpinLock::new(None),
            sched_info,
            arch_info,
            sig_info: RwLock::new(ProcessSignalInfo::default()),
            sig_struct: SpinLock::new(SignalStruct::new()),
            exit_signal: AtomicSignal::new(Signal::SIGCHLD),
            parent_pcb: RwLock::new(ppcb.clone()),
            real_parent_pcb: RwLock::new(ppcb),
            children: RwLock::new(Vec::new()),
            wait_queue: WaitQueue::default(),
            thread: RwLock::new(ThreadInfo::new()),
            alarm_timer: SpinLock::new(None),
            robust_list: RwLock::new(None),
        };

        // Initialize the syscall stack
        #[cfg(target_arch = "x86_64")]
        pcb.arch_info
            .lock()
            .init_syscall_stack(&pcb.syscall_stack.read());

        let pcb = Arc::new(pcb);

        pcb.sched_info()
            .sched_entity()
            .force_mut()
            .set_pcb(Arc::downgrade(&pcb));
        // Store the process's Arc pointer at the lowest address of the kernel
        // stack and the syscall stack.
        unsafe {
            pcb.kernel_stack
                .write()
                .set_pcb(Arc::downgrade(&pcb))
                .unwrap();

            pcb.syscall_stack
                .write()
                .set_pcb(Arc::downgrade(&pcb))
                .unwrap()
        };

        // Add this pcb to the parent process's child list.
        if pcb.pid() > Pid(1) {
            if let Some(ppcb_arc) = pcb.parent_pcb.read_irqsave().upgrade() {
                let mut children = ppcb_arc.children.write_irqsave();
                children.push(pcb.pid());
            } else {
                panic!("parent pcb is None");
            }
        }

        return pcb;
    }

    /// Generates a new pid.
    #[inline(always)]
    fn generate_pid() -> Pid {
        static NEXT_PID: AtomicPid = AtomicPid::new(Pid(1));
        return NEXT_PID.fetch_add(Pid(1), Ordering::SeqCst);
    }

    /// Returns this process's lock-holding (preempt) count.
    #[inline(always)]
    pub fn preempt_count(&self) -> usize {
        return self.preempt_count.load(Ordering::SeqCst);
    }

    /// Increases this process's lock-holding (preempt) count.
    #[inline(always)]
    pub fn preempt_disable(&self) {
        self.preempt_count.fetch_add(1, Ordering::SeqCst);
    }

    /// Decreases this process's lock-holding (preempt) count.
    #[inline(always)]
    pub fn preempt_enable(&self) {
        self.preempt_count.fetch_sub(1, Ordering::SeqCst);
    }

    #[inline(always)]
    pub unsafe fn set_preempt_count(&self, count: usize) {
        self.preempt_count.store(count, Ordering::SeqCst);
    }

    #[inline(always)]
    pub fn flags(&self) -> &mut ProcessFlags {
        return self.flags.get_mut();
    }

    /// Note: this value may be read in interrupt context but must not be
    /// modified there, otherwise a deadlock can occur.
    #[inline(always)]
    pub fn basic(&self) -> RwLockReadGuard<ProcessBasicInfo> {
        return self.basic.read_irqsave();
    }

    #[inline(always)]
    pub fn set_name(&self, name: String) {
        self.basic.write().set_name(name);
    }

    #[inline(always)]
    pub fn basic_mut(&self) -> RwLockWriteGuard<ProcessBasicInfo> {
        return self.basic.write_irqsave();
    }

    /// # Acquires the arch-info lock while also disabling interrupts.
    #[inline(always)]
    pub fn arch_info_irqsave(&self) -> SpinLockGuard<ArchPCBInfo> {
        return self.arch_info.lock_irqsave();
    }

    /// # Acquires the arch-info lock WITHOUT disabling interrupts.
    ///
    /// Because arch info is used during context switches, acquiring it
    /// outside interrupt context without irqsave is unsafe.
    ///
    /// This function may only be used when:
    /// - in interrupt context (interrupts already disabled), or
    /// - a new pcb has just been created.
    #[inline(always)]
    pub unsafe fn arch_info(&self) -> SpinLockGuard<ArchPCBInfo> {
        return self.arch_info.lock();
    }

    #[inline(always)]
    pub fn kernel_stack(&self) -> RwLockReadGuard<KernelStack> {
        return self.kernel_stack.read();
    }

    pub unsafe fn kernel_stack_force_ref(&self) -> &KernelStack {
        self.kernel_stack.force_get_ref()
    }

    #[inline(always)]
    #[allow(dead_code)]
    pub fn kernel_stack_mut(&self) -> RwLockWriteGuard<KernelStack> {
        return self.kernel_stack.write();
    }

    #[inline(always)]
    pub fn sched_info(&self) -> &ProcessSchedulerInfo {
        return &self.sched_info;
    }

    #[inline(always)]
    pub fn worker_private(&self) -> SpinLockGuard<Option<WorkerPrivate>> {
        return self.worker_private.lock();
    }

    #[inline(always)]
    pub fn pid(&self) -> Pid {
        return self.pid;
    }

    #[inline(always)]
    pub fn tgid(&self) -> Pid {
        return self.tgid;
    }

    /// Gets the Arc pointer to the file descriptor table.
    #[inline(always)]
    pub fn fd_table(&self) -> Arc<RwLock<FileDescriptorVec>> {
        return self.basic.read().fd_table().unwrap();
    }

    /// Gets the Arc pointer of a socket object by file descriptor number.
    ///
    /// ## Parameters
    ///
    /// - `fd` : the file descriptor number
    ///
    /// ## Returns
    ///
    /// Option(&mut Box<dyn Socket>) a mutable reference to the socket object;
    /// `None` if the file descriptor is not a socket.
    pub fn get_socket(&self, fd: i32) -> Option<Arc<SocketInode>> {
        let binding = ProcessManager::current_pcb().fd_table();
        let fd_table_guard = binding.read();

        let f = fd_table_guard.get_file_by_fd(fd)?;
        drop(fd_table_guard);

        if f.file_type() != FileType::Socket {
            return None;
        }
        let socket: Arc<SocketInode> = f
            .inode()
            .downcast_arc::<SocketInode>()
            .expect("Not a socket inode");
        return Some(socket);
    }

    /// When this process exits, let the init process adopt all its children.
    unsafe fn adopt_childen(&self) -> Result<(), SystemError> {
        match ProcessManager::find(Pid(1)) {
            Some(init_pcb) => {
                let childen_guard = self.children.write();
                let mut init_childen_guard = init_pcb.children.write();

                childen_guard.iter().for_each(|pid| {
                    init_childen_guard.push(*pid);
                });

                return Ok(());
            }
            _ => Err(SystemError::ECHILD),
        }
    }

    /// Generates the display name of a process: program path followed by its
    /// space-separated arguments.
    pub fn generate_name(program_path: &str, args: &Vec<String>) -> String {
        let mut name = program_path.to_string();
        for arg in args {
            name.push(' ');
            name.push_str(arg);
        }
        return name;
    }

    pub fn sig_info_irqsave(&self) -> RwLockReadGuard<ProcessSignalInfo> {
        self.sig_info.read_irqsave()
    }

    /// Tries up to `times` times to take a read lock on sig_info.
    pub fn try_siginfo_irqsave(&self, times: u8) -> Option<RwLockReadGuard<ProcessSignalInfo>> {
        for _ in 0..times {
            if let Some(r) = self.sig_info.try_read_irqsave() {
                return Some(r);
            }
        }

        return None;
    }

    pub fn sig_info_mut(&self) -> RwLockWriteGuard<ProcessSignalInfo> {
        self.sig_info.write_irqsave()
    }

    /// Tries up to `times` times to take a write lock on sig_info.
    pub fn try_siginfo_mut(&self, times: u8) -> Option<RwLockWriteGuard<ProcessSignalInfo>> {
        for _ in 0..times {
            if let Some(r) = self.sig_info.try_write_irqsave() {
                return Some(r);
            }
        }

        return None;
    }

    pub fn sig_struct(&self) -> SpinLockGuard<SignalStruct> {
        self.sig_struct.lock_irqsave()
    }

    /// Tries up to `times` times to take the sig_struct spinlock.
    pub fn try_sig_struct_irqsave(&self, times: u8) -> Option<SpinLockGuard<SignalStruct>> {
        for _ in 0..times {
            if let Ok(r) = self.sig_struct.try_lock_irqsave() {
                return Some(r);
            }
        }

        return None;
    }

    pub fn sig_struct_irqsave(&self) -> SpinLockGuard<SignalStruct> {
        self.sig_struct.lock_irqsave()
    }

    #[inline(always)]
    pub fn get_robust_list(&self) -> RwLockReadGuard<Option<RobustListHead>> {
        return self.robust_list.read_irqsave();
    }

    #[inline(always)]
    pub fn set_robust_list(&self, new_robust_list: Option<RobustListHead>) {
        *self.robust_list.write_irqsave() = new_robust_list;
    }

    pub fn alarm_timer_irqsave(&self) -> SpinLockGuard<Option<AlarmTimer>> {
        return self.alarm_timer.lock_irqsave();
    }
}

impl Drop for ProcessControlBlock {
    fn drop(&mut self) {
        let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        // Unregister the process from ProcFS.
        procfs_unregister_pid(self.pid())
            .unwrap_or_else(|e| panic!("procfs_unregister_pid failed: error: {e:?}"));

        // Remove this pid from the parent's child list.
        if let Some(ppcb) = self.parent_pcb.read_irqsave().upgrade() {
            ppcb.children
                .write_irqsave()
                .retain(|pid| *pid != self.pid());
        }

        drop(irq_guard);
    }
}

/// Thread information
#[derive(Debug)]
pub struct ThreadInfo {
    // Userspace address recording the user thread id; set to 0 when the
    // thread ends, to notify the parent process.
    clear_child_tid: Option<VirtAddr>,
    set_child_tid: Option<VirtAddr>,

    vfork_done: Option<Arc<Completion>>,
    /// The leader of the thread group
    group_leader: Weak<ProcessControlBlock>,
}

impl ThreadInfo {
    pub fn new() -> Self {
        Self {
            clear_child_tid: None,
            set_child_tid: None,
            vfork_done: None,
            group_leader: Weak::default(),
        }
    }

    pub fn group_leader(&self) -> Option<Arc<ProcessControlBlock>> {
        return self.group_leader.upgrade();
    }
}

/// Basic information of a process
///
/// This struct holds a process's basic information, mainly the pieces that do
/// not change frequently while the process runs.
#[derive(Debug)]
pub struct ProcessBasicInfo {
    /// The process group id of this process
    pgid: Pid,
    /// The pid of this process's parent
    ppid: Pid,
    /// The name of the process
    name: String,

    /// The current working directory of the process
    cwd: String,

    /// The user address space
    user_vm: Option<Arc<AddressSpace>>,

    /// The file descriptor table
    fd_table: Option<Arc<RwLock<FileDescriptorVec>>>,
}

impl ProcessBasicInfo {
    #[inline(never)]
    pub fn new(
        pgid: Pid,
        ppid: Pid,
        name: String,
        cwd: String,
        user_vm: Option<Arc<AddressSpace>>,
    ) -> RwLock<Self> {
        let fd_table = Arc::new(RwLock::new(FileDescriptorVec::new()));
        return RwLock::new(Self {
            pgid,
            ppid,
            name,
            cwd,
            user_vm,
            fd_table: Some(fd_table),
        });
    }

    pub fn pgid(&self) -> Pid {
        return self.pgid;
    }

    pub fn ppid(&self) -> Pid {
        return self.ppid;
    }

    pub fn name(&self) -> &str {
        return &self.name;
    }

    pub fn set_name(&mut self, name: String) {
        self.name = name;
    }

    pub fn cwd(&self) -> String {
        return self.cwd.clone();
    }
    pub fn set_cwd(&mut self, path: String) {
        return self.cwd = path;
    }

    pub fn user_vm(&self) -> Option<Arc<AddressSpace>> {
        return self.user_vm.clone();
    }

    pub unsafe fn set_user_vm(&mut self, user_vm: Option<Arc<AddressSpace>>) {
        self.user_vm = user_vm;
    }

    pub fn fd_table(&self) -> Option<Arc<RwLock<FileDescriptorVec>>> {
        return self.fd_table.clone();
    }

    pub fn set_fd_table(&mut self, fd_table: Option<Arc<RwLock<FileDescriptorVec>>>) {
        self.fd_table = fd_table;
    }
}

/// Scheduling-related information of a process
#[derive(Debug)]
pub struct ProcessSchedulerInfo {
    /// The CPU this process is currently on
    on_cpu: AtomicProcessorId,
    /// If the process is waiting to be migrated to another CPU core (i.e.
    /// PF_NEED_MIGRATE is set in flags), this field stores the target
    /// processor core number
    // migrate_to: AtomicProcessorId,
    inner_locked: RwLock<InnerSchedInfo>,
    /// The scheduling priority of the process
    // priority: SchedPriority,
    /// The virtual runtime of this process
    // virtual_runtime: AtomicIsize,
    /// Time slice managed by the realtime scheduler
    // rt_time_slice: AtomicIsize,
    pub sched_stat: RwLock<SchedInfo>,
    /// The scheduling policy
    pub sched_policy: RwLock<crate::sched::SchedPolicy>,
    /// The CFS scheduling entity
    pub sched_entity: Arc<FairSchedEntity>,
    pub on_rq: SpinLock<OnRq>,

    pub prio_data: RwLock<PrioData>,
}

#[derive(Debug, Default)]
pub struct SchedInfo {
    /// Number of times the task has run on a particular CPU
    pub pcount: usize,
    /// Time the task has spent waiting on a run queue
    pub run_delay: usize,
    /// Timestamp of the task's last run on a CPU
    pub last_arrival: u64,
    /// Timestamp of the last time the task was put on a run queue
    pub last_queued: u64,
}

#[derive(Debug)]
pub struct PrioData {
    pub prio: i32,
    pub static_prio: i32,
    pub normal_prio: i32,
}

impl Default for PrioData {
    fn default() -> Self {
        Self {
            prio: MAX_PRIO - 20,
            static_prio: MAX_PRIO - 20,
            normal_prio: MAX_PRIO - 20,
        }
    }
}

#[derive(Debug)]
pub struct InnerSchedInfo {
    /// The current state of the process
    state: ProcessState,
    /// Whether the process was marked sleeping via `set_sleep`
    /// (see `ProcessManager::mark_sleep`)
    sleep: bool,
}

impl InnerSchedInfo {
    pub fn state(&self) -> ProcessState {
        return self.state;
    }

    pub fn set_state(&mut self, state: ProcessState) {
        self.state = state;
    }

    pub fn set_sleep(&mut self) {
        self.sleep = true;
    }

    pub fn set_wakeup(&mut self) {
        self.sleep = false;
    }

    pub fn is_mark_sleep(&self) -> bool {
        self.sleep
    }
}

impl ProcessSchedulerInfo {
    #[inline(never)]
    pub fn new(on_cpu: Option<ProcessorId>) -> Self {
        let cpu_id = on_cpu.unwrap_or(ProcessorId::INVALID);
        return Self {
            on_cpu: AtomicProcessorId::new(cpu_id),
            // migrate_to: AtomicProcessorId::new(ProcessorId::INVALID),
            inner_locked: RwLock::new(InnerSchedInfo {
                state: ProcessState::Blocked(false),
                sleep: false,
            }),
            // virtual_runtime: AtomicIsize::new(0),
            // rt_time_slice: AtomicIsize::new(0),
            // priority: SchedPriority::new(100).unwrap(),
            sched_stat: RwLock::new(SchedInfo::default()),
            sched_policy: RwLock::new(crate::sched::SchedPolicy::CFS),
            sched_entity: FairSchedEntity::new(),
            on_rq: SpinLock::new(OnRq::None),
            prio_data: RwLock::new(PrioData::default()),
        };
    }

    pub fn sched_entity(&self) -> Arc<FairSchedEntity> {
        return self.sched_entity.clone();
    }

    /// Returns the CPU this process is on, or `None` if not on any CPU.
    pub fn on_cpu(&self) -> Option<ProcessorId> {
        let on_cpu = self.on_cpu.load(Ordering::SeqCst);
        if on_cpu == ProcessorId::INVALID {
            return None;
        } else {
            return Some(on_cpu);
        }
    }

    pub fn set_on_cpu(&self, on_cpu: Option<ProcessorId>) {
        if let Some(cpu_id) = on_cpu {
            self.on_cpu.store(cpu_id, Ordering::SeqCst);
        } else {
            self.on_cpu.store(ProcessorId::INVALID, Ordering::SeqCst);
        }
    }

    // pub fn migrate_to(&self) -> Option<ProcessorId> {
    //     let migrate_to = self.migrate_to.load(Ordering::SeqCst);
    //     if migrate_to == ProcessorId::INVALID {
    //         return None;
    //     } else {
    //         return Some(migrate_to);
    //     }
    // }

    // pub fn set_migrate_to(&self, migrate_to: Option<ProcessorId>) {
    //     if let Some(data) = migrate_to {
    //         self.migrate_to.store(data, Ordering::SeqCst);
    //     } else {
    //         self.migrate_to
    //             .store(ProcessorId::INVALID, Ordering::SeqCst)
    //     }
    // }

    pub fn inner_lock_write_irqsave(&self) -> RwLockWriteGuard<InnerSchedInfo> {
        return self.inner_locked.write_irqsave();
    }

    pub fn inner_lock_read_irqsave(&self) -> RwLockReadGuard<InnerSchedInfo> {
        return self.inner_locked.read_irqsave();
    }

    // pub fn inner_lock_try_read_irqsave(
    //     &self,
    //     times: u8,
    // ) -> Option<RwLockReadGuard<InnerSchedInfo>> {
    //     for _ in 0..times {
    //         if let Some(r) = self.inner_locked.try_read_irqsave() {
    //             return Some(r);
    //         }
    //     }

    //     return None;
    // }

    // pub fn inner_lock_try_upgradable_read_irqsave(
    //     &self,
    //     times: u8,
    // ) -> Option<RwLockUpgradableGuard<InnerSchedInfo>> {
    //     for _ in 0..times {
    //         if let Some(r) = self.inner_locked.try_upgradeable_read_irqsave() {
    //             return Some(r);
    //         }
    //     }

    //     return None;
    // }

    // pub fn virtual_runtime(&self) -> isize {
    //     return self.virtual_runtime.load(Ordering::SeqCst);
    // }

    // pub fn set_virtual_runtime(&self, virtual_runtime: isize) {
    //     self.virtual_runtime
    //         .store(virtual_runtime, Ordering::SeqCst);
    // }
    // pub fn increase_virtual_runtime(&self, delta: isize) {
    //     self.virtual_runtime.fetch_add(delta, Ordering::SeqCst);
    // }

    // pub fn rt_time_slice(&self) -> isize {
    //     return self.rt_time_slice.load(Ordering::SeqCst);
    // }

    // pub fn set_rt_time_slice(&self, rt_time_slice: isize) {
    //     self.rt_time_slice.store(rt_time_slice, Ordering::SeqCst);
    // }

    // pub fn increase_rt_time_slice(&self, delta: isize) {
    //     self.rt_time_slice.fetch_add(delta, Ordering::SeqCst);
    // }

    pub fn policy(&self) -> crate::sched::SchedPolicy {
        return *self.sched_policy.read_irqsave();
    }
}

#[derive(Debug, Clone)]
pub struct KernelStack {
    stack: Option<AlignedBox<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>>,
    /// Marks whether this kernel stack may be freed
    can_be_freed: bool,
}

impl KernelStack {
    pub const SIZE: usize = 0x4000;
    pub const ALIGN: usize = 0x4000;

    pub fn new() -> Result<Self, SystemError> {
        return Ok(Self {
            stack: Some(
                AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_zeroed()?,
            ),
            can_be_freed: true,
        });
    }

    /// Constructs a kernel-stack struct from already-existing memory
    ///
1337 /// 仅仅用于BSP启动时,为idle进程构造内核栈。其他时候使用这个函数,很可能造成错误! 1338 pub unsafe fn from_existed(base: VirtAddr) -> Result<Self, SystemError> { 1339 if base.is_null() || !base.check_aligned(Self::ALIGN) { 1340 return Err(SystemError::EFAULT); 1341 } 1342 1343 return Ok(Self { 1344 stack: Some( 1345 AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_unchecked( 1346 base.data() as *mut [u8; KernelStack::SIZE], 1347 ), 1348 ), 1349 can_be_freed: false, 1350 }); 1351 } 1352 1353 /// 返回内核栈的起始虚拟地址(低地址) 1354 pub fn start_address(&self) -> VirtAddr { 1355 return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize); 1356 } 1357 1358 /// 返回内核栈的结束虚拟地址(高地址)(不包含该地址) 1359 pub fn stack_max_address(&self) -> VirtAddr { 1360 return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize + Self::SIZE); 1361 } 1362 1363 pub unsafe fn set_pcb(&mut self, pcb: Weak<ProcessControlBlock>) -> Result<(), SystemError> { 1364 // 将一个Weak<ProcessControlBlock>放到内核栈的最低地址处 1365 let p: *const ProcessControlBlock = Weak::into_raw(pcb); 1366 let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock; 1367 1368 // 如果内核栈的最低地址处已经有了一个pcb,那么,这里就不再设置,直接返回错误 1369 if unlikely(unsafe { !(*stack_bottom_ptr).is_null() }) { 1370 kerror!("kernel stack bottom is not null: {:p}", *stack_bottom_ptr); 1371 return Err(SystemError::EPERM); 1372 } 1373 // 将pcb的地址放到内核栈的最低地址处 1374 unsafe { 1375 *stack_bottom_ptr = p; 1376 } 1377 1378 return Ok(()); 1379 } 1380 1381 /// 清除内核栈的pcb指针 1382 /// 1383 /// ## 参数 1384 /// 1385 /// - `force` : 如果为true,那么,即使该内核栈的pcb指针不为null,也会被强制清除而不处理Weak指针问题 1386 pub unsafe fn clear_pcb(&mut self, force: bool) { 1387 let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock; 1388 if unlikely(unsafe { (*stack_bottom_ptr).is_null() }) { 1389 return; 1390 } 1391 1392 if !force { 1393 let pcb_ptr: Weak<ProcessControlBlock> = Weak::from_raw(*stack_bottom_ptr); 1394 drop(pcb_ptr); 1395 } 1396 1397 *stack_bottom_ptr = 
core::ptr::null(); 1398 } 1399 1400 /// 返回指向当前内核栈pcb的Arc指针 1401 #[allow(dead_code)] 1402 pub unsafe fn pcb(&self) -> Option<Arc<ProcessControlBlock>> { 1403 // 从内核栈的最低地址处取出pcb的地址 1404 let p = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock; 1405 if unlikely(unsafe { (*p).is_null() }) { 1406 return None; 1407 } 1408 1409 // 为了防止内核栈的pcb指针被释放,这里需要将其包装一下,使得Arc的drop不会被调用 1410 let weak_wrapper: ManuallyDrop<Weak<ProcessControlBlock>> = 1411 ManuallyDrop::new(Weak::from_raw(*p)); 1412 1413 let new_arc: Arc<ProcessControlBlock> = weak_wrapper.upgrade()?; 1414 return Some(new_arc); 1415 } 1416 } 1417 1418 impl Drop for KernelStack { 1419 fn drop(&mut self) { 1420 if self.stack.is_some() { 1421 let ptr = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock; 1422 if unsafe { !(*ptr).is_null() } { 1423 let pcb_ptr: Weak<ProcessControlBlock> = unsafe { Weak::from_raw(*ptr) }; 1424 drop(pcb_ptr); 1425 } 1426 } 1427 // 如果该内核栈不可以被释放,那么,这里就forget,不调用AlignedBox的drop函数 1428 if !self.can_be_freed { 1429 let bx = self.stack.take(); 1430 core::mem::forget(bx); 1431 } 1432 } 1433 } 1434 1435 pub fn process_init() { 1436 ProcessManager::init(); 1437 } 1438 1439 #[derive(Debug)] 1440 pub struct ProcessSignalInfo { 1441 // 当前进程 1442 sig_block: SigSet, 1443 // sig_pending 中存储当前线程要处理的信号 1444 sig_pending: SigPending, 1445 // sig_shared_pending 中存储当前线程所属进程要处理的信号 1446 sig_shared_pending: SigPending, 1447 // 当前进程对应的tty 1448 tty: Option<Arc<TtyCore>>, 1449 } 1450 1451 impl ProcessSignalInfo { 1452 pub fn sig_block(&self) -> &SigSet { 1453 &self.sig_block 1454 } 1455 1456 pub fn sig_pending(&self) -> &SigPending { 1457 &self.sig_pending 1458 } 1459 1460 pub fn sig_pending_mut(&mut self) -> &mut SigPending { 1461 &mut self.sig_pending 1462 } 1463 1464 pub fn sig_block_mut(&mut self) -> &mut SigSet { 1465 &mut self.sig_block 1466 } 1467 1468 pub fn sig_shared_pending_mut(&mut self) -> &mut SigPending { 1469 &mut self.sig_shared_pending 1470 } 1471 1472 
pub fn sig_shared_pending(&self) -> &SigPending { 1473 &self.sig_shared_pending 1474 } 1475 1476 pub fn tty(&self) -> Option<Arc<TtyCore>> { 1477 self.tty.clone() 1478 } 1479 1480 pub fn set_tty(&mut self, tty: Arc<TtyCore>) { 1481 self.tty = Some(tty); 1482 } 1483 1484 /// 从 pcb 的 siginfo中取出下一个要处理的信号,先处理线程信号,再处理进程信号 1485 /// 1486 /// ## 参数 1487 /// 1488 /// - `sig_mask` 被忽略掉的信号 1489 /// 1490 pub fn dequeue_signal(&mut self, sig_mask: &SigSet) -> (Signal, Option<SigInfo>) { 1491 let res = self.sig_pending.dequeue_signal(sig_mask); 1492 if res.0 != Signal::INVALID { 1493 return res; 1494 } else { 1495 return self.sig_shared_pending.dequeue_signal(sig_mask); 1496 } 1497 } 1498 } 1499 1500 impl Default for ProcessSignalInfo { 1501 fn default() -> Self { 1502 Self { 1503 sig_block: SigSet::empty(), 1504 sig_pending: SigPending::default(), 1505 sig_shared_pending: SigPending::default(), 1506 tty: None, 1507 } 1508 } 1509 } 1510