use core::{
    fmt,
    hash::Hash,
    hint::spin_loop,
    intrinsics::{likely, unlikely},
    mem::ManuallyDrop,
    sync::atomic::{compiler_fence, fence, AtomicBool, AtomicUsize, Ordering},
};

use alloc::{
    string::{String, ToString},
    sync::{Arc, Weak},
    vec::Vec,
};
use hashbrown::HashMap;
use log::{debug, error, info, warn};
use system_error::SystemError;

use crate::{
    arch::{
        cpu::current_cpu_id,
        ipc::signal::{AtomicSignal, SigSet, Signal},
        process::ArchPCBInfo,
        CurrentIrqArch,
    },
    driver::tty::tty_core::TtyCore,
    exception::InterruptArch,
    filesystem::{
        procfs::procfs_unregister_pid,
        vfs::{file::FileDescriptorVec, FileType},
    },
    ipc::signal_types::{SigInfo, SigPending, SignalStruct},
    libs::{
        align::AlignedBox,
        casting::DowncastArc,
        futex::{
            constant::{FutexFlag, FUTEX_BITSET_MATCH_ANY},
            futex::{Futex, RobustListHead},
        },
        lock_free_flags::LockFreeFlags,
        rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard},
        spinlock::{SpinLock, SpinLockGuard},
        wait_queue::WaitQueue,
    },
    mm::{
        percpu::{PerCpu, PerCpuVar},
        set_IDLE_PROCESS_ADDRESS_SPACE,
        ucontext::AddressSpace,
        VirtAddr,
    },
    net::socket::SocketInode,
    sched::completion::Completion,
    sched::{
        cpu_rq, fair::FairSchedEntity, prio::MAX_PRIO, DequeueFlag, EnqueueFlag, OnRq, SchedMode,
        WakeupFlags, __schedule,
    },
    smp::{
        core::smp_get_processor_id,
        cpu::{AtomicProcessorId, ProcessorId},
        kick_cpu,
    },
    syscall::{user_access::clear_user, Syscall},
};
use timer::AlarmTimer;

use self::kthread::WorkerPrivate;

pub mod abi;
pub mod c_adapter;
pub mod exec;
pub mod exit;
pub mod fork;
pub mod idle;
pub mod kthread;
pub mod pid;
pub mod resource;
pub mod stdio;
pub mod syscall;
pub mod timer;
pub mod utils;

/// PCBs of every process in the system, keyed by pid.
/// `None` until `ProcessManager::init()` replaces it with an empty map.
static ALL_PROCESS: SpinLock<Option<HashMap<Pid, Arc<ProcessControlBlock>>>> = SpinLock::new(None);

pub static mut PROCESS_SWITCH_RESULT: Option<PerCpuVar<SwitchResult>> = None; 86 87 /// 一个只改变1次的全局变量,标志进程管理器是否已经初始化完成 88 static mut __PROCESS_MANAGEMENT_INIT_DONE: bool = false; 89 90 #[derive(Debug)] 91 pub struct SwitchResult { 92 pub prev_pcb: Option<Arc<ProcessControlBlock>>, 93 pub next_pcb: Option<Arc<ProcessControlBlock>>, 94 } 95 96 impl SwitchResult { 97 pub fn new() -> Self { 98 Self { 99 prev_pcb: None, 100 next_pcb: None, 101 } 102 } 103 } 104 105 #[derive(Debug)] 106 pub struct ProcessManager; 107 impl ProcessManager { 108 #[inline(never)] 109 fn init() { 110 static INIT_FLAG: AtomicBool = AtomicBool::new(false); 111 if INIT_FLAG 112 .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) 113 .is_err() 114 { 115 panic!("ProcessManager has been initialized!"); 116 } 117 118 unsafe { 119 compiler_fence(Ordering::SeqCst); 120 debug!("To create address space for INIT process."); 121 // test_buddy(); 122 set_IDLE_PROCESS_ADDRESS_SPACE( 123 AddressSpace::new(true).expect("Failed to create address space for INIT process."), 124 ); 125 debug!("INIT process address space created."); 126 compiler_fence(Ordering::SeqCst); 127 }; 128 129 ALL_PROCESS.lock_irqsave().replace(HashMap::new()); 130 Self::init_switch_result(); 131 Self::arch_init(); 132 debug!("process arch init done."); 133 Self::init_idle(); 134 debug!("process idle init done."); 135 136 unsafe { __PROCESS_MANAGEMENT_INIT_DONE = true }; 137 info!("Process Manager initialized."); 138 } 139 140 fn init_switch_result() { 141 let mut switch_res_vec: Vec<SwitchResult> = Vec::new(); 142 for _ in 0..PerCpu::MAX_CPU_NUM { 143 switch_res_vec.push(SwitchResult::new()); 144 } 145 unsafe { 146 PROCESS_SWITCH_RESULT = Some(PerCpuVar::new(switch_res_vec).unwrap()); 147 } 148 } 149 150 /// 判断进程管理器是否已经初始化完成 151 #[allow(dead_code)] 152 pub fn initialized() -> bool { 153 unsafe { __PROCESS_MANAGEMENT_INIT_DONE } 154 } 155 156 /// 获取当前进程的pcb 157 pub fn current_pcb() -> Arc<ProcessControlBlock> { 158 if 
unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) { 159 error!("unsafe__PROCESS_MANAGEMENT_INIT_DONE == false"); 160 loop { 161 spin_loop(); 162 } 163 } 164 return ProcessControlBlock::arch_current_pcb(); 165 } 166 167 /// 获取当前进程的pid 168 /// 169 /// 如果进程管理器未初始化完成,那么返回0 170 pub fn current_pid() -> Pid { 171 if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) { 172 return Pid(0); 173 } 174 175 return ProcessManager::current_pcb().pid(); 176 } 177 178 /// 增加当前进程的锁持有计数 179 #[inline(always)] 180 pub fn preempt_disable() { 181 if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) { 182 ProcessManager::current_pcb().preempt_disable(); 183 } 184 } 185 186 /// 减少当前进程的锁持有计数 187 #[inline(always)] 188 pub fn preempt_enable() { 189 if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) { 190 ProcessManager::current_pcb().preempt_enable(); 191 } 192 } 193 194 /// 根据pid获取进程的pcb 195 /// 196 /// ## 参数 197 /// 198 /// - `pid` : 进程的pid 199 /// 200 /// ## 返回值 201 /// 202 /// 如果找到了对应的进程,那么返回该进程的pcb,否则返回None 203 pub fn find(pid: Pid) -> Option<Arc<ProcessControlBlock>> { 204 return ALL_PROCESS.lock_irqsave().as_ref()?.get(&pid).cloned(); 205 } 206 207 /// 向系统中添加一个进程的pcb 208 /// 209 /// ## 参数 210 /// 211 /// - `pcb` : 进程的pcb 212 /// 213 /// ## 返回值 214 /// 215 /// 无 216 pub fn add_pcb(pcb: Arc<ProcessControlBlock>) { 217 ALL_PROCESS 218 .lock_irqsave() 219 .as_mut() 220 .unwrap() 221 .insert(pcb.pid(), pcb.clone()); 222 } 223 224 /// 唤醒一个进程 225 pub fn wakeup(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> { 226 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 227 let state = pcb.sched_info().inner_lock_read_irqsave().state(); 228 if state.is_blocked() { 229 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 230 let state = writer.state(); 231 if state.is_blocked() { 232 writer.set_state(ProcessState::Runnable); 233 writer.set_wakeup(); 234 235 // avoid deadlock 236 drop(writer); 237 238 let rq = 239 
cpu_rq(pcb.sched_info().on_cpu().unwrap_or(current_cpu_id()).data() as usize); 240 241 let (rq, _guard) = rq.self_lock(); 242 rq.update_rq_clock(); 243 rq.activate_task( 244 pcb, 245 EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK, 246 ); 247 248 rq.check_preempt_currnet(pcb, WakeupFlags::empty()); 249 250 // sched_enqueue(pcb.clone(), true); 251 return Ok(()); 252 } else if state.is_exited() { 253 return Err(SystemError::EINVAL); 254 } else { 255 return Ok(()); 256 } 257 } else if state.is_exited() { 258 return Err(SystemError::EINVAL); 259 } else { 260 return Ok(()); 261 } 262 } 263 264 /// 唤醒暂停的进程 265 pub fn wakeup_stop(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> { 266 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 267 let state = pcb.sched_info().inner_lock_read_irqsave().state(); 268 if let ProcessState::Stopped = state { 269 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 270 let state = writer.state(); 271 if let ProcessState::Stopped = state { 272 writer.set_state(ProcessState::Runnable); 273 // avoid deadlock 274 drop(writer); 275 276 let rq = cpu_rq(pcb.sched_info().on_cpu().unwrap().data() as usize); 277 278 let (rq, _guard) = rq.self_lock(); 279 rq.update_rq_clock(); 280 rq.activate_task( 281 pcb, 282 EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK, 283 ); 284 285 rq.check_preempt_currnet(pcb, WakeupFlags::empty()); 286 287 // sched_enqueue(pcb.clone(), true); 288 return Ok(()); 289 } else if state.is_runnable() { 290 return Ok(()); 291 } else { 292 return Err(SystemError::EINVAL); 293 } 294 } else if state.is_runnable() { 295 return Ok(()); 296 } else { 297 return Err(SystemError::EINVAL); 298 } 299 } 300 301 /// 标志当前进程永久睡眠,但是发起调度的工作,应该由调用者完成 302 /// 303 /// ## 注意 304 /// 305 /// - 进入当前函数之前,不能持有sched_info的锁 306 /// - 进入当前函数之前,必须关闭中断 307 /// - 进入当前函数之后必须保证逻辑的正确性,避免被重复加入调度队列 308 pub fn mark_sleep(interruptable: bool) -> Result<(), SystemError> { 309 assert!( 310 
!CurrentIrqArch::is_irq_enabled(), 311 "interrupt must be disabled before enter ProcessManager::mark_sleep()" 312 ); 313 let pcb = ProcessManager::current_pcb(); 314 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 315 if !matches!(writer.state(), ProcessState::Exited(_)) { 316 writer.set_state(ProcessState::Blocked(interruptable)); 317 writer.set_sleep(); 318 pcb.flags().insert(ProcessFlags::NEED_SCHEDULE); 319 fence(Ordering::SeqCst); 320 drop(writer); 321 return Ok(()); 322 } 323 return Err(SystemError::EINTR); 324 } 325 326 /// 标志当前进程为停止状态,但是发起调度的工作,应该由调用者完成 327 /// 328 /// ## 注意 329 /// 330 /// - 进入当前函数之前,不能持有sched_info的锁 331 /// - 进入当前函数之前,必须关闭中断 332 pub fn mark_stop() -> Result<(), SystemError> { 333 assert!( 334 !CurrentIrqArch::is_irq_enabled(), 335 "interrupt must be disabled before enter ProcessManager::mark_stop()" 336 ); 337 338 let pcb = ProcessManager::current_pcb(); 339 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 340 if !matches!(writer.state(), ProcessState::Exited(_)) { 341 writer.set_state(ProcessState::Stopped); 342 pcb.flags().insert(ProcessFlags::NEED_SCHEDULE); 343 drop(writer); 344 345 return Ok(()); 346 } 347 return Err(SystemError::EINTR); 348 } 349 /// 当子进程退出后向父进程发送通知 350 fn exit_notify() { 351 let current = ProcessManager::current_pcb(); 352 // 让INIT进程收养所有子进程 353 if current.pid() != Pid(1) { 354 unsafe { 355 current 356 .adopt_childen() 357 .unwrap_or_else(|e| panic!("adopte_childen failed: error: {e:?}")) 358 }; 359 let r = current.parent_pcb.read_irqsave().upgrade(); 360 if r.is_none() { 361 return; 362 } 363 let parent_pcb = r.unwrap(); 364 let r = Syscall::kill(parent_pcb.pid(), Signal::SIGCHLD as i32); 365 if r.is_err() { 366 warn!( 367 "failed to send kill signal to {:?}'s parent pcb {:?}", 368 current.pid(), 369 parent_pcb.pid() 370 ); 371 } 372 // todo: 这里需要向父进程发送SIGCHLD信号 373 // todo: 这里还需要根据线程组的信息,决定信号的发送 374 } 375 } 376 377 /// 退出当前进程 378 /// 379 /// ## 参数 380 /// 381 /// - `exit_code` : 进程的退出码 
382 pub fn exit(exit_code: usize) -> ! { 383 // 关中断 384 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 385 let pcb = ProcessManager::current_pcb(); 386 let pid = pcb.pid(); 387 pcb.sched_info 388 .inner_lock_write_irqsave() 389 .set_state(ProcessState::Exited(exit_code)); 390 pcb.wait_queue.wakeup(Some(ProcessState::Blocked(true))); 391 392 let rq = cpu_rq(smp_get_processor_id().data() as usize); 393 let (rq, guard) = rq.self_lock(); 394 rq.deactivate_task( 395 pcb.clone(), 396 DequeueFlag::DEQUEUE_SLEEP | DequeueFlag::DEQUEUE_NOCLOCK, 397 ); 398 drop(guard); 399 400 // 进行进程退出后的工作 401 let thread = pcb.thread.write_irqsave(); 402 if let Some(addr) = thread.set_child_tid { 403 unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") }; 404 } 405 406 if let Some(addr) = thread.clear_child_tid { 407 if Arc::strong_count(&pcb.basic().user_vm().expect("User VM Not found")) > 1 { 408 let _ = 409 Futex::futex_wake(addr, FutexFlag::FLAGS_MATCH_NONE, 1, FUTEX_BITSET_MATCH_ANY); 410 } 411 unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") }; 412 } 413 414 RobustListHead::exit_robust_list(pcb.clone()); 415 416 // 如果是vfork出来的进程,则需要处理completion 417 if thread.vfork_done.is_some() { 418 thread.vfork_done.as_ref().unwrap().complete_all(); 419 } 420 drop(thread); 421 unsafe { pcb.basic_mut().set_user_vm(None) }; 422 drop(pcb); 423 ProcessManager::exit_notify(); 424 // unsafe { CurrentIrqArch::interrupt_enable() }; 425 __schedule(SchedMode::SM_NONE); 426 error!("pid {pid:?} exited but sched again!"); 427 #[allow(clippy::empty_loop)] 428 loop { 429 spin_loop(); 430 } 431 } 432 433 pub unsafe fn release(pid: Pid) { 434 let pcb = ProcessManager::find(pid); 435 if pcb.is_some() { 436 // let pcb = pcb.unwrap(); 437 // 判断该pcb是否在全局没有任何引用 438 // TODO: 当前,pcb的Arc指针存在泄露问题,引用计数不正确,打算在接下来实现debug专用的Arc,方便调试,然后解决这个bug。 439 // 因此目前暂时注释掉,使得能跑 440 // if Arc::strong_count(&pcb) <= 2 { 441 // drop(pcb); 442 // 
ALL_PROCESS.lock().as_mut().unwrap().remove(&pid); 443 // } else { 444 // // 如果不为1就panic 445 // let msg = format!("pcb '{:?}' is still referenced, strong count={}",pcb.pid(), Arc::strong_count(&pcb)); 446 // error!("{}", msg); 447 // panic!() 448 // } 449 450 ALL_PROCESS.lock_irqsave().as_mut().unwrap().remove(&pid); 451 } 452 } 453 454 /// 上下文切换完成后的钩子函数 455 unsafe fn switch_finish_hook() { 456 // debug!("switch_finish_hook"); 457 let prev_pcb = PROCESS_SWITCH_RESULT 458 .as_mut() 459 .unwrap() 460 .get_mut() 461 .prev_pcb 462 .take() 463 .expect("prev_pcb is None"); 464 let next_pcb = PROCESS_SWITCH_RESULT 465 .as_mut() 466 .unwrap() 467 .get_mut() 468 .next_pcb 469 .take() 470 .expect("next_pcb is None"); 471 472 // 由于进程切换前使用了SpinLockGuard::leak(),所以这里需要手动释放锁 473 fence(Ordering::SeqCst); 474 475 prev_pcb.arch_info.force_unlock(); 476 fence(Ordering::SeqCst); 477 478 next_pcb.arch_info.force_unlock(); 479 fence(Ordering::SeqCst); 480 } 481 482 /// 如果目标进程正在目标CPU上运行,那么就让这个cpu陷入内核态 483 /// 484 /// ## 参数 485 /// 486 /// - `pcb` : 进程的pcb 487 #[allow(dead_code)] 488 pub fn kick(pcb: &Arc<ProcessControlBlock>) { 489 ProcessManager::current_pcb().preempt_disable(); 490 let cpu_id = pcb.sched_info().on_cpu(); 491 492 if let Some(cpu_id) = cpu_id { 493 if pcb.pid() == cpu_rq(cpu_id.data() as usize).current().pid() { 494 kick_cpu(cpu_id).expect("ProcessManager::kick(): Failed to kick cpu"); 495 } 496 } 497 498 ProcessManager::current_pcb().preempt_enable(); 499 } 500 } 501 502 /// 上下文切换的钩子函数,当这个函数return的时候,将会发生上下文切换 503 #[cfg(target_arch = "x86_64")] 504 #[inline(never)] 505 pub unsafe extern "sysv64" fn switch_finish_hook() { 506 ProcessManager::switch_finish_hook(); 507 } 508 #[cfg(target_arch = "riscv64")] 509 #[inline(always)] 510 pub unsafe fn switch_finish_hook() { 511 ProcessManager::switch_finish_hook(); 512 } 513 514 int_like!(Pid, AtomicPid, usize, AtomicUsize); 515 516 impl fmt::Display for Pid { 517 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 518 
write!(f, "{}", self.0) 519 } 520 } 521 522 #[derive(Debug, Clone, Copy, PartialEq, Eq)] 523 pub enum ProcessState { 524 /// The process is running on a CPU or in a run queue. 525 Runnable, 526 /// The process is waiting for an event to occur. 527 /// 其中的bool表示该等待过程是否可以被打断。 528 /// - 如果该bool为true,那么,硬件中断/信号/其他系统事件都可以打断该等待过程,使得该进程重新进入Runnable状态。 529 /// - 如果该bool为false,那么,这个进程必须被显式的唤醒,才能重新进入Runnable状态。 530 Blocked(bool), 531 /// 进程被信号终止 532 Stopped, 533 /// 进程已经退出,usize表示进程的退出码 534 Exited(usize), 535 } 536 537 #[allow(dead_code)] 538 impl ProcessState { 539 #[inline(always)] 540 pub fn is_runnable(&self) -> bool { 541 return matches!(self, ProcessState::Runnable); 542 } 543 544 #[inline(always)] 545 pub fn is_blocked(&self) -> bool { 546 return matches!(self, ProcessState::Blocked(_)); 547 } 548 549 #[inline(always)] 550 pub fn is_blocked_interruptable(&self) -> bool { 551 return matches!(self, ProcessState::Blocked(true)); 552 } 553 554 /// Returns `true` if the process state is [`Exited`]. 555 #[inline(always)] 556 pub fn is_exited(&self) -> bool { 557 return matches!(self, ProcessState::Exited(_)); 558 } 559 560 /// Returns `true` if the process state is [`Stopped`]. 561 /// 562 /// [`Stopped`]: ProcessState::Stopped 563 #[inline(always)] 564 pub fn is_stopped(&self) -> bool { 565 matches!(self, ProcessState::Stopped) 566 } 567 568 /// Returns exit code if the process state is [`Exited`]. 569 #[inline(always)] 570 pub fn exit_code(&self) -> Option<usize> { 571 match self { 572 ProcessState::Exited(code) => Some(*code), 573 _ => None, 574 } 575 } 576 } 577 578 bitflags! 
{ 579 /// pcb的标志位 580 pub struct ProcessFlags: usize { 581 /// 当前pcb表示一个内核线程 582 const KTHREAD = 1 << 0; 583 /// 当前进程需要被调度 584 const NEED_SCHEDULE = 1 << 1; 585 /// 进程由于vfork而与父进程存在资源共享 586 const VFORK = 1 << 2; 587 /// 进程不可被冻结 588 const NOFREEZE = 1 << 3; 589 /// 进程正在退出 590 const EXITING = 1 << 4; 591 /// 进程由于接收到终止信号唤醒 592 const WAKEKILL = 1 << 5; 593 /// 进程由于接收到信号而退出.(Killed by a signal) 594 const SIGNALED = 1 << 6; 595 /// 进程需要迁移到其他cpu上 596 const NEED_MIGRATE = 1 << 7; 597 /// 随机化的虚拟地址空间,主要用于动态链接器的加载 598 const RANDOMIZE = 1 << 8; 599 } 600 } 601 602 #[derive(Debug)] 603 pub struct ProcessControlBlock { 604 /// 当前进程的pid 605 pid: Pid, 606 /// 当前进程的线程组id(这个值在同一个线程组内永远不变) 607 tgid: Pid, 608 609 basic: RwLock<ProcessBasicInfo>, 610 /// 当前进程的自旋锁持有计数 611 preempt_count: AtomicUsize, 612 613 flags: LockFreeFlags<ProcessFlags>, 614 worker_private: SpinLock<Option<WorkerPrivate>>, 615 /// 进程的内核栈 616 kernel_stack: RwLock<KernelStack>, 617 618 /// 系统调用栈 619 syscall_stack: RwLock<KernelStack>, 620 621 /// 与调度相关的信息 622 sched_info: ProcessSchedulerInfo, 623 /// 与处理器架构相关的信息 624 arch_info: SpinLock<ArchPCBInfo>, 625 /// 与信号处理相关的信息(似乎可以是无锁的) 626 sig_info: RwLock<ProcessSignalInfo>, 627 /// 信号处理结构体 628 sig_struct: SpinLock<SignalStruct>, 629 /// 退出信号S 630 exit_signal: AtomicSignal, 631 632 /// 父进程指针 633 parent_pcb: RwLock<Weak<ProcessControlBlock>>, 634 /// 真实父进程指针 635 real_parent_pcb: RwLock<Weak<ProcessControlBlock>>, 636 637 /// 子进程链表 638 children: RwLock<Vec<Pid>>, 639 640 /// 等待队列 641 wait_queue: WaitQueue, 642 643 /// 线程信息 644 thread: RwLock<ThreadInfo>, 645 646 ///闹钟定时器 647 alarm_timer: SpinLock<Option<AlarmTimer>>, 648 649 /// 进程的robust lock列表 650 robust_list: RwLock<Option<RobustListHead>>, 651 } 652 653 impl ProcessControlBlock { 654 /// Generate a new pcb. 
655 /// 656 /// ## 参数 657 /// 658 /// - `name` : 进程的名字 659 /// - `kstack` : 进程的内核栈 660 /// 661 /// ## 返回值 662 /// 663 /// 返回一个新的pcb 664 pub fn new(name: String, kstack: KernelStack) -> Arc<Self> { 665 return Self::do_create_pcb(name, kstack, false); 666 } 667 668 /// 创建一个新的idle进程 669 /// 670 /// 请注意,这个函数只能在进程管理初始化的时候调用。 671 pub fn new_idle(cpu_id: u32, kstack: KernelStack) -> Arc<Self> { 672 let name = format!("idle-{}", cpu_id); 673 return Self::do_create_pcb(name, kstack, true); 674 } 675 676 /// # 函数的功能 677 /// 678 /// 返回此函数是否是内核进程 679 /// 680 /// # 返回值 681 /// 682 /// 若进程是内核进程则返回true 否则返回false 683 pub fn is_kthread(&self) -> bool { 684 return matches!(self.flags(), &mut ProcessFlags::KTHREAD); 685 } 686 687 #[inline(never)] 688 fn do_create_pcb(name: String, kstack: KernelStack, is_idle: bool) -> Arc<Self> { 689 let (pid, ppid, cwd) = if is_idle { 690 (Pid(0), Pid(0), "/".to_string()) 691 } else { 692 let ppid = ProcessManager::current_pcb().pid(); 693 let cwd = ProcessManager::current_pcb().basic().cwd(); 694 (Self::generate_pid(), ppid, cwd) 695 }; 696 697 let basic_info = ProcessBasicInfo::new(Pid(0), ppid, name, cwd, None); 698 let preempt_count = AtomicUsize::new(0); 699 let flags = unsafe { LockFreeFlags::new(ProcessFlags::empty()) }; 700 701 let sched_info = ProcessSchedulerInfo::new(None); 702 let arch_info = SpinLock::new(ArchPCBInfo::new(&kstack)); 703 704 let ppcb: Weak<ProcessControlBlock> = ProcessManager::find(ppid) 705 .map(|p| Arc::downgrade(&p)) 706 .unwrap_or_default(); 707 708 let pcb = Self { 709 pid, 710 tgid: pid, 711 basic: basic_info, 712 preempt_count, 713 flags, 714 kernel_stack: RwLock::new(kstack), 715 syscall_stack: RwLock::new(KernelStack::new().unwrap()), 716 worker_private: SpinLock::new(None), 717 sched_info, 718 arch_info, 719 sig_info: RwLock::new(ProcessSignalInfo::default()), 720 sig_struct: SpinLock::new(SignalStruct::new()), 721 exit_signal: AtomicSignal::new(Signal::SIGCHLD), 722 parent_pcb: RwLock::new(ppcb.clone()), 723 
real_parent_pcb: RwLock::new(ppcb), 724 children: RwLock::new(Vec::new()), 725 wait_queue: WaitQueue::default(), 726 thread: RwLock::new(ThreadInfo::new()), 727 alarm_timer: SpinLock::new(None), 728 robust_list: RwLock::new(None), 729 }; 730 731 // 初始化系统调用栈 732 #[cfg(target_arch = "x86_64")] 733 pcb.arch_info 734 .lock() 735 .init_syscall_stack(&pcb.syscall_stack.read()); 736 737 let pcb = Arc::new(pcb); 738 739 pcb.sched_info() 740 .sched_entity() 741 .force_mut() 742 .set_pcb(Arc::downgrade(&pcb)); 743 // 设置进程的arc指针到内核栈和系统调用栈的最低地址处 744 unsafe { 745 pcb.kernel_stack 746 .write() 747 .set_pcb(Arc::downgrade(&pcb)) 748 .unwrap(); 749 750 pcb.syscall_stack 751 .write() 752 .set_pcb(Arc::downgrade(&pcb)) 753 .unwrap() 754 }; 755 756 // 将当前pcb加入父进程的子进程哈希表中 757 if pcb.pid() > Pid(1) { 758 if let Some(ppcb_arc) = pcb.parent_pcb.read_irqsave().upgrade() { 759 let mut children = ppcb_arc.children.write_irqsave(); 760 children.push(pcb.pid()); 761 } else { 762 panic!("parent pcb is None"); 763 } 764 } 765 766 return pcb; 767 } 768 769 /// 生成一个新的pid 770 #[inline(always)] 771 fn generate_pid() -> Pid { 772 static NEXT_PID: AtomicPid = AtomicPid::new(Pid(1)); 773 return NEXT_PID.fetch_add(Pid(1), Ordering::SeqCst); 774 } 775 776 /// 返回当前进程的锁持有计数 777 #[inline(always)] 778 pub fn preempt_count(&self) -> usize { 779 return self.preempt_count.load(Ordering::SeqCst); 780 } 781 782 /// 增加当前进程的锁持有计数 783 #[inline(always)] 784 pub fn preempt_disable(&self) { 785 self.preempt_count.fetch_add(1, Ordering::SeqCst); 786 } 787 788 /// 减少当前进程的锁持有计数 789 #[inline(always)] 790 pub fn preempt_enable(&self) { 791 self.preempt_count.fetch_sub(1, Ordering::SeqCst); 792 } 793 794 #[inline(always)] 795 pub unsafe fn set_preempt_count(&self, count: usize) { 796 self.preempt_count.store(count, Ordering::SeqCst); 797 } 798 799 #[inline(always)] 800 pub fn flags(&self) -> &mut ProcessFlags { 801 return self.flags.get_mut(); 802 } 803 804 /// 请注意,这个值能在中断上下文中读取,但不能被中断上下文修改 805 /// 否则会导致死锁 806 
    #[inline(always)]
    pub fn basic(&self) -> RwLockReadGuard<ProcessBasicInfo> {
        return self.basic.read_irqsave();
    }

    #[inline(always)]
    pub fn set_name(&self, name: String) {
        self.basic.write().set_name(name);
    }

    #[inline(always)]
    pub fn basic_mut(&self) -> RwLockWriteGuard<ProcessBasicInfo> {
        return self.basic.write_irqsave();
    }

    /// # Acquire the arch-info lock, disabling interrupts at the same time
    #[inline(always)]
    pub fn arch_info_irqsave(&self) -> SpinLockGuard<ArchPCBInfo> {
        return self.arch_info.lock_irqsave();
    }

    /// # Acquire the arch-info lock WITHOUT disabling interrupts
    ///
    /// Because arch info is used during process switches, acquiring it outside
    /// interrupt context without irqsave is unsafe.
    ///
    /// This function may only be used when:
    /// - in interrupt context (interrupts already disabled), or
    /// - the pcb has just been created.
    #[inline(always)]
    pub unsafe fn arch_info(&self) -> SpinLockGuard<ArchPCBInfo> {
        return self.arch_info.lock();
    }

    #[inline(always)]
    pub fn kernel_stack(&self) -> RwLockReadGuard<KernelStack> {
        return self.kernel_stack.read();
    }

    pub unsafe fn kernel_stack_force_ref(&self) -> &KernelStack {
        self.kernel_stack.force_get_ref()
    }

    #[inline(always)]
    #[allow(dead_code)]
    pub fn kernel_stack_mut(&self) -> RwLockWriteGuard<KernelStack> {
        return self.kernel_stack.write();
    }

    #[inline(always)]
    pub fn sched_info(&self) -> &ProcessSchedulerInfo {
        return &self.sched_info;
    }

    #[inline(always)]
    pub fn worker_private(&self) -> SpinLockGuard<Option<WorkerPrivate>> {
        return self.worker_private.lock();
    }

    #[inline(always)]
    pub fn pid(&self) -> Pid {
        return self.pid;
    }

    #[inline(always)]
    pub fn tgid(&self) -> Pid {
        return self.tgid;
    }

    /// Get the Arc pointer of the file-descriptor table.
    #[inline(always)]
    pub fn fd_table(&self) -> Arc<RwLock<FileDescriptorVec>> {
        return self.basic.read().fd_table().unwrap();
    }

    /// Get the socket object's Arc pointer from a file-descriptor number.
    ///
    /// ## Arguments
    ///
    /// - `fd` : the file-descriptor number
    ///
    /// ## Returns
    ///
    /// The socket object, or `None` if the descriptor is not a socket.
    pub fn get_socket(&self, fd: i32) -> Option<Arc<SocketInode>> {
        // NOTE(review): this looks up the *current* process's fd table rather
        // than `self`'s — confirm whether `self.fd_table()` was intended here.
        let binding = ProcessManager::current_pcb().fd_table();
        let fd_table_guard = binding.read();

        let f = fd_table_guard.get_file_by_fd(fd)?;
        drop(fd_table_guard);

        if f.file_type() != FileType::Socket {
            return None;
        }
        let socket: Arc<SocketInode> = f
            .inode()
            .downcast_arc::<SocketInode>()
            .expect("Not a socket inode");
        return Some(socket);
    }

    /// When this process exits, let the init process adopt all of its children.
    unsafe fn adopt_childen(&self) -> Result<(), SystemError> {
        match ProcessManager::find(Pid(1)) {
            Some(init_pcb) => {
                let childen_guard = self.children.write();
                let mut init_childen_guard = init_pcb.children.write();

                childen_guard.iter().for_each(|pid| {
                    init_childen_guard.push(*pid);
                });

                return Ok(());
            }
            _ => Err(SystemError::ECHILD),
        }
    }

    /// Generate the process's display name from program path and arguments.
    pub fn generate_name(program_path: &str, args: &Vec<String>) -> String {
        let mut name = program_path.to_string();
        for arg in args {
            name.push(' ');
            name.push_str(arg);
        }
        return name;
    }

    pub fn sig_info_irqsave(&self) -> RwLockReadGuard<ProcessSignalInfo> {
        self.sig_info.read_irqsave()
    }

    /// Try up to `times` times to take the sig_info read lock without blocking.
    pub fn try_siginfo_irqsave(&self, times: u8) -> Option<RwLockReadGuard<ProcessSignalInfo>> {
        for _ in 0..times {
            if let Some(r) = self.sig_info.try_read_irqsave() {
                return Some(r);
            }
        }

        return None;
    }

    pub fn sig_info_mut(&self) -> RwLockWriteGuard<ProcessSignalInfo> {
        self.sig_info.write_irqsave()
    }

    /// Try up to `times` times to take the sig_info write lock without blocking.
    pub fn try_siginfo_mut(&self, times: u8) -> Option<RwLockWriteGuard<ProcessSignalInfo>> {
        for _ in 0..times {
            if let Some(r) = self.sig_info.try_write_irqsave() {
                return Some(r);
            }
        }

        return None;
    }

    pub fn sig_struct(&self) -> SpinLockGuard<SignalStruct> {
        self.sig_struct.lock_irqsave()
    }

    /// Try up to `times` times to take the sig_struct lock without blocking.
    pub fn try_sig_struct_irqsave(&self, times: u8) -> Option<SpinLockGuard<SignalStruct>> {
        for _ in 0..times {
            if let Ok(r) = self.sig_struct.try_lock_irqsave() {
                return Some(r);
            }
        }

        return None;
    }

    pub fn sig_struct_irqsave(&self) -> SpinLockGuard<SignalStruct> {
        self.sig_struct.lock_irqsave()
    }

    #[inline(always)]
    pub fn get_robust_list(&self) -> RwLockReadGuard<Option<RobustListHead>> {
        return self.robust_list.read_irqsave();
    }

    #[inline(always)]
    pub fn set_robust_list(&self, new_robust_list: Option<RobustListHead>) {
        *self.robust_list.write_irqsave() = new_robust_list;
    }

    pub fn alarm_timer_irqsave(&self) -> SpinLockGuard<Option<AlarmTimer>> {
        return self.alarm_timer.lock_irqsave();
    }
}

impl Drop for ProcessControlBlock {
    fn drop(&mut self) {
        let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        // Unregister the process from ProcFS.
        procfs_unregister_pid(self.pid())
            .unwrap_or_else(|e| panic!("procfs_unregister_pid failed: error: {e:?}"));

        if let Some(ppcb) = self.parent_pcb.read_irqsave().upgrade() {
            ppcb.children
                .write_irqsave()
                .retain(|pid| *pid != self.pid());
        }

        drop(irq_guard);
    }
}

/// Thread information.
#[derive(Debug)]
pub struct ThreadInfo {
    // Userspace address recording the user thread id; set to 0 when the thread
    // exits to notify the parent process.
    clear_child_tid: Option<VirtAddr>,
    set_child_tid: Option<VirtAddr>,

    vfork_done: Option<Arc<Completion>>,
    /// Leader of the thread group.
    group_leader: Weak<ProcessControlBlock>,
}

impl ThreadInfo {
    pub fn new() -> Self {
        Self {
            clear_child_tid: None,
            set_child_tid: None,
            vfork_done: None,
            group_leader: Weak::default(),
        }
    }

    pub fn group_leader(&self) -> Option<Arc<ProcessControlBlock>> {
        return self.group_leader.upgrade();
    }
}

/// Basic process information.
///
/// This struct holds the process's basic information — mostly things that do
/// not change frequently while the process runs.
#[derive(Debug)]
pub struct ProcessBasicInfo {
    /// The process's process-group id.
    pgid: Pid,
    /// The pid of the process's parent.
    ppid: Pid,
    /// The process's name.
    name: String,

    /// The process's current working directory.
    cwd: String,

    /// User address space.
    user_vm: Option<Arc<AddressSpace>>,

    /// File-descriptor table.
    fd_table: Option<Arc<RwLock<FileDescriptorVec>>>,
}

impl ProcessBasicInfo {
    #[inline(never)]
    pub fn new(
        pgid: Pid,
        ppid: Pid,
        name: String,
        cwd: String,
        user_vm: Option<Arc<AddressSpace>>,
    ) -> RwLock<Self> {
        let fd_table = Arc::new(RwLock::new(FileDescriptorVec::new()));
        return RwLock::new(Self {
            pgid,
            ppid,
            name,
            cwd,
            user_vm,
            fd_table: Some(fd_table),
        });
    }

    pub fn pgid(&self) -> Pid {
        return self.pgid;
    }

    pub fn ppid(&self) -> Pid {
        return self.ppid;
    }

    pub fn name(&self) -> &str {
        return &self.name;
    }

    pub fn set_name(&mut self, name: String) {
        self.name = name;
    }

    pub fn cwd(&self) -> String {
        return self.cwd.clone();
    }
    pub fn set_cwd(&mut self, path: String) {
        return self.cwd = path;
    }

    pub fn user_vm(&self) -> Option<Arc<AddressSpace>> {
        return self.user_vm.clone();
    }

    pub unsafe fn set_user_vm(&mut self, user_vm: Option<Arc<AddressSpace>>) {
        self.user_vm = user_vm;
    }

    pub fn fd_table(&self) -> Option<Arc<RwLock<FileDescriptorVec>>> {
        return self.fd_table.clone();
    }

    pub fn set_fd_table(&mut self, fd_table: Option<Arc<RwLock<FileDescriptorVec>>>) {
        self.fd_table = fd_table;
    }
}

#[derive(Debug)]
pub struct ProcessSchedulerInfo {
    /// The cpu this process is currently on.
    on_cpu: AtomicProcessorId,
    /// If this process is waiting to migrate to another cpu core (i.e. the
    /// PF_NEED_MIGRATE flag is set), this stores the target core number.
    // migrate_to: AtomicProcessorId,
    inner_locked: RwLock<InnerSchedInfo>,
    /// The process's scheduling priority.
    // priority: SchedPriority,
    /// The process's current virtual runtime.
    // virtual_runtime: AtomicIsize,
    /// Time slice managed by the realtime scheduler.
    // rt_time_slice: AtomicIsize,
    pub sched_stat: RwLock<SchedInfo>,
    /// Scheduling policy.
    pub sched_policy: RwLock<crate::sched::SchedPolicy>,
    /// CFS scheduling entity.
    pub sched_entity: Arc<FairSchedEntity>,
    pub on_rq: SpinLock<OnRq>,

    pub prio_data: RwLock<PrioData>,
}

#[derive(Debug, Default)]
#[allow(dead_code)]
pub struct SchedInfo {
    /// Number of times the task has run on a given CPU.
    pub pcount: usize,
    /// Time the task has spent waiting on a run queue.
    pub run_delay: usize,
    /// Timestamp of the task's last run on a CPU.
    pub last_arrival: u64,
    /// Timestamp of the task's last enqueue on a run queue.
    pub last_queued: u64,
}

#[derive(Debug)]
#[allow(dead_code)]
pub struct PrioData {
    pub prio: i32,
    pub static_prio: i32,
    pub normal_prio: i32,
}

impl Default for PrioData {
    fn default() -> Self {
        Self {
            prio: MAX_PRIO - 20,
            static_prio: MAX_PRIO - 20,
            normal_prio: MAX_PRIO - 20,
        }
    }
}

#[derive(Debug)]
pub struct InnerSchedInfo {
    /// The process's current state.
    state: ProcessState,
    /// Whether the process has been marked to sleep via `set_sleep` (cleared
    /// again by `set_wakeup`); queried through `is_mark_sleep`.
    sleep: bool,
}

impl InnerSchedInfo {
    pub fn state(&self) -> ProcessState {
        return self.state;
    }

    pub fn set_state(&mut self, state: ProcessState) {
        self.state = state;
    }

    pub fn set_sleep(&mut self) {
        self.sleep = true;
    }

    pub fn set_wakeup(&mut self) {
        self.sleep = false;
    }

    pub fn is_mark_sleep(&self) -> bool {
        self.sleep
    }
}

impl ProcessSchedulerInfo {
    #[inline(never)]
    pub fn new(on_cpu: Option<ProcessorId>) -> Self {
        let cpu_id = on_cpu.unwrap_or(ProcessorId::INVALID);
        return Self {
            on_cpu: AtomicProcessorId::new(cpu_id),
            // migrate_to: AtomicProcessorId::new(ProcessorId::INVALID),
            inner_locked: RwLock::new(InnerSchedInfo {
                state: ProcessState::Blocked(false),
                sleep: false,
            }),
            // virtual_runtime: AtomicIsize::new(0),
            // rt_time_slice: AtomicIsize::new(0),
            // priority: SchedPriority::new(100).unwrap(),
            sched_stat: RwLock::new(SchedInfo::default()),
            sched_policy: RwLock::new(crate::sched::SchedPolicy::CFS),
            sched_entity: FairSchedEntity::new(),
            on_rq: SpinLock::new(OnRq::None),
            prio_data: RwLock::new(PrioData::default()),
        };
    }

    pub fn sched_entity(&self) -> Arc<FairSchedEntity> {
        return self.sched_entity.clone();
    }

    pub fn on_cpu(&self) -> Option<ProcessorId> {
        let on_cpu = self.on_cpu.load(Ordering::SeqCst);
        if on_cpu == ProcessorId::INVALID {
            return None;
        } else {
            return Some(on_cpu);
        }
    }

    pub fn set_on_cpu(&self, on_cpu: Option<ProcessorId>) {
        if let Some(cpu_id) = on_cpu {
            self.on_cpu.store(cpu_id, Ordering::SeqCst);
        } else {
            self.on_cpu.store(ProcessorId::INVALID, Ordering::SeqCst);
        }
    }

    // pub fn migrate_to(&self) -> Option<ProcessorId> {
    //     let migrate_to = self.migrate_to.load(Ordering::SeqCst);
    //     if migrate_to == ProcessorId::INVALID {
    //         return None;
    //     } else {
    //         return Some(migrate_to);
    //     }
    // }

    // pub fn set_migrate_to(&self, migrate_to: Option<ProcessorId>) {
    //     if let Some(data) = migrate_to {
    //         self.migrate_to.store(data, Ordering::SeqCst);
    //     } else {
    //         self.migrate_to
    //             .store(ProcessorId::INVALID, Ordering::SeqCst)
    //     }
    // }

    pub fn inner_lock_write_irqsave(&self) -> RwLockWriteGuard<InnerSchedInfo> {
        return self.inner_locked.write_irqsave();
    }

    pub fn inner_lock_read_irqsave(&self) -> RwLockReadGuard<InnerSchedInfo> {
        return self.inner_locked.read_irqsave();
    }

    // pub fn inner_lock_try_read_irqsave(
    //     &self,
    //     times: u8,
    // ) -> Option<RwLockReadGuard<InnerSchedInfo>> {
    //     for _ in 0..times {
    //         if let Some(r) = self.inner_locked.try_read_irqsave() {
    //             return Some(r);
    //         }
    //     }

    //     return None;
    // }

    // pub fn inner_lock_try_upgradable_read_irqsave(
    //     &self,
    //     times: u8,
    // ) -> Option<RwLockUpgradableGuard<InnerSchedInfo>> {
    //     for _ in 0..times {
    //         if let Some(r) = self.inner_locked.try_upgradeable_read_irqsave() {
    //             return Some(r);
    //         }
    //     }

    //     return None;
    // }

    // pub fn virtual_runtime(&self) -> isize {
    //     return self.virtual_runtime.load(Ordering::SeqCst);
    // }

    // pub fn set_virtual_runtime(&self, virtual_runtime: isize) {
    //     self.virtual_runtime
    //         .store(virtual_runtime, Ordering::SeqCst);
    // }
    // pub fn increase_virtual_runtime(&self, delta: isize) {
    //     self.virtual_runtime.fetch_add(delta, Ordering::SeqCst);
    // }

    // pub fn rt_time_slice(&self) -> isize {
    //     return self.rt_time_slice.load(Ordering::SeqCst);
    // }

    // pub fn set_rt_time_slice(&self, rt_time_slice: isize) {
    //     self.rt_time_slice.store(rt_time_slice, Ordering::SeqCst);
    // }

    // pub fn increase_rt_time_slice(&self, delta: isize) {
    //     self.rt_time_slice.fetch_add(delta, Ordering::SeqCst);
    // }

    pub fn policy(&self) -> crate::sched::SchedPolicy {
        return *self.sched_policy.read_irqsave();
    }
}

#[derive(Debug, Clone)]
pub struct KernelStack {
    stack: Option<AlignedBox<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>>,
    /// Whether this kernel stack may be freed.
    can_be_freed: bool,
}

impl KernelStack {
    pub const SIZE: usize = 0x4000;
    pub const ALIGN: usize = 0x4000;

    pub fn new() -> Result<Self, SystemError> {
        return Ok(Self {
            stack: Some(
                AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_zeroed()?,
            ),
            can_be_freed: true,
        });
    }

    /// Build a kernel-stack struct from already-allocated space.
    ///
    /// Only for BSP startup, to build the idle process's kernel stack. Using
    /// this function anywhere else is very likely an error!
    pub unsafe fn from_existed(base: VirtAddr) -> Result<Self, SystemError> {
        if base.is_null() || !base.check_aligned(Self::ALIGN) {
            return Err(SystemError::EFAULT);
        }

        return Ok(Self {
            stack: Some(
                AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_unchecked(
                    base.data() as *mut [u8; KernelStack::SIZE],
                ),
            ),
            can_be_freed: false,
        });
    }

    /// Starting (lowest) virtual address of the kernel stack.
    pub fn start_address(&self) -> VirtAddr {
        return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize);
    }

    /// Ending (highest) virtual address of the kernel stack (exclusive).
    pub fn stack_max_address(&self) -> VirtAddr {
        return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize + Self::SIZE);
    }

    pub unsafe fn set_pcb(&mut self, pcb: Weak<ProcessControlBlock>) -> Result<(), SystemError> {
        // Place a Weak<ProcessControlBlock> at the lowest address of the kernel stack.
        let p: *const ProcessControlBlock = Weak::into_raw(pcb);
        let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock;

        // If a pcb pointer is already stored at the stack bottom, do not set a
        // new one — return an error instead.
        if unlikely(unsafe { !(*stack_bottom_ptr).is_null() }) {
            error!("kernel stack bottom is not null: {:p}", *stack_bottom_ptr);
            return Err(SystemError::EPERM);
        }
        // Store the pcb's address at the lowest address of the kernel stack.
        unsafe {
            *stack_bottom_ptr = p;
        }

        return Ok(());
    }

    /// Clear the kernel stack's pcb pointer.
    ///
    /// ## Arguments
    ///
    /// - `force` : if true, forcibly clear the pointer even when it is non-null,
    ///   without handling the Weak pointer.
    pub unsafe fn clear_pcb(&mut self, force: bool) {
        let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock;
        if unlikely(unsafe { (*stack_bottom_ptr).is_null() }) {
            return;
        }
1407 if !force { 1408 let pcb_ptr: Weak<ProcessControlBlock> = Weak::from_raw(*stack_bottom_ptr); 1409 drop(pcb_ptr); 1410 } 1411 1412 *stack_bottom_ptr = core::ptr::null(); 1413 } 1414 1415 /// 返回指向当前内核栈pcb的Arc指针 1416 #[allow(dead_code)] 1417 pub unsafe fn pcb(&self) -> Option<Arc<ProcessControlBlock>> { 1418 // 从内核栈的最低地址处取出pcb的地址 1419 let p = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock; 1420 if unlikely(unsafe { (*p).is_null() }) { 1421 return None; 1422 } 1423 1424 // 为了防止内核栈的pcb指针被释放,这里需要将其包装一下,使得Arc的drop不会被调用 1425 let weak_wrapper: ManuallyDrop<Weak<ProcessControlBlock>> = 1426 ManuallyDrop::new(Weak::from_raw(*p)); 1427 1428 let new_arc: Arc<ProcessControlBlock> = weak_wrapper.upgrade()?; 1429 return Some(new_arc); 1430 } 1431 } 1432 1433 impl Drop for KernelStack { 1434 fn drop(&mut self) { 1435 if self.stack.is_some() { 1436 let ptr = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock; 1437 if unsafe { !(*ptr).is_null() } { 1438 let pcb_ptr: Weak<ProcessControlBlock> = unsafe { Weak::from_raw(*ptr) }; 1439 drop(pcb_ptr); 1440 } 1441 } 1442 // 如果该内核栈不可以被释放,那么,这里就forget,不调用AlignedBox的drop函数 1443 if !self.can_be_freed { 1444 let bx = self.stack.take(); 1445 core::mem::forget(bx); 1446 } 1447 } 1448 } 1449 1450 pub fn process_init() { 1451 ProcessManager::init(); 1452 } 1453 1454 #[derive(Debug)] 1455 pub struct ProcessSignalInfo { 1456 // 当前进程 1457 sig_block: SigSet, 1458 // sig_pending 中存储当前线程要处理的信号 1459 sig_pending: SigPending, 1460 // sig_shared_pending 中存储当前线程所属进程要处理的信号 1461 sig_shared_pending: SigPending, 1462 // 当前进程对应的tty 1463 tty: Option<Arc<TtyCore>>, 1464 } 1465 1466 impl ProcessSignalInfo { 1467 pub fn sig_block(&self) -> &SigSet { 1468 &self.sig_block 1469 } 1470 1471 pub fn sig_pending(&self) -> &SigPending { 1472 &self.sig_pending 1473 } 1474 1475 pub fn sig_pending_mut(&mut self) -> &mut SigPending { 1476 &mut self.sig_pending 1477 } 1478 1479 pub fn sig_block_mut(&mut self) -> &mut SigSet { 
1480 &mut self.sig_block 1481 } 1482 1483 pub fn sig_shared_pending_mut(&mut self) -> &mut SigPending { 1484 &mut self.sig_shared_pending 1485 } 1486 1487 pub fn sig_shared_pending(&self) -> &SigPending { 1488 &self.sig_shared_pending 1489 } 1490 1491 pub fn tty(&self) -> Option<Arc<TtyCore>> { 1492 self.tty.clone() 1493 } 1494 1495 pub fn set_tty(&mut self, tty: Arc<TtyCore>) { 1496 self.tty = Some(tty); 1497 } 1498 1499 /// 从 pcb 的 siginfo中取出下一个要处理的信号,先处理线程信号,再处理进程信号 1500 /// 1501 /// ## 参数 1502 /// 1503 /// - `sig_mask` 被忽略掉的信号 1504 /// 1505 pub fn dequeue_signal(&mut self, sig_mask: &SigSet) -> (Signal, Option<SigInfo>) { 1506 let res = self.sig_pending.dequeue_signal(sig_mask); 1507 if res.0 != Signal::INVALID { 1508 return res; 1509 } else { 1510 return self.sig_shared_pending.dequeue_signal(sig_mask); 1511 } 1512 } 1513 } 1514 1515 impl Default for ProcessSignalInfo { 1516 fn default() -> Self { 1517 Self { 1518 sig_block: SigSet::empty(), 1519 sig_pending: SigPending::default(), 1520 sig_shared_pending: SigPending::default(), 1521 tty: None, 1522 } 1523 } 1524 } 1525