1 use core::{ 2 fmt, 3 hash::Hash, 4 hint::spin_loop, 5 intrinsics::{likely, unlikely}, 6 mem::ManuallyDrop, 7 sync::atomic::{compiler_fence, fence, AtomicBool, AtomicUsize, Ordering}, 8 }; 9 10 use alloc::{ 11 ffi::CString, 12 string::{String, ToString}, 13 sync::{Arc, Weak}, 14 vec::Vec, 15 }; 16 use cred::INIT_CRED; 17 use hashbrown::HashMap; 18 use log::{debug, error, info, warn}; 19 use system_error::SystemError; 20 21 use crate::{ 22 arch::{ 23 cpu::current_cpu_id, 24 ipc::signal::{AtomicSignal, SigSet, Signal}, 25 process::ArchPCBInfo, 26 CurrentIrqArch, 27 }, 28 driver::tty::tty_core::TtyCore, 29 exception::InterruptArch, 30 filesystem::{ 31 procfs::procfs_unregister_pid, 32 vfs::{file::FileDescriptorVec, FileType}, 33 }, 34 ipc::signal_types::{SigInfo, SigPending, SignalStruct}, 35 libs::{ 36 align::AlignedBox, 37 casting::DowncastArc, 38 futex::{ 39 constant::{FutexFlag, FUTEX_BITSET_MATCH_ANY}, 40 futex::{Futex, RobustListHead}, 41 }, 42 lock_free_flags::LockFreeFlags, 43 rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard}, 44 spinlock::{SpinLock, SpinLockGuard}, 45 wait_queue::WaitQueue, 46 }, 47 mm::{ 48 percpu::{PerCpu, PerCpuVar}, 49 set_IDLE_PROCESS_ADDRESS_SPACE, 50 ucontext::AddressSpace, 51 VirtAddr, 52 }, 53 net::socket::SocketInode, 54 sched::completion::Completion, 55 sched::{ 56 cpu_rq, fair::FairSchedEntity, prio::MAX_PRIO, DequeueFlag, EnqueueFlag, OnRq, SchedMode, 57 WakeupFlags, __schedule, 58 }, 59 smp::{ 60 core::smp_get_processor_id, 61 cpu::{AtomicProcessorId, ProcessorId}, 62 kick_cpu, 63 }, 64 syscall::{user_access::clear_user, Syscall}, 65 }; 66 use timer::AlarmTimer; 67 68 use self::{cred::Cred, kthread::WorkerPrivate}; 69 70 pub mod abi; 71 pub mod c_adapter; 72 pub mod cred; 73 pub mod exec; 74 pub mod exit; 75 pub mod fork; 76 pub mod idle; 77 pub mod kthread; 78 pub mod pid; 79 pub mod resource; 80 pub mod stdio; 81 pub mod syscall; 82 pub mod timer; 83 pub mod utils; 84 85 /// 系统中所有进程的pcb 86 static ALL_PROCESS: SpinLock<Option<HashMap<Pid, Arc<ProcessControlBlock>>>> = SpinLock::new(None); 87 88 pub static mut PROCESS_SWITCH_RESULT: Option<PerCpuVar<SwitchResult>> = None; 89 90 /// 一个只改变1次的全局变量,标志进程管理器是否已经初始化完成 91 static mut __PROCESS_MANAGEMENT_INIT_DONE: bool = false; 92 93 #[derive(Debug)] 94 pub struct SwitchResult { 95 pub prev_pcb: Option<Arc<ProcessControlBlock>>, 96 pub next_pcb: Option<Arc<ProcessControlBlock>>, 97 } 98 99 impl SwitchResult { 100 pub fn new() -> Self { 101 Self { 102 prev_pcb: None, 103 next_pcb: None, 104 } 105 } 106 } 107 108 #[derive(Debug)] 109 pub struct ProcessManager; 110 impl ProcessManager { 111 #[inline(never)] 112 fn init() { 113 static INIT_FLAG: AtomicBool = AtomicBool::new(false); 114 if INIT_FLAG 115 .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) 116 .is_err() 117 { 118 panic!("ProcessManager has been initialized!"); 119 } 120 121 unsafe { 122 compiler_fence(Ordering::SeqCst); 123 debug!("To create address space for INIT process."); 124 // test_buddy(); 125 set_IDLE_PROCESS_ADDRESS_SPACE( 126 AddressSpace::new(true).expect("Failed to create address space for INIT process."), 127 ); 128 debug!("INIT process address space created."); 129 compiler_fence(Ordering::SeqCst); 130 }; 131 132 ALL_PROCESS.lock_irqsave().replace(HashMap::new()); 133 Self::init_switch_result(); 134 Self::arch_init(); 135 debug!("process arch init done."); 136 Self::init_idle(); 137 debug!("process idle init done."); 138 139 unsafe { __PROCESS_MANAGEMENT_INIT_DONE = true }; 140 info!("Process Manager initialized."); 141 } 142 143 fn init_switch_result() { 144 let mut switch_res_vec: Vec<SwitchResult> = Vec::new(); 145 for _ in 0..PerCpu::MAX_CPU_NUM { 146 switch_res_vec.push(SwitchResult::new()); 147 } 148 unsafe { 149 PROCESS_SWITCH_RESULT = Some(PerCpuVar::new(switch_res_vec).unwrap()); 150 } 151 } 152 153 /// 判断进程管理器是否已经初始化完成 154 #[allow(dead_code)] 155 pub fn initialized() -> bool { 156 unsafe { __PROCESS_MANAGEMENT_INIT_DONE } 157 } 158 159 /// 获取当前进程的pcb 160 pub fn current_pcb() -> Arc<ProcessControlBlock> { 161 if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) { 162 error!("unsafe__PROCESS_MANAGEMENT_INIT_DONE == false"); 163 loop { 164 spin_loop(); 165 } 166 } 167 return ProcessControlBlock::arch_current_pcb(); 168 } 169 170 /// 获取当前进程的pid 171 /// 172 /// 如果进程管理器未初始化完成,那么返回0 173 pub fn current_pid() -> Pid { 174 if unlikely(unsafe { !__PROCESS_MANAGEMENT_INIT_DONE }) { 175 return Pid(0); 176 } 177 178 return ProcessManager::current_pcb().pid(); 179 } 180 181 /// 增加当前进程的锁持有计数 182 #[inline(always)] 183 pub fn preempt_disable() { 184 if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) { 185 ProcessManager::current_pcb().preempt_disable(); 186 } 187 } 188 189 /// 减少当前进程的锁持有计数 190 #[inline(always)] 191 pub fn preempt_enable() { 192 if likely(unsafe { __PROCESS_MANAGEMENT_INIT_DONE }) { 193 ProcessManager::current_pcb().preempt_enable(); 194 } 195 } 196 197 /// 根据pid获取进程的pcb 198 /// 199 /// ## 参数 200 /// 201 /// - `pid` : 进程的pid 202 /// 203 /// ## 返回值 204 /// 205 /// 如果找到了对应的进程,那么返回该进程的pcb,否则返回None 206 pub fn find(pid: Pid) -> Option<Arc<ProcessControlBlock>> { 207 return ALL_PROCESS.lock_irqsave().as_ref()?.get(&pid).cloned(); 208 } 209 210 /// 向系统中添加一个进程的pcb 211 /// 212 /// ## 参数 213 /// 214 /// - `pcb` : 进程的pcb 215 /// 216 /// ## 返回值 217 /// 218 /// 无 219 pub fn add_pcb(pcb: Arc<ProcessControlBlock>) { 220 ALL_PROCESS 221 .lock_irqsave() 222 .as_mut() 223 .unwrap() 224 .insert(pcb.pid(), pcb.clone()); 225 } 226 227 /// 唤醒一个进程 228 pub fn wakeup(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> { 229 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 230 let state = pcb.sched_info().inner_lock_read_irqsave().state(); 231 if state.is_blocked() { 232 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 233 let state = writer.state(); 234 if state.is_blocked() { 235 writer.set_state(ProcessState::Runnable); 236 writer.set_wakeup(); 237 238 // avoid deadlock 239 drop(writer); 240 241 let rq = 242 cpu_rq(pcb.sched_info().on_cpu().unwrap_or(current_cpu_id()).data() as usize); 243 244 let (rq, _guard) = rq.self_lock(); 245 rq.update_rq_clock(); 246 rq.activate_task( 247 pcb, 248 EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK, 249 ); 250 251 rq.check_preempt_currnet(pcb, WakeupFlags::empty()); 252 253 // sched_enqueue(pcb.clone(), true); 254 return Ok(()); 255 } else if state.is_exited() { 256 return Err(SystemError::EINVAL); 257 } else { 258 return Ok(()); 259 } 260 } else if state.is_exited() { 261 return Err(SystemError::EINVAL); 262 } else { 263 return Ok(()); 264 } 265 } 266 267 /// 唤醒暂停的进程 268 pub fn wakeup_stop(pcb: &Arc<ProcessControlBlock>) -> Result<(), SystemError> { 269 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 270 let state = pcb.sched_info().inner_lock_read_irqsave().state(); 271 if let ProcessState::Stopped = state { 272 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 273 let state = writer.state(); 274 if let ProcessState::Stopped = state { 275 writer.set_state(ProcessState::Runnable); 276 // avoid deadlock 277 drop(writer); 278 279 let rq = cpu_rq(pcb.sched_info().on_cpu().unwrap().data() as usize); 280 281 let (rq, _guard) = rq.self_lock(); 282 rq.update_rq_clock(); 283 rq.activate_task( 284 pcb, 285 EnqueueFlag::ENQUEUE_WAKEUP | EnqueueFlag::ENQUEUE_NOCLOCK, 286 ); 287 288 rq.check_preempt_currnet(pcb, WakeupFlags::empty()); 289 290 // sched_enqueue(pcb.clone(), true); 291 return Ok(()); 292 } else if state.is_runnable() { 293 return Ok(()); 294 } else { 295 return Err(SystemError::EINVAL); 296 } 297 } else if state.is_runnable() { 298 return Ok(()); 299 } else { 300 return Err(SystemError::EINVAL); 301 } 302 } 303 304 /// 标志当前进程永久睡眠,但是发起调度的工作,应该由调用者完成 305 /// 306 /// ## 注意 307 /// 308 /// - 进入当前函数之前,不能持有sched_info的锁 309 /// - 进入当前函数之前,必须关闭中断 310 /// - 进入当前函数之后必须保证逻辑的正确性,避免被重复加入调度队列 311 pub fn mark_sleep(interruptable: bool) -> Result<(), SystemError> { 312 assert!( 313 !CurrentIrqArch::is_irq_enabled(), 314 "interrupt must be disabled before enter ProcessManager::mark_sleep()" 315 ); 316 let pcb = ProcessManager::current_pcb(); 317 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 318 if !matches!(writer.state(), ProcessState::Exited(_)) { 319 writer.set_state(ProcessState::Blocked(interruptable)); 320 writer.set_sleep(); 321 pcb.flags().insert(ProcessFlags::NEED_SCHEDULE); 322 fence(Ordering::SeqCst); 323 drop(writer); 324 return Ok(()); 325 } 326 return Err(SystemError::EINTR); 327 } 328 329 /// 标志当前进程为停止状态,但是发起调度的工作,应该由调用者完成 330 /// 331 /// ## 注意 332 /// 333 /// - 进入当前函数之前,不能持有sched_info的锁 334 /// - 进入当前函数之前,必须关闭中断 335 pub fn mark_stop() -> Result<(), SystemError> { 336 assert!( 337 !CurrentIrqArch::is_irq_enabled(), 338 "interrupt must be disabled before enter ProcessManager::mark_stop()" 339 ); 340 341 let pcb = ProcessManager::current_pcb(); 342 let mut writer = pcb.sched_info().inner_lock_write_irqsave(); 343 if !matches!(writer.state(), ProcessState::Exited(_)) { 344 writer.set_state(ProcessState::Stopped); 345 pcb.flags().insert(ProcessFlags::NEED_SCHEDULE); 346 drop(writer); 347 348 return Ok(()); 349 } 350 return Err(SystemError::EINTR); 351 } 352 /// 当子进程退出后向父进程发送通知 353 fn exit_notify() { 354 let current = ProcessManager::current_pcb(); 355 // 让INIT进程收养所有子进程 356 if current.pid() != Pid(1) { 357 unsafe { 358 current 359 .adopt_childen() 360 .unwrap_or_else(|e| panic!("adopte_childen failed: error: {e:?}")) 361 }; 362 let r = current.parent_pcb.read_irqsave().upgrade(); 363 if r.is_none() { 364 return; 365 } 366 let parent_pcb = r.unwrap(); 367 let r = Syscall::kill(parent_pcb.pid(), Signal::SIGCHLD as i32); 368 if r.is_err() { 369 warn!( 370 "failed to send kill signal to {:?}'s parent pcb {:?}", 371 current.pid(), 372 parent_pcb.pid() 373 ); 374 } 375 // todo: 这里需要向父进程发送SIGCHLD信号 376 // todo: 这里还需要根据线程组的信息,决定信号的发送 377 } 378 } 379 380 /// 退出当前进程 381 /// 382 /// ## 参数 383 /// 384 /// - `exit_code` : 进程的退出码 385 pub fn exit(exit_code: usize) -> ! { 386 // 关中断 387 let _guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 388 let pcb = ProcessManager::current_pcb(); 389 let pid = pcb.pid(); 390 pcb.sched_info 391 .inner_lock_write_irqsave() 392 .set_state(ProcessState::Exited(exit_code)); 393 pcb.wait_queue.wakeup(Some(ProcessState::Blocked(true))); 394 395 let rq = cpu_rq(smp_get_processor_id().data() as usize); 396 let (rq, guard) = rq.self_lock(); 397 rq.deactivate_task( 398 pcb.clone(), 399 DequeueFlag::DEQUEUE_SLEEP | DequeueFlag::DEQUEUE_NOCLOCK, 400 ); 401 drop(guard); 402 403 // 进行进程退出后的工作 404 let thread = pcb.thread.write_irqsave(); 405 if let Some(addr) = thread.set_child_tid { 406 unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") }; 407 } 408 409 if let Some(addr) = thread.clear_child_tid { 410 if Arc::strong_count(&pcb.basic().user_vm().expect("User VM Not found")) > 1 { 411 let _ = 412 Futex::futex_wake(addr, FutexFlag::FLAGS_MATCH_NONE, 1, FUTEX_BITSET_MATCH_ANY); 413 } 414 unsafe { clear_user(addr, core::mem::size_of::<i32>()).expect("clear tid failed") }; 415 } 416 417 RobustListHead::exit_robust_list(pcb.clone()); 418 419 // 如果是vfork出来的进程,则需要处理completion 420 if thread.vfork_done.is_some() { 421 thread.vfork_done.as_ref().unwrap().complete_all(); 422 } 423 drop(thread); 424 unsafe { pcb.basic_mut().set_user_vm(None) }; 425 drop(pcb); 426 ProcessManager::exit_notify(); 427 // unsafe { CurrentIrqArch::interrupt_enable() }; 428 __schedule(SchedMode::SM_NONE); 429 error!("pid {pid:?} exited but sched again!"); 430 #[allow(clippy::empty_loop)] 431 loop { 432 spin_loop(); 433 } 434 } 435 436 pub unsafe fn release(pid: Pid) { 437 let pcb = ProcessManager::find(pid); 438 if pcb.is_some() { 439 // let pcb = pcb.unwrap(); 440 // 判断该pcb是否在全局没有任何引用 441 // TODO: 当前,pcb的Arc指针存在泄露问题,引用计数不正确,打算在接下来实现debug专用的Arc,方便调试,然后解决这个bug。 442 // 因此目前暂时注释掉,使得能跑 443 // if Arc::strong_count(&pcb) <= 2 { 444 // drop(pcb); 445 // ALL_PROCESS.lock().as_mut().unwrap().remove(&pid); 446 // } else { 447 // // 如果不为1就panic 448 // let msg = format!("pcb '{:?}' is still referenced, strong count={}",pcb.pid(), Arc::strong_count(&pcb)); 449 // error!("{}", msg); 450 // panic!() 451 // } 452 453 ALL_PROCESS.lock_irqsave().as_mut().unwrap().remove(&pid); 454 } 455 } 456 457 /// 上下文切换完成后的钩子函数 458 unsafe fn switch_finish_hook() { 459 // debug!("switch_finish_hook"); 460 let prev_pcb = PROCESS_SWITCH_RESULT 461 .as_mut() 462 .unwrap() 463 .get_mut() 464 .prev_pcb 465 .take() 466 .expect("prev_pcb is None"); 467 let next_pcb = PROCESS_SWITCH_RESULT 468 .as_mut() 469 .unwrap() 470 .get_mut() 471 .next_pcb 472 .take() 473 .expect("next_pcb is None"); 474 475 // 由于进程切换前使用了SpinLockGuard::leak(),所以这里需要手动释放锁 476 fence(Ordering::SeqCst); 477 478 prev_pcb.arch_info.force_unlock(); 479 fence(Ordering::SeqCst); 480 481 next_pcb.arch_info.force_unlock(); 482 fence(Ordering::SeqCst); 483 } 484 485 /// 如果目标进程正在目标CPU上运行,那么就让这个cpu陷入内核态 486 /// 487 /// ## 参数 488 /// 489 /// - `pcb` : 进程的pcb 490 #[allow(dead_code)] 491 pub fn kick(pcb: &Arc<ProcessControlBlock>) { 492 ProcessManager::current_pcb().preempt_disable(); 493 let cpu_id = pcb.sched_info().on_cpu(); 494 495 if let Some(cpu_id) = cpu_id { 496 if pcb.pid() == cpu_rq(cpu_id.data() as usize).current().pid() { 497 kick_cpu(cpu_id).expect("ProcessManager::kick(): Failed to kick cpu"); 498 } 499 } 500 501 ProcessManager::current_pcb().preempt_enable(); 502 } 503 } 504 505 /// 上下文切换的钩子函数,当这个函数return的时候,将会发生上下文切换 506 #[cfg(target_arch = "x86_64")] 507 #[inline(never)] 508 pub unsafe extern "sysv64" fn switch_finish_hook() { 509 ProcessManager::switch_finish_hook(); 510 } 511 #[cfg(target_arch = "riscv64")] 512 #[inline(always)] 513 pub unsafe fn switch_finish_hook() { 514 ProcessManager::switch_finish_hook(); 515 } 516 517 int_like!(Pid, AtomicPid, usize, AtomicUsize); 518 519 impl fmt::Display for Pid { 520 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 521 write!(f, "{}", self.0) 522 } 523 } 524 525 #[derive(Debug, Clone, Copy, PartialEq, Eq)] 526 pub enum ProcessState { 527 /// The process is running on a CPU or in a run queue. 528 Runnable, 529 /// The process is waiting for an event to occur. 530 /// 其中的bool表示该等待过程是否可以被打断。 531 /// - 如果该bool为true,那么,硬件中断/信号/其他系统事件都可以打断该等待过程,使得该进程重新进入Runnable状态。 532 /// - 如果该bool为false,那么,这个进程必须被显式的唤醒,才能重新进入Runnable状态。 533 Blocked(bool), 534 /// 进程被信号终止 535 Stopped, 536 /// 进程已经退出,usize表示进程的退出码 537 Exited(usize), 538 } 539 540 #[allow(dead_code)] 541 impl ProcessState { 542 #[inline(always)] 543 pub fn is_runnable(&self) -> bool { 544 return matches!(self, ProcessState::Runnable); 545 } 546 547 #[inline(always)] 548 pub fn is_blocked(&self) -> bool { 549 return matches!(self, ProcessState::Blocked(_)); 550 } 551 552 #[inline(always)] 553 pub fn is_blocked_interruptable(&self) -> bool { 554 return matches!(self, ProcessState::Blocked(true)); 555 } 556 557 /// Returns `true` if the process state is [`Exited`]. 558 #[inline(always)] 559 pub fn is_exited(&self) -> bool { 560 return matches!(self, ProcessState::Exited(_)); 561 } 562 563 /// Returns `true` if the process state is [`Stopped`]. 564 /// 565 /// [`Stopped`]: ProcessState::Stopped 566 #[inline(always)] 567 pub fn is_stopped(&self) -> bool { 568 matches!(self, ProcessState::Stopped) 569 } 570 571 /// Returns exit code if the process state is [`Exited`]. 572 #[inline(always)] 573 pub fn exit_code(&self) -> Option<usize> { 574 match self { 575 ProcessState::Exited(code) => Some(*code), 576 _ => None, 577 } 578 } 579 } 580 581 bitflags! { 582 /// pcb的标志位 583 pub struct ProcessFlags: usize { 584 /// 当前pcb表示一个内核线程 585 const KTHREAD = 1 << 0; 586 /// 当前进程需要被调度 587 const NEED_SCHEDULE = 1 << 1; 588 /// 进程由于vfork而与父进程存在资源共享 589 const VFORK = 1 << 2; 590 /// 进程不可被冻结 591 const NOFREEZE = 1 << 3; 592 /// 进程正在退出 593 const EXITING = 1 << 4; 594 /// 进程由于接收到终止信号唤醒 595 const WAKEKILL = 1 << 5; 596 /// 进程由于接收到信号而退出.(Killed by a signal) 597 const SIGNALED = 1 << 6; 598 /// 进程需要迁移到其他cpu上 599 const NEED_MIGRATE = 1 << 7; 600 /// 随机化的虚拟地址空间,主要用于动态链接器的加载 601 const RANDOMIZE = 1 << 8; 602 } 603 } 604 605 #[derive(Debug)] 606 pub struct ProcessControlBlock { 607 /// 当前进程的pid 608 pid: Pid, 609 /// 当前进程的线程组id(这个值在同一个线程组内永远不变) 610 tgid: Pid, 611 612 basic: RwLock<ProcessBasicInfo>, 613 /// 当前进程的自旋锁持有计数 614 preempt_count: AtomicUsize, 615 616 flags: LockFreeFlags<ProcessFlags>, 617 worker_private: SpinLock<Option<WorkerPrivate>>, 618 /// 进程的内核栈 619 kernel_stack: RwLock<KernelStack>, 620 621 /// 系统调用栈 622 syscall_stack: RwLock<KernelStack>, 623 624 /// 与调度相关的信息 625 sched_info: ProcessSchedulerInfo, 626 /// 与处理器架构相关的信息 627 arch_info: SpinLock<ArchPCBInfo>, 628 /// 与信号处理相关的信息(似乎可以是无锁的) 629 sig_info: RwLock<ProcessSignalInfo>, 630 /// 信号处理结构体 631 sig_struct: SpinLock<SignalStruct>, 632 /// 退出信号S 633 exit_signal: AtomicSignal, 634 635 /// 父进程指针 636 parent_pcb: RwLock<Weak<ProcessControlBlock>>, 637 /// 真实父进程指针 638 real_parent_pcb: RwLock<Weak<ProcessControlBlock>>, 639 640 /// 子进程链表 641 children: RwLock<Vec<Pid>>, 642 643 /// 等待队列 644 wait_queue: WaitQueue, 645 646 /// 线程信息 647 thread: RwLock<ThreadInfo>, 648 649 ///闹钟定时器 650 alarm_timer: SpinLock<Option<AlarmTimer>>, 651 652 /// 进程的robust lock列表 653 robust_list: RwLock<Option<RobustListHead>>, 654 655 /// 进程作为主体的凭证集 656 cred: SpinLock<Cred>, 657 } 658 659 impl ProcessControlBlock { 660 /// Generate a new pcb. 661 /// 662 /// ## 参数 663 /// 664 /// - `name` : 进程的名字 665 /// - `kstack` : 进程的内核栈 666 /// 667 /// ## 返回值 668 /// 669 /// 返回一个新的pcb 670 pub fn new(name: String, kstack: KernelStack) -> Arc<Self> { 671 return Self::do_create_pcb(name, kstack, false); 672 } 673 674 /// 创建一个新的idle进程 675 /// 676 /// 请注意,这个函数只能在进程管理初始化的时候调用。 677 pub fn new_idle(cpu_id: u32, kstack: KernelStack) -> Arc<Self> { 678 let name = format!("idle-{}", cpu_id); 679 return Self::do_create_pcb(name, kstack, true); 680 } 681 682 /// # 函数的功能 683 /// 684 /// 返回此函数是否是内核进程 685 /// 686 /// # 返回值 687 /// 688 /// 若进程是内核进程则返回true 否则返回false 689 pub fn is_kthread(&self) -> bool { 690 return matches!(self.flags(), &mut ProcessFlags::KTHREAD); 691 } 692 693 #[inline(never)] 694 fn do_create_pcb(name: String, kstack: KernelStack, is_idle: bool) -> Arc<Self> { 695 let (pid, ppid, cwd, cred) = if is_idle { 696 let cred = INIT_CRED.clone(); 697 (Pid(0), Pid(0), "/".to_string(), cred) 698 } else { 699 let ppid = ProcessManager::current_pcb().pid(); 700 let mut cred = ProcessManager::current_pcb().cred(); 701 cred.cap_permitted = cred.cap_ambient; 702 cred.cap_effective = cred.cap_ambient; 703 let cwd = ProcessManager::current_pcb().basic().cwd(); 704 (Self::generate_pid(), ppid, cwd, cred) 705 }; 706 707 let basic_info = ProcessBasicInfo::new(Pid(0), ppid, name, cwd, None); 708 let preempt_count = AtomicUsize::new(0); 709 let flags = unsafe { LockFreeFlags::new(ProcessFlags::empty()) }; 710 711 let sched_info = ProcessSchedulerInfo::new(None); 712 let arch_info = SpinLock::new(ArchPCBInfo::new(&kstack)); 713 714 let ppcb: Weak<ProcessControlBlock> = ProcessManager::find(ppid) 715 .map(|p| Arc::downgrade(&p)) 716 .unwrap_or_default(); 717 718 let pcb = Self { 719 pid, 720 tgid: pid, 721 basic: basic_info, 722 preempt_count, 723 flags, 724 kernel_stack: RwLock::new(kstack), 725 syscall_stack: RwLock::new(KernelStack::new().unwrap()), 726 worker_private: SpinLock::new(None), 727 sched_info, 728 arch_info, 729 sig_info: RwLock::new(ProcessSignalInfo::default()), 730 sig_struct: SpinLock::new(SignalStruct::new()), 731 exit_signal: AtomicSignal::new(Signal::SIGCHLD), 732 parent_pcb: RwLock::new(ppcb.clone()), 733 real_parent_pcb: RwLock::new(ppcb), 734 children: RwLock::new(Vec::new()), 735 wait_queue: WaitQueue::default(), 736 thread: RwLock::new(ThreadInfo::new()), 737 alarm_timer: SpinLock::new(None), 738 robust_list: RwLock::new(None), 739 cred: SpinLock::new(cred), 740 }; 741 742 // 初始化系统调用栈 743 #[cfg(target_arch = "x86_64")] 744 pcb.arch_info 745 .lock() 746 .init_syscall_stack(&pcb.syscall_stack.read()); 747 748 let pcb = Arc::new(pcb); 749 750 pcb.sched_info() 751 .sched_entity() 752 .force_mut() 753 .set_pcb(Arc::downgrade(&pcb)); 754 // 设置进程的arc指针到内核栈和系统调用栈的最低地址处 755 unsafe { 756 pcb.kernel_stack 757 .write() 758 .set_pcb(Arc::downgrade(&pcb)) 759 .unwrap(); 760 761 pcb.syscall_stack 762 .write() 763 .set_pcb(Arc::downgrade(&pcb)) 764 .unwrap() 765 }; 766 767 // 将当前pcb加入父进程的子进程哈希表中 768 if pcb.pid() > Pid(1) { 769 if let Some(ppcb_arc) = pcb.parent_pcb.read_irqsave().upgrade() { 770 let mut children = ppcb_arc.children.write_irqsave(); 771 children.push(pcb.pid()); 772 } else { 773 panic!("parent pcb is None"); 774 } 775 } 776 777 return pcb; 778 } 779 780 /// 生成一个新的pid 781 #[inline(always)] 782 fn generate_pid() -> Pid { 783 static NEXT_PID: AtomicPid = AtomicPid::new(Pid(1)); 784 return NEXT_PID.fetch_add(Pid(1), Ordering::SeqCst); 785 } 786 787 /// 返回当前进程的锁持有计数 788 #[inline(always)] 789 pub fn preempt_count(&self) -> usize { 790 return self.preempt_count.load(Ordering::SeqCst); 791 } 792 793 /// 增加当前进程的锁持有计数 794 #[inline(always)] 795 pub fn preempt_disable(&self) { 796 self.preempt_count.fetch_add(1, Ordering::SeqCst); 797 } 798 799 /// 减少当前进程的锁持有计数 800 #[inline(always)] 801 pub fn preempt_enable(&self) { 802 self.preempt_count.fetch_sub(1, Ordering::SeqCst); 803 } 804 805 #[inline(always)] 806 pub unsafe fn set_preempt_count(&self, count: usize) { 807 self.preempt_count.store(count, Ordering::SeqCst); 808 } 809 810 #[inline(always)] 811 pub fn flags(&self) -> &mut ProcessFlags { 812 return self.flags.get_mut(); 813 } 814 815 /// 请注意,这个值能在中断上下文中读取,但不能被中断上下文修改 816 /// 否则会导致死锁 817 #[inline(always)] 818 pub fn basic(&self) -> RwLockReadGuard<ProcessBasicInfo> { 819 return self.basic.read_irqsave(); 820 } 821 822 #[inline(always)] 823 pub fn set_name(&self, name: String) { 824 self.basic.write().set_name(name); 825 } 826 827 #[inline(always)] 828 pub fn basic_mut(&self) -> RwLockWriteGuard<ProcessBasicInfo> { 829 return self.basic.write_irqsave(); 830 } 831 832 /// # 获取arch info的锁,同时关闭中断 833 #[inline(always)] 834 pub fn arch_info_irqsave(&self) -> SpinLockGuard<ArchPCBInfo> { 835 return self.arch_info.lock_irqsave(); 836 } 837 838 /// # 获取arch info的锁,但是不关闭中断 839 /// 840 /// 由于arch info在进程切换的时候会使用到, 841 /// 因此在中断上下文外,获取arch info 而不irqsave是不安全的. 842 /// 843 /// 只能在以下情况下使用这个函数: 844 /// - 在中断上下文中(中断已经禁用),获取arch info的锁。 845 /// - 刚刚创建新的pcb 846 #[inline(always)] 847 pub unsafe fn arch_info(&self) -> SpinLockGuard<ArchPCBInfo> { 848 return self.arch_info.lock(); 849 } 850 851 #[inline(always)] 852 pub fn kernel_stack(&self) -> RwLockReadGuard<KernelStack> { 853 return self.kernel_stack.read(); 854 } 855 856 pub unsafe fn kernel_stack_force_ref(&self) -> &KernelStack { 857 self.kernel_stack.force_get_ref() 858 } 859 860 #[inline(always)] 861 #[allow(dead_code)] 862 pub fn kernel_stack_mut(&self) -> RwLockWriteGuard<KernelStack> { 863 return self.kernel_stack.write(); 864 } 865 866 #[inline(always)] 867 pub fn sched_info(&self) -> &ProcessSchedulerInfo { 868 return &self.sched_info; 869 } 870 871 #[inline(always)] 872 pub fn worker_private(&self) -> SpinLockGuard<Option<WorkerPrivate>> { 873 return self.worker_private.lock(); 874 } 875 876 #[inline(always)] 877 pub fn pid(&self) -> Pid { 878 return self.pid; 879 } 880 881 #[inline(always)] 882 pub fn tgid(&self) -> Pid { 883 return self.tgid; 884 } 885 886 /// 获取文件描述符表的Arc指针 887 #[inline(always)] 888 pub fn fd_table(&self) -> Arc<RwLock<FileDescriptorVec>> { 889 return self.basic.read().fd_table().unwrap(); 890 } 891 892 #[inline(always)] 893 pub fn cred(&self) -> Cred { 894 self.cred.lock().clone() 895 } 896 897 /// 根据文件描述符序号,获取socket对象的Arc指针 898 /// 899 /// ## 参数 900 /// 901 /// - `fd` 文件描述符序号 902 /// 903 /// ## 返回值 904 /// 905 /// Option(&mut Box<dyn Socket>) socket对象的可变引用. 如果文件描述符不是socket,那么返回None 906 pub fn get_socket(&self, fd: i32) -> Option<Arc<SocketInode>> { 907 let binding = ProcessManager::current_pcb().fd_table(); 908 let fd_table_guard = binding.read(); 909 910 let f = fd_table_guard.get_file_by_fd(fd)?; 911 drop(fd_table_guard); 912 913 if f.file_type() != FileType::Socket { 914 return None; 915 } 916 let socket: Arc<SocketInode> = f 917 .inode() 918 .downcast_arc::<SocketInode>() 919 .expect("Not a socket inode"); 920 return Some(socket); 921 } 922 923 /// 当前进程退出时,让初始进程收养所有子进程 924 unsafe fn adopt_childen(&self) -> Result<(), SystemError> { 925 match ProcessManager::find(Pid(1)) { 926 Some(init_pcb) => { 927 let childen_guard = self.children.write(); 928 let mut init_childen_guard = init_pcb.children.write(); 929 930 childen_guard.iter().for_each(|pid| { 931 init_childen_guard.push(*pid); 932 }); 933 934 return Ok(()); 935 } 936 _ => Err(SystemError::ECHILD), 937 } 938 } 939 940 /// 生成进程的名字 941 pub fn generate_name(program_path: &str, args: &Vec<CString>) -> String { 942 let mut name = program_path.to_string(); 943 for arg in args { 944 name.push(' '); 945 name.push_str(arg.to_string_lossy().as_ref()); 946 } 947 return name; 948 } 949 950 pub fn sig_info_irqsave(&self) -> RwLockReadGuard<ProcessSignalInfo> { 951 self.sig_info.read_irqsave() 952 } 953 954 pub fn try_siginfo_irqsave(&self, times: u8) -> Option<RwLockReadGuard<ProcessSignalInfo>> { 955 for _ in 0..times { 956 if let Some(r) = self.sig_info.try_read_irqsave() { 957 return Some(r); 958 } 959 } 960 961 return None; 962 } 963 964 pub fn sig_info_mut(&self) -> RwLockWriteGuard<ProcessSignalInfo> { 965 self.sig_info.write_irqsave() 966 } 967 968 pub fn try_siginfo_mut(&self, times: u8) -> Option<RwLockWriteGuard<ProcessSignalInfo>> { 969 for _ in 0..times { 970 if let Some(r) = self.sig_info.try_write_irqsave() { 971 return Some(r); 972 } 973 } 974 975 return None; 976 } 977 978 pub fn sig_struct(&self) -> SpinLockGuard<SignalStruct> { 979 self.sig_struct.lock_irqsave() 980 } 981 982 pub fn try_sig_struct_irqsave(&self, times: u8) -> Option<SpinLockGuard<SignalStruct>> { 983 for _ in 0..times { 984 if let Ok(r) = self.sig_struct.try_lock_irqsave() { 985 return Some(r); 986 } 987 } 988 989 return None; 990 } 991 992 pub fn sig_struct_irqsave(&self) -> SpinLockGuard<SignalStruct> { 993 self.sig_struct.lock_irqsave() 994 } 995 996 #[inline(always)] 997 pub fn get_robust_list(&self) -> RwLockReadGuard<Option<RobustListHead>> { 998 return self.robust_list.read_irqsave(); 999 } 1000 1001 #[inline(always)] 1002 pub fn set_robust_list(&self, new_robust_list: Option<RobustListHead>) { 1003 *self.robust_list.write_irqsave() = new_robust_list; 1004 } 1005 1006 pub fn alarm_timer_irqsave(&self) -> SpinLockGuard<Option<AlarmTimer>> { 1007 return self.alarm_timer.lock_irqsave(); 1008 } 1009 } 1010 1011 impl Drop for ProcessControlBlock { 1012 fn drop(&mut self) { 1013 let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 1014 // 在ProcFS中,解除进程的注册 1015 procfs_unregister_pid(self.pid()) 1016 .unwrap_or_else(|e| panic!("procfs_unregister_pid failed: error: {e:?}")); 1017 1018 if let Some(ppcb) = self.parent_pcb.read_irqsave().upgrade() { 1019 ppcb.children 1020 .write_irqsave() 1021 .retain(|pid| *pid != self.pid()); 1022 } 1023 1024 drop(irq_guard); 1025 } 1026 } 1027 1028 /// 线程信息 1029 #[derive(Debug)] 1030 pub struct ThreadInfo { 1031 // 来自用户空间记录用户线程id的地址,在该线程结束时将该地址置0以通知父进程 1032 clear_child_tid: Option<VirtAddr>, 1033 set_child_tid: Option<VirtAddr>, 1034 1035 vfork_done: Option<Arc<Completion>>, 1036 /// 线程组的组长 1037 group_leader: Weak<ProcessControlBlock>, 1038 } 1039 1040 impl ThreadInfo { 1041 pub fn new() -> Self { 1042 Self { 1043 clear_child_tid: None, 1044 set_child_tid: None, 1045 vfork_done: None, 1046 group_leader: Weak::default(), 1047 } 1048 } 1049 1050 pub fn group_leader(&self) -> Option<Arc<ProcessControlBlock>> { 1051 return self.group_leader.upgrade(); 1052 } 1053 } 1054 1055 /// 进程的基本信息 1056 /// 1057 /// 这个结构体保存进程的基本信息,主要是那些不会随着进程的运行而经常改变的信息。 1058 #[derive(Debug)] 1059 pub struct ProcessBasicInfo { 1060 /// 当前进程的进程组id 1061 pgid: Pid, 1062 /// 当前进程的父进程的pid 1063 ppid: Pid, 1064 /// 进程的名字 1065 name: String, 1066 1067 /// 当前进程的工作目录 1068 cwd: String, 1069 1070 /// 用户地址空间 1071 user_vm: Option<Arc<AddressSpace>>, 1072 1073 /// 文件描述符表 1074 fd_table: Option<Arc<RwLock<FileDescriptorVec>>>, 1075 } 1076 1077 impl ProcessBasicInfo { 1078 #[inline(never)] 1079 pub fn new( 1080 pgid: Pid, 1081 ppid: Pid, 1082 name: String, 1083 cwd: String, 1084 user_vm: Option<Arc<AddressSpace>>, 1085 ) -> RwLock<Self> { 1086 let fd_table = Arc::new(RwLock::new(FileDescriptorVec::new())); 1087 return RwLock::new(Self { 1088 pgid, 1089 ppid, 1090 name, 1091 cwd, 1092 user_vm, 1093 fd_table: Some(fd_table), 1094 }); 1095 } 1096 1097 pub fn pgid(&self) -> Pid { 1098 return self.pgid; 1099 } 1100 1101 pub fn ppid(&self) -> Pid { 1102 return self.ppid; 1103 } 1104 1105 pub fn name(&self) -> &str { 1106 return &self.name; 1107 } 1108 1109 pub fn set_name(&mut self, name: String) { 1110 self.name = name; 1111 } 1112 1113 pub fn cwd(&self) -> String { 1114 return self.cwd.clone(); 1115 } 1116 pub fn set_cwd(&mut self, path: String) { 1117 return self.cwd = path; 1118 } 1119 1120 pub fn user_vm(&self) -> Option<Arc<AddressSpace>> { 1121 return self.user_vm.clone(); 1122 } 1123 1124 pub unsafe fn set_user_vm(&mut self, user_vm: Option<Arc<AddressSpace>>) { 1125 self.user_vm = user_vm; 1126 } 1127 1128 pub fn fd_table(&self) -> Option<Arc<RwLock<FileDescriptorVec>>> { 1129 return self.fd_table.clone(); 1130 } 1131 1132 pub fn set_fd_table(&mut self, fd_table: Option<Arc<RwLock<FileDescriptorVec>>>) { 1133 self.fd_table = fd_table; 1134 } 1135 } 1136 1137 #[derive(Debug)] 1138 pub struct ProcessSchedulerInfo { 1139 /// 当前进程所在的cpu 1140 on_cpu: AtomicProcessorId, 1141 /// 如果当前进程等待被迁移到另一个cpu核心上(也就是flags中的PF_NEED_MIGRATE被置位), 1142 /// 该字段存储要被迁移到的目标处理器核心号 1143 // migrate_to: AtomicProcessorId, 1144 inner_locked: RwLock<InnerSchedInfo>, 1145 /// 进程的调度优先级 1146 // priority: SchedPriority, 1147 /// 当前进程的虚拟运行时间 1148 // virtual_runtime: AtomicIsize, 1149 /// 由实时调度器管理的时间片 1150 // rt_time_slice: AtomicIsize, 1151 pub sched_stat: RwLock<SchedInfo>, 1152 /// 调度策略 1153 pub sched_policy: RwLock<crate::sched::SchedPolicy>, 1154 /// cfs调度实体 1155 pub sched_entity: Arc<FairSchedEntity>, 1156 pub on_rq: SpinLock<OnRq>, 1157 1158 pub prio_data: RwLock<PrioData>, 1159 } 1160 1161 #[derive(Debug, Default)] 1162 #[allow(dead_code)] 1163 pub struct SchedInfo { 1164 /// 记录任务在特定 CPU 上运行的次数 1165 pub pcount: usize, 1166 /// 记录任务等待在运行队列上的时间 1167 pub run_delay: usize, 1168 /// 记录任务上次在 CPU 上运行的时间戳 1169 pub last_arrival: u64, 1170 /// 记录任务上次被加入到运行队列中的时间戳 1171 pub last_queued: u64, 1172 } 1173 1174 #[derive(Debug)] 1175 #[allow(dead_code)] 1176 pub struct PrioData { 1177 pub prio: i32, 1178 pub static_prio: i32, 1179 pub normal_prio: i32, 1180 } 1181 1182 impl Default for PrioData { 1183 fn default() -> Self { 1184 Self { 1185 prio: MAX_PRIO - 20, 1186 static_prio: MAX_PRIO - 20, 1187 normal_prio: MAX_PRIO - 20, 1188 } 1189 } 1190 } 1191 1192 #[derive(Debug)] 1193 pub struct InnerSchedInfo { 1194 /// 当前进程的状态 1195 state: ProcessState, 1196 /// 进程的调度策略 1197 sleep: bool, 1198 } 1199 1200 impl InnerSchedInfo { 1201 pub fn state(&self) -> ProcessState { 1202 return self.state; 1203 } 1204 1205 pub fn set_state(&mut self, state: ProcessState) { 1206 self.state = state; 1207 } 1208 1209 pub fn set_sleep(&mut self) { 1210 self.sleep = true; 1211 } 1212 1213 pub fn set_wakeup(&mut self) { 1214 self.sleep = false; 1215 } 1216 1217 pub fn is_mark_sleep(&self) -> bool { 1218 self.sleep 1219 } 1220 } 1221 1222 impl ProcessSchedulerInfo { 1223 #[inline(never)] 1224 pub fn new(on_cpu: Option<ProcessorId>) -> Self { 1225 let cpu_id = on_cpu.unwrap_or(ProcessorId::INVALID); 1226 return Self { 1227 on_cpu: AtomicProcessorId::new(cpu_id), 1228 // migrate_to: AtomicProcessorId::new(ProcessorId::INVALID), 1229 inner_locked: RwLock::new(InnerSchedInfo { 1230 state: ProcessState::Blocked(false), 1231 sleep: false, 1232 }), 1233 // virtual_runtime: AtomicIsize::new(0), 1234 // rt_time_slice: AtomicIsize::new(0), 1235 // priority: SchedPriority::new(100).unwrap(), 1236 sched_stat: RwLock::new(SchedInfo::default()), 1237 sched_policy: RwLock::new(crate::sched::SchedPolicy::CFS), 1238 sched_entity: FairSchedEntity::new(), 1239 on_rq: SpinLock::new(OnRq::None), 1240 prio_data: RwLock::new(PrioData::default()), 1241 }; 1242 } 1243 1244 pub fn sched_entity(&self) -> Arc<FairSchedEntity> { 1245 return self.sched_entity.clone(); 1246 } 1247 1248 pub fn on_cpu(&self) -> Option<ProcessorId> { 1249 let on_cpu = self.on_cpu.load(Ordering::SeqCst); 1250 if on_cpu == ProcessorId::INVALID { 1251 return None; 1252 } else { 1253 return Some(on_cpu); 1254 } 1255 } 1256 1257 pub fn set_on_cpu(&self, on_cpu: Option<ProcessorId>) { 1258 if let Some(cpu_id) = on_cpu { 1259 self.on_cpu.store(cpu_id, Ordering::SeqCst); 1260 } else { 1261 self.on_cpu.store(ProcessorId::INVALID, Ordering::SeqCst); 1262 } 1263 } 1264 1265 // pub fn migrate_to(&self) -> Option<ProcessorId> { 1266 // let migrate_to = self.migrate_to.load(Ordering::SeqCst); 1267 // if migrate_to == ProcessorId::INVALID { 1268 // return None; 1269 // } else { 1270 // return Some(migrate_to); 1271 // } 1272 // } 1273 1274 // pub fn set_migrate_to(&self, migrate_to: Option<ProcessorId>) { 1275 // if let Some(data) = migrate_to { 1276 // self.migrate_to.store(data, Ordering::SeqCst); 1277 // } else { 1278 // self.migrate_to 1279 // .store(ProcessorId::INVALID, Ordering::SeqCst) 1280 // } 1281 // } 1282 1283 pub fn inner_lock_write_irqsave(&self) -> RwLockWriteGuard<InnerSchedInfo> { 1284 return self.inner_locked.write_irqsave(); 1285 } 1286 1287 pub fn inner_lock_read_irqsave(&self) -> RwLockReadGuard<InnerSchedInfo> { 1288 return self.inner_locked.read_irqsave(); 1289 } 1290 1291 // pub fn inner_lock_try_read_irqsave( 1292 // &self, 1293 // times: u8, 1294 // ) -> Option<RwLockReadGuard<InnerSchedInfo>> { 1295 // for _ in 0..times { 1296 // if let Some(r) = self.inner_locked.try_read_irqsave() { 1297 // return Some(r); 1298 // } 1299 // } 1300 1301 // return None; 1302 // } 1303 1304 // pub fn inner_lock_try_upgradable_read_irqsave( 1305 // &self, 1306 // times: u8, 1307 // ) -> Option<RwLockUpgradableGuard<InnerSchedInfo>> { 1308 // for _ in 0..times { 1309 // if let Some(r) = self.inner_locked.try_upgradeable_read_irqsave() { 1310 // return Some(r); 1311 // } 1312 // } 1313 1314 // return None; 1315 // } 1316 1317 // pub fn virtual_runtime(&self) -> isize { 1318 // return self.virtual_runtime.load(Ordering::SeqCst); 1319 // } 1320 1321 // pub fn set_virtual_runtime(&self, virtual_runtime: isize) { 1322 // self.virtual_runtime 1323 // .store(virtual_runtime, Ordering::SeqCst); 1324 // } 1325 // pub fn increase_virtual_runtime(&self, delta: isize) { 1326 // self.virtual_runtime.fetch_add(delta, Ordering::SeqCst); 1327 // } 1328 1329 // pub fn rt_time_slice(&self) -> isize { 1330 // return self.rt_time_slice.load(Ordering::SeqCst); 1331 // } 1332 1333 // pub fn set_rt_time_slice(&self, rt_time_slice: isize) { 1334 // self.rt_time_slice.store(rt_time_slice, Ordering::SeqCst); 1335 // } 1336 1337 // pub fn increase_rt_time_slice(&self, delta: isize) { 1338 // self.rt_time_slice.fetch_add(delta, Ordering::SeqCst); 1339 // } 1340 1341 pub fn policy(&self) -> crate::sched::SchedPolicy { 1342 return *self.sched_policy.read_irqsave(); 1343 } 1344 } 1345 1346 #[derive(Debug, Clone)] 1347 pub struct KernelStack { 1348 stack: Option<AlignedBox<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>>, 1349 /// 标记该内核栈是否可以被释放 1350 can_be_freed: bool, 1351 } 1352 1353 impl KernelStack { 1354 pub const SIZE: usize = 0x4000; 1355 pub const ALIGN: usize = 0x4000; 1356 1357 pub fn new() -> Result<Self, SystemError> { 1358 return Ok(Self { 1359 stack: Some( 1360 AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_zeroed()?, 1361 ), 1362 can_be_freed: true, 1363 }); 1364 } 1365 1366 /// 根据已有的空间,构造一个内核栈结构体 1367 /// 1368 /// 仅仅用于BSP启动时,为idle进程构造内核栈。其他时候使用这个函数,很可能造成错误! 1369 pub unsafe fn from_existed(base: VirtAddr) -> Result<Self, SystemError> { 1370 if base.is_null() || !base.check_aligned(Self::ALIGN) { 1371 return Err(SystemError::EFAULT); 1372 } 1373 1374 return Ok(Self { 1375 stack: Some( 1376 AlignedBox::<[u8; KernelStack::SIZE], { KernelStack::ALIGN }>::new_unchecked( 1377 base.data() as *mut [u8; KernelStack::SIZE], 1378 ), 1379 ), 1380 can_be_freed: false, 1381 }); 1382 } 1383 1384 /// 返回内核栈的起始虚拟地址(低地址) 1385 pub fn start_address(&self) -> VirtAddr { 1386 return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize); 1387 } 1388 1389 /// 返回内核栈的结束虚拟地址(高地址)(不包含该地址) 1390 pub fn stack_max_address(&self) -> VirtAddr { 1391 return VirtAddr::new(self.stack.as_ref().unwrap().as_ptr() as usize + Self::SIZE); 1392 } 1393 1394 pub unsafe fn set_pcb(&mut self, pcb: Weak<ProcessControlBlock>) -> Result<(), SystemError> { 1395 // 将一个Weak<ProcessControlBlock>放到内核栈的最低地址处 1396 let p: *const ProcessControlBlock = Weak::into_raw(pcb); 1397 let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock; 1398 1399 // 如果内核栈的最低地址处已经有了一个pcb,那么,这里就不再设置,直接返回错误 1400 if unlikely(unsafe { !(*stack_bottom_ptr).is_null() }) { 1401 error!("kernel stack bottom is not null: {:p}", *stack_bottom_ptr); 1402 return Err(SystemError::EPERM); 1403 } 1404 // 将pcb的地址放到内核栈的最低地址处 1405 unsafe { 1406 *stack_bottom_ptr = p; 1407 } 1408 1409 return Ok(()); 1410 } 1411 1412 /// 清除内核栈的pcb指针 1413 /// 1414 /// ## 参数 1415 /// 1416 /// - `force` : 如果为true,那么,即使该内核栈的pcb指针不为null,也会被强制清除而不处理Weak指针问题 1417 pub unsafe fn clear_pcb(&mut self, force: bool) { 1418 let stack_bottom_ptr = self.start_address().data() as *mut *const ProcessControlBlock; 1419 if unlikely(unsafe { (*stack_bottom_ptr).is_null() }) { 1420 return; 1421 } 1422 1423 if !force { 1424 let pcb_ptr: Weak<ProcessControlBlock> = Weak::from_raw(*stack_bottom_ptr); 1425 drop(pcb_ptr); 1426 } 1427 1428 *stack_bottom_ptr = core::ptr::null(); 1429 } 1430 1431 /// 返回指向当前内核栈pcb的Arc指针 1432 #[allow(dead_code)] 1433 pub unsafe fn pcb(&self) -> Option<Arc<ProcessControlBlock>> { 1434 // 从内核栈的最低地址处取出pcb的地址 1435 let p = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock; 1436 if unlikely(unsafe { (*p).is_null() }) { 1437 return None; 1438 } 1439 1440 // 为了防止内核栈的pcb指针被释放,这里需要将其包装一下,使得Arc的drop不会被调用 1441 let weak_wrapper: ManuallyDrop<Weak<ProcessControlBlock>> = 1442 ManuallyDrop::new(Weak::from_raw(*p)); 1443 1444 let new_arc: Arc<ProcessControlBlock> = weak_wrapper.upgrade()?; 1445 return Some(new_arc); 1446 } 1447 } 1448 1449 impl Drop for KernelStack { 1450 fn drop(&mut self) { 1451 if self.stack.is_some() { 1452 let ptr = self.stack.as_ref().unwrap().as_ptr() as *const *const ProcessControlBlock; 1453 if unsafe { !(*ptr).is_null() } { 1454 let pcb_ptr: Weak<ProcessControlBlock> = unsafe { Weak::from_raw(*ptr) }; 1455 drop(pcb_ptr); 1456 } 1457 } 1458 // 如果该内核栈不可以被释放,那么,这里就forget,不调用AlignedBox的drop函数 1459 if !self.can_be_freed { 1460 let bx = self.stack.take(); 1461 core::mem::forget(bx); 1462 } 1463 } 1464 } 1465 1466 pub fn process_init() { 1467 ProcessManager::init(); 1468 } 1469 1470 #[derive(Debug)] 1471 pub struct ProcessSignalInfo { 1472 // 当前进程 1473 sig_block: SigSet, 1474 // sig_pending 中存储当前线程要处理的信号 1475 sig_pending: SigPending, 1476 // sig_shared_pending 中存储当前线程所属进程要处理的信号 1477 sig_shared_pending: SigPending, 1478 // 当前进程对应的tty 1479 tty: Option<Arc<TtyCore>>, 1480 } 1481 1482 impl ProcessSignalInfo { 1483 pub fn sig_block(&self) -> &SigSet { 1484 &self.sig_block 1485 } 1486 1487 pub fn sig_pending(&self) -> &SigPending { 1488 &self.sig_pending 1489 } 1490 1491 pub fn sig_pending_mut(&mut self) -> &mut SigPending { 1492 &mut self.sig_pending 1493 } 1494 1495 pub fn sig_block_mut(&mut self) -> &mut SigSet { 1496 &mut self.sig_block 1497 } 1498 1499 pub fn sig_shared_pending_mut(&mut self) -> &mut SigPending { 1500 &mut self.sig_shared_pending 1501 } 1502 1503 pub fn sig_shared_pending(&self) -> &SigPending { 1504 &self.sig_shared_pending 1505 } 1506 1507 pub fn tty(&self) -> Option<Arc<TtyCore>> { 1508 self.tty.clone() 1509 } 1510 1511 pub fn set_tty(&mut self, tty: Arc<TtyCore>) { 1512 self.tty = Some(tty); 1513 } 1514 1515 /// 从 pcb 的 siginfo中取出下一个要处理的信号,先处理线程信号,再处理进程信号 1516 /// 1517 /// ## 参数 1518 /// 1519 /// - `sig_mask` 被忽略掉的信号 1520 /// 1521 pub fn dequeue_signal(&mut self, sig_mask: &SigSet) -> (Signal, Option<SigInfo>) { 1522 let res = self.sig_pending.dequeue_signal(sig_mask); 1523 if res.0 != Signal::INVALID { 1524 return res; 1525 } else { 1526 return self.sig_shared_pending.dequeue_signal(sig_mask); 1527 } 1528 } 1529 } 1530 1531 impl Default for ProcessSignalInfo { 1532 fn default() -> Self { 1533 Self { 1534 sig_block: SigSet::empty(), 1535 sig_pending: SigPending::default(), 1536 sig_shared_pending: SigPending::default(), 1537 tty: None, 1538 } 1539 } 1540 } 1541