1 // 进程的用户空间内存管理 2 3 use core::{ 4 cmp, 5 hash::Hasher, 6 intrinsics::unlikely, 7 ops::Add, 8 sync::atomic::{compiler_fence, Ordering}, 9 }; 10 11 use alloc::{ 12 collections::BTreeMap, 13 sync::{Arc, Weak}, 14 vec::Vec, 15 }; 16 use hashbrown::HashSet; 17 18 use crate::{ 19 arch::{mm::PageMapper, CurrentIrqArch, MMArch}, 20 exception::InterruptArch, 21 libs::{ 22 align::page_align_up, 23 rwlock::{RwLock, RwLockWriteGuard}, 24 spinlock::{SpinLock, SpinLockGuard}, 25 }, 26 process::ProcessManager, 27 syscall::SystemError, 28 }; 29 30 use super::{ 31 allocator::page_frame::{ 32 deallocate_page_frames, PageFrameCount, PhysPageFrame, VirtPageFrame, VirtPageFrameIter, 33 }, 34 page::{Flusher, InactiveFlusher, PageFlags, PageFlushAll}, 35 syscall::{MapFlags, ProtFlags}, 36 MemoryManagementArch, PageTableKind, VirtAddr, VirtRegion, 37 }; 38 39 /// MMAP_MIN_ADDR的默认值 40 /// 以下内容来自linux-5.19: 41 /// This is the portion of low virtual memory which should be protected 42 // from userspace allocation. Keeping a user from writing to low pages 43 // can help reduce the impact of kernel NULL pointer bugs. 44 // For most ia64, ppc64 and x86 users with lots of address space 45 // a value of 65536 is reasonable and should cause no problems. 46 // On arm and other archs it should not be higher than 32768. 47 // Programs which use vm86 functionality or have some need to map 48 // this low address space will need CAP_SYS_RAWIO or disable this 49 // protection by setting the value to 0. 
pub const DEFAULT_MMAP_MIN_ADDR: usize = 65536;

#[derive(Debug)]
pub struct AddressSpace {
    inner: RwLock<InnerAddressSpace>,
}

impl AddressSpace {
    /// Create a new user address space; when `create_stack` is true a default-sized
    /// user stack is mapped into it as well.
    pub fn new(create_stack: bool) -> Result<Arc<Self>, SystemError> {
        let inner = InnerAddressSpace::new(create_stack)?;
        let result = Self {
            inner: RwLock::new(inner),
        };
        return Ok(Arc::new(result));
    }

    /// Get an `Arc` pointer to the current process's address space from its PCB.
    ///
    /// Panics (via `expect`) if the current process has no user address space,
    /// e.g. when called from a pure kernel thread.
    pub fn current() -> Result<Arc<AddressSpace>, SystemError> {
        let vm = ProcessManager::current_pcb()
            .basic()
            .user_vm()
            .expect("Current process has no address space");

        return Ok(vm);
    }

    /// Returns true if `self` is the current process's address space
    /// (compared by `Arc` pointer identity, not by content).
    pub fn is_current(self: &Arc<Self>) -> bool {
        let current = Self::current();
        if let Ok(current) = current {
            return Arc::ptr_eq(&current, self);
        }
        return false;
    }
}

impl core::ops::Deref for AddressSpace {
    type Target = RwLock<InnerAddressSpace>;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl core::ops::DerefMut for AddressSpace {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}

/// @brief User address space structure (one per process).
#[derive(Debug)]
pub struct InnerAddressSpace {
    pub user_mapper: UserMapper,
    pub mappings: UserMappings,
    /// Lowest address user mappings are allowed at (see `DEFAULT_MMAP_MIN_ADDR`).
    pub mmap_min: VirtAddr,
    /// User stack information structure
    pub user_stack: Option<UserStack>,

    pub elf_brk_start: VirtAddr,
    pub elf_brk: VirtAddr,

    /// Start address of this process's heap
    pub brk_start: VirtAddr,
    /// End address (exclusive) of this process's heap
    pub brk: VirtAddr,

    pub start_code: VirtAddr,
    pub end_code: VirtAddr,
    pub start_data: VirtAddr,
    pub end_data: VirtAddr,
}

impl InnerAddressSpace {
    /// Build a fresh address space with an empty mapping set and, optionally,
    /// a default-sized user stack.
    pub fn new(create_stack: bool) -> Result<Self, SystemError> {
        let mut result = Self {
            user_mapper: MMArch::setup_new_usermapper()?,
            mappings: UserMappings::new(),
            mmap_min: VirtAddr(DEFAULT_MMAP_MIN_ADDR),
            elf_brk_start: VirtAddr::new(0),
            elf_brk: VirtAddr::new(0),
            brk_start: MMArch::USER_BRK_START,
            brk: MMArch::USER_BRK_START,
            user_stack: None,
            start_code: VirtAddr(0),
            end_code: VirtAddr(0),
            start_data: VirtAddr(0),
            end_data: VirtAddr(0),
        };
        if create_stack {
            // kdebug!("to create user stack.");
            result.new_user_stack(UserStack::DEFAULT_USER_STACK_SIZE)?;
        }

        return Ok(result);
    }

    /// Try to clone the current address space, copying every mapping into the clone.
    ///
    /// Interrupts stay disabled (via `irq_guard`) for the whole copy.
    /// NOTE(review): assumes `self.user_stack` is `Some` — the two `unwrap()`s below
    /// panic for a stackless address space; confirm callers guarantee this.
    ///
    /// # Returns
    ///
    /// An `Arc` pointer to the new, cloned address space.
    pub fn try_clone(&mut self) -> Result<Arc<AddressSpace>, SystemError> {
        let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        let new_addr_space = AddressSpace::new(false)?;
        let mut new_guard = new_addr_space.write();

        // Copy the user-stack bookkeeping only; the stack *contents* are copied
        // below together with every other VMA.
        unsafe {
            new_guard.user_stack = Some(self.user_stack.as_ref().unwrap().clone_info_only());
        }
        let _current_stack_size = self.user_stack.as_ref().unwrap().stack_size();

        let current_mapper = &mut self.user_mapper.utable;

        // Copy the hole map so the clone's free-space bookkeeping matches ours.
        new_guard.mappings.vm_holes = self.mappings.vm_holes.clone();

        for vma in self.mappings.vmas.iter() {
            // TODO: once file mappings exist, detect them here and skip the page copy.

            let vma_guard: SpinLockGuard<'_, VMA> = vma.lock();
            let old_flags = vma_guard.flags();
            // Temporarily map the clone's pages writable so we can fill them;
            // the real flags are restored by the `remap` at the end of the loop.
            let tmp_flags: PageFlags<MMArch> = PageFlags::new().set_write(true);

            // Allocate zeroed pages in the clone at the same virtual region.
            let new_vma = VMA::zeroed(
                VirtPageFrame::new(vma_guard.region.start()),
                PageFrameCount::new(vma_guard.region.size() / MMArch::PAGE_SIZE),
                tmp_flags,
                &mut new_guard.user_mapper.utable,
                (),
            )?;
            new_guard.mappings.vmas.insert(new_vma.clone());
            // kdebug!("new vma: {:x?}", new_vma);
            let mut new_vma_guard = new_vma.lock();
            for page in new_vma_guard.pages().map(|p| p.virt_address()) {
                // kdebug!("page: {:x?}", page);
                // Resolve both sides to kernel-visible pointers through the
                // direct-mapping (phys_2_virt), then memcpy one page.
                let current_frame = unsafe {
                    MMArch::phys_2_virt(
                        current_mapper
                            .translate(page)
                            .expect("VMA page not mapped")
                            .0,
                    )
                }
                .expect("Phys2Virt: vaddr overflow.")
                .data() as *mut u8;

                let new_frame = unsafe {
                    MMArch::phys_2_virt(
                        new_guard
                            .user_mapper
                            .utable
                            .translate(page)
                            .expect("VMA page not mapped")
                            .0,
                    )
                }
                .expect("Phys2Virt: vaddr overflow.")
                .data() as *mut u8;

                unsafe {
                    // Copy the page contents.
                    new_frame.copy_from_nonoverlapping(current_frame, MMArch::PAGE_SIZE);
                }
            }
            drop(vma_guard);

            // Restore the original protection flags on the freshly filled pages.
            new_vma_guard.remap(old_flags, &mut new_guard.user_mapper.utable, ())?;
            drop(new_vma_guard);
        }
        drop(new_guard);
        drop(irq_guard);
        return Ok(new_addr_space);
    }

    /// Returns true if this address space is the one the CPU is currently using.
    #[inline]
    pub fn is_current(&self) -> bool {
        return self.user_mapper.utable.is_current();
    }

    /// Create an anonymous mapping.
    ///
    /// ## Parameters
    ///
    /// - `start_vaddr`: requested start address of the mapping (a hint)
    /// - `len`: length of the mapping
    /// - `prot_flags`: protection flags
    /// - `map_flags`: mapping flags
    /// - `round_to_min`: if `true`, a nonzero `start_vaddr` below `mmap_min` is raised
    ///   to `DEFAULT_MMAP_MIN_ADDR`; otherwise the hint is only rounded down to a
    ///   page boundary
    ///
    /// ## Returns
    ///
    /// The starting virtual page frame of the new mapping.
    pub fn map_anonymous(
        &mut self,
        start_vaddr: VirtAddr,
        len: usize,
        prot_flags: ProtFlags,
        map_flags: MapFlags,
        round_to_min: bool,
    ) -> Result<VirtPageFrame, SystemError> {
        // Helper that normalizes the address hint.
        let round_hint_to_min = |hint: VirtAddr| {
            // First round the hint down to a page boundary.
            let addr = hint.data() & (!MMArch::PAGE_OFFSET_MASK);
            // kdebug!("map_anonymous: hint = {:?}, addr = {addr:#x}", hint);
            // A nonzero hint below DEFAULT_MMAP_MIN_ADDR gets raised to it;
            // a zero hint means "no preference" and becomes None.
            if (addr != 0) && round_to_min && (addr < DEFAULT_MMAP_MIN_ADDR) {
                Some(VirtAddr::new(page_align_up(DEFAULT_MMAP_MIN_ADDR)))
            } else if addr == 0 {
                None
            } else {
                Some(VirtAddr::new(addr))
            }
        };
        // kdebug!("map_anonymous: start_vaddr = {:?}", start_vaddr);
        // kdebug!("map_anonymous: len(no align) = {}", len);

        let len = page_align_up(len);

        // kdebug!("map_anonymous: len = {}", len);

        let start_page: VirtPageFrame = self.mmap(
            round_hint_to_min(start_vaddr),
            PageFrameCount::from_bytes(len).unwrap(),
            prot_flags,
            map_flags,
            // Anonymous memory: back the region with freshly zeroed frames.
            move |page, count, flags, mapper, flusher| {
                Ok(VMA::zeroed(page, count, flags, mapper, flusher)?)
            },
        )?;

        return Ok(start_page);
    }

    /// Map pages into this process's address space.
    ///
    /// # Parameters
    ///
    /// - `addr`: start address of the mapping; `None` lets the kernel pick one
    /// - `page_count`: number of pages to map
    /// - `prot_flags`: protection flags
    /// - `map_flags`: mapping flags
    /// - `map_func`: callback that actually creates the VMA for the chosen region
    ///
    /// # Returns
    ///
    /// The starting virtual page frame of the mapping.
    ///
    /// # Errors
    ///
    /// - `EINVAL`: invalid argument (e.g. zero pages)
    pub fn mmap<
        F: FnOnce(
            VirtPageFrame,
            PageFrameCount,
            PageFlags<MMArch>,
            &mut PageMapper,
            &mut dyn Flusher<MMArch>,
        ) -> Result<Arc<LockedVMA>, SystemError>,
    >(
        &mut self,
        addr: Option<VirtAddr>,
        page_count: PageFrameCount,
        prot_flags: ProtFlags,
        map_flags: MapFlags,
        map_func: F,
    ) -> Result<VirtPageFrame, SystemError> {
        if page_count == PageFrameCount::new(0) {
            return Err(SystemError::EINVAL);
        }
        // kdebug!("mmap: addr: {addr:?}, page_count: {page_count:?}, prot_flags: {prot_flags:?}, map_flags: {map_flags:?}");

        // Find an unused region.
        let region = match addr {
            Some(vaddr) => {
                self.mappings
                    .find_free_at(self.mmap_min, vaddr, page_count.bytes(), map_flags)?
            }
            None => self
                .mappings
                .find_free(self.mmap_min, page_count.bytes())
                .ok_or(SystemError::ENOMEM)?,
        };

        let page = VirtPageFrame::new(region.start());

        // kdebug!("mmap: page: {:?}, region={region:?}", page.virt_address());

        compiler_fence(Ordering::SeqCst);
        // Choose the flusher: flush this CPU's TLB if we are mapping into the
        // active address space, otherwise defer via the inactive flusher.
        let (mut active, mut inactive);
        let flusher = if self.is_current() {
            active = PageFlushAll::new();
            &mut active as &mut dyn Flusher<MMArch>
        } else {
            inactive = InactiveFlusher::new();
            &mut inactive as &mut dyn Flusher<MMArch>
        };
        compiler_fence(Ordering::SeqCst);
        // Map the pages and insert the resulting VMA into this address space's VMA list.
        self.mappings.insert_vma(map_func(
            page,
            page_count,
            PageFlags::from_prot_flags(prot_flags, true),
            &mut self.user_mapper.utable,
            flusher,
        )?);

        return Ok(page);
    }

    /// Remove mappings from this process's address space.
    ///
    /// VMAs partially covered by the range are split; the uncovered pieces are
    /// re-inserted and only the intersection is unmapped.
    ///
    /// # Parameters
    ///
    /// - `start_page`: first page frame to unmap
    /// - `page_count`: number of page frames to unmap
    ///
    /// # Errors
    ///
    /// - `EINVAL`: invalid argument
    /// - `ENOMEM`: out of memory
    pub fn munmap(
        &mut self,
        start_page: VirtPageFrame,
        page_count: PageFrameCount,
    ) -> Result<(), SystemError> {
        let to_unmap = VirtRegion::new(start_page.virt_address(), page_count.bytes());
        let mut flusher: PageFlushAll<MMArch> = PageFlushAll::new();

        // Collect first: we mutate `self.mappings` while iterating.
        let regions: Vec<Arc<LockedVMA>> = self.mappings.conflicts(to_unmap).collect::<Vec<_>>();

        for r in regions {
            let r = r.lock().region;
            let r = self.mappings.remove_vma(&r).unwrap();
            let intersection = r.lock().region().intersect(&to_unmap).unwrap();
            let (before, r, after) = r.extract(intersection).unwrap();

            // TODO: once file-backed mappings exist, notify the backing file here.

            if let Some(before) = before {
                // Keep the part of the VMA in front of the unmapped range.
                self.mappings.insert_vma(before);
            }

            if let Some(after) = after {
                // Keep the part of the VMA behind the unmapped range.
                self.mappings.insert_vma(after);
            }

            r.unmap(&mut self.user_mapper.utable, &mut flusher);
        }

        // TODO: once file-backed mappings exist, notify the backing file here.

        return Ok(());
    }

    /// Change the protection flags of the given page range.
    ///
    /// Like `munmap`, partially covered VMAs are split and only the intersection
    /// is re-protected. Returns `EACCES` if a VMA does not permit the requested
    /// flags (the VMA is re-inserted unchanged before returning).
    pub fn mprotect(
        &mut self,
        start_page: VirtPageFrame,
        page_count: PageFrameCount,
        prot_flags: ProtFlags,
    ) -> Result<(), SystemError> {
        // kdebug!(
        //     "mprotect: start_page: {:?}, page_count: {:?}, prot_flags:{prot_flags:?}",
        //     start_page,
        //     page_count
        // );
        let (mut active, mut inactive);
        let mut flusher = if self.is_current() {
            active = PageFlushAll::new();
            &mut active as &mut dyn Flusher<MMArch>
        } else {
            inactive = InactiveFlusher::new();
            &mut inactive as &mut dyn Flusher<MMArch>
        };

        let mapper = &mut self.user_mapper.utable;
        let region = VirtRegion::new(start_page.virt_address(), page_count.bytes());
        // kdebug!("mprotect: region: {:?}", region);

        let regions = self.mappings.conflicts(region).collect::<Vec<_>>();
        // kdebug!("mprotect: regions: {:?}", regions);

        for r in regions {
            // kdebug!("mprotect: r: {:?}", r);
            let r = r.lock().region().clone();
            let r = self.mappings.remove_vma(&r).unwrap();

            let intersection = r.lock().region().intersect(&region).unwrap();
            let (before, r, after) = r.extract(intersection).expect("Failed to extract VMA");

            if let Some(before) = before {
                self.mappings.insert_vma(before);
            }
            if let Some(after) = after {
                self.mappings.insert_vma(after);
            }

            let mut r_guard = r.lock();
            // If the VMA's provider forbids the requested change, undo and bail out.
            if !r_guard.can_have_flags(prot_flags) {
                drop(r_guard);
                self.mappings.insert_vma(r.clone());
                return Err(SystemError::EACCES);
            }

            let new_flags: PageFlags<MMArch> = r_guard
                .flags()
                .set_execute(prot_flags.contains(ProtFlags::PROT_EXEC))
                .set_write(prot_flags.contains(ProtFlags::PROT_WRITE));

            r_guard.remap(new_flags, mapper, &mut flusher)?;
            drop(r_guard);
            self.mappings.insert_vma(r);
        }

        return Ok(());
    }

    /// Create a new user stack.
    ///
    /// ## Parameters
    ///
    /// - `size`: size of the stack
    ///
    /// Panics if a user stack already exists for this address space.
    pub fn new_user_stack(&mut self, size: usize) -> Result<(), SystemError> {
        assert!(self.user_stack.is_none(), "User stack already exists");
        let stack = UserStack::new(self, None, size)?;
        self.user_stack = Some(stack);
        return Ok(());
    }

    #[inline(always)]
    pub fn user_stack_mut(&mut self) -> Option<&mut UserStack> {
        return self.user_stack.as_mut();
    }

    /// Remove every mapping in user space.
    ///
    /// # Safety
    /// The caller must ensure no user code still relies on these mappings.
    pub unsafe fn unmap_all(&mut self) {
        let mut flusher: PageFlushAll<MMArch> = PageFlushAll::new();
        for vma in self.mappings.iter_vmas() {
            vma.unmap(&mut self.user_mapper.utable, &mut flusher);
        }
    }

    /// Set the end of the process heap.
    ///
    /// ## Parameters
    ///
    /// - `new_brk`: new heap end address. Must be page aligned, a user-space
    ///   address, and not below the heap start.
    ///
    /// ## Returns
    ///
    /// The previous heap end address.
    pub unsafe fn set_brk(&mut self, new_brk: VirtAddr) -> Result<VirtAddr, SystemError> {
        assert!(new_brk.check_aligned(MMArch::PAGE_SIZE));

        if !new_brk.check_user() || new_brk < self.brk_start {
            return Err(SystemError::EFAULT);
        }

        let old_brk = self.brk;

        if new_brk > self.brk {
            // Grow: map the new range [old_brk, new_brk) anonymously.
            let len = new_brk - self.brk;
            let prot_flags = ProtFlags::PROT_READ | ProtFlags::PROT_WRITE | ProtFlags::PROT_EXEC;
            let map_flags = MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS | MapFlags::MAP_FIXED;
            self.map_anonymous(old_brk, len, prot_flags, map_flags, true)?;

            self.brk = new_brk;
            return Ok(old_brk);
        } else {
            // Shrink: unmap [new_brk, old_brk).
            let unmap_len = self.brk - new_brk;
            let unmap_start = new_brk;
            if unmap_len == 0 {
                return Ok(old_brk);
            }
            self.munmap(
                VirtPageFrame::new(unmap_start),
                PageFrameCount::from_bytes(unmap_len).unwrap(),
            )?;
            self.brk = new_brk;
            return Ok(old_brk);
        }
    }

    /// Adjust the heap end by `incr` bytes (positive grows, negative shrinks),
    /// rounding the result up to a page boundary, and delegate to `set_brk`.
    pub unsafe fn sbrk(&mut self, incr: isize) -> Result<VirtAddr, SystemError> {
        if incr == 0 {
            return Ok(self.brk);
        }

        let new_brk = if incr > 0 {
            self.brk + incr as usize
        } else {
            self.brk - (incr.abs() as usize)
        };

        let new_brk = VirtAddr::new(page_align_up(new_brk.data()));

        return self.set_brk(new_brk);
    }
}

impl Drop for InnerAddressSpace {
    fn drop(&mut self) {
        // Tear down every mapping before the page tables themselves are freed
        // by `UserMapper::drop`.
        unsafe {
            self.unmap_all();
        }
    }
}

#[derive(Debug, Hash)]
pub struct UserMapper {
    pub utable: PageMapper,
}

impl UserMapper {
    pub fn new(utable: PageMapper) -> Self {
        return Self { utable };
    }
}

impl Drop for UserMapper {
    fn drop(&mut self) {
        if self.utable.is_current() {
            // If the page table being destroyed is the current process's,
            // switch back to the initial kernel page table first.
            unsafe { MMArch::set_table(PageTableKind::User, MMArch::initial_page_table()) }
        }
        // Free the frame occupied by the top-level user page table.
        // Note: the rest of the user page tables must already be fully freed
        // before this frame is released, otherwise memory is leaked.
        unsafe {
            deallocate_page_frames(
                PhysPageFrame::new(self.utable.table().phys()),
                PageFrameCount::new(1),
            )
        };
    }
}

/// User-space mapping bookkeeping.
#[derive(Debug)]
pub struct UserMappings {
    /// Virtual memory areas of this user space
    vmas: HashSet<Arc<LockedVMA>>,
    /// Free holes of this user space: start address -> hole size in bytes
    vm_holes: BTreeMap<VirtAddr, usize>,
}

impl UserMappings {
    /// Start with no VMAs and a single hole covering all of user space.
    pub fn new() -> Self {
        return Self {
            vmas: HashSet::new(),
            vm_holes: core::iter::once((VirtAddr::new(0), MMArch::USER_END_VADDR.data()))
                .collect::<BTreeMap<_, _>>(),
        };
    }

    /// Check whether any VMA of this process contains the given virtual address.
    ///
    /// Returns an `Arc` to the containing VMA, or `None`. Linear scan, locking
    /// each VMA in turn.
    #[allow(dead_code)]
    pub fn contains(&self, vaddr: VirtAddr) -> Option<Arc<LockedVMA>> {
        for v in self.vmas.iter() {
            let guard = v.lock();
            if guard.region.contains(vaddr) {
                return Some(v.clone());
            }
        }
        return None;
    }

    /// Iterator over all VMAs that overlap the given virtual address range.
    pub fn conflicts(&self, request: VirtRegion) -> impl Iterator<Item = Arc<LockedVMA>> + '_ {
        let r = self
            .vmas
            .iter()
            .filter(move |v| !v.lock().region.intersect(&request).is_none())
            .cloned();
        return r;
    }

    /// Find the first free virtual memory range satisfying the request.
    ///
    /// @param min_vaddr lowest acceptable start address
    /// @param size requested size in bytes
    ///
    /// @return the region if one was found, otherwise None
    pub fn find_free(&self, min_vaddr: VirtAddr, size: usize) -> Option<VirtRegion> {
        let _vaddr = min_vaddr;
        // Skip holes that end at or before min_vaddr.
        let mut iter = self
            .vm_holes
            .iter()
            .skip_while(|(hole_vaddr, hole_size)| hole_vaddr.add(**hole_size) <= min_vaddr);

        // NOTE: `size` here deliberately shadows the parameter: inside the closure
        // it is still the requested size, afterwards it is the chosen hole's size.
        let (hole_vaddr, size) = iter.find(|(hole_vaddr, hole_size)| {
            // Usable size of this hole: if min_vaddr falls inside it, only the
            // part at or above min_vaddr counts.
            let available_size: usize =
                if hole_vaddr <= &&min_vaddr && min_vaddr <= hole_vaddr.add(**hole_size) {
                    **hole_size - (min_vaddr - **hole_vaddr)
                } else {
                    **hole_size
                };

            size <= available_size
        })?;

        // Build the resulting region, clamped to start no lower than min_vaddr.
        // NOTE(review): the region is created with the hole's full size, not the
        // requested size — callers appear to rely only on `region.start()`.
        let region = VirtRegion::new(cmp::max(*hole_vaddr, min_vaddr), *size);
        return Some(region);
    }

    /// Resolve a mapping request with an address hint.
    ///
    /// A zero `vaddr` means "no preference" and falls back to `find_free`.
    /// Otherwise the exact region is validated and returned unless it conflicts
    /// with an existing VMA, in which case the behavior depends on `flags`
    /// (MAP_FIXED_NOREPLACE -> EEXIST, MAP_FIXED -> unsupported, else relocate).
    pub fn find_free_at(
        &self,
        min_vaddr: VirtAddr,
        vaddr: VirtAddr,
        size: usize,
        flags: MapFlags,
    ) -> Result<VirtRegion, SystemError> {
        // No address given: just find any free range in this address space.
        if vaddr == VirtAddr::new(0) {
            return self.find_free(min_vaddr, size).ok_or(SystemError::ENOMEM);
        }

        // An address was given: check that it is usable.

        let requested = VirtRegion::new(vaddr, size);

        if requested.end() >= MMArch::USER_END_VADDR || !vaddr.check_aligned(MMArch::PAGE_SIZE) {
            return Err(SystemError::EINVAL);
        }

        if let Some(_x) = self.conflicts(requested).next() {
            if flags.contains(MapFlags::MAP_FIXED_NOREPLACE) {
                // MAP_FIXED_NOREPLACE: the requested address cannot be mapped,
                // so give up without adjusting the address.
                return Err(SystemError::EEXIST);
            }

            if flags.contains(MapFlags::MAP_FIXED) {
                // todo: support MAP_FIXED overriding existing VMAs
                return Err(SystemError::EOPNOTSUPP_OR_ENOTSUP);
            }

            // Without MAP_FIXED, silently relocate the mapping.
            let requested = self.find_free(min_vaddr, size).ok_or(SystemError::ENOMEM)?;
            return Ok(requested);
        }

        return Ok(requested);
    }

    /// Reserve a region so that it is no longer part of any hole.
    /// Mutates the hole bookkeeping in `vm_holes`.
    ///
    /// @param region the region to reserve
    ///
    /// Note: before calling this, the caller must ensure no VMA exists in `region`.
    fn reserve_hole(&mut self, region: &VirtRegion) {
        let prev_hole: Option<(&VirtAddr, &mut usize)> =
            self.vm_holes.range_mut(..=region.start()).next_back();

        if let Some((prev_hole_vaddr, prev_hole_size)) = prev_hole {
            let prev_hole_end = prev_hole_vaddr.add(*prev_hole_size);

            if prev_hole_end > region.start() {
                // The previous hole extends past the region start: shrink it
                // so it ends exactly at the region start.
                *prev_hole_size = region.start().data() - prev_hole_vaddr.data();
            }

            if prev_hole_end > region.end() {
                // The previous hole also extends past the region end: the tail
                // becomes a new hole starting at the region end.
                self.vm_holes
                    .insert(region.end(), prev_hole_end - region.end());
            }
        }
    }

    /// Release a region, turning it back into a hole.
    /// Mutates the hole bookkeeping in `vm_holes`, merging with adjacent holes.
    fn unreserve_hole(&mut self, region: &VirtRegion) {
        // If the hole to insert is adjacent to the following hole, merge them.
        let next_hole_size: Option<usize> = self.vm_holes.remove(&region.end());

        if let Some((_prev_hole_vaddr, prev_hole_size)) = self
            .vm_holes
            .range_mut(..region.start())
            .next_back()
            .filter(|(offset, size)| offset.data() + **size == region.start().data())
        {
            // The preceding hole ends exactly where the region starts: extend it.
            *prev_hole_size += region.size() + next_hole_size.unwrap_or(0);
        } else {
            self.vm_holes
                .insert(region.start(), region.size() + next_hole_size.unwrap_or(0));
        }
    }

    /// Insert a new VMA into this process's mapping set.
    ///
    /// The VMA's range must be entirely free: no existing VMA may overlap it.
    pub fn insert_vma(&mut self, vma: Arc<LockedVMA>) {
        let region = vma.lock().region.clone();
        // The inserted range must be free, i.e. no overlapping VMA may exist.
        assert!(self.conflicts(region).next().is_none());
        self.reserve_hole(&region);

        self.vmas.insert(vma);
    }

    /// @brief Remove a VMA and return its address range to the hole set.
    ///
    /// This does NOT unmap the VMA's pages.
    ///
    /// @param region the exact address range of the VMA to remove
    ///
    /// @return the removed VMA on success, otherwise None
    ///         (if no matching VMA exists, nothing is removed)
    pub fn remove_vma(&mut self, region: &VirtRegion) -> Option<Arc<LockedVMA>> {
        // Note: this locks every VMA during the scan, so it is slow.
        let vma: Arc<LockedVMA> = self
            .vmas
            .drain_filter(|vma| vma.lock().region == *region)
            .next()?;
        self.unreserve_hole(region);

        return Some(vma);
    }

    /// @brief Get the iterator of all VMAs in this process.
    pub fn iter_vmas(&self) -> hashbrown::hash_set::Iter<Arc<LockedVMA>> {
        return self.vmas.iter();
    }
}

impl Default for UserMappings {
    fn default() -> Self {
        return Self::new();
    }
}

/// A VMA protected by a lock.
///
/// Note: benchmark SpinLock vs RwLock here at some point.
#[derive(Debug)]
pub struct LockedVMA(SpinLock<VMA>);

impl core::hash::Hash for LockedVMA {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Delegates to the inner VMA's hash (region, flags, mapped).
        self.0.lock().hash(state);
    }
}

impl PartialEq for LockedVMA {
    fn eq(&self, other: &Self) -> bool {
        // NOTE(review): locks both sides; comparing a LockedVMA with itself
        // would self-deadlock on the SpinLock — confirm callers never do that.
        self.0.lock().eq(&other.0.lock())
    }
}

impl Eq for LockedVMA {}

#[allow(dead_code)]
impl LockedVMA {
    /// Wrap a VMA and wire up its `self_ref` back-pointer to the new Arc.
    pub fn new(vma: VMA) -> Arc<Self> {
        let r = Arc::new(Self(SpinLock::new(vma)));
        r.0.lock().self_ref = Arc::downgrade(&r);
        return r;
    }

    pub fn lock(&self) -> SpinLockGuard<VMA> {
        return self.0.lock();
    }

    /// Change the page flags of every page in this VMA.
    ///
    /// TODO: also support changing the physical mapping of the pages.
    ///
    /// @param flags the new page flags
    /// @param mapper the page-table mapper
    /// @param flusher the TLB flusher
    ///
    pub fn remap(
        &self,
        flags: PageFlags<MMArch>,
        mapper: &mut PageMapper,
        mut flusher: impl Flusher<MMArch>,
    ) -> Result<(), SystemError> {
        let mut guard = self.lock();
        assert!(guard.mapped);
        for page in guard.region.pages() {
            // For now every page frame must already be mapped in the page table.
            // TODO: with lazy mapping via page faults this requirement goes away.
            let r = unsafe {
                mapper
                    .remap(page.virt_address(), flags)
                    .expect("Failed to remap, beacuse of some page is not mapped")
            };
            flusher.consume(r);
        }
        guard.flags = flags;
        return Ok(());
    }

    /// Unmap every page of this VMA and free the backing physical frames.
    /// Marks the VMA as unmapped afterwards.
    pub fn unmap(&self, mapper: &mut PageMapper, mut flusher: impl Flusher<MMArch>) {
        // todo: if this VMA is file-backed, add the file-related logic.

        let mut guard = self.lock();
        assert!(guard.mapped);
        for page in guard.region.pages() {
            let (paddr, _, flush) = unsafe { mapper.unmap_phys(page.virt_address(), true) }
                .expect("Failed to unmap, beacuse of some page is not mapped");

            // todo: take the physical page's anon_vma guard

            // todo: remove this VMA from the anon_vma

            // todo: free the physical page once its anon_vma list becomes empty.

            // Shared pages are not implemented yet, so freeing the physical page
            // directly is currently safe. Once shared pages exist, the page may
            // only be freed when its anon_vma list length reaches zero.
            unsafe { deallocate_page_frames(PhysPageFrame::new(paddr), PageFrameCount::new(1)) };

            flusher.consume(flush);
        }
        guard.mapped = false;
    }

    pub fn mapped(&self) -> bool {
        return self.0.lock().mapped;
    }

    /// Split this VMA into up to three VMAs:
    ///
    /// 1. the part before `region`, or None
    /// 2. the middle part, i.e. the given region (this VMA, shrunk in place)
    /// 3. the part after `region`, or None
    pub fn extract(
        &self,
        region: VirtRegion,
    ) -> Option<(
        Option<Arc<LockedVMA>>,
        Arc<LockedVMA>,
        Option<Arc<LockedVMA>>,
    )> {
        assert!(region.start().check_aligned(MMArch::PAGE_SIZE));
        assert!(region.end().check_aligned(MMArch::PAGE_SIZE));

        let mut guard = self.lock();
        {
            // If `region` is not fully inside this VMA, return None.
            if unlikely(region.start() < guard.region.start() || region.end() > guard.region.end())
            {
                return None;
            }

            let intersect: Option<VirtRegion> = guard.region.intersect(&region);
            // If this VMA does not overlap `region` at all, return None.
            if unlikely(intersect.is_none()) {
                return None;
            }
            let intersect: VirtRegion = intersect.unwrap();
            if unlikely(intersect == guard.region) {
                // `region` covers this VMA exactly: no split needed.
                return Some((None, guard.self_ref.upgrade().unwrap(), None));
            }
        }

        let before: Option<Arc<LockedVMA>> = guard.region.before(&region).map(|virt_region| {
            // SAFETY-relevant: VMA::clone duplicates bookkeeping only; both
            // clones initially claim the same mapped pages until regions are set.
            let mut vma: VMA = unsafe { guard.clone() };
            vma.region = virt_region;

            let vma: Arc<LockedVMA> = LockedVMA::new(vma);
            vma
        });

        let after: Option<Arc<LockedVMA>> = guard.region.after(&region).map(|virt_region| {
            let mut vma: VMA = unsafe { guard.clone() };
            vma.region = virt_region;

            let vma: Arc<LockedVMA> = LockedVMA::new(vma);
            vma
        });

        guard.region = region;

        // TODO: re-point the anon_vma of the physical pages now owned by `before`/`after`.

        return Some((before, guard.self_ref.upgrade().unwrap(), after));
    }
}

/// @brief Virtual memory area.
#[derive(Debug)]
pub struct VMA {
    /// Virtual address range covered by this VMA
    region: VirtRegion,
    /// Page flags of the frames in this VMA
    flags: PageFlags<MMArch>,
    /// Whether this VMA's frames are currently mapped in the page table
    mapped: bool,
    /// The user address space this VMA belongs to
    user_address_space: Option<Weak<AddressSpace>>,
    // Weak back-pointer to the LockedVMA wrapping this VMA (set by LockedVMA::new)
    self_ref: Weak<LockedVMA>,

    provider: Provider,
}

impl core::hash::Hash for VMA {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.region.hash(state);
        self.flags.hash(state);
        self.mapped.hash(state);
    }
}

/// Describes the kind of provider/resource backing a VMA.
#[derive(Debug)]
pub enum Provider {
    Allocated, // TODO: other kinds
}

#[allow(dead_code)]
impl VMA {
    pub fn region(&self) -> &VirtRegion {
        return &self.region;
    }

    /// # Duplicate this VMA's bookkeeping
    ///
    /// ### Safety
    ///
    /// An incorrect copy can cause memory leaks or double frees (both clones
    /// reference the same mapped pages), so use with care.
    pub unsafe fn clone(&self) -> Self {
        return Self {
            region: self.region,
            flags: self.flags,
            mapped: self.mapped,
            user_address_space: self.user_address_space.clone(),
            self_ref: self.self_ref.clone(),
            provider: Provider::Allocated,
        };
    }

    #[inline(always)]
    pub fn flags(&self) -> PageFlags<MMArch> {
        return self.flags;
    }

    /// Iterator over the virtual page frames of this VMA.
    pub fn pages(&self) -> VirtPageFrameIter {
        return VirtPageFrameIter::new(
            VirtPageFrame::new(self.region.start()),
            VirtPageFrame::new(self.region.end()),
        );
    }

    /// Change the page flags of every page in this VMA (lock-free variant of
    /// `LockedVMA::remap`, for use when the caller already holds the guard).
    pub fn remap(
        &mut self,
        flags: PageFlags<MMArch>,
        mapper: &mut PageMapper,
        mut flusher: impl Flusher<MMArch>,
    ) -> Result<(), SystemError> {
        assert!(self.mapped);
        for page in self.region.pages() {
            // kdebug!("remap page {:?}", page.virt_address());
            // For now every page frame must already be mapped in the page table.
            // TODO: with lazy mapping via page faults this requirement goes away.
            let r = unsafe {
                mapper
                    .remap(page.virt_address(), flags)
                    .expect("Failed to remap, beacuse of some page is not mapped")
            };
            // kdebug!("consume page {:?}", page.virt_address());
            flusher.consume(r);
            // kdebug!("remap page {:?} done", page.virt_address());
        }
        self.flags = flags;
        return Ok(());
    }

    /// Check whether this VMA may be given the specified protection flags.
    ///
    /// ## Parameters
    ///
    /// - `prot_flags`: the flags to check
    pub fn can_have_flags(&self, prot_flags: ProtFlags) -> bool {
        // "Downgrade" = the request does not add write/execute rights the VMA lacks.
        let is_downgrade = (self.flags.has_write() || !prot_flags.contains(ProtFlags::PROT_WRITE))
            && (self.flags.has_execute() || !prot_flags.contains(ProtFlags::PROT_EXEC));

        match self.provider {
            // Anonymous allocations accept any protection change.
            Provider::Allocated { .. } => true,

            // Future providers only accept downgrades.
            #[allow(unreachable_patterns)]
            _ => is_downgrade,
        }
    }

    /// Map a range of physical addresses to virtual addresses and build a VMA.
    ///
    /// @param phys first physical page frame to map
    /// @param destination first virtual page frame to map to
    /// @param count number of page frames to map
    /// @param flags page flags
    /// @param mapper the page-table mapper
    /// @param flusher the TLB flusher
    ///
    /// @return the resulting virtual memory area
    pub fn physmap(
        phys: PhysPageFrame,
        destination: VirtPageFrame,
        count: PageFrameCount,
        flags: PageFlags<MMArch>,
        mapper: &mut PageMapper,
        mut flusher: impl Flusher<MMArch>,
    ) -> Result<Arc<LockedVMA>, SystemError> {
        {
            let mut cur_phy = phys;
            let mut cur_dest = destination;

            for _ in 0..count.data() {
                // Map the physical frame to the virtual frame.
                let r = unsafe {
                    mapper.map_phys(cur_dest.virt_address(), cur_phy.phys_address(), flags)
                }
                .expect("Failed to map phys, may be OOM error");

                // todo: add OOM handling

                // todo: add this VMA to the anon_vma

                // Flush the TLB entry.
                flusher.consume(r);

                cur_phy = cur_phy.next();
                cur_dest = cur_dest.next();
            }
        }

        let r: Arc<LockedVMA> = LockedVMA::new(VMA {
            region: VirtRegion::new(destination.virt_address(), count.data() * MMArch::PAGE_SIZE),
            flags,
            mapped: true,
            user_address_space: None,
            self_ref: Weak::default(),
            provider: Provider::Allocated,
        });
        return Ok(r);
    }

    /// Allocate physical pages from the frame allocator, map them to the given
    /// virtual address, zero them, and create a VMA for the range.
    ///
    /// @param destination first virtual page frame to map to
    /// @param page_count number of page frames to map
    /// @param flags page flags
    /// @param mapper the page-table mapper
    /// @param flusher the TLB flusher
    ///
    /// @return the resulting virtual memory area
    pub fn zeroed(
        destination: VirtPageFrame,
        page_count: PageFrameCount,
        flags: PageFlags<MMArch>,
        mapper: &mut PageMapper,
        mut flusher: impl Flusher<MMArch>,
    ) -> Result<Arc<LockedVMA>, SystemError> {
        let mut cur_dest: VirtPageFrame = destination;
        // kdebug!(
        //     "VMA::zeroed: page_count = {:?}, destination={destination:?}",
        //     page_count
        // );
        for _ in 0..page_count.data() {
            // kdebug!(
            //     "VMA::zeroed: cur_dest={cur_dest:?}, vaddr = {:?}",
            //     cur_dest.virt_address()
            // );
            let r = unsafe { mapper.map(cur_dest.virt_address(), flags) }
                .expect("Failed to map zero, may be OOM error");
            // todo: add this VMA to the anon_vma
            // todo: add OOM handling

            flusher.consume(r);
            cur_dest = cur_dest.next();
        }
        let r = LockedVMA::new(VMA {
            region: VirtRegion::new(
                destination.virt_address(),
                page_count.data() * MMArch::PAGE_SIZE,
            ),
            flags,
            mapped: true,
            user_address_space: None,
            self_ref: Weak::default(),
            provider: Provider::Allocated,
        });
        // Flushing happens when the flusher is dropped here, before the pages
        // are zeroed through the direct mapping below.
        drop(flusher);
        // kdebug!("VMA::zeroed: flusher dropped");

        // Zero the freshly mapped memory.
        let virt_iter: VirtPageFrameIter =
            VirtPageFrameIter::new(destination, destination.add(page_count));
        for frame in virt_iter {
            let paddr = mapper.translate(frame.virt_address()).unwrap().0;

            unsafe {
                let vaddr = MMArch::phys_2_virt(paddr).unwrap();
                MMArch::write_bytes(vaddr, 0, MMArch::PAGE_SIZE);
            }
        }
        // kdebug!("VMA::zeroed: done");
        return Ok(r);
    }
}

impl Drop for VMA {
    fn drop(&mut self) {
        // A VMA must be unmapped from the page table before it is dropped.
        assert!(!self.mapped, "VMA is still mapped");
    }
}

impl PartialEq for VMA {
    fn eq(&self, other: &Self) -> bool {
        // Equality is by region only; flags/mapped state are ignored.
        return self.region == other.region;
    }
}

impl Eq for VMA {}

impl PartialOrd for VMA {
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        return self.region.partial_cmp(&other.region);
    }
}

impl Ord for VMA {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        return self.region.cmp(&other.region);
    }
}

#[derive(Debug)]
pub struct UserStack {
    // Bottom address of the stack region (includes the guard pages)
    stack_bottom: VirtAddr,
    // Currently mapped size in bytes (guard pages included)
    mapped_size: usize,
    /// Stack-pointer address. Determine this value carefully: it may not stay
    /// in sync with the real user stack top at all times!
    current_sp: VirtAddr,
}

impl UserStack {
    /// Default user stack bottom address
    pub const DEFAULT_USER_STACK_BOTTOM: VirtAddr = MMArch::USER_STACK_START;
    /// Default user stack size: 8 MiB
    pub const DEFAULT_USER_STACK_SIZE: usize = 8 * 1024 * 1024;
    /// Number of guard pages protecting the user stack
    pub const GUARD_PAGES_NUM: usize = 4;

    /// Create a user stack.
    ///
    /// Maps read-only guard pages at `stack_bottom - guard_size`, then maps the
    /// initial `stack_size` bytes of stack below them via `initial_extend`.
    pub fn new(
        vm: &mut InnerAddressSpace,
        stack_bottom: Option<VirtAddr>,
        stack_size: usize,
    ) -> Result<Self, SystemError> {
        let stack_bottom = stack_bottom.unwrap_or(Self::DEFAULT_USER_STACK_BOTTOM);
        assert!(stack_bottom.check_aligned(MMArch::PAGE_SIZE));

        // Allocate the stack's guard pages.
        let guard_size = Self::GUARD_PAGES_NUM * MMArch::PAGE_SIZE;
        let actual_stack_bottom = stack_bottom - guard_size;

        let mut prot_flags = ProtFlags::PROT_READ | ProtFlags::PROT_WRITE;
        let map_flags =
            MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS | MapFlags::MAP_FIXED_NOREPLACE;
        // kdebug!(
        //     "map anonymous stack: {:?} {}",
        //     actual_stack_bottom,
        //     guard_size
        // );
        vm.map_anonymous(
            actual_stack_bottom,
            guard_size,
            prot_flags,
            map_flags,
            false,
        )?;
        // test_buddy();
        // Make the guard pages read-only.
        prot_flags.remove(ProtFlags::PROT_WRITE);
        // kdebug!(
        //     "to mprotect stack guard pages: {:?} {}",
        //     actual_stack_bottom,
        //     guard_size
        // );
        vm.mprotect(
            VirtPageFrame::new(actual_stack_bottom),
            PageFrameCount::new(Self::GUARD_PAGES_NUM),
            prot_flags,
        )?;

        // kdebug!(
        //     "mprotect stack guard pages done: {:?} {}",
        //     actual_stack_bottom,
        //     guard_size
        // );

        // NOTE(review): current_sp starts at actual_stack_bottom - guard_size,
        // i.e. one guard_size below the guard region — confirm this matches how
        // the ELF loader sets the initial stack pointer.
        let mut user_stack = UserStack {
            stack_bottom: actual_stack_bottom,
            mapped_size: guard_size,
            current_sp: actual_stack_bottom - guard_size,
        };

        // kdebug!("extend user stack: {:?} {}", stack_bottom, stack_size);
        // Allocate the user stack itself.
        user_stack.initial_extend(vm, stack_size)?;
        // kdebug!("user stack created: {:?} {}", stack_bottom, stack_size);
        return Ok(user_stack);
    }

    /// Map the initial stack pages directly below the already-mapped region.
    fn initial_extend(
        &mut self,
        vm: &mut InnerAddressSpace,
        mut bytes: usize,
    ) -> Result<(), SystemError> {
        let prot_flags = ProtFlags::PROT_READ | ProtFlags::PROT_WRITE | ProtFlags::PROT_EXEC;
        let map_flags = MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS;

        bytes = page_align_up(bytes);
        self.mapped_size += bytes;

        // The stack grows downward: new pages go below everything mapped so far.
        vm.map_anonymous(
            self.stack_bottom - self.mapped_size,
            bytes,
            prot_flags,
            map_flags,
            false,
        )?;

        return Ok(());
    }

    /// Extend the user stack.
    ///
    /// ## Parameters
    ///
    /// - `vm`: write guard of the user address space
    /// - `bytes`: number of bytes to extend by
    ///
    /// ## Returns
    ///
    /// - **Ok(())** on success
    /// - **Err(SystemError)** on failure
    #[allow(dead_code)]
    pub fn extend(
        &mut self,
        vm: &mut RwLockWriteGuard<InnerAddressSpace>,
        mut bytes: usize,
    ) -> Result<(), SystemError> {
        let prot_flags = ProtFlags::PROT_READ | ProtFlags::PROT_WRITE | ProtFlags::PROT_EXEC;
        let map_flags = MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS;

        bytes = page_align_up(bytes);
        self.mapped_size += bytes;

        vm.map_anonymous(
            self.stack_bottom - self.mapped_size,
            bytes,
            prot_flags,
            map_flags,
            false,
        )?;

        return Ok(());
    }

    /// Get the stack top address.
    ///
    /// Note: if the real user stack top changes, this value may not be updated
    /// in real time!
1327 pub fn sp(&self) -> VirtAddr { 1328 return self.current_sp; 1329 } 1330 1331 pub unsafe fn set_sp(&mut self, sp: VirtAddr) { 1332 self.current_sp = sp; 1333 } 1334 1335 /// 仅仅克隆用户栈的信息,不会克隆用户栈的内容/映射 1336 pub unsafe fn clone_info_only(&self) -> Self { 1337 return Self { 1338 stack_bottom: self.stack_bottom, 1339 mapped_size: self.mapped_size, 1340 current_sp: self.current_sp, 1341 }; 1342 } 1343 1344 /// 获取当前用户栈的大小(不包括保护页) 1345 pub fn stack_size(&self) -> usize { 1346 return self.mapped_size - Self::GUARD_PAGES_NUM * MMArch::PAGE_SIZE; 1347 } 1348 } 1349