1 // 进程的用户空间内存管理 2 3 use core::{ 4 cmp, 5 hash::Hasher, 6 intrinsics::unlikely, 7 ops::Add, 8 sync::atomic::{compiler_fence, Ordering}, 9 }; 10 11 use alloc::{ 12 collections::BTreeMap, 13 sync::{Arc, Weak}, 14 vec::Vec, 15 }; 16 use hashbrown::HashSet; 17 use ida::IdAllocator; 18 use system_error::SystemError; 19 20 use crate::{ 21 arch::{mm::PageMapper, CurrentIrqArch, MMArch}, 22 exception::InterruptArch, 23 filesystem::vfs::file::File, 24 libs::{ 25 align::page_align_up, 26 rwlock::RwLock, 27 spinlock::{SpinLock, SpinLockGuard}, 28 }, 29 mm::page::page_manager_lock_irqsave, 30 process::ProcessManager, 31 syscall::user_access::{UserBufferReader, UserBufferWriter}, 32 }; 33 34 use super::{ 35 allocator::page_frame::{ 36 deallocate_page_frames, PageFrameCount, PhysPageFrame, VirtPageFrame, VirtPageFrameIter, 37 }, 38 page::{EntryFlags, Flusher, InactiveFlusher, Page, PageFlushAll}, 39 syscall::{MadvFlags, MapFlags, MremapFlags, ProtFlags}, 40 MemoryManagementArch, PageTableKind, VirtAddr, VirtRegion, VmFlags, 41 }; 42 43 /// MMAP_MIN_ADDR的默认值 44 /// 以下内容来自linux-5.19: 45 /// This is the portion of low virtual memory which should be protected 46 // from userspace allocation. Keeping a user from writing to low pages 47 // can help reduce the impact of kernel NULL pointer bugs. 48 // For most ia64, ppc64 and x86 users with lots of address space 49 // a value of 65536 is reasonable and should cause no problems. 50 // On arm and other archs it should not be higher than 32768. 51 // Programs which use vm86 functionality or have some need to map 52 // this low address space will need CAP_SYS_RAWIO or disable this 53 // protection by setting the value to 0. 
54 pub const DEFAULT_MMAP_MIN_ADDR: usize = 65536; 55 56 /// LockedVMA的id分配器 57 static LOCKEDVMA_ID_ALLOCATOR: IdAllocator = IdAllocator::new(0, usize::MAX); 58 59 #[derive(Debug)] 60 pub struct AddressSpace { 61 inner: RwLock<InnerAddressSpace>, 62 } 63 64 impl AddressSpace { 65 pub fn new(create_stack: bool) -> Result<Arc<Self>, SystemError> { 66 let inner = InnerAddressSpace::new(create_stack)?; 67 let result = Self { 68 inner: RwLock::new(inner), 69 }; 70 return Ok(Arc::new(result)); 71 } 72 73 /// 从pcb中获取当前进程的地址空间结构体的Arc指针 74 pub fn current() -> Result<Arc<AddressSpace>, SystemError> { 75 let vm = ProcessManager::current_pcb() 76 .basic() 77 .user_vm() 78 .expect("Current process has no address space"); 79 80 return Ok(vm); 81 } 82 83 /// 判断某个地址空间是否为当前进程的地址空间 84 pub fn is_current(self: &Arc<Self>) -> bool { 85 let current = Self::current(); 86 if let Ok(current) = current { 87 return Arc::ptr_eq(¤t, self); 88 } 89 return false; 90 } 91 } 92 93 impl core::ops::Deref for AddressSpace { 94 type Target = RwLock<InnerAddressSpace>; 95 96 fn deref(&self) -> &Self::Target { 97 &self.inner 98 } 99 } 100 101 impl core::ops::DerefMut for AddressSpace { 102 fn deref_mut(&mut self) -> &mut Self::Target { 103 &mut self.inner 104 } 105 } 106 107 /// @brief 用户地址空间结构体(每个进程都有一个) 108 #[derive(Debug)] 109 pub struct InnerAddressSpace { 110 pub user_mapper: UserMapper, 111 pub mappings: UserMappings, 112 pub mmap_min: VirtAddr, 113 /// 用户栈信息结构体 114 pub user_stack: Option<UserStack>, 115 116 pub elf_brk_start: VirtAddr, 117 pub elf_brk: VirtAddr, 118 119 /// 当前进程的堆空间的起始地址 120 pub brk_start: VirtAddr, 121 /// 当前进程的堆空间的结束地址(不包含) 122 pub brk: VirtAddr, 123 124 pub start_code: VirtAddr, 125 pub end_code: VirtAddr, 126 pub start_data: VirtAddr, 127 pub end_data: VirtAddr, 128 } 129 130 impl InnerAddressSpace { 131 pub fn new(create_stack: bool) -> Result<Self, SystemError> { 132 let mut result = Self { 133 user_mapper: MMArch::setup_new_usermapper()?, 134 mappings: UserMappings::new(), 
135 mmap_min: VirtAddr(DEFAULT_MMAP_MIN_ADDR), 136 elf_brk_start: VirtAddr::new(0), 137 elf_brk: VirtAddr::new(0), 138 brk_start: MMArch::USER_BRK_START, 139 brk: MMArch::USER_BRK_START, 140 user_stack: None, 141 start_code: VirtAddr(0), 142 end_code: VirtAddr(0), 143 start_data: VirtAddr(0), 144 end_data: VirtAddr(0), 145 }; 146 if create_stack { 147 // debug!("to create user stack."); 148 result.new_user_stack(UserStack::DEFAULT_USER_STACK_SIZE)?; 149 } 150 151 return Ok(result); 152 } 153 154 /// 尝试克隆当前进程的地址空间,包括这些映射都会被克隆 155 /// 156 /// # Returns 157 /// 158 /// 返回克隆后的,新的地址空间的Arc指针 159 #[inline(never)] 160 pub fn try_clone(&mut self) -> Result<Arc<AddressSpace>, SystemError> { 161 let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() }; 162 let new_addr_space = AddressSpace::new(false)?; 163 let mut new_guard = new_addr_space.write(); 164 unsafe { 165 new_guard 166 .user_mapper 167 .clone_from(&mut self.user_mapper, MMArch::PAGE_FAULT_ENABLED) 168 }; 169 170 // 拷贝用户栈的结构体信息,但是不拷贝用户栈的内容(因为后面VMA的拷贝会拷贝用户栈的内容) 171 unsafe { 172 new_guard.user_stack = Some(self.user_stack.as_ref().unwrap().clone_info_only()); 173 } 174 let _current_stack_size = self.user_stack.as_ref().unwrap().stack_size(); 175 176 // 拷贝空洞 177 new_guard.mappings.vm_holes = self.mappings.vm_holes.clone(); 178 179 for vma in self.mappings.vmas.iter() { 180 // TODO: 增加对VMA是否为文件映射的判断,如果是的话,就跳过 181 182 let vma_guard: SpinLockGuard<'_, VMA> = vma.lock_irqsave(); 183 184 // 仅拷贝VMA信息并添加反向映射,因为UserMapper克隆时已经分配了新的物理页 185 let new_vma = LockedVMA::new(vma_guard.clone_info_only()); 186 new_guard.mappings.vmas.insert(new_vma.clone()); 187 // debug!("new vma: {:x?}", new_vma); 188 let new_vma_guard = new_vma.lock_irqsave(); 189 let new_mapper = &new_guard.user_mapper.utable; 190 let mut page_manager_guard = page_manager_lock_irqsave(); 191 for page in new_vma_guard.pages().map(|p| p.virt_address()) { 192 if let Some((paddr, _)) = new_mapper.translate(page) { 193 let page = 
page_manager_guard.get_unwrap(&paddr); 194 page.write_irqsave().insert_vma(new_vma.clone()); 195 } 196 } 197 198 drop(page_manager_guard); 199 drop(vma_guard); 200 drop(new_vma_guard); 201 } 202 drop(new_guard); 203 drop(irq_guard); 204 return Ok(new_addr_space); 205 } 206 207 /// 拓展用户栈 208 /// ## 参数 209 /// 210 /// - `bytes`: 拓展大小 211 #[allow(dead_code)] 212 pub fn extend_stack(&mut self, mut bytes: usize) -> Result<(), SystemError> { 213 // debug!("extend user stack"); 214 let prot_flags = ProtFlags::PROT_READ | ProtFlags::PROT_WRITE | ProtFlags::PROT_EXEC; 215 let map_flags = MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS | MapFlags::MAP_GROWSDOWN; 216 let stack = self.user_stack.as_mut().unwrap(); 217 218 bytes = page_align_up(bytes); 219 stack.mapped_size += bytes; 220 let len = stack.stack_bottom - stack.mapped_size; 221 self.map_anonymous(len, bytes, prot_flags, map_flags, false, false)?; 222 return Ok(()); 223 } 224 225 /// 判断当前的地址空间是否是当前进程的地址空间 226 #[inline] 227 pub fn is_current(&self) -> bool { 228 return self.user_mapper.utable.is_current(); 229 } 230 231 /// 进行匿名页映射 232 /// 233 /// ## 参数 234 /// 235 /// - `start_vaddr`:映射的起始地址 236 /// - `len`:映射的长度 237 /// - `prot_flags`:保护标志 238 /// - `map_flags`:映射标志 239 /// - `round_to_min`:是否将`start_vaddr`对齐到`mmap_min`,如果为`true`,则当`start_vaddr`不为0时,会对齐到`mmap_min`,否则仅向下对齐到页边界 240 /// - `allocate_at_once`:是否立即分配物理空间 241 /// 242 /// ## 返回 243 /// 244 /// 返回映射的起始虚拟页帧 245 pub fn map_anonymous( 246 &mut self, 247 start_vaddr: VirtAddr, 248 len: usize, 249 prot_flags: ProtFlags, 250 map_flags: MapFlags, 251 round_to_min: bool, 252 allocate_at_once: bool, 253 ) -> Result<VirtPageFrame, SystemError> { 254 let allocate_at_once = if MMArch::PAGE_FAULT_ENABLED { 255 allocate_at_once 256 } else { 257 true 258 }; 259 // 用于对齐hint的函数 260 let round_hint_to_min = |hint: VirtAddr| { 261 // 先把hint向下对齐到页边界 262 let addr = hint.data() & (!MMArch::PAGE_OFFSET_MASK); 263 // debug!("map_anonymous: hint = {:?}, addr = {addr:#x}", hint); 264 
// 如果hint不是0,且hint小于DEFAULT_MMAP_MIN_ADDR,则对齐到DEFAULT_MMAP_MIN_ADDR 265 if (addr != 0) && round_to_min && (addr < DEFAULT_MMAP_MIN_ADDR) { 266 Some(VirtAddr::new(page_align_up(DEFAULT_MMAP_MIN_ADDR))) 267 } else if addr == 0 { 268 None 269 } else { 270 Some(VirtAddr::new(addr)) 271 } 272 }; 273 // debug!("map_anonymous: start_vaddr = {:?}", start_vaddr); 274 // debug!("map_anonymous: len(no align) = {}", len); 275 276 let len = page_align_up(len); 277 278 let vm_flags = VmFlags::from(prot_flags) 279 | VmFlags::from(map_flags) 280 | VmFlags::VM_MAYREAD 281 | VmFlags::VM_MAYWRITE 282 | VmFlags::VM_MAYEXEC; 283 284 // debug!("map_anonymous: len = {}", len); 285 286 let start_page: VirtPageFrame = self.mmap( 287 round_hint_to_min(start_vaddr), 288 PageFrameCount::from_bytes(len).unwrap(), 289 prot_flags, 290 map_flags, 291 move |page, count, flags, mapper, flusher| { 292 if allocate_at_once { 293 VMA::zeroed(page, count, vm_flags, flags, mapper, flusher, None, None) 294 } else { 295 Ok(LockedVMA::new(VMA::new( 296 VirtRegion::new(page.virt_address(), count.data() * MMArch::PAGE_SIZE), 297 vm_flags, 298 flags, 299 None, 300 None, 301 false, 302 ))) 303 } 304 }, 305 )?; 306 307 return Ok(start_page); 308 } 309 310 /// 进行文件页映射 311 /// 312 /// ## 参数 313 /// 314 /// - `start_vaddr`:映射的起始地址 315 /// - `len`:映射的长度 316 /// - `prot_flags`:保护标志 317 /// - `map_flags`:映射标志 318 /// - `fd`:文件描述符 319 /// - `offset`:映射偏移量 320 /// - `round_to_min`:是否将`start_vaddr`对齐到`mmap_min`,如果为`true`,则当`start_vaddr`不为0时,会对齐到`mmap_min`,否则仅向下对齐到页边界 321 /// - `allocate_at_once`:是否立即分配物理空间 322 /// 323 /// ## 返回 324 /// 325 /// 返回映射的起始虚拟页帧 326 #[allow(clippy::too_many_arguments)] 327 pub fn file_mapping( 328 &mut self, 329 start_vaddr: VirtAddr, 330 len: usize, 331 prot_flags: ProtFlags, 332 map_flags: MapFlags, 333 fd: i32, 334 offset: usize, 335 round_to_min: bool, 336 allocate_at_once: bool, 337 ) -> Result<VirtPageFrame, SystemError> { 338 let allocate_at_once = if MMArch::PAGE_FAULT_ENABLED { 339 
allocate_at_once 340 } else { 341 true 342 }; 343 // 用于对齐hint的函数 344 let round_hint_to_min = |hint: VirtAddr| { 345 // 先把hint向下对齐到页边界 346 let addr = hint.data() & (!MMArch::PAGE_OFFSET_MASK); 347 // debug!("map_anonymous: hint = {:?}, addr = {addr:#x}", hint); 348 // 如果hint不是0,且hint小于DEFAULT_MMAP_MIN_ADDR,则对齐到DEFAULT_MMAP_MIN_ADDR 349 if (addr != 0) && round_to_min && (addr < DEFAULT_MMAP_MIN_ADDR) { 350 Some(VirtAddr::new(page_align_up(DEFAULT_MMAP_MIN_ADDR))) 351 } else if addr == 0 { 352 None 353 } else { 354 Some(VirtAddr::new(addr)) 355 } 356 }; 357 // debug!("map_anonymous: start_vaddr = {:?}", start_vaddr); 358 // debug!("map_anonymous: len(no align) = {}", len); 359 360 let len = page_align_up(len); 361 362 let vm_flags = VmFlags::from(prot_flags) 363 | VmFlags::from(map_flags) 364 | VmFlags::VM_MAYREAD 365 | VmFlags::VM_MAYWRITE 366 | VmFlags::VM_MAYEXEC; 367 368 // debug!("map_anonymous: len = {}", len); 369 370 let binding = ProcessManager::current_pcb().fd_table(); 371 let fd_table_guard = binding.read(); 372 373 let file = fd_table_guard.get_file_by_fd(fd); 374 if file.is_none() { 375 return Err(SystemError::EBADF); 376 } 377 // drop guard 以避免无法调度的问题 378 drop(fd_table_guard); 379 380 // offset需要4K对齐 381 if !offset & (MMArch::PAGE_SIZE - 1) == 0 { 382 return Err(SystemError::EINVAL); 383 } 384 let pgoff = offset >> MMArch::PAGE_SHIFT; 385 386 let start_page: VirtPageFrame = self.mmap( 387 round_hint_to_min(start_vaddr), 388 PageFrameCount::from_bytes(len).unwrap(), 389 prot_flags, 390 map_flags, 391 move |page, count, flags, mapper, flusher| { 392 if allocate_at_once { 393 VMA::zeroed( 394 page, 395 count, 396 vm_flags, 397 flags, 398 mapper, 399 flusher, 400 file, 401 Some(pgoff), 402 ) 403 } else { 404 Ok(LockedVMA::new(VMA::new( 405 VirtRegion::new(page.virt_address(), count.data() * MMArch::PAGE_SIZE), 406 vm_flags, 407 flags, 408 file, 409 Some(pgoff), 410 false, 411 ))) 412 } 413 }, 414 )?; 415 return Ok(start_page); 416 } 417 418 /// 向进程的地址空间映射页面 
419 /// 420 /// # 参数 421 /// 422 /// - `addr`:映射的起始地址,如果为`None`,则由内核自动分配 423 /// - `page_count`:映射的页面数量 424 /// - `prot_flags`:保护标志 425 /// - `map_flags`:映射标志 426 /// - `map_func`:映射函数,用于创建VMA 427 /// 428 /// # Returns 429 /// 430 /// 返回映射的起始虚拟页帧 431 /// 432 /// # Errors 433 /// 434 /// - `EINVAL`:参数错误 435 pub fn mmap< 436 F: FnOnce( 437 VirtPageFrame, 438 PageFrameCount, 439 EntryFlags<MMArch>, 440 &mut PageMapper, 441 &mut dyn Flusher<MMArch>, 442 ) -> Result<Arc<LockedVMA>, SystemError>, 443 >( 444 &mut self, 445 addr: Option<VirtAddr>, 446 page_count: PageFrameCount, 447 prot_flags: ProtFlags, 448 map_flags: MapFlags, 449 map_func: F, 450 ) -> Result<VirtPageFrame, SystemError> { 451 if page_count == PageFrameCount::new(0) { 452 return Err(SystemError::EINVAL); 453 } 454 // debug!("mmap: addr: {addr:?}, page_count: {page_count:?}, prot_flags: {prot_flags:?}, map_flags: {map_flags:?}"); 455 456 // 找到未使用的区域 457 let region = match addr { 458 Some(vaddr) => { 459 self.mappings 460 .find_free_at(self.mmap_min, vaddr, page_count.bytes(), map_flags)? 
461 } 462 None => self 463 .mappings 464 .find_free(self.mmap_min, page_count.bytes()) 465 .ok_or(SystemError::ENOMEM)?, 466 }; 467 468 let page = VirtPageFrame::new(region.start()); 469 470 // debug!("mmap: page: {:?}, region={region:?}", page.virt_address()); 471 472 compiler_fence(Ordering::SeqCst); 473 let (mut active, mut inactive); 474 let flusher = if self.is_current() { 475 active = PageFlushAll::new(); 476 &mut active as &mut dyn Flusher<MMArch> 477 } else { 478 inactive = InactiveFlusher::new(); 479 &mut inactive as &mut dyn Flusher<MMArch> 480 }; 481 compiler_fence(Ordering::SeqCst); 482 // 映射页面,并将VMA插入到地址空间的VMA列表中 483 self.mappings.insert_vma(map_func( 484 page, 485 page_count, 486 EntryFlags::from_prot_flags(prot_flags, true), 487 &mut self.user_mapper.utable, 488 flusher, 489 )?); 490 491 return Ok(page); 492 } 493 494 /// 重映射内存区域 495 /// 496 /// # 参数 497 /// 498 /// - `old_vaddr`:原映射的起始地址 499 /// - `old_len`:原映射的长度 500 /// - `new_len`:重新映射的长度 501 /// - `mremap_flags`:重映射标志 502 /// - `new_vaddr`:重新映射的起始地址 503 /// - `vm_flags`:旧内存区域标志 504 /// 505 /// # Returns 506 /// 507 /// 返回重映射的起始虚拟页帧地址 508 /// 509 /// # Errors 510 /// 511 /// - `EINVAL`:参数错误 512 pub fn mremap( 513 &mut self, 514 old_vaddr: VirtAddr, 515 old_len: usize, 516 new_len: usize, 517 mremap_flags: MremapFlags, 518 new_vaddr: VirtAddr, 519 vm_flags: VmFlags, 520 ) -> Result<VirtAddr, SystemError> { 521 // 检查新内存地址是否对齐 522 if !new_vaddr.check_aligned(MMArch::PAGE_SIZE) { 523 return Err(SystemError::EINVAL); 524 } 525 526 // 检查新、旧内存区域是否冲突 527 let old_region = VirtRegion::new(old_vaddr, old_len); 528 let new_region = VirtRegion::new(new_vaddr, new_len); 529 if old_region.collide(&new_region) { 530 return Err(SystemError::EINVAL); 531 } 532 533 // 初始化映射标志 534 let mut map_flags: MapFlags = vm_flags.into(); 535 // 初始化内存区域保护标志 536 let prot_flags: ProtFlags = vm_flags.into(); 537 538 // 取消新内存区域的原映射 539 if mremap_flags.contains(MremapFlags::MREMAP_FIXED) { 540 map_flags |= MapFlags::MAP_FIXED; 541 
let start_page = VirtPageFrame::new(new_vaddr); 542 let page_count = PageFrameCount::from_bytes(new_len).unwrap(); 543 self.munmap(start_page, page_count)?; 544 } 545 546 // 获取映射后的新内存页面 547 let new_page = self.map_anonymous(new_vaddr, new_len, prot_flags, map_flags, true, true)?; 548 let new_page_vaddr = new_page.virt_address(); 549 550 // 拷贝旧内存区域内容到新内存区域 551 let old_buffer_reader = 552 UserBufferReader::new(old_vaddr.data() as *const u8, old_len, true)?; 553 let old_buf: &[u8] = old_buffer_reader.read_from_user(0)?; 554 let mut new_buffer_writer = 555 UserBufferWriter::new(new_page_vaddr.data() as *mut u8, new_len, true)?; 556 let new_buf: &mut [u8] = new_buffer_writer.buffer(0)?; 557 let len = old_buf.len().min(new_buf.len()); 558 new_buf[..len].copy_from_slice(&old_buf[..len]); 559 560 return Ok(new_page_vaddr); 561 } 562 563 /// 取消进程的地址空间中的映射 564 /// 565 /// # 参数 566 /// 567 /// - `start_page`:起始页帧 568 /// - `page_count`:取消映射的页帧数量 569 /// 570 /// # Errors 571 /// 572 /// - `EINVAL`:参数错误 573 /// - `ENOMEM`:内存不足 574 pub fn munmap( 575 &mut self, 576 start_page: VirtPageFrame, 577 page_count: PageFrameCount, 578 ) -> Result<(), SystemError> { 579 let to_unmap = VirtRegion::new(start_page.virt_address(), page_count.bytes()); 580 let mut flusher: PageFlushAll<MMArch> = PageFlushAll::new(); 581 582 let regions: Vec<Arc<LockedVMA>> = self.mappings.conflicts(to_unmap).collect::<Vec<_>>(); 583 584 for r in regions { 585 let r = r.lock_irqsave().region; 586 let r = self.mappings.remove_vma(&r).unwrap(); 587 let intersection = r.lock_irqsave().region().intersect(&to_unmap).unwrap(); 588 let split_result = r.extract(intersection, &self.user_mapper.utable).unwrap(); 589 590 // TODO: 当引入后备页映射后,这里需要增加通知文件的逻辑 591 592 if let Some(before) = split_result.prev { 593 // 如果前面有VMA,则需要将前面的VMA重新插入到地址空间的VMA列表中 594 self.mappings.insert_vma(before); 595 } 596 597 if let Some(after) = split_result.after { 598 // 如果后面有VMA,则需要将后面的VMA重新插入到地址空间的VMA列表中 599 self.mappings.insert_vma(after); 600 } 
601 602 r.unmap(&mut self.user_mapper.utable, &mut flusher); 603 } 604 605 // TODO: 当引入后备页映射后,这里需要增加通知文件的逻辑 606 607 return Ok(()); 608 } 609 610 pub fn mprotect( 611 &mut self, 612 start_page: VirtPageFrame, 613 page_count: PageFrameCount, 614 prot_flags: ProtFlags, 615 ) -> Result<(), SystemError> { 616 // debug!( 617 // "mprotect: start_page: {:?}, page_count: {:?}, prot_flags:{prot_flags:?}", 618 // start_page, 619 // page_count 620 // ); 621 let (mut active, mut inactive); 622 let flusher = if self.is_current() { 623 active = PageFlushAll::new(); 624 &mut active as &mut dyn Flusher<MMArch> 625 } else { 626 inactive = InactiveFlusher::new(); 627 &mut inactive as &mut dyn Flusher<MMArch> 628 }; 629 630 let mapper = &mut self.user_mapper.utable; 631 let region = VirtRegion::new(start_page.virt_address(), page_count.bytes()); 632 // debug!("mprotect: region: {:?}", region); 633 634 let regions = self.mappings.conflicts(region).collect::<Vec<_>>(); 635 // debug!("mprotect: regions: {:?}", regions); 636 637 for r in regions { 638 // debug!("mprotect: r: {:?}", r); 639 let r = *r.lock_irqsave().region(); 640 let r = self.mappings.remove_vma(&r).unwrap(); 641 642 let intersection = r.lock_irqsave().region().intersect(®ion).unwrap(); 643 let split_result = r 644 .extract(intersection, mapper) 645 .expect("Failed to extract VMA"); 646 647 if let Some(before) = split_result.prev { 648 self.mappings.insert_vma(before); 649 } 650 if let Some(after) = split_result.after { 651 self.mappings.insert_vma(after); 652 } 653 654 let mut r_guard = r.lock_irqsave(); 655 // 如果VMA的保护标志不允许指定的修改,则返回错误 656 if !r_guard.can_have_flags(prot_flags) { 657 drop(r_guard); 658 self.mappings.insert_vma(r.clone()); 659 return Err(SystemError::EACCES); 660 } 661 r_guard.set_vm_flags(VmFlags::from(prot_flags)); 662 663 let new_flags: EntryFlags<MMArch> = r_guard 664 .flags() 665 .set_execute(prot_flags.contains(ProtFlags::PROT_EXEC)) 666 .set_write(prot_flags.contains(ProtFlags::PROT_WRITE)); 667 668 
r_guard.remap(new_flags, mapper, &mut *flusher)?; 669 drop(r_guard); 670 self.mappings.insert_vma(r); 671 } 672 673 return Ok(()); 674 } 675 676 pub fn madvise( 677 &mut self, 678 start_page: VirtPageFrame, 679 page_count: PageFrameCount, 680 behavior: MadvFlags, 681 ) -> Result<(), SystemError> { 682 let (mut active, mut inactive); 683 let flusher = if self.is_current() { 684 active = PageFlushAll::new(); 685 &mut active as &mut dyn Flusher<MMArch> 686 } else { 687 inactive = InactiveFlusher::new(); 688 &mut inactive as &mut dyn Flusher<MMArch> 689 }; 690 691 let mapper = &mut self.user_mapper.utable; 692 693 let region = VirtRegion::new(start_page.virt_address(), page_count.bytes()); 694 let regions = self.mappings.conflicts(region).collect::<Vec<_>>(); 695 696 for r in regions { 697 let r = *r.lock_irqsave().region(); 698 let r = self.mappings.remove_vma(&r).unwrap(); 699 700 let intersection = r.lock_irqsave().region().intersect(®ion).unwrap(); 701 let split_result = r 702 .extract(intersection, mapper) 703 .expect("Failed to extract VMA"); 704 705 if let Some(before) = split_result.prev { 706 self.mappings.insert_vma(before); 707 } 708 if let Some(after) = split_result.after { 709 self.mappings.insert_vma(after); 710 } 711 r.do_madvise(behavior, mapper, &mut *flusher)?; 712 self.mappings.insert_vma(r); 713 } 714 Ok(()) 715 } 716 717 /// 创建新的用户栈 718 /// 719 /// ## 参数 720 /// 721 /// - `size`:栈的大小 722 pub fn new_user_stack(&mut self, size: usize) -> Result<(), SystemError> { 723 assert!(self.user_stack.is_none(), "User stack already exists"); 724 let stack = UserStack::new(self, None, size)?; 725 self.user_stack = Some(stack); 726 return Ok(()); 727 } 728 729 #[inline(always)] 730 pub fn user_stack_mut(&mut self) -> Option<&mut UserStack> { 731 return self.user_stack.as_mut(); 732 } 733 734 /// 取消用户空间内的所有映射 735 pub unsafe fn unmap_all(&mut self) { 736 let mut flusher: PageFlushAll<MMArch> = PageFlushAll::new(); 737 for vma in self.mappings.iter_vmas() { 738 if 
vma.mapped() { 739 vma.unmap(&mut self.user_mapper.utable, &mut flusher); 740 } 741 } 742 } 743 744 /// 设置进程的堆的内存空间 745 /// 746 /// ## 参数 747 /// 748 /// - `new_brk`:新的堆的结束地址。需要满足页对齐要求,并且是用户空间地址,且大于等于当前的堆的起始地址 749 /// 750 /// ## 返回值 751 /// 752 /// 返回旧的堆的结束地址 753 pub unsafe fn set_brk(&mut self, new_brk: VirtAddr) -> Result<VirtAddr, SystemError> { 754 assert!(new_brk.check_aligned(MMArch::PAGE_SIZE)); 755 756 if !new_brk.check_user() || new_brk < self.brk_start { 757 return Err(SystemError::EFAULT); 758 } 759 760 let old_brk = self.brk; 761 762 if new_brk > self.brk { 763 let len = new_brk - self.brk; 764 let prot_flags = ProtFlags::PROT_READ | ProtFlags::PROT_WRITE | ProtFlags::PROT_EXEC; 765 let map_flags = MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS | MapFlags::MAP_FIXED; 766 self.map_anonymous(old_brk, len, prot_flags, map_flags, true, false)?; 767 768 self.brk = new_brk; 769 return Ok(old_brk); 770 } else { 771 let unmap_len = self.brk - new_brk; 772 let unmap_start = new_brk; 773 if unmap_len == 0 { 774 return Ok(old_brk); 775 } 776 self.munmap( 777 VirtPageFrame::new(unmap_start), 778 PageFrameCount::from_bytes(unmap_len).unwrap(), 779 )?; 780 self.brk = new_brk; 781 return Ok(old_brk); 782 } 783 } 784 785 pub unsafe fn sbrk(&mut self, incr: isize) -> Result<VirtAddr, SystemError> { 786 if incr == 0 { 787 return Ok(self.brk); 788 } 789 790 let new_brk = if incr > 0 { 791 self.brk + incr as usize 792 } else { 793 self.brk - incr.unsigned_abs() 794 }; 795 796 let new_brk = VirtAddr::new(page_align_up(new_brk.data())); 797 798 return self.set_brk(new_brk); 799 } 800 } 801 802 impl Drop for InnerAddressSpace { 803 fn drop(&mut self) { 804 unsafe { 805 self.unmap_all(); 806 } 807 } 808 } 809 810 #[derive(Debug, Hash)] 811 pub struct UserMapper { 812 pub utable: PageMapper, 813 } 814 815 impl UserMapper { 816 pub fn new(utable: PageMapper) -> Self { 817 return Self { utable }; 818 } 819 820 /// 拷贝用户空间映射 821 /// ## 参数 822 /// 823 /// - `umapper`: 要拷贝的用户空间 824 
/// - `copy_on_write`: 是否写时复制 825 pub unsafe fn clone_from(&mut self, umapper: &mut Self, copy_on_write: bool) { 826 self.utable 827 .clone_user_mapping(&mut umapper.utable, copy_on_write); 828 } 829 } 830 831 impl Drop for UserMapper { 832 fn drop(&mut self) { 833 if self.utable.is_current() { 834 // 如果当前要被销毁的用户空间的页表是当前进程的页表,那么就切换回初始内核页表 835 unsafe { MMArch::set_table(PageTableKind::User, MMArch::initial_page_table()) } 836 } 837 // 释放用户空间顶层页表占用的页帧 838 // 请注意,在释放这个页帧之前,用户页表应该已经被完全释放,否则会产生内存泄露 839 unsafe { 840 deallocate_page_frames( 841 PhysPageFrame::new(self.utable.table().phys()), 842 PageFrameCount::new(1), 843 &mut page_manager_lock_irqsave(), 844 ) 845 }; 846 } 847 } 848 849 /// 用户空间映射信息 850 #[derive(Debug)] 851 pub struct UserMappings { 852 /// 当前用户空间的虚拟内存区域 853 vmas: HashSet<Arc<LockedVMA>>, 854 /// 当前用户空间的VMA空洞 855 vm_holes: BTreeMap<VirtAddr, usize>, 856 } 857 858 impl UserMappings { 859 pub fn new() -> Self { 860 return Self { 861 vmas: HashSet::new(), 862 vm_holes: core::iter::once((VirtAddr::new(0), MMArch::USER_END_VADDR.data())) 863 .collect::<BTreeMap<_, _>>(), 864 }; 865 } 866 867 /// 判断当前进程的VMA内,是否有包含指定的虚拟地址的VMA。 868 /// 869 /// 如果有,返回包含指定虚拟地址的VMA的Arc指针,否则返回None。 870 #[allow(dead_code)] 871 pub fn contains(&self, vaddr: VirtAddr) -> Option<Arc<LockedVMA>> { 872 for v in self.vmas.iter() { 873 let guard = v.lock_irqsave(); 874 if guard.region.contains(vaddr) { 875 return Some(v.clone()); 876 } 877 } 878 return None; 879 } 880 881 /// 向下寻找距离虚拟地址最近的VMA 882 /// ## 参数 883 /// 884 /// - `vaddr`: 虚拟地址 885 /// 886 /// ## 返回值 887 /// - Some(Arc<LockedVMA>): 虚拟地址所在的或最近的下一个VMA 888 /// - None: 未找到VMA 889 #[allow(dead_code)] 890 pub fn find_nearest(&self, vaddr: VirtAddr) -> Option<Arc<LockedVMA>> { 891 let mut nearest: Option<Arc<LockedVMA>> = None; 892 for v in self.vmas.iter() { 893 let guard = v.lock_irqsave(); 894 if guard.region.contains(vaddr) { 895 return Some(v.clone()); 896 } 897 if guard.region.start >= vaddr 898 && if let Some(ref nearest) = 
nearest { 899 guard.region.start < nearest.lock_irqsave().region.start 900 } else { 901 true 902 } 903 { 904 nearest = Some(v.clone()); 905 } 906 } 907 return nearest; 908 } 909 910 /// 获取当前进程的地址空间中,与给定虚拟地址范围有重叠的VMA的迭代器。 911 pub fn conflicts(&self, request: VirtRegion) -> impl Iterator<Item = Arc<LockedVMA>> + '_ { 912 let r = self 913 .vmas 914 .iter() 915 .filter(move |v| v.lock_irqsave().region.intersect(&request).is_some()) 916 .cloned(); 917 return r; 918 } 919 920 /// 在当前进程的地址空间中,寻找第一个符合条件的空闲的虚拟内存范围。 921 /// 922 /// @param min_vaddr 最小的起始地址 923 /// @param size 请求的大小 924 /// 925 /// @return 如果找到了,返回虚拟内存范围,否则返回None 926 pub fn find_free(&self, min_vaddr: VirtAddr, size: usize) -> Option<VirtRegion> { 927 let _vaddr = min_vaddr; 928 let mut iter = self 929 .vm_holes 930 .iter() 931 .skip_while(|(hole_vaddr, hole_size)| hole_vaddr.add(**hole_size) <= min_vaddr); 932 933 let (hole_vaddr, size) = iter.find(|(hole_vaddr, hole_size)| { 934 // 计算当前空洞的可用大小 935 let available_size: usize = 936 if hole_vaddr <= &&min_vaddr && min_vaddr <= hole_vaddr.add(**hole_size) { 937 **hole_size - (min_vaddr - **hole_vaddr) 938 } else { 939 **hole_size 940 }; 941 942 size <= available_size 943 })?; 944 945 // 创建一个新的虚拟内存范围。 946 let region = VirtRegion::new(cmp::max(*hole_vaddr, min_vaddr), *size); 947 948 return Some(region); 949 } 950 951 pub fn find_free_at( 952 &self, 953 min_vaddr: VirtAddr, 954 vaddr: VirtAddr, 955 size: usize, 956 flags: MapFlags, 957 ) -> Result<VirtRegion, SystemError> { 958 // 如果没有指定地址,那么就在当前进程的地址空间中寻找一个空闲的虚拟内存范围。 959 if vaddr == VirtAddr::new(0) { 960 return self.find_free(min_vaddr, size).ok_or(SystemError::ENOMEM); 961 } 962 963 // 如果指定了地址,那么就检查指定的地址是否可用。 964 965 let requested = VirtRegion::new(vaddr, size); 966 967 if requested.end() >= MMArch::USER_END_VADDR || !vaddr.check_aligned(MMArch::PAGE_SIZE) { 968 return Err(SystemError::EINVAL); 969 } 970 971 if let Some(_x) = self.conflicts(requested).next() { 972 if 
flags.contains(MapFlags::MAP_FIXED_NOREPLACE) { 973 // 如果指定了 MAP_FIXED_NOREPLACE 标志,由于所指定的地址无法成功建立映射,则放弃映射,不对地址做修正 974 return Err(SystemError::EEXIST); 975 } 976 977 if flags.contains(MapFlags::MAP_FIXED) { 978 // todo: 支持MAP_FIXED标志对已有的VMA进行覆盖 979 return Err(SystemError::ENOSYS); 980 } 981 982 // 如果没有指定MAP_FIXED标志,那么就对地址做修正 983 let requested = self.find_free(min_vaddr, size).ok_or(SystemError::ENOMEM)?; 984 return Ok(requested); 985 } 986 987 return Ok(requested); 988 } 989 990 /// 在当前进程的地址空间中,保留一个指定大小的区域,使得该区域不在空洞中。 991 /// 该函数会修改vm_holes中的空洞信息。 992 /// 993 /// @param region 要保留的区域 994 /// 995 /// 请注意,在调用本函数之前,必须先确定region所在范围内没有VMA。 996 fn reserve_hole(&mut self, region: &VirtRegion) { 997 let prev_hole: Option<(&VirtAddr, &mut usize)> = 998 self.vm_holes.range_mut(..=region.start()).next_back(); 999 1000 if let Some((prev_hole_vaddr, prev_hole_size)) = prev_hole { 1001 let prev_hole_end = prev_hole_vaddr.add(*prev_hole_size); 1002 1003 if prev_hole_end > region.start() { 1004 // 如果前一个空洞的结束地址大于当前空洞的起始地址,那么就需要调整前一个空洞的大小。 1005 *prev_hole_size = region.start().data() - prev_hole_vaddr.data(); 1006 } 1007 1008 if prev_hole_end > region.end() { 1009 // 如果前一个空洞的结束地址大于当前空洞的结束地址,那么就需要增加一个新的空洞。 1010 self.vm_holes 1011 .insert(region.end(), prev_hole_end - region.end()); 1012 } 1013 } 1014 } 1015 1016 /// 在当前进程的地址空间中,释放一个指定大小的区域,使得该区域成为一个空洞。 1017 /// 该函数会修改vm_holes中的空洞信息。 1018 fn unreserve_hole(&mut self, region: &VirtRegion) { 1019 // 如果将要插入的空洞与后一个空洞相邻,那么就需要合并。 1020 let next_hole_size: Option<usize> = self.vm_holes.remove(®ion.end()); 1021 1022 if let Some((_prev_hole_vaddr, prev_hole_size)) = self 1023 .vm_holes 1024 .range_mut(..region.start()) 1025 .next_back() 1026 .filter(|(offset, size)| offset.data() + **size == region.start().data()) 1027 { 1028 *prev_hole_size += region.size() + next_hole_size.unwrap_or(0); 1029 } else { 1030 self.vm_holes 1031 .insert(region.start(), region.size() + next_hole_size.unwrap_or(0)); 1032 } 1033 } 1034 1035 /// 在当前进程的映射关系中,插入一个新的VMA。 
1036 pub fn insert_vma(&mut self, vma: Arc<LockedVMA>) { 1037 let region = vma.lock_irqsave().region; 1038 // 要求插入的地址范围必须是空闲的,也就是说,当前进程的地址空间中,不能有任何与之重叠的VMA。 1039 assert!(self.conflicts(region).next().is_none()); 1040 self.reserve_hole(®ion); 1041 1042 self.vmas.insert(vma); 1043 } 1044 1045 /// @brief 删除一个VMA,并把对应的地址空间加入空洞中。 1046 /// 1047 /// 这里不会取消VMA对应的地址的映射 1048 /// 1049 /// @param region 要删除的VMA所在的地址范围 1050 /// 1051 /// @return 如果成功删除了VMA,则返回被删除的VMA,否则返回None 1052 /// 如果没有可以删除的VMA,则不会执行删除操作,并报告失败。 1053 pub fn remove_vma(&mut self, region: &VirtRegion) -> Option<Arc<LockedVMA>> { 1054 // 请注意,由于这里会对每个VMA加锁,因此性能很低 1055 let vma: Arc<LockedVMA> = self 1056 .vmas 1057 .drain_filter(|vma| vma.lock_irqsave().region == *region) 1058 .next()?; 1059 self.unreserve_hole(region); 1060 1061 return Some(vma); 1062 } 1063 1064 /// @brief Get the iterator of all VMAs in this process. 1065 pub fn iter_vmas(&self) -> hashbrown::hash_set::Iter<Arc<LockedVMA>> { 1066 return self.vmas.iter(); 1067 } 1068 } 1069 1070 impl Default for UserMappings { 1071 fn default() -> Self { 1072 return Self::new(); 1073 } 1074 } 1075 1076 /// 加了锁的VMA 1077 /// 1078 /// 备注:进行性能测试,看看SpinLock和RwLock哪个更快。 1079 #[derive(Debug)] 1080 pub struct LockedVMA { 1081 /// 用于计算哈希值,避免总是获取vma锁来计算哈希值 1082 id: usize, 1083 vma: SpinLock<VMA>, 1084 } 1085 1086 impl core::hash::Hash for LockedVMA { 1087 fn hash<H: Hasher>(&self, state: &mut H) { 1088 self.id.hash(state); 1089 } 1090 } 1091 1092 impl PartialEq for LockedVMA { 1093 fn eq(&self, other: &Self) -> bool { 1094 self.id.eq(&other.id) 1095 } 1096 } 1097 1098 impl Eq for LockedVMA {} 1099 1100 #[allow(dead_code)] 1101 impl LockedVMA { 1102 pub fn new(vma: VMA) -> Arc<Self> { 1103 let r = Arc::new(Self { 1104 id: LOCKEDVMA_ID_ALLOCATOR.alloc().unwrap(), 1105 vma: SpinLock::new(vma), 1106 }); 1107 r.vma.lock_irqsave().self_ref = Arc::downgrade(&r); 1108 return r; 1109 } 1110 1111 pub fn id(&self) -> usize { 1112 self.id 1113 } 1114 1115 pub fn lock(&self) -> 
SpinLockGuard<VMA> { 1116 return self.vma.lock(); 1117 } 1118 1119 pub fn lock_irqsave(&self) -> SpinLockGuard<VMA> { 1120 return self.vma.lock_irqsave(); 1121 } 1122 1123 /// 调整当前VMA的页面的标志位 1124 /// 1125 /// TODO:增加调整虚拟页映射的物理地址的功能 1126 /// 1127 /// @param flags 新的标志位 1128 /// @param mapper 页表映射器 1129 /// @param flusher 页表项刷新器 1130 /// 1131 pub fn remap( 1132 &self, 1133 flags: EntryFlags<MMArch>, 1134 mapper: &mut PageMapper, 1135 mut flusher: impl Flusher<MMArch>, 1136 ) -> Result<(), SystemError> { 1137 let mut guard = self.lock_irqsave(); 1138 for page in guard.region.pages() { 1139 // 暂时要求所有的页帧都已经映射到页表 1140 // TODO: 引入Lazy Mapping, 通过缺页中断来映射页帧,这里就不必要求所有的页帧都已经映射到页表了 1141 let r = unsafe { 1142 mapper 1143 .remap(page.virt_address(), flags) 1144 .expect("Failed to remap, beacuse of some page is not mapped") 1145 }; 1146 flusher.consume(r); 1147 } 1148 guard.flags = flags; 1149 return Ok(()); 1150 } 1151 1152 pub fn unmap(&self, mapper: &mut PageMapper, mut flusher: impl Flusher<MMArch>) { 1153 // todo: 如果当前vma与文件相关,完善文件相关的逻辑 1154 1155 let mut guard = self.lock_irqsave(); 1156 1157 // 获取物理页的anon_vma的守卫 1158 let mut page_manager_guard: SpinLockGuard<'_, crate::mm::page::PageManager> = 1159 page_manager_lock_irqsave(); 1160 for page in guard.region.pages() { 1161 if mapper.translate(page.virt_address()).is_none() { 1162 continue; 1163 } 1164 let (paddr, _, flush) = unsafe { mapper.unmap_phys(page.virt_address(), true) } 1165 .expect("Failed to unmap, beacuse of some page is not mapped"); 1166 1167 // 从anon_vma中删除当前VMA 1168 let page = page_manager_guard.get_unwrap(&paddr); 1169 page.write_irqsave().remove_vma(self); 1170 1171 // 如果物理页的anon_vma链表长度为0并且不是共享页,则释放物理页. 
if page.read_irqsave().can_deallocate() {
                unsafe {
                    drop(page);
                    deallocate_page_frames(
                        PhysPageFrame::new(paddr),
                        PageFrameCount::new(1),
                        &mut page_manager_guard,
                    )
                };
            }

            flusher.consume(flush);
        }
        guard.mapped = false;

        // When this VMA is a shared, writable file mapping, wake the dirty-page
        // writeback thread so the pages that were just unmapped get flushed.
        if guard.vm_file().is_some()
            && guard
                .vm_flags()
                .contains(VmFlags::VM_SHARED | VmFlags::VM_WRITE)
        {
            crate::mm::page::PageReclaimer::wakeup_claim_thread();
        }
    }

    /// Returns whether this VMA's pages are currently mapped in a page table.
    pub fn mapped(&self) -> bool {
        return self.vma.lock_irqsave().mapped;
    }

    /// Split this VMA into (up to) three VMAs:
    ///
    /// 1. the part before `region` (`None` if empty)
    /// 2. the middle part, i.e. the requested `region` itself
    /// 3. the part after `region` (`None` if empty)
    ///
    /// Returns `None` when `region` is not fully contained in this VMA.
    /// `utable` is used to re-home the anon_vma back-pointers of the pages
    /// that fall into the `before`/`after` parts.
    pub fn extract(&self, region: VirtRegion, utable: &PageMapper) -> Option<VMASplitResult> {
        assert!(region.start().check_aligned(MMArch::PAGE_SIZE));
        assert!(region.end().check_aligned(MMArch::PAGE_SIZE));

        let mut guard = self.lock_irqsave();
        {
            // If the requested region is not inside this VMA, return None.
            if unlikely(region.start() < guard.region.start() || region.end() > guard.region.end())
            {
                return None;
            }

            let intersect: Option<VirtRegion> = guard.region.intersect(&region);
            // If this VMA does not overlap the region at all, return None.
            if unlikely(intersect.is_none()) {
                return None;
            }
            let intersect: VirtRegion = intersect.unwrap();
            if unlikely(intersect == guard.region) {
                // The region covers this VMA exactly: no split is needed,
                // return this VMA itself as the middle part.
                return Some(VMASplitResult::new(
                    None,
                    guard.self_ref.upgrade().unwrap(),
                    None,
                ));
            }
        }

        let before: Option<Arc<LockedVMA>> = guard.region.before(&region).map(|virt_region| {
            let mut vma: VMA = unsafe { guard.clone() };
            vma.region = virt_region;
            vma.mapped = false;
            let vma: Arc<LockedVMA> = LockedVMA::new(vma);
            vma
        });

        let after: Option<Arc<LockedVMA>> = guard.region.after(&region).map(|virt_region| {
            let mut vma: VMA = unsafe { guard.clone() };
            vma.region = virt_region;
            vma.mapped = false;
            let vma: Arc<LockedVMA> = LockedVMA::new(vma);
            vma
        });

        // Re-point the anon_vma of the physical pages that now belong to the
        // `before`/`after` VMAs, and detach those pages from `self`.
        let mut page_manager_guard = page_manager_lock_irqsave();
        if let Some(before) = before.clone() {
            let virt_iter = before.lock_irqsave().region.iter_pages();
            for frame in virt_iter {
                if let Some((paddr, _)) = utable.translate(frame.virt_address()) {
                    let page = page_manager_guard.get_unwrap(&paddr);
                    let mut page_guard = page.write_irqsave();
                    page_guard.insert_vma(before.clone());
                    page_guard.remove_vma(self);
                    before.lock_irqsave().mapped = true;
                }
            }
        }

        if let Some(after) = after.clone() {
            let virt_iter = after.lock_irqsave().region.iter_pages();
            for frame in virt_iter {
                if let Some((paddr, _)) = utable.translate(frame.virt_address()) {
                    let page = page_manager_guard.get_unwrap(&paddr);
                    let mut page_guard = page.write_irqsave();
                    page_guard.insert_vma(after.clone());
                    page_guard.remove_vma(self);
                    after.lock_irqsave().mapped = true;
                }
            }
        }

        guard.region = region;

        return Some(VMASplitResult::new(
            before,
            guard.self_ref.upgrade().unwrap(),
            after,
        ));
    }

    /// Returns whether this VMA belongs to a foreign (non-current) address space.
    ///
    /// NOTE(review): when the owning address space is alive this returns
    /// `AddressSpace::is_current(&space)`, i.e. `true` for the *current*
    /// space — which contradicts the "foreign" name and doc. This looks
    /// inverted (expected `!is_current`); confirm against the callers.
    /// Behavior is intentionally left unchanged here.
    pub fn is_foreign(&self) -> bool {
        let guard = self.lock_irqsave();
        if let Some(space) = guard.user_address_space.clone() {
            if let Some(space) = space.upgrade() {
                return AddressSpace::is_current(&space);
            } else {
                return true;
            }
        } else {
            return true;
        }
    }

    /// Returns whether this VMA is accessible at all (readable, writable or executable).
    pub fn is_accessible(&self) -> bool {
        let guard = self.lock_irqsave();
        let vm_access_flags: VmFlags = VmFlags::VM_READ | VmFlags::VM_WRITE | VmFlags::VM_EXEC;
        guard.vm_flags().intersects(vm_access_flags)
    }

    /// Returns whether this VMA is an anonymous mapping (no backing file).
    pub fn is_anonymous(&self) -> bool {
        let guard = self.lock_irqsave();
        guard.vm_file.is_none()
    }

    /// Returns whether this VMA is a huge-page mapping.
    pub fn is_hugepage(&self) -> bool {
        // TODO: implement huge-page detection; huge pages are not supported yet.
        false
    }
}

impl Drop for LockedVMA {
    fn drop(&mut self) {
        // Return this VMA's id to the global allocator.
        LOCKEDVMA_ID_ALLOCATOR.free(self.id);
    }
}

/// Result of splitting a VMA.
#[allow(dead_code)]
pub struct VMASplitResult {
    pub prev: Option<Arc<LockedVMA>>,
    pub middle: Arc<LockedVMA>,
    pub after: Option<Arc<LockedVMA>>,
}

impl VMASplitResult {
    pub fn new(
        prev: Option<Arc<LockedVMA>>,
        middle: Arc<LockedVMA>,
        post: Option<Arc<LockedVMA>>,
    ) -> Self {
        Self {
            prev,
            middle,
            after: post,
        }
    }
}

/// A virtual memory area (VMA).
#[derive(Debug)]
pub struct VMA {
    /// Virtual address range covered by this VMA
    region: VirtRegion,
    /// VMA flags
    vm_flags: VmFlags,
    /// Page-table entry flags of the frames inside this VMA
    flags: EntryFlags<MMArch>,
    /// Whether the frames of this VMA are mapped in the page table
    mapped: bool,
    /// The user address space this VMA belongs to
    user_address_space: Option<Weak<AddressSpace>>,
    self_ref: Weak<LockedVMA>,

    vm_file: Option<Arc<File>>,
    /// Offset (in pages) of the mapped file portion relative to the file start
    file_pgoff: Option<usize>,

    provider: Provider,
}

impl core::hash::Hash for VMA {
    // Note: only `region`, `flags` and `mapped` are hashed; `vm_flags`
    // and the backing file do not participate.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.region.hash(state);
        self.flags.hash(state);
        self.mapped.hash(state);
    }
}

/// Describes the provider/source of the memory backing a VMA.
#[derive(Debug)]
pub enum Provider {
    Allocated, // TODO: other providers
}

#[allow(dead_code)]
impl VMA {
    /// Creates a new VMA over `region` with the given flags and optional
    /// backing `file`/`pgoff`; `mapped` records whether its pages are
    /// already present in the page table.
    pub fn new(
        region: VirtRegion,
        vm_flags: VmFlags,
        flags: EntryFlags<MMArch>,
        file: Option<Arc<File>>,
        pgoff: Option<usize>,
        mapped: bool,
    ) -> Self {
        VMA {
            region,
            vm_flags,
            flags,
            mapped,
            user_address_space: None,
            self_ref: Weak::default(),
            provider: Provider::Allocated,
vm_file: file,
            file_pgoff: pgoff,
        }
    }

    /// Returns the virtual address range of this VMA.
    pub fn region(&self) -> &VirtRegion {
        return &self.region;
    }

    /// Returns the VMA flags.
    pub fn vm_flags(&self) -> &VmFlags {
        return &self.vm_flags;
    }

    /// Returns a clone of the backing file handle, if any.
    pub fn vm_file(&self) -> Option<Arc<File>> {
        return self.vm_file.clone();
    }

    /// Returns a weak reference to the owning address space, if set.
    pub fn address_space(&self) -> Option<Weak<AddressSpace>> {
        return self.user_address_space.clone();
    }

    pub fn set_vm_flags(&mut self, vm_flags: VmFlags) {
        self.vm_flags = vm_flags;
    }

    pub fn set_region_size(&mut self, new_region_size: usize) {
        self.region.set_size(new_region_size);
    }

    pub fn set_mapped(&mut self, mapped: bool) {
        self.mapped = mapped;
    }

    /// Recomputes the page-table entry flags from the current `vm_flags`.
    pub fn set_flags(&mut self) {
        self.flags = MMArch::vm_get_page_prot(self.vm_flags);
    }

    /// # Clone the contents of this VMA
    ///
    /// ### Safety
    ///
    /// An incorrect copy may lead to memory leaks or double frees,
    /// so this must be used with care.
    pub unsafe fn clone(&self) -> Self {
        return Self {
            region: self.region,
            vm_flags: self.vm_flags,
            flags: self.flags,
            mapped: self.mapped,
            user_address_space: self.user_address_space.clone(),
            self_ref: self.self_ref.clone(),
            provider: Provider::Allocated,
            file_pgoff: self.file_pgoff,
            vm_file: self.vm_file.clone(),
        };
    }

    /// Clones only the descriptive info of this VMA; the owning address
    /// space and the self reference are left empty.
    pub fn clone_info_only(&self) -> Self {
        return Self {
            region: self.region,
            vm_flags: self.vm_flags,
            flags: self.flags,
            mapped: self.mapped,
            user_address_space: None,
            self_ref: Weak::default(),
            provider: Provider::Allocated,
            file_pgoff: self.file_pgoff,
            vm_file: self.vm_file.clone(),
        };
    }

    #[inline(always)]
    pub fn flags(&self) -> EntryFlags<MMArch> {
        return self.flags;
    }

    #[inline(always)]
    pub fn file_page_offset(&self) -> Option<usize> {
        return self.file_pgoff;
    }

    /// Returns an iterator over the virtual page frames of this VMA.
    pub fn pages(&self) -> VirtPageFrameIter {
        return VirtPageFrameIter::new(
            VirtPageFrame::new(self.region.start()),
            VirtPageFrame::new(self.region.end()),
        );
    }

    /// Re-applies `flags` to every currently-mapped page of this VMA and
    /// records them as the VMA's page flags.
    pub fn remap(
        &mut self,
        flags: EntryFlags<MMArch>,
        mapper: &mut PageMapper,
        mut flusher: impl Flusher<MMArch>,
    ) -> Result<(), SystemError> {
        for page in self.region.pages() {
            // debug!("remap page {:?}", page.virt_address());
            // Only pages that are actually present in the page table are remapped.
            if mapper.translate(page.virt_address()).is_some() {
                let r = unsafe {
                    mapper
                        .remap(page.virt_address(), flags)
                        .expect("Failed to remap")
                };
                flusher.consume(r);
            }
            // debug!("consume page {:?}", page.virt_address());
            // debug!("remap page {:?} done", page.virt_address());
        }
        self.flags = flags;
        return Ok(());
    }

    /// Checks whether this VMA may take on the given protection flags.
    ///
    /// ## Parameters
    ///
    /// - `prot_flags` the protection flags to check
    pub fn can_have_flags(&self, prot_flags: ProtFlags) -> bool {
        // `is_downgrade` is true when `prot_flags` adds no write/exec
        // permission the VMA's page flags do not already grant.
        let is_downgrade = (self.flags.has_write() || !prot_flags.contains(ProtFlags::PROT_WRITE))
            && (self.flags.has_execute() || !prot_flags.contains(ProtFlags::PROT_EXEC));

        match self.provider {
            Provider::Allocated { ..
            } => true,

            #[allow(unreachable_patterns)]
            _ => is_downgrade,
        }
    }

    /// Maps a physical address range to a virtual address range.
    ///
    /// @param phys physical page frame to start mapping from
    /// @param destination virtual page frame to map to
    /// @param count number of page frames to map
    /// @param flags page flags
    /// @param mapper page-table mapper
    /// @param flusher page-table entry flusher
    ///
    /// @return the resulting mapped virtual memory area
    pub fn physmap(
        phys: PhysPageFrame,
        destination: VirtPageFrame,
        count: PageFrameCount,
        vm_flags: VmFlags,
        flags: EntryFlags<MMArch>,
        mapper: &mut PageMapper,
        mut flusher: impl Flusher<MMArch>,
    ) -> Result<Arc<LockedVMA>, SystemError> {
        let mut cur_phy = phys;
        let mut cur_dest = destination;

        for _ in 0..count.data() {
            // Map each physical frame to the corresponding virtual frame.
            let r =
                unsafe { mapper.map_phys(cur_dest.virt_address(), cur_phy.phys_address(), flags) }
                    .expect("Failed to map phys, may be OOM error");

            // todo: add OOM handling

            // Flush the TLB entry.
            flusher.consume(r);

            cur_phy = cur_phy.next();
            cur_dest = cur_dest.next();
        }

        let r: Arc<LockedVMA> = LockedVMA::new(VMA::new(
            VirtRegion::new(destination.virt_address(), count.data() * MMArch::PAGE_SIZE),
            vm_flags,
            flags,
            None,
            None,
            true,
        ));

        // Register the new VMA in each page's anon_vma.
        let mut page_manager_guard = page_manager_lock_irqsave();
        cur_phy = phys;
        for _ in 0..count.data() {
            let paddr = cur_phy.phys_address();
            let page = page_manager_guard.get_unwrap(&paddr);
            page.write_irqsave().insert_vma(r.clone());
            cur_phy = cur_phy.next();
        }

        return Ok(r);
    }

    /// Allocates physical pages from the frame allocator, maps them at the
    /// given virtual address, and creates a VMA for them.
    ///
    /// ## Parameters
    ///
    /// - `destination`: virtual address to map to
    /// - `page_count`: number of page frames to map
    /// - `vm_flags`: VMA flags
    /// - `flags`: page flags
    /// - `mapper`: page-table mapper
    /// - `flusher`: page-table entry flusher
    /// - `file`: backing file, if any
    /// - `pgoff`: page offset into the backing file
    ///
    /// ## Returns
    /// - the resulting mapped virtual memory area
#[allow(clippy::too_many_arguments)] 1604 pub fn zeroed( 1605 destination: VirtPageFrame, 1606 page_count: PageFrameCount, 1607 vm_flags: VmFlags, 1608 flags: EntryFlags<MMArch>, 1609 mapper: &mut PageMapper, 1610 mut flusher: impl Flusher<MMArch>, 1611 file: Option<Arc<File>>, 1612 pgoff: Option<usize>, 1613 ) -> Result<Arc<LockedVMA>, SystemError> { 1614 let mut cur_dest: VirtPageFrame = destination; 1615 // debug!( 1616 // "VMA::zeroed: page_count = {:?}, destination={destination:?}", 1617 // page_count 1618 // ); 1619 for _ in 0..page_count.data() { 1620 // debug!( 1621 // "VMA::zeroed: cur_dest={cur_dest:?}, vaddr = {:?}", 1622 // cur_dest.virt_address() 1623 // ); 1624 let r = unsafe { mapper.map(cur_dest.virt_address(), flags) } 1625 .expect("Failed to map zero, may be OOM error"); 1626 // todo: 增加OOM处理 1627 1628 // 稍后再刷新TLB,这里取消刷新 1629 flusher.consume(r); 1630 cur_dest = cur_dest.next(); 1631 } 1632 let r = LockedVMA::new(VMA::new( 1633 VirtRegion::new( 1634 destination.virt_address(), 1635 page_count.data() * MMArch::PAGE_SIZE, 1636 ), 1637 vm_flags, 1638 flags, 1639 file, 1640 pgoff, 1641 true, 1642 )); 1643 drop(flusher); 1644 // debug!("VMA::zeroed: flusher dropped"); 1645 1646 // 清空这些内存并将VMA加入到anon_vma中 1647 let mut page_manager_guard = page_manager_lock_irqsave(); 1648 let virt_iter: VirtPageFrameIter = 1649 VirtPageFrameIter::new(destination, destination.add(page_count)); 1650 for frame in virt_iter { 1651 let paddr = mapper.translate(frame.virt_address()).unwrap().0; 1652 1653 // 将VMA加入到anon_vma 1654 let page = page_manager_guard.get_unwrap(&paddr); 1655 page.write_irqsave().insert_vma(r.clone()); 1656 } 1657 // debug!("VMA::zeroed: done"); 1658 return Ok(r); 1659 } 1660 1661 pub fn page_address(&self, page: &Arc<Page>) -> Result<VirtAddr, SystemError> { 1662 let page_guard = page.read_irqsave(); 1663 let index = page_guard.index().unwrap(); 1664 if index >= self.file_pgoff.unwrap() { 1665 let address = 1666 self.region.start + ((index - 
self.file_pgoff.unwrap()) << MMArch::PAGE_SHIFT); 1667 if address <= self.region.end() { 1668 return Ok(address); 1669 } 1670 } 1671 return Err(SystemError::EFAULT); 1672 } 1673 } 1674 1675 impl Drop for VMA { 1676 fn drop(&mut self) { 1677 // 当VMA被释放时,需要确保它已经被从页表中解除映射 1678 assert!(!self.mapped, "VMA is still mapped"); 1679 } 1680 } 1681 1682 impl PartialEq for VMA { 1683 fn eq(&self, other: &Self) -> bool { 1684 return self.region == other.region; 1685 } 1686 } 1687 1688 impl Eq for VMA {} 1689 1690 impl PartialOrd for VMA { 1691 fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { 1692 Some(self.cmp(other)) 1693 } 1694 } 1695 1696 impl Ord for VMA { 1697 fn cmp(&self, other: &Self) -> cmp::Ordering { 1698 return self.region.cmp(&other.region); 1699 } 1700 } 1701 1702 #[derive(Debug)] 1703 pub struct UserStack { 1704 // 栈底地址 1705 stack_bottom: VirtAddr, 1706 // 当前已映射的大小 1707 mapped_size: usize, 1708 /// 栈顶地址(这个值需要仔细确定!因为它可能不会实时与用户栈的真实栈顶保持一致!要小心!) 1709 current_sp: VirtAddr, 1710 } 1711 1712 impl UserStack { 1713 /// 默认的用户栈底地址 1714 pub const DEFAULT_USER_STACK_BOTTOM: VirtAddr = MMArch::USER_STACK_START; 1715 /// 默认的用户栈大小为8MB 1716 pub const DEFAULT_USER_STACK_SIZE: usize = 8 * 1024 * 1024; 1717 /// 用户栈的保护页数量 1718 pub const GUARD_PAGES_NUM: usize = 4; 1719 1720 /// 创建一个用户栈 1721 pub fn new( 1722 vm: &mut InnerAddressSpace, 1723 stack_bottom: Option<VirtAddr>, 1724 stack_size: usize, 1725 ) -> Result<Self, SystemError> { 1726 let stack_bottom = stack_bottom.unwrap_or(Self::DEFAULT_USER_STACK_BOTTOM); 1727 assert!(stack_bottom.check_aligned(MMArch::PAGE_SIZE)); 1728 1729 // 分配用户栈的保护页 1730 let guard_size = Self::GUARD_PAGES_NUM * MMArch::PAGE_SIZE; 1731 let actual_stack_bottom = stack_bottom - guard_size; 1732 1733 let mut prot_flags = ProtFlags::PROT_READ | ProtFlags::PROT_WRITE; 1734 let map_flags = MapFlags::MAP_PRIVATE 1735 | MapFlags::MAP_ANONYMOUS 1736 | MapFlags::MAP_FIXED_NOREPLACE 1737 | MapFlags::MAP_GROWSDOWN; 1738 // debug!( 1739 // "map anonymous 
stack: {:?} {}", 1740 // actual_stack_bottom, 1741 // guard_size 1742 // ); 1743 vm.map_anonymous( 1744 actual_stack_bottom, 1745 guard_size, 1746 prot_flags, 1747 map_flags, 1748 false, 1749 false, 1750 )?; 1751 // test_buddy(); 1752 // 设置保护页只读 1753 prot_flags.remove(ProtFlags::PROT_WRITE); 1754 // debug!( 1755 // "to mprotect stack guard pages: {:?} {}", 1756 // actual_stack_bottom, 1757 // guard_size 1758 // ); 1759 vm.mprotect( 1760 VirtPageFrame::new(actual_stack_bottom), 1761 PageFrameCount::new(Self::GUARD_PAGES_NUM), 1762 prot_flags, 1763 )?; 1764 1765 // debug!( 1766 // "mprotect stack guard pages done: {:?} {}", 1767 // actual_stack_bottom, 1768 // guard_size 1769 // ); 1770 1771 let mut user_stack = UserStack { 1772 stack_bottom: actual_stack_bottom, 1773 mapped_size: guard_size, 1774 current_sp: actual_stack_bottom - guard_size, 1775 }; 1776 1777 // debug!("extend user stack: {:?} {}", stack_bottom, stack_size); 1778 // 分配用户栈 1779 user_stack.initial_extend(vm, stack_size)?; 1780 // debug!("user stack created: {:?} {}", stack_bottom, stack_size); 1781 return Ok(user_stack); 1782 } 1783 1784 fn initial_extend( 1785 &mut self, 1786 vm: &mut InnerAddressSpace, 1787 mut bytes: usize, 1788 ) -> Result<(), SystemError> { 1789 let prot_flags = ProtFlags::PROT_READ | ProtFlags::PROT_WRITE | ProtFlags::PROT_EXEC; 1790 let map_flags = MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS | MapFlags::MAP_GROWSDOWN; 1791 1792 bytes = page_align_up(bytes); 1793 self.mapped_size += bytes; 1794 1795 vm.map_anonymous( 1796 self.stack_bottom - self.mapped_size, 1797 bytes, 1798 prot_flags, 1799 map_flags, 1800 false, 1801 false, 1802 )?; 1803 1804 return Ok(()); 1805 } 1806 1807 /// 扩展用户栈 1808 /// 1809 /// ## 参数 1810 /// 1811 /// - `vm` 用户地址空间结构体 1812 /// - `bytes` 要扩展的字节数 1813 /// 1814 /// ## 返回值 1815 /// 1816 /// - **Ok(())** 扩展成功 1817 /// - **Err(SystemError)** 扩展失败 1818 #[allow(dead_code)] 1819 pub fn extend( 1820 &mut self, 1821 vm: &mut InnerAddressSpace, 1822 mut bytes: 
usize, 1823 ) -> Result<(), SystemError> { 1824 let prot_flags = ProtFlags::PROT_READ | ProtFlags::PROT_WRITE | ProtFlags::PROT_EXEC; 1825 let map_flags = MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS; 1826 1827 bytes = page_align_up(bytes); 1828 self.mapped_size += bytes; 1829 1830 vm.map_anonymous( 1831 self.stack_bottom - self.mapped_size, 1832 bytes, 1833 prot_flags, 1834 map_flags, 1835 false, 1836 false, 1837 )?; 1838 1839 return Ok(()); 1840 } 1841 1842 /// 获取栈顶地址 1843 /// 1844 /// 请注意,如果用户栈的栈顶地址发生变化,这个值可能不会实时更新! 1845 pub fn sp(&self) -> VirtAddr { 1846 return self.current_sp; 1847 } 1848 1849 pub unsafe fn set_sp(&mut self, sp: VirtAddr) { 1850 self.current_sp = sp; 1851 } 1852 1853 /// 仅仅克隆用户栈的信息,不会克隆用户栈的内容/映射 1854 pub unsafe fn clone_info_only(&self) -> Self { 1855 return Self { 1856 stack_bottom: self.stack_bottom, 1857 mapped_size: self.mapped_size, 1858 current_sp: self.current_sp, 1859 }; 1860 } 1861 1862 /// 获取当前用户栈的大小(不包括保护页) 1863 pub fn stack_size(&self) -> usize { 1864 return self.mapped_size - Self::GUARD_PAGES_NUM * MMArch::PAGE_SIZE; 1865 } 1866 } 1867