use klog_types::{AllocLogItem, LogSource};

use crate::{
    arch::mm::LockedFrameAllocator,
    debug::klog::mm::mm_debug_log,
    libs::align::page_align_up,
    mm::{MMArch, MemoryManagementArch, VirtAddr},
};

use core::{
    alloc::{AllocError, GlobalAlloc, Layout},
    intrinsics::unlikely,
    ptr::NonNull,
};

use super::{
    page_frame::{FrameAllocator, PageFrameCount},
    slab::{slab_init_state, SLABALLOCATOR},
};

/// Trait that kmalloc-like allocators should implement.
///
/// Mirrors the `GlobalAlloc` entry points, but as inherent-style methods so the
/// kernel can call them directly without going through the global allocator.
pub trait LocalAlloc {
    #[allow(dead_code)]
    unsafe fn local_alloc(&self, layout: Layout) -> *mut u8;
    unsafe fn local_alloc_zeroed(&self, layout: Layout) -> *mut u8;
    unsafe fn local_dealloc(&self, ptr: *mut u8, layout: Layout);
}

/// The kernel's global allocator facade: dispatches each request to either the
/// buddy (page frame) allocator or the slab allocator, based on
/// [`allocator_select_condition`].
pub struct KernelAllocator;

impl KernelAllocator {
    /// Allocate `layout` from the buddy allocator.
    ///
    /// The request is rounded up to whole pages and then to the next power of
    /// two, because the buddy allocator only hands out power-of-two frame
    /// counts (see the matching rounding in `free_in_buddy`).
    ///
    /// Returns `Err(AllocError)` if the frame allocator is exhausted or the
    /// physical address cannot be mapped to a (non-null) virtual address.
    /// The returned slice covers `allocated_frame_count` pages, which may be
    /// larger than `layout.size()`.
    unsafe fn alloc_in_buddy(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        // Compute the number of pages to request, rounding up; then round to a
        // power of two to match the buddy allocator's granularity.
        let count = (page_align_up(layout.size()) / MMArch::PAGE_SIZE).next_power_of_two();
        let page_frame_count = PageFrameCount::new(count);
        let (phy_addr, allocated_frame_count) = LockedFrameAllocator
            .allocate(page_frame_count)
            .ok_or(AllocError)?;

        // SAFETY: phy_addr was just returned by the frame allocator;
        // phys_2_virt may still fail (None) or yield a null address, both of
        // which are reported as AllocError rather than dereferenced.
        let virt_addr = unsafe { MMArch::phys_2_virt(phy_addr).ok_or(AllocError)? };
        if unlikely(virt_addr.is_null()) {
            return Err(AllocError);
        }

        // SAFETY: the region [virt_addr, virt_addr + allocated_frame_count
        // pages) was just allocated above and is exclusively ours.
        let slice = unsafe {
            core::slice::from_raw_parts_mut(
                virt_addr.data() as *mut u8,
                allocated_frame_count.data() * MMArch::PAGE_SIZE,
            )
        };
        return Ok(NonNull::from(slice));
    }

    /// Return a buddy-allocated region to the frame allocator.
    ///
    /// Because the buddy allocator rounds allocations up to a power-of-two
    /// page count, the free must recompute the same rounded count from
    /// `layout` so the full region is released.
    ///
    /// NOTE(review): `virt_2_phys(...).unwrap()` panics if `ptr` was not a
    /// kernel-mapped address — callers must only pass pointers previously
    /// returned by `alloc_in_buddy` with the same `layout`.
    pub(super) unsafe fn free_in_buddy(&self, ptr: *mut u8, layout: Layout) {
        // The buddy allocator hands out power-of-two page counts, so freeing
        // must round up to the same power of two.
        let count = (page_align_up(layout.size()) / MMArch::PAGE_SIZE).next_power_of_two();
        let page_frame_count = PageFrameCount::new(count);
        let phy_addr = MMArch::virt_2_phys(VirtAddr::new(ptr as usize)).unwrap();
        LockedFrameAllocator.free(phy_addr, page_frame_count);
    }
}

/// Implement the `LocalAlloc` trait for the kernel allocator.
impl LocalAlloc for KernelAllocator {
    /// Allocate `layout`, routing to buddy or slab per
    /// [`allocator_select_condition`]. Returns a null pointer on failure
    /// (buddy exhaustion, or slab selected but `SLABALLOCATOR` not yet set).
    unsafe fn local_alloc(&self, layout: Layout) -> *mut u8 {
        if allocator_select_condition(layout) {
            return self
                .alloc_in_buddy(layout)
                .map(|x| x.as_mut_ptr())
                .unwrap_or(core::ptr::null_mut());
        } else {
            if let Some(ref mut slab) = SLABALLOCATOR {
                return slab.allocate(layout);
            };
            return core::ptr::null_mut();
        }
    }

    /// Allocate `layout` and return zeroed memory; null on failure.
    unsafe fn local_alloc_zeroed(&self, layout: Layout) -> *mut u8 {
        if allocator_select_condition(layout) {
            return self
                .alloc_in_buddy(layout)
                .map(|x| {
                    let ptr: *mut u8 = x.as_mut_ptr();
                    // Buddy memory is zeroed explicitly, over the full
                    // (rounded-up) allocation, not just layout.size().
                    core::ptr::write_bytes(ptr, 0, x.len());
                    ptr
                })
                .unwrap_or(core::ptr::null_mut());
        } else {
            // NOTE(review): the slab path does not zero here, so this relies
            // on `slab.allocate` returning zeroed memory — confirm against the
            // slab allocator's implementation.
            if let Some(ref mut slab) = SLABALLOCATOR {
                return slab.allocate(layout);
            };
            return core::ptr::null_mut();
        }
    }

    /// Free `ptr` previously returned by this allocator.
    ///
    /// Routing: large layouts (or pre-slab-init allocations) go back to the
    /// buddy; additionally, any page-aligned (4096) pointer is treated as a
    /// buddy allocation. NOTE(review): this assumes the slab allocator never
    /// hands out page-aligned pointers — verify, otherwise a slab block could
    /// be freed into the buddy here.
    unsafe fn local_dealloc(&self, ptr: *mut u8, layout: Layout) {
        if allocator_select_condition(layout) || ((ptr as usize) % 4096) == 0 {
            self.free_in_buddy(ptr, layout)
        } else if let Some(ref mut slab) = SLABALLOCATOR {
            slab.deallocate(ptr, layout).unwrap()
        }
    }
}

/// Implement the `GlobalAlloc` trait for the kernel allocator, with
/// allocation/free events mirrored to the kernel debug log.
unsafe impl GlobalAlloc for KernelAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // Intentionally uses the zeroed path: plain `alloc` also returns
        // zeroed memory (safe, at a small performance cost).
        let r = self.local_alloc_zeroed(layout);
        if allocator_select_condition(layout) {
            alloc_debug_log(klog_types::LogSource::Buddy, layout, r);
        } else {
            alloc_debug_log(klog_types::LogSource::Slab, layout, r);
        }
        return r;
    }

    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
        let r = self.local_alloc_zeroed(layout);
        if allocator_select_condition(layout) {
            alloc_debug_log(klog_types::LogSource::Buddy, layout, r);
        } else {
            alloc_debug_log(klog_types::LogSource::Slab, layout, r);
        }
        return r;
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // Log-source selection mirrors the routing logic in `local_dealloc`
        // (including the page-alignment heuristic) so the log attributes the
        // free to the allocator that actually handles it.
        if allocator_select_condition(layout) || ((ptr as usize) % 4096) == 0 {
            dealloc_debug_log(klog_types::LogSource::Buddy, layout, ptr);
        } else {
            dealloc_debug_log(klog_types::LogSource::Slab, layout, ptr);
        }
        self.local_dealloc(ptr, layout);
    }
}

/// Decide between the buddy and slab allocators for `layout`.
///
/// Returns `true` (use buddy) when the request is larger than 2048 bytes, or
/// when the slab allocator has not finished initializing yet.
fn allocator_select_condition(layout: Layout) -> bool {
    layout.size() > 2048 || !slab_init_state()
}

/// Record an allocation event (layout + resulting pointer) in the kernel
/// memory debug log, tagged with the allocator that served it.
fn alloc_debug_log(source: LogSource, layout: Layout, ptr: *mut u8) {
    mm_debug_log(
        klog_types::AllocatorLogType::Alloc(AllocLogItem::new(layout, Some(ptr as usize), None)),
        source,
    )
}

/// Record a free event (layout + pointer) in the kernel memory debug log,
/// tagged with the allocator that handles it.
fn dealloc_debug_log(source: LogSource, layout: Layout, ptr: *mut u8) {
    mm_debug_log(
        klog_types::AllocatorLogType::Free(AllocLogItem::new(layout, Some(ptr as usize), None)),
        source,
    )
}

/// Implement the `Allocator` trait for the kernel slab allocator (disabled).
// unsafe impl Allocator for KernelAllocator {
//     fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
//         let memory = unsafe {self.local_alloc(layout)};
//         if memory.is_null() {
//             Err(AllocError)
//         } else {
//             let slice = unsafe { core::slice::from_raw_parts_mut(memory, layout.size()) };
//             Ok(unsafe { NonNull::new_unchecked(slice) })
//         }
//     }

//     fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
//         let memory = unsafe {self.local_alloc_zeroed(layout)};
//         if memory.is_null() {
//             Err(AllocError)
//         } else {
//             let slice = unsafe { core::slice::from_raw_parts_mut(memory, layout.size()) };
//             Ok(unsafe { NonNull::new_unchecked(slice) })
//         }
//     }

//     unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
//         self.local_dealloc(ptr.cast().as_ptr(), layout);
//     }
// }

/// Kernel out-of-memory handler: invoked when a global allocation fails.
/// Panics with the failed layout; only compiled for the bare-metal target.
#[cfg(target_os = "none")]
#[alloc_error_handler]
pub fn global_alloc_err_handler(layout: Layout) -> ! {
    panic!("global_alloc_error, layout: {:?}", layout);
}