//! A ZoneAllocator to allocate arbitrary object sizes (up to
//! `ZoneAllocator::MAX_BASE_ALLOC_SIZE`).
//!
//! The ZoneAllocator achieves this by having many `SCAllocator`

use crate::*;
use core::sync::atomic::Ordering;

/// Creates an instance of a zone, we do this in a macro because we
/// re-use the code in const and non-const functions
///
/// We can get rid of this once the const fn feature is fully stabilized.
macro_rules! new_zone {
    () => {
        ZoneAllocator {
            // TODO(perf): We should probably pick better classes
            // rather than powers-of-two (see SuperMalloc etc.)
            small_slabs: [
                SCAllocator::new(1 << 3),  // 8
                SCAllocator::new(1 << 4),  // 16
                SCAllocator::new(1 << 5),  // 32
                SCAllocator::new(1 << 6),  // 64
                SCAllocator::new(1 << 7),  // 128
                SCAllocator::new(1 << 8),  // 256
                SCAllocator::new(1 << 9),  // 512
                SCAllocator::new(1 << 10), // 1024
                SCAllocator::new(1 << 11), // 2048
            ],
            // Running total of page bytes handed to this zone; grows on
            // `refill` and shrinks on `try_reclaim_base_pages`.
            total: 0,
        }
    };
}

/// A zone allocator for arbitrary sized allocations.
///
/// Has a bunch of `SCAllocator` and through that can serve allocation
/// requests for many different object sizes up to `MAX_BASE_SIZE_CLASSES`
/// by selecting the right `SCAllocator` for allocation and deallocation.
///
/// The allocator provides a `refill` function to supply the underlying
/// `SCAllocator`s with more memory in case they run out.
pub struct ZoneAllocator<'a> {
    /// One size-class allocator per supported power-of-two class
    /// (8 bytes up to 2048 bytes), each backed by 4 KiB `ObjectPage`s.
    small_slabs: [SCAllocator<'a, ObjectPage<'a>>; ZoneAllocator::MAX_BASE_SIZE_CLASSES],
    /// Total bytes of page memory currently owned by this zone
    /// (incremented by `refill`, decremented by `try_reclaim_base_pages`).
    total: u64,
}

impl<'a> Default for ZoneAllocator<'a> {
    fn default() -> ZoneAllocator<'a> {
        new_zone!()
    }
}

/// Result of mapping a requested size to a size-class allocator.
enum Slab {
    /// Index into `ZoneAllocator::small_slabs` serving the request.
    Base(usize),
    /// The requested size is larger than any available size class.
    Unsupported,
}

impl<'a> ZoneAllocator<'a> {
    /// Maximum size (in bytes) this allocator can serve — the largest
    /// size class (2 KiB). Allocations are backed by ObjectPages
    /// (4 KiB pages).
    pub const MAX_BASE_ALLOC_SIZE: usize = 1 << 11;

    /// How many allocators of type SCAllocator<ObjectPage> we have.
    pub const MAX_BASE_SIZE_CLASSES: usize = 9;

    /// Creates an empty `ZoneAllocator` (const version, behind the
    /// `unstable` feature).
    #[cfg(feature = "unstable")]
    pub const fn new() -> ZoneAllocator<'a> {
        new_zone!()
    }

    /// Creates an empty `ZoneAllocator`.
    #[cfg(not(feature = "unstable"))]
    pub fn new() -> ZoneAllocator<'a> {
        new_zone!()
    }

    /// Return maximum size an object of size `current_size` can use.
    ///
    /// Used to optimize `realloc`.
get_max_size(current_size: usize) -> Option<usize>79ceeb2e94Slaokengwt pub fn get_max_size(current_size: usize) -> Option<usize> { 80ceeb2e94Slaokengwt match current_size { 81ceeb2e94Slaokengwt 0..=8 => Some(8), 82ceeb2e94Slaokengwt 9..=16 => Some(16), 83ceeb2e94Slaokengwt 17..=32 => Some(32), 84ceeb2e94Slaokengwt 33..=64 => Some(64), 85ceeb2e94Slaokengwt 65..=128 => Some(128), 86ceeb2e94Slaokengwt 129..=256 => Some(256), 87ceeb2e94Slaokengwt 257..=512 => Some(512), 88ceeb2e94Slaokengwt 513..=1024 => Some(1024), 89ceeb2e94Slaokengwt 1025..=2048 => Some(2048), 90ceeb2e94Slaokengwt _ => None, 91ceeb2e94Slaokengwt } 92ceeb2e94Slaokengwt } 93ceeb2e94Slaokengwt 94ceeb2e94Slaokengwt /// Figure out index into zone array to get the correct slab allocator for that size. get_slab(requested_size: usize) -> Slab95ceeb2e94Slaokengwt fn get_slab(requested_size: usize) -> Slab { 96ceeb2e94Slaokengwt match requested_size { 97ceeb2e94Slaokengwt 0..=8 => Slab::Base(0), 98ceeb2e94Slaokengwt 9..=16 => Slab::Base(1), 99ceeb2e94Slaokengwt 17..=32 => Slab::Base(2), 100ceeb2e94Slaokengwt 33..=64 => Slab::Base(3), 101ceeb2e94Slaokengwt 65..=128 => Slab::Base(4), 102ceeb2e94Slaokengwt 129..=256 => Slab::Base(5), 103ceeb2e94Slaokengwt 257..=512 => Slab::Base(6), 104ceeb2e94Slaokengwt 513..=1024 => Slab::Base(7), 105ceeb2e94Slaokengwt 1025..=2048 => Slab::Base(8), 106ceeb2e94Slaokengwt _ => Slab::Unsupported, 107ceeb2e94Slaokengwt } 108ceeb2e94Slaokengwt } 109ceeb2e94Slaokengwt 110ceeb2e94Slaokengwt /// Reclaims empty pages by calling `dealloc` on it and removing it from the 111ceeb2e94Slaokengwt /// empty lists in the [`SCAllocator`]. 112ceeb2e94Slaokengwt /// 113ceeb2e94Slaokengwt /// The `dealloc` function is called at most `reclaim_base_max` times for 114ceeb2e94Slaokengwt /// base pages, and at most `reclaim_large_max` for large pages. 
try_reclaim_base_pages<F>(&mut self, mut to_reclaim: usize, mut dealloc: F) where F: Fn(*mut ObjectPage),115ceeb2e94Slaokengwt pub fn try_reclaim_base_pages<F>(&mut self, mut to_reclaim: usize, mut dealloc: F) 116ceeb2e94Slaokengwt where 117ceeb2e94Slaokengwt F: Fn(*mut ObjectPage), 118ceeb2e94Slaokengwt { 119ceeb2e94Slaokengwt for i in 0..ZoneAllocator::MAX_BASE_SIZE_CLASSES { 120ceeb2e94Slaokengwt let slab = &mut self.small_slabs[i]; 121*7401bec5Slaokengwt // reclaim的page数 122ceeb2e94Slaokengwt let just_reclaimed = slab.try_reclaim_pages(to_reclaim, &mut dealloc); 123*7401bec5Slaokengwt self.total -= (just_reclaimed * OBJECT_PAGE_SIZE) as u64; 124ceeb2e94Slaokengwt to_reclaim = to_reclaim.saturating_sub(just_reclaimed); 125ceeb2e94Slaokengwt if to_reclaim == 0 { 126ceeb2e94Slaokengwt break; 127ceeb2e94Slaokengwt } 128ceeb2e94Slaokengwt } 129ceeb2e94Slaokengwt } 130*7401bec5Slaokengwt 131*7401bec5Slaokengwt /// 获取scallocator中的还未被分配的空间 free_space(&mut self) -> u64132*7401bec5Slaokengwt pub fn free_space(&mut self) -> u64 { 133*7401bec5Slaokengwt // 记录空闲空间 134*7401bec5Slaokengwt let mut free = 0; 135*7401bec5Slaokengwt // 遍历所有scallocator 136*7401bec5Slaokengwt for count in 0..ZoneAllocator::MAX_BASE_SIZE_CLASSES { 137*7401bec5Slaokengwt // 获取scallocator 138*7401bec5Slaokengwt let scallocator = &mut self.small_slabs[count]; 139*7401bec5Slaokengwt 140*7401bec5Slaokengwt // 遍历scallocator中的部分分配的page(partial_page) 141*7401bec5Slaokengwt for slab_page in scallocator.slabs.iter_mut() { 142*7401bec5Slaokengwt // 统计page中还可以分配多少个object 143*7401bec5Slaokengwt let mut free_obj_count = 0; 144*7401bec5Slaokengwt // 遍历page中的bitfield(用来统计内存分配情况的u64数组) 145*7401bec5Slaokengwt for b in slab_page.bitfield().iter() { 146*7401bec5Slaokengwt let bitval = b.load(Ordering::Relaxed); 147*7401bec5Slaokengwt let free_count = bitval.count_zeros() as usize; 148*7401bec5Slaokengwt free_obj_count += free_count; 149*7401bec5Slaokengwt } 150*7401bec5Slaokengwt // 
剩余可分配object数乘上page中规定的每个object的大小,即空闲空间 151*7401bec5Slaokengwt free += free_obj_count * scallocator.size(); 152*7401bec5Slaokengwt } 153*7401bec5Slaokengwt // 遍历scallocator中的empty_page,把空页空间也加上去 154*7401bec5Slaokengwt free += 155*7401bec5Slaokengwt scallocator.empty_slabs.elements * (scallocator.obj_per_page * scallocator.size()); 156*7401bec5Slaokengwt } 157*7401bec5Slaokengwt free as u64 158*7401bec5Slaokengwt } 159*7401bec5Slaokengwt usage(&mut self) -> SlabUsage160*7401bec5Slaokengwt pub fn usage(&mut self) -> SlabUsage { 161*7401bec5Slaokengwt let free_num = self.free_space(); 162*7401bec5Slaokengwt SlabUsage::new(self.total, free_num) 163*7401bec5Slaokengwt } 164ceeb2e94Slaokengwt } 165ceeb2e94Slaokengwt 166ceeb2e94Slaokengwt unsafe impl<'a> crate::Allocator<'a> for ZoneAllocator<'a> { 167ceeb2e94Slaokengwt /// Allocate a pointer to a block of memory described by `layout`. allocate(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocationError>168ceeb2e94Slaokengwt fn allocate(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocationError> { 169ceeb2e94Slaokengwt match ZoneAllocator::get_slab(layout.size()) { 170ceeb2e94Slaokengwt Slab::Base(idx) => self.small_slabs[idx].allocate(layout), 171ceeb2e94Slaokengwt Slab::Unsupported => Err(AllocationError::InvalidLayout), 172ceeb2e94Slaokengwt } 173ceeb2e94Slaokengwt } 174ceeb2e94Slaokengwt 175ceeb2e94Slaokengwt /// Deallocates a pointer to a block of memory, which was 176ceeb2e94Slaokengwt /// previously allocated by `allocate`. 177ceeb2e94Slaokengwt /// 178ceeb2e94Slaokengwt /// # Arguments 179ceeb2e94Slaokengwt /// * `ptr` - Address of the memory location to free. 180ceeb2e94Slaokengwt /// * `layout` - Memory layout of the block pointed to by `ptr`. 
deallocate(&mut self, ptr: NonNull<u8>, layout: Layout) -> Result<(), AllocationError>181ceeb2e94Slaokengwt fn deallocate(&mut self, ptr: NonNull<u8>, layout: Layout) -> Result<(), AllocationError> { 182ceeb2e94Slaokengwt match ZoneAllocator::get_slab(layout.size()) { 183ceeb2e94Slaokengwt Slab::Base(idx) => self.small_slabs[idx].deallocate(ptr, layout), 184ceeb2e94Slaokengwt Slab::Unsupported => Err(AllocationError::InvalidLayout), 185ceeb2e94Slaokengwt } 186ceeb2e94Slaokengwt } 187ceeb2e94Slaokengwt 188ceeb2e94Slaokengwt /// Refills the SCAllocator for a given Layout with an ObjectPage. 189ceeb2e94Slaokengwt /// 190ceeb2e94Slaokengwt /// # Safety 191ceeb2e94Slaokengwt /// ObjectPage needs to be emtpy etc. refill( &mut self, layout: Layout, new_page: &'a mut ObjectPage<'a>, ) -> Result<(), AllocationError>192ceeb2e94Slaokengwt unsafe fn refill( 193ceeb2e94Slaokengwt &mut self, 194ceeb2e94Slaokengwt layout: Layout, 195ceeb2e94Slaokengwt new_page: &'a mut ObjectPage<'a>, 196ceeb2e94Slaokengwt ) -> Result<(), AllocationError> { 197ceeb2e94Slaokengwt match ZoneAllocator::get_slab(layout.size()) { 198ceeb2e94Slaokengwt Slab::Base(idx) => { 199ceeb2e94Slaokengwt self.small_slabs[idx].refill(new_page); 200*7401bec5Slaokengwt // 每refill一个page就为slab的总空间统计加上4KB 201*7401bec5Slaokengwt self.total += OBJECT_PAGE_SIZE as u64; 202ceeb2e94Slaokengwt Ok(()) 203ceeb2e94Slaokengwt } 204ceeb2e94Slaokengwt Slab::Unsupported => Err(AllocationError::InvalidLayout), 205ceeb2e94Slaokengwt } 206ceeb2e94Slaokengwt } 207ceeb2e94Slaokengwt } 208*7401bec5Slaokengwt 209*7401bec5Slaokengwt /// Slab内存空间使用情况 210*7401bec5Slaokengwt pub struct SlabUsage { 211*7401bec5Slaokengwt // slab总共使用的内存空间 212*7401bec5Slaokengwt total: u64, 213*7401bec5Slaokengwt // slab的空闲空间 214*7401bec5Slaokengwt free: u64, 215*7401bec5Slaokengwt } 216*7401bec5Slaokengwt 217*7401bec5Slaokengwt impl SlabUsage { 218*7401bec5Slaokengwt /// 初始化SlabUsage new(total: u64, free: u64) -> Self219*7401bec5Slaokengwt pub fn new(total: 
u64, free: u64) -> Self { 220*7401bec5Slaokengwt Self { total, free } 221*7401bec5Slaokengwt } 222*7401bec5Slaokengwt total(&self) -> u64223*7401bec5Slaokengwt pub fn total(&self) -> u64 { 224*7401bec5Slaokengwt self.total 225*7401bec5Slaokengwt } 226*7401bec5Slaokengwt used(&self) -> u64227*7401bec5Slaokengwt pub fn used(&self) -> u64 { 228*7401bec5Slaokengwt self.total - self.free 229*7401bec5Slaokengwt } 230*7401bec5Slaokengwt free(&self) -> u64231*7401bec5Slaokengwt pub fn free(&self) -> u64 { 232*7401bec5Slaokengwt self.free 233*7401bec5Slaokengwt } 234*7401bec5Slaokengwt } 235