//! A ZoneAllocator to allocate arbitrary object sizes (up to `ZoneAllocator::MAX_ALLOC_SIZE`).
//!
//! The ZoneAllocator achieves this by having many `SCAllocator`s, one for each size class.

use crate::*;

/// Creates an instance of a zone, we do this in a macro because we
/// re-use the code in const and non-const functions.
///
/// We can get rid of this once the const fn feature is fully stabilized.
macro_rules! new_zone {
    () => {
        ZoneAllocator {
            // TODO(perf): We should probably pick better classes
            // rather than powers-of-two (see SuperMalloc etc.)
            small_slabs: [
                SCAllocator::new(1 << 3),  // 8
                SCAllocator::new(1 << 4),  // 16
                SCAllocator::new(1 << 5),  // 32
                SCAllocator::new(1 << 6),  // 64
                SCAllocator::new(1 << 7),  // 128
                SCAllocator::new(1 << 8),  // 256
                SCAllocator::new(1 << 9),  // 512
                SCAllocator::new(1 << 10), // 1024
                SCAllocator::new(1 << 11), // 2048
            ],
        }
    };
}

/// A zone allocator for arbitrary sized allocations.
///
/// Has a set of `SCAllocator`s and can therefore serve allocation requests for
/// many different object sizes up to `ZoneAllocator::MAX_ALLOC_SIZE` by selecting
/// the right `SCAllocator` for allocation and deallocation.
///
/// The allocator provides a `refill` function to supply the underlying
/// `SCAllocator`s with more memory in case they run out.
pub struct ZoneAllocator<'a> {
    small_slabs: [SCAllocator<'a, ObjectPage<'a>>; ZoneAllocator::MAX_BASE_SIZE_CLASSES],
}

impl<'a> Default for ZoneAllocator<'a> {
    fn default() -> ZoneAllocator<'a> {
        new_zone!()
    }
}

enum Slab {
    Base(usize),
    Unsupported,
}

impl<'a> ZoneAllocator<'a> {
    /// Maximum size that can be allocated (2 KiB).
    /// This is also the maximum object size that this allocator can handle;
    /// larger requests are rejected with `AllocationError::InvalidLayout`.
    pub const MAX_ALLOC_SIZE: usize = 1 << 11;

    /// Maximum size which is allocated with ObjectPages (4 KiB pages).
    ///
    /// e.g. this is 4 KiB - 80 bytes of meta-data.
    pub const MAX_BASE_ALLOC_SIZE: usize = 256;

    /// How many allocators of type `SCAllocator<ObjectPage>` we have.
    const MAX_BASE_SIZE_CLASSES: usize = 9;

    #[cfg(feature = "unstable")]
    pub const fn new() -> ZoneAllocator<'a> {
        new_zone!()
    }

    #[cfg(not(feature = "unstable"))]
    pub fn new() -> ZoneAllocator<'a> {
        new_zone!()
    }

    /// Return the maximum size an object of size `current_size` can use
    /// (i.e., the capacity of its size class).
    ///
    /// Used to optimize `realloc`.
    pub fn get_max_size(current_size: usize) -> Option<usize> {
        match current_size {
            0..=8 => Some(8),
            9..=16 => Some(16),
            17..=32 => Some(32),
            33..=64 => Some(64),
            65..=128 => Some(128),
            129..=256 => Some(256),
            257..=512 => Some(512),
            513..=1024 => Some(1024),
            1025..=2048 => Some(2048),
            _ => None,
        }
    }

    /// Figure out the index into the zone array to get the correct slab allocator for that size.
    fn get_slab(requested_size: usize) -> Slab {
        match requested_size {
            0..=8 => Slab::Base(0),
            9..=16 => Slab::Base(1),
            17..=32 => Slab::Base(2),
            33..=64 => Slab::Base(3),
            65..=128 => Slab::Base(4),
            129..=256 => Slab::Base(5),
            257..=512 => Slab::Base(6),
            513..=1024 => Slab::Base(7),
            1025..=2048 => Slab::Base(8),
            _ => Slab::Unsupported,
        }
    }

    /// Reclaims empty pages by calling `dealloc` on them and removing them from
    /// the empty lists in the [`SCAllocator`].
    ///
    /// The `dealloc` function is called at most `to_reclaim` times.
    pub fn try_reclaim_base_pages<F>(&mut self, mut to_reclaim: usize, mut dealloc: F)
    where
        F: Fn(*mut ObjectPage),
    {
        for i in 0..ZoneAllocator::MAX_BASE_SIZE_CLASSES {
            let slab = &mut self.small_slabs[i];
            let just_reclaimed = slab.try_reclaim_pages(to_reclaim, &mut dealloc);
            to_reclaim = to_reclaim.saturating_sub(just_reclaimed);
            if to_reclaim == 0 {
                break;
            }
        }
    }
}

unsafe impl<'a> crate::Allocator<'a> for ZoneAllocator<'a> {
    /// Allocate a pointer to a block of memory described by `layout`.
    fn allocate(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocationError> {
        match ZoneAllocator::get_slab(layout.size()) {
            Slab::Base(idx) => self.small_slabs[idx].allocate(layout),
            Slab::Unsupported => Err(AllocationError::InvalidLayout),
        }
    }

    /// Deallocates a pointer to a block of memory, which was
    /// previously allocated by `allocate`.
    ///
    /// # Arguments
    /// * `ptr` - Address of the memory location to free.
    /// * `layout` - Memory layout of the block pointed to by `ptr`.
    fn deallocate(&mut self, ptr: NonNull<u8>, layout: Layout) -> Result<(), AllocationError> {
        match ZoneAllocator::get_slab(layout.size()) {
            Slab::Base(idx) => self.small_slabs[idx].deallocate(ptr, layout),
            Slab::Unsupported => Err(AllocationError::InvalidLayout),
        }
    }

    /// Refills the SCAllocator for a given Layout with an ObjectPage.
    ///
    /// # Safety
    /// The provided `ObjectPage` needs to be empty (it must not contain any
    /// live allocations) and remain exclusively owned by the allocator for `'a`.
    unsafe fn refill(
        &mut self,
        layout: Layout,
        new_page: &'a mut ObjectPage<'a>,
    ) -> Result<(), AllocationError> {
        match ZoneAllocator::get_slab(layout.size()) {
            Slab::Base(idx) => {
                self.small_slabs[idx].refill(new_page);
                Ok(())
            }
            Slab::Unsupported => Err(AllocationError::InvalidLayout),
        }
    }
}
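
// The module below is not part of the allocator; it is a minimal usage sketch
// showing how a zone is refilled with a base page and then driven through the
// `Allocator` trait (allocate, deallocate, reclaim). It assumes the tests run
// on a hosted target, that an `ObjectPage` is valid when backed by zeroed,
// 4 KiB-aligned memory (which is what the allocator expects for base pages),
// and the page-sized `Frame` helper is purely illustrative; in a kernel the
// backing memory would come from the frame allocator instead.
#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[test]
    fn refill_allocate_deallocate_reclaim() {
        // Illustrative helper: a zeroed, page-aligned 4 KiB buffer to stand in
        // for a physical frame.
        #[repr(align(4096))]
        struct Frame([u8; 4096]);
        let mut frame = Frame([0; 4096]);

        let mut zone = ZoneAllocator::new();

        // A 64-byte request falls into the 33..=64 size class, so objects of
        // that size can grow to 64 bytes in place.
        let layout = Layout::from_size_align(64, 8).unwrap();
        assert_eq!(ZoneAllocator::get_max_size(64), Some(64));

        // Hand the zeroed, page-aligned frame to the zone for this size class.
        let page = unsafe { &mut *(frame.0.as_mut_ptr() as *mut ObjectPage) };
        unsafe { zone.refill(layout, page).expect("refill failed") };

        // Allocate and free one object from the 64-byte size class.
        let ptr = zone.allocate(layout).expect("allocation failed");
        zone.deallocate(ptr, layout).expect("deallocation failed");

        // Ask the zone to return up to one empty base page; the backing memory
        // here lives on the stack, so the callback has nothing to free.
        zone.try_reclaim_base_pages(1, |_page| {});
    }
}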