//! A SCAllocator that can allocate fixed size objects.

use core::mem;

use crate::*;

/// A genius(?) const min()
///
/// # What this does
/// * create an array of the two elements you want to choose between
/// * create an arbitrary boolean expression
/// * cast said expression to a usize
/// * use that value to index into the array created above
///
/// # Source
/// <https://stackoverflow.com/questions/53619695/calculating-maximum-value-of-a-set-of-constant-expressions-at-compile-time>
#[cfg(feature = "unstable")]
const fn cmin(a: usize, b: usize) -> usize {
    [a, b][(a > b) as usize]
}
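
// A minimal compile-time sanity sketch for `cmin` (hypothetical values): the
// index `(a > b) as usize` picks `b` exactly when `a > b`, so this is a min.
#[cfg(all(test, feature = "unstable"))]
const _: () = {
    assert!(cmin(3, 5) == 3);
    assert!(cmin(5, 3) == 3);
    assert!(cmin(4, 4) == 4);
};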

/// The boring variant of min (not const).
#[cfg(not(feature = "unstable"))]
fn cmin(a: usize, b: usize) -> usize {
    core::cmp::min(a, b)
}

/// A slab allocator allocates elements of a fixed size.
///
/// It maintains three internal lists of objects that implement `AllocablePage`
/// from which it can allocate memory.
///
/// * `empty_slabs`: A list of pages that the SCAllocator maintains which have
///   0 allocations in them; these can be given back to a requestor in case
///   of reclamation.
/// * `slabs`: A list of pages that are partially allocated and still have room for more.
/// * `full_slabs`: A list of pages that are completely allocated.
///
/// On allocation we allocate memory from `slabs`; if that list is empty
/// we try to use a page from `empty_slabs` before we return with an out-of-memory
/// error. If a page becomes full after the allocation we move it from `slabs` to
/// `full_slabs`.
///
/// Similarly, on deallocation we might move a page from `full_slabs` to `slabs`
/// or from `slabs` to `empty_slabs` after we deallocated an object.
///
/// If an allocation returns `OutOfMemory`, a client using SCAllocator can refill
/// it using the `refill` function.
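///
/// # Example
///
/// A minimal sketch of the allocate/refill cycle described above (`ObjectPage`
/// stands in for any `AllocablePage` implementor, and `fresh_page` is a
/// hypothetical size-aligned, empty page obtained from the page supplier):
///
/// ```ignore
/// let mut sc: SCAllocator<ObjectPage> = SCAllocator::new(16);
/// let layout = Layout::from_size_align(16, 16).unwrap();
///
/// let ptr = match sc.allocate(layout) {
///     Ok(ptr) => ptr,
///     Err(AllocationError::OutOfMemory) => {
///         // Grow the allocator with a fresh page, then retry.
///         unsafe { sc.refill(fresh_page) };
///         sc.allocate(layout).expect("just refilled")
///     }
///     Err(e) => return Err(e),
/// };
/// ```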
pub struct SCAllocator<'a, P: AllocablePage> {
    /// Maximum possible allocation size for this `SCAllocator`.
    pub(crate) size: usize,
    /// Keeps track of successful allocations.
    pub(crate) allocation_count: usize,
    /// Maximum number of objects per page.
    pub(crate) obj_per_page: usize,
    /// List of empty ObjectPages (nothing allocated in these).
    pub(crate) empty_slabs: PageList<'a, P>,
    /// List of partially used ObjectPages (some objects allocated but pages are not full).
    pub(crate) slabs: PageList<'a, P>,
    /// List of full ObjectPages (everything in these is allocated; no need to search them).
    pub(crate) full_slabs: PageList<'a, P>,
    /// Count of free objects across all pages.
    pub(crate) free_obj_count: usize,
    /// Maximum number of free objects this `SCAllocator` may retain.
    pub(crate) free_limit: usize,
}

/// Creates an instance of an SCAllocator; we do this in a macro because we
/// re-use the code in const and non-const functions.
macro_rules! new_sc_allocator {
    ($size:expr) => {{
        let obj_per_page = cmin((P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD) / $size, 8 * 64);
        SCAllocator {
            size: $size,
            allocation_count: 0,
            obj_per_page,
            empty_slabs: PageList::new(),
            slabs: PageList::new(),
            full_slabs: PageList::new(),
            // TODO: optimize the computation of free_limit: https://bbs.dragonos.org.cn/t/topic/358
            free_limit: 2 * obj_per_page,
            free_obj_count: 0,
        }
    }};
}
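
// Worked example (hypothetical numbers): with P::SIZE = 4096, a metadata
// overhead of 80 bytes, and a 16-byte size class, the macro computes
// obj_per_page = cmin((4096 - 80) / 16, 8 * 64) = cmin(251, 512) = 251,
// and therefore free_limit = 2 * 251 = 502.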

impl<'a, P: AllocablePage> SCAllocator<'a, P> {
    const REBALANCE_COUNT: usize = 64;

    /// Create a new SCAllocator.
    #[cfg(feature = "unstable")]
    pub const fn new(size: usize) -> SCAllocator<'a, P> {
        new_sc_allocator!(size)
    }

    /// Create a new SCAllocator.
    #[cfg(not(feature = "unstable"))]
    pub fn new(size: usize) -> SCAllocator<'a, P> {
        new_sc_allocator!(size)
    }

    /// Returns the maximum supported object size of this allocator.
    pub fn size(&self) -> usize {
        self.size
    }

    /// Add a new ObjectPage.
    fn insert_partial_slab(&mut self, new_head: &'a mut P) {
        self.slabs.insert_front(new_head);
    }

    /// Add page to empty list.
    fn insert_empty(&mut self, new_head: &'a mut P) {
        assert_eq!(
            new_head as *const P as usize % P::SIZE,
            0,
            "Inserted page is not aligned to page-size."
        );
        self.empty_slabs.insert_front(new_head);
    }

    /// Since `dealloc` can not reassign pages without requiring a lock
    /// we check slabs and full slabs periodically as part of `alloc`
    /// and move them to the empty or partially allocated slab lists.
    pub(crate) fn check_page_assignments(&mut self) {
        for slab_page in self.full_slabs.iter_mut() {
            if !slab_page.is_full() {
                // We need to move it from self.full_slabs -> self.slabs
                trace!("move {:p} full -> partial", slab_page);
                self.move_full_to_partial(slab_page);
            }
        }

        for slab_page in self.slabs.iter_mut() {
            if slab_page.is_empty(self.obj_per_page) {
                // We need to move it from self.slabs -> self.empty_slabs
                trace!("move {:p} partial -> empty", slab_page);
                self.move_to_empty(slab_page);
            }
        }
    }

    /// Move a page from `slabs` to `empty_slabs`.
    fn move_to_empty(&mut self, page: &'a mut P) {
        let page_ptr = page as *const P;

        debug_assert!(self.slabs.contains(page_ptr));
        debug_assert!(
            !self.empty_slabs.contains(page_ptr),
            "Page {:p} already in empty_slabs",
            page_ptr
        );

        self.slabs.remove_from_list(page);
        self.empty_slabs.insert_front(page);

        debug_assert!(!self.slabs.contains(page_ptr));
        debug_assert!(self.empty_slabs.contains(page_ptr));
    }

    /// Move a page from `slabs` to `full_slabs`.
    fn move_partial_to_full(&mut self, page: &'a mut P) {
        let page_ptr = page as *const P;

        debug_assert!(self.slabs.contains(page_ptr));
        debug_assert!(!self.full_slabs.contains(page_ptr));

        self.slabs.remove_from_list(page);
        self.full_slabs.insert_front(page);

        debug_assert!(!self.slabs.contains(page_ptr));
        debug_assert!(self.full_slabs.contains(page_ptr));
    }

    /// Move a page from `full_slabs` to `slabs`.
    fn move_full_to_partial(&mut self, page: &'a mut P) {
        let page_ptr = page as *const P;

        debug_assert!(!self.slabs.contains(page_ptr));
        debug_assert!(self.full_slabs.contains(page_ptr));

        self.full_slabs.remove_from_list(page);
        self.slabs.insert_front(page);

        debug_assert!(self.slabs.contains(page_ptr));
        debug_assert!(!self.full_slabs.contains(page_ptr));
    }

    /// Tries to allocate a block of memory with respect to the `layout`.
    /// Searches within already allocated slab pages; if no suitable spot is found
    /// it will try to use a page from the empty page list.
    ///
    /// # Arguments
    /// * `sc_layout`: This is not the original layout but adjusted for the
    ///   SCAllocator size (>= original).
    fn try_allocate_from_pagelist(&mut self, sc_layout: Layout) -> *mut u8 {
        // TODO: Do we really need to check multiple slab pages (due to alignment)?
        // If not we can get away with a singly-linked list and have 8 more bytes
        // for the bitfield in an ObjectPage.

        for slab_page in self.slabs.iter_mut() {
            let ptr = slab_page.allocate(sc_layout);
            if !ptr.is_null() {
                if slab_page.is_full() {
                    trace!("move {:p} partial -> full", slab_page);
                    self.move_partial_to_full(slab_page);
                }
                self.allocation_count += 1;
                return ptr;
            }
        }

        // Periodically rebalance page-lists (since dealloc can't do it for us)
        if self.allocation_count > SCAllocator::<P>::REBALANCE_COUNT {
            self.check_page_assignments();
            self.allocation_count = 0;
        }

        ptr::null_mut()
    }

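    /// Reclaims up to `to_reclaim` pages from the `empty_slabs` list, handing
    /// each one to the `dealloc` callback; ownership of the page passes to the
    /// callback. Returns the number of pages actually reclaimed.
    ///
    /// A usage sketch (`release_page` is a hypothetical routine that returns
    /// memory to the underlying frame allocator):
    ///
    /// ```ignore
    /// let freed = sc.try_reclaim_pages(4, &mut |p: *mut ObjectPage| unsafe {
    ///     release_page(p as *mut u8, ObjectPage::SIZE);
    /// });
    /// ```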
    pub fn try_reclaim_pages<F>(&mut self, to_reclaim: usize, dealloc: &mut F) -> usize
    where
        F: FnMut(*mut P),
    {
        self.check_page_assignments();
        let mut reclaimed = 0;
        while reclaimed < to_reclaim {
            if let Some(page) = self.empty_slabs.pop() {
                dealloc(page as *mut P);
                reclaimed += 1;
            } else {
                break;
            }
        }

        reclaimed
    }

    /// Refill the SCAllocator.
    ///
    /// # Safety
    /// The caller must pass a page that is aligned to `P::SIZE`, contains no
    /// live allocations, and is not already tracked by this or any other
    /// allocator.
    pub unsafe fn refill(&mut self, page: &'a mut P) {
        page.bitfield_mut()
            .initialize(self.size, P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD);
        *page.prev() = Rawlink::none();
        *page.next() = Rawlink::none();
        trace!("adding page to SCAllocator {:p}", page);
        self.insert_empty(page);
        self.free_obj_count += self.obj_per_page;
    }

    /// Allocates a block of memory described by `layout`.
    ///
    /// Returns a pointer to a valid region of memory or an
    /// `AllocationError`.
    ///
    /// The function may also move around pages between lists
    /// (empty -> partial or partial -> full).
    pub fn allocate(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocationError> {
        trace!(
            "SCAllocator({}) is trying to allocate {:?}",
            self.size,
            layout
        );
        assert!(layout.size() <= self.size);
        assert!(self.size <= (P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD));
        let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
        assert!(new_layout.size() >= layout.size());

        let ptr = {
            // Try to allocate from partial slabs,
            // if we fail check if we have empty pages and allocate from there
            let ptr = self.try_allocate_from_pagelist(new_layout);
            if ptr.is_null() && self.empty_slabs.head.is_some() {
                // Re-try allocation in empty page
                let empty_page = self.empty_slabs.pop().expect("We checked head.is_some()");
                debug_assert!(!self.empty_slabs.contains(empty_page));

                let ptr = empty_page.allocate(layout);
                debug_assert!(!ptr.is_null(), "Allocation must have succeeded here.");

                trace!(
                    "move {:p} empty -> partial empty count {}",
                    empty_page,
                    self.empty_slabs.elements
                );
                // Move empty page to partial pages
                self.insert_partial_slab(empty_page);
                ptr
            } else {
                ptr
            }
        };

        let res = NonNull::new(ptr).ok_or(AllocationError::OutOfMemory);

        if !ptr.is_null() {
            trace!(
                "SCAllocator({}) allocated ptr=0x{:x}",
                self.size,
                ptr as usize
            );
            self.free_obj_count -= 1;
        }

        res
    }

    /// Deallocates a previously allocated `ptr` described by `layout`.
    ///
    /// May return an error in case an invalid `layout` is provided.
    /// The function may also move internal slab pages between lists
    /// (partial -> empty or full -> partial).
    ///
    /// # Safety
    /// The caller must ensure that `ptr` was allocated by this allocator and
    /// that the `layout` is valid.
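    ///
    /// # Example
    ///
    /// A minimal sketch of a `CallBack` implementor, assuming the trait exposes
    /// a `free_slab_page(&self, ptr, size)` method matching the call below
    /// (`ReturnToBuddy` is a hypothetical name):
    ///
    /// ```ignore
    /// struct ReturnToBuddy;
    /// impl CallBack for ReturnToBuddy {
    ///     fn free_slab_page(&self, page: *mut u8, size: usize) {
    ///         // e.g. hand the page frame back to the buddy allocator
    ///     }
    /// }
    /// static CALLBACK: ReturnToBuddy = ReturnToBuddy;
    /// unsafe { sc.deallocate(ptr, layout, &CALLBACK)?; }
    /// ```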
    pub unsafe fn deallocate(
        &mut self,
        ptr: NonNull<u8>,
        layout: Layout,
        slab_callback: &'static dyn CallBack,
    ) -> Result<(), AllocationError> {
        assert!(layout.size() <= self.size);
        assert!(self.size <= (P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD));
        trace!(
            "SCAllocator({}) is trying to deallocate ptr = {:p} layout={:?} P.size= {}",
            self.size,
            ptr,
            layout,
            P::SIZE
        );

        let page = (ptr.as_ptr() as usize) & !(P::SIZE - 1);

        // Figure out which page we are on and construct a reference to it
        // TODO: The linked list will have another &mut reference
        let slab_page = unsafe { mem::transmute::<VAddr, &'a mut P>(page) };
        let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };

        let ret = slab_page.deallocate(ptr, new_layout);
        debug_assert!(ret.is_ok(), "Slab page deallocate won't fail at the moment");
        self.free_obj_count += 1;
        let is_empty_after_dealloc = slab_page.is_empty(self.obj_per_page);

        // If the slab_page is now empty and the number of free objects exceeds
        // free_limit, return the slab_page to the buddy allocator.
        if self.free_obj_count >= self.free_limit && is_empty_after_dealloc {
            self.slabs.remove_from_list(slab_page);
            // Return the slab_page to the buddy allocator.
            slab_callback.free_slab_page(slab_page as *const P as *mut u8, P::SIZE);
        }
        self.check_page_assignments();

        ret
    }
}