// xref: /DragonOS/kernel/crates/rust-slabmalloc/src/sc.rs (revision c635d8a9cfe25bc11779f323ef0c7d7a0f597d4a)
//! An SCAllocator that can allocate fixed-size objects.

use core::mem;

use crate::*;

/// A genius(?) const min()
///
/// # What this does
/// * create an array of the two elements you want to choose between
/// * create an arbitrary boolean expression
/// * cast said expression to a usize
/// * use that value to index into the array created above
///
/// # Source
/// https://stackoverflow.com/questions/53619695/calculating-maximum-value-of-a-set-of-constant-expressions-at-compile-time
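///
/// # Example
///
/// A small illustration of the trick (the values are made up, not taken from
/// this crate): `(a > b) as usize` evaluates to `0` or `1`, which indexes the
/// smaller element out of the array.
///
/// ```ignore
/// const SMALLER: usize = cmin(4096 / 16, 8 * 64); // == 256
/// ```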
#[cfg(feature = "unstable")]
const fn cmin(a: usize, b: usize) -> usize {
    [a, b][(a > b) as usize]
}

/// The boring variant of min (not const).
#[cfg(not(feature = "unstable"))]
fn cmin(a: usize, b: usize) -> usize {
    core::cmp::min(a, b)
}

/// A slab allocator allocates elements of a fixed size.
///
/// It maintains three internal lists of objects that implement `AllocablePage`
/// from which it can allocate memory.
///
///  * `empty_slabs`: A list of pages that the SCAllocator maintains which
///    have 0 allocations in them; these can be given back to a requestor in
///    case of reclamation.
///  * `slabs`: A list of pages that are partially allocated and still have room for more.
///  * `full_slabs`: A list of pages that are completely allocated.
///
/// On allocation we allocate memory from `slabs`; if that list is empty
/// we try to use a page from `empty_slabs` before we return with an out-of-memory
/// error. If a page becomes full after the allocation we move it from `slabs` to
/// `full_slabs`.
///
/// Similarly, on deallocation we might move a page from `full_slabs` to `slabs`
/// or from `slabs` to `empty_slabs` after we deallocated an object.
///
/// If an allocation returns `OutOfMemory`, a client using SCAllocator can refill
/// it using the `refill` function.
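///
/// # Example
///
/// A minimal sketch of the intended lifecycle, assuming an `ObjectPage`-backed
/// allocator and a `page` obtained from some page source (both hypothetical
/// here; this is not a compiled doctest):
///
/// ```ignore
/// let mut sc: SCAllocator<ObjectPage> = SCAllocator::new(16);
/// unsafe { sc.refill(page) };      // page starts out on `empty_slabs`
/// let layout = Layout::from_size_align(16, 8).unwrap();
/// let obj = sc.allocate(layout)?;  // moves the page empty -> partial
/// sc.deallocate(obj, layout)?;     // frees the object; list moves are deferred
/// ```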
pub struct SCAllocator<'a, P: AllocablePage> {
    /// Maximum possible allocation size for this `SCAllocator`.
    pub(crate) size: usize,
    /// Keeps track of succeeded allocations.
    pub(crate) allocation_count: usize,
    /// Max objects per page.
    pub(crate) obj_per_page: usize,
    /// List of empty ObjectPages (nothing allocated in these).
    pub(crate) empty_slabs: PageList<'a, P>,
    /// List of partially used ObjectPages (some objects allocated, but the pages are not full).
    pub(crate) slabs: PageList<'a, P>,
    /// List of full ObjectPages (everything in these is allocated, so we don't need to search them).
    pub(crate) full_slabs: PageList<'a, P>,
}

/// Creates an instance of an SCAllocator. We do this in a macro because we
/// re-use the code in both the const and non-const constructors.
macro_rules! new_sc_allocator {
    ($size:expr) => {
        SCAllocator {
            size: $size,
            allocation_count: 0,
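            // Cap objects per page at 8 * 64 = 512; presumably this matches
            // the capacity of the page's allocation bitfield (assumed here to
            // be eight u64 words).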
            obj_per_page: cmin((P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD) / $size, 8 * 64),
            empty_slabs: PageList::new(),
            slabs: PageList::new(),
            full_slabs: PageList::new(),
        }
    };
}

impl<'a, P: AllocablePage> SCAllocator<'a, P> {
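    /// After this many successful allocations, `alloc` re-runs
    /// `check_page_assignments` to rebalance the page lists.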
    const REBALANCE_COUNT: usize = 64;

    /// Create a new SCAllocator.
    #[cfg(feature = "unstable")]
    pub const fn new(size: usize) -> SCAllocator<'a, P> {
        new_sc_allocator!(size)
    }

    /// Create a new SCAllocator (non-const fallback).
    #[cfg(not(feature = "unstable"))]
    pub fn new(size: usize) -> SCAllocator<'a, P> {
        new_sc_allocator!(size)
    }

    /// Returns the maximum supported object size of this allocator.
    pub fn size(&self) -> usize {
        self.size
    }

    /// Add a new ObjectPage.
    fn insert_partial_slab(&mut self, new_head: &'a mut P) {
        self.slabs.insert_front(new_head);
    }

    /// Add page to the empty list.
    fn insert_empty(&mut self, new_head: &'a mut P) {
        assert_eq!(
            new_head as *const P as usize % P::SIZE,
            0,
            "Inserted page is not aligned to page-size."
        );
        self.empty_slabs.insert_front(new_head);
    }

    /// Since `dealloc` cannot reassign pages without requiring a lock,
    /// we check the partial and full slab lists periodically as part of `alloc`
    /// and move pages to the empty or partially allocated lists as needed.
    pub(crate) fn check_page_assignments(&mut self) {
        for slab_page in self.full_slabs.iter_mut() {
            if !slab_page.is_full() {
                // We need to move it from self.full_slabs -> self.slabs
                trace!("move {:p} full -> partial", slab_page);
                self.move_full_to_partial(slab_page);
            }
        }

        for slab_page in self.slabs.iter_mut() {
            if slab_page.is_empty(self.obj_per_page) {
                // We need to move it from self.slabs -> self.empty_slabs
                trace!("move {:p} partial -> empty", slab_page);
                self.move_to_empty(slab_page);
            }
        }
    }

    /// Move a page from `slabs` to `empty_slabs`.
    fn move_to_empty(&mut self, page: &'a mut P) {
        let page_ptr = page as *const P;

        debug_assert!(self.slabs.contains(page_ptr));
        debug_assert!(
            !self.empty_slabs.contains(page_ptr),
            "Page {:p} already in empty_slabs",
            page_ptr
        );

        self.slabs.remove_from_list(page);
        self.empty_slabs.insert_front(page);

        debug_assert!(!self.slabs.contains(page_ptr));
        debug_assert!(self.empty_slabs.contains(page_ptr));
    }

    /// Move a page from `slabs` to `full_slabs`.
    fn move_partial_to_full(&mut self, page: &'a mut P) {
        let page_ptr = page as *const P;

        debug_assert!(self.slabs.contains(page_ptr));
        debug_assert!(!self.full_slabs.contains(page_ptr));

        self.slabs.remove_from_list(page);
        self.full_slabs.insert_front(page);

        debug_assert!(!self.slabs.contains(page_ptr));
        debug_assert!(self.full_slabs.contains(page_ptr));
    }

    /// Move a page from `full_slabs` to `slabs`.
    fn move_full_to_partial(&mut self, page: &'a mut P) {
        let page_ptr = page as *const P;

        debug_assert!(!self.slabs.contains(page_ptr));
        debug_assert!(self.full_slabs.contains(page_ptr));

        self.full_slabs.remove_from_list(page);
        self.slabs.insert_front(page);

        debug_assert!(self.slabs.contains(page_ptr));
        debug_assert!(!self.full_slabs.contains(page_ptr));
    }

    /// Tries to allocate a block of memory with respect to the `layout`.
    ///
    /// Only searches the already allocated (partial) slab pages; returns a
    /// null pointer if no suitable spot is found, in which case the caller
    /// can fall back to a page from the empty page list.
    ///
    /// # Arguments
    ///  * `sc_layout`: This is not the original layout but one adjusted to the
    ///     SCAllocator size (>= original).
    fn try_allocate_from_pagelist(&mut self, sc_layout: Layout) -> *mut u8 {
        // TODO: Do we really need to check multiple slab pages (due to alignment)?
        // If not, we can get away with a singly-linked list and have 8 more bytes
        // for the bitfield in an ObjectPage.

        for slab_page in self.slabs.iter_mut() {
            let ptr = slab_page.allocate(sc_layout);
            if !ptr.is_null() {
                if slab_page.is_full() {
                    trace!("move {:p} partial -> full", slab_page);
                    self.move_partial_to_full(slab_page);
                }
                self.allocation_count += 1;
                return ptr;
            }
        }

        // Periodically rebalance the page lists (since dealloc can't do it for us).
        if self.allocation_count > SCAllocator::<P>::REBALANCE_COUNT {
            self.check_page_assignments();
            self.allocation_count = 0;
        }

        ptr::null_mut()
    }
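
    /// Tries to reclaim up to `to_reclaim` empty pages.
    ///
    /// Rebalances the page lists first, then pops pages off `empty_slabs`,
    /// handing each one to the `dealloc` callback. Returns the number of
    /// pages actually reclaimed.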
    pub fn try_reclaim_pages<F>(&mut self, to_reclaim: usize, dealloc: &mut F) -> usize
    where
        F: FnMut(*mut P),
    {
        self.check_page_assignments();
        let mut reclaimed = 0;
        while reclaimed < to_reclaim {
            if let Some(page) = self.empty_slabs.pop() {
                dealloc(page as *mut P);
                reclaimed += 1;
            } else {
                break;
            }
        }

        reclaimed
    }

    /// Refill the SCAllocator.
    ///
    /// # Safety
    /// The `page` must be empty (any previous contents are wiped, since its
    /// bitfield is re-initialized here), must be aligned to `P::SIZE`, and
    /// must stay valid for the lifetime `'a`.
    pub unsafe fn refill(&mut self, page: &'a mut P) {
        page.bitfield_mut()
            .initialize(self.size, P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD);
        *page.prev() = Rawlink::none();
        *page.next() = Rawlink::none();
        trace!("adding page to SCAllocator {:p}", page);
        self.insert_empty(page);
    }

    /// Allocates a block of memory described by `layout`.
    ///
    /// Returns a pointer to a valid region of memory or an
    /// `AllocationError`.
    ///
    /// The function may also move pages between lists
    /// (empty -> partial or partial -> full).
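    ///
    /// # Example
    ///
    /// A sketch of the refill-on-failure protocol described in the type-level
    /// docs; `sc`, `new_page`, and `layout` are hypothetical and this is not
    /// a compiled doctest:
    ///
    /// ```ignore
    /// let ptr = match sc.allocate(layout) {
    ///     Ok(ptr) => ptr,
    ///     Err(AllocationError::OutOfMemory) => {
    ///         // Hand the allocator a fresh, empty page, then retry.
    ///         unsafe { sc.refill(new_page) };
    ///         sc.allocate(layout)?
    ///     }
    ///     Err(e) => return Err(e),
    /// };
    /// ```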
    pub fn allocate(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocationError> {
        trace!(
            "SCAllocator({}) is trying to allocate {:?}",
            self.size,
            layout
        );
        assert!(layout.size() <= self.size);
        assert!(self.size <= (P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD));
        let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
        assert!(new_layout.size() >= layout.size());

        let ptr = {
            // Try to allocate from partial slabs;
            // if we fail, check if we have empty pages and allocate from there.
            let ptr = self.try_allocate_from_pagelist(new_layout);
            if ptr.is_null() && self.empty_slabs.head.is_some() {
                // Re-try allocation in empty page
                let empty_page = self.empty_slabs.pop().expect("We checked head.is_some()");
                debug_assert!(!self.empty_slabs.contains(empty_page));

                let ptr = empty_page.allocate(layout);
                debug_assert!(!ptr.is_null(), "Allocation must have succeeded here.");

                trace!(
                    "move {:p} empty -> partial empty count {}",
                    empty_page,
                    self.empty_slabs.elements
                );
                // Move empty page to partial pages
                self.insert_partial_slab(empty_page);
                ptr
            } else {
                ptr
            }
        };

        let res = NonNull::new(ptr).ok_or(AllocationError::OutOfMemory);

        if !ptr.is_null() {
            trace!(
                "SCAllocator({}) allocated ptr=0x{:x}",
                self.size,
                ptr as usize
            );
        }

        res
    }

    /// Deallocates a previously allocated `ptr` described by `layout`.
    ///
    /// May return an error in case an invalid `layout` is provided.
    /// Note that pages are not moved between lists here; because `dealloc`
    /// avoids taking a lock, reassignment (partial -> empty, full -> partial)
    /// is deferred to `check_page_assignments`, which runs during allocation.
    pub fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) -> Result<(), AllocationError> {
        assert!(layout.size() <= self.size);
        assert!(self.size <= (P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD));
        trace!(
            "SCAllocator({}) is trying to deallocate ptr = {:p} layout={:?} P.size= {}",
            self.size,
            ptr,
            layout,
            P::SIZE
        );
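
        // Mask off the low bits of the object pointer to recover the base of
        // its page; this assumes `P::SIZE` is a power of two and that pages
        // are `P::SIZE`-aligned (see the assert in `insert_empty`).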
        let page = (ptr.as_ptr() as usize) & !(P::SIZE - 1);

        // Figure out which page we are on and construct a reference to it
        // TODO: The linked list will have another &mut reference
        let slab_page = unsafe { mem::transmute::<VAddr, &'a mut P>(page) };
        let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };

        let ret = slab_page.deallocate(ptr, new_layout);
        debug_assert!(ret.is_ok(), "Slab page deallocate won't fail at the moment");
        ret
    }
}