// xref: /DragonOS/kernel/crates/rust-slabmalloc/src/pages.rs (revision bd70d2d1f490aabd570a5301b858bd5eb04149fa)
use alloc::boxed::Box;

use crate::*;
use core::{
    mem,
    sync::atomic::{AtomicU64, Ordering},
};

/// A trait defining bitfield operations we need for tracking allocated objects within a page.
pub(crate) trait Bitfield {
    fn initialize(&mut self, for_size: usize, capacity: usize);
    fn first_fit(
        &self,
        base_addr: usize,
        layout: Layout,
        page_size: usize,
    ) -> Option<(usize, usize)>;
    fn is_allocated(&self, idx: usize) -> bool;
    fn set_bit(&self, idx: usize);
    fn clear_bit(&self, idx: usize);
    fn is_full(&self) -> bool;
    fn all_free(&self, relevant_bits: usize) -> bool;
}

/// Implementation of bit operations on u64 slices.
///
/// We allow deallocations (i.e. clearing a bit in the field)
/// from any thread. That's why the bitfield is a bunch of AtomicU64.
impl Bitfield for [AtomicU64] {
    /// Initialize the bitfield
    ///
    /// # Arguments
    ///  * `for_size`: Object size we want to allocate
    ///  * `capacity`: Maximum size of the buffer the bitmap maintains.
    ///
    /// Ensures that we only have free slots for what we can allocate
    /// within the page (by marking everything else allocated).
    fn initialize(&mut self, for_size: usize, capacity: usize) {
        // Set everything to allocated
        for bitmap in self.iter_mut() {
            *bitmap = AtomicU64::new(u64::MAX);
        }

        // Mark actual slots as free
        let relevant_bits = core::cmp::min(capacity / for_size, self.len() * 64);
        for idx in 0..relevant_bits {
            self.clear_bit(idx);
        }
    }
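    // Worked example (illustrative note, not part of the original source): for a
    // 4 KiB ObjectPage with (assumed) 80 bytes of metadata, a 64-byte size class
    // calls this with `capacity = 4096 - 80 = 4016`, so
    // `relevant_bits = min(4016 / 64, 8 * 64) = 62`: bits 0..62 are cleared (free)
    // and the remaining bits of the 512-bit field stay set (allocated).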

    /// Tries to find a free block of memory that satisfies `alignment` requirement.
    ///
    /// # Notes
    /// * We pass size here to be able to calculate the resulting address within `data`.
    #[inline(always)]
    fn first_fit(
        &self,
        base_addr: usize,
        layout: Layout,
        page_size: usize,
    ) -> Option<(usize, usize)> {
        let start_offset = get_offset_for_align(layout);
        let data_start = base_addr + start_offset;

        for (base_idx, b) in self.iter().enumerate() {
            let bitval = b.load(Ordering::Relaxed);
            if bitval == u64::MAX {
                continue;
            } else {
                let negated = !bitval;
                let first_free = negated.trailing_zeros() as usize;
                let idx: usize = base_idx * 64 + first_free;
                let offset = idx * layout.size();

                // TODO(bad): psize needs to be passed as arg
                let offset_inside_data_area =
                    offset <= (page_size - OBJECT_PAGE_METADATA_OVERHEAD - layout.size());
                if !offset_inside_data_area {
                    return None;
                }

                let addr: usize = data_start + offset;
                let alignment_ok = addr % layout.align() == 0;
                let block_is_free = bitval & (1 << first_free) == 0;
                if alignment_ok && block_is_free {
                    return Some((idx, addr));
                }
            }
        }
        None
    }
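    // Worked example (illustrative note, not part of the original source): with a
    // 64-byte layout, suppose word 0 of the bitfield holds 0b0111 (slots 0..=2
    // taken). `!bitval` then has its lowest set bit at position 3, so `idx == 3`,
    // `offset == 3 * 64 == 192`, and the candidate address is `data_start + 192`,
    // where `data_start = base_addr + 128` (see `get_offset_for_align`). The slot
    // is returned only if that address also satisfies `layout.align()` and the
    // object still fits inside the page's data area.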

    /// Check if the bit `idx` is set.
    #[inline(always)]
    fn is_allocated(&self, idx: usize) -> bool {
        let base_idx = idx / 64;
        let bit_idx = idx % 64;
        (self[base_idx].load(Ordering::Relaxed) & (1 << bit_idx)) > 0
    }

    /// Sets the bit number `idx` in the bit-field.
    #[inline(always)]
    fn set_bit(&self, idx: usize) {
        let base_idx = idx / 64;
        let bit_idx = idx % 64;
        self[base_idx].fetch_or(1 << bit_idx, Ordering::Relaxed);
    }

    /// Clears bit number `idx` in the bit-field.
    #[inline(always)]
    fn clear_bit(&self, idx: usize) {
        let base_idx = idx / 64;
        let bit_idx = idx % 64;
        self[base_idx].fetch_and(!(1 << bit_idx), Ordering::Relaxed);
    }

    /// Checks if we could allocate more objects of a given `alloc_size` within the
    /// `capacity` of the memory allocator.
    ///
    /// # Note
    /// The ObjectPage will make sure to mark the top-most bits as allocated
    /// for large sizes (i.e., a size 512 SCAllocator will only really need 3 bits
    /// to track allocated objects). That's why this function can be simpler
    /// than it would need to be in practice.
    #[inline(always)]
    fn is_full(&self) -> bool {
        self.iter()
            .filter(|&x| x.load(Ordering::Relaxed) != u64::MAX)
            .count()
            == 0
    }

    /// Checks if the page has currently no allocations.
    ///
    /// This is called `all_free` rather than `is_empty` because
    /// we already have an is_empty fn as part of the slice.
    #[inline(always)]
    fn all_free(&self, relevant_bits: usize) -> bool {
        for (idx, bitmap) in self.iter().enumerate() {
            let checking_bit_range = (idx * 64, (idx + 1) * 64);
            if relevant_bits >= checking_bit_range.0 && relevant_bits < checking_bit_range.1 {
                // Last relevant bitmap; here we only have to check that a subset of the
                // bitmap is marked free, the rest will be marked full.
                let bits_that_should_be_free = relevant_bits - checking_bit_range.0;
                let free_mask = (1 << bits_that_should_be_free) - 1;
                return (free_mask & bitmap.load(Ordering::Relaxed)) == 0;
            }

            if bitmap.load(Ordering::Relaxed) == 0 {
                continue;
            } else {
                return false;
            }
        }

        true
    }
}
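
// Illustrative sketch (added for exposition, not part of the original source):
// exercises the `Bitfield` impl above on a standalone `[AtomicU64; 8]`, independent
// of any real page, assuming the crate's unit tests build for a hosted target. The
// `4096 - 80` capacity assumes a 4 KiB page with 80 bytes of metadata overhead,
// matching the `ObjectPage` layout further below.
#[cfg(test)]
mod bitfield_example {
    use core::sync::atomic::AtomicU64;

    use super::*;

    #[test]
    fn track_slots() {
        let mut bits: [AtomicU64; 8] = Default::default();
        // 64-byte objects: (4096 - 80) / 64 = 62 usable slots; the rest stay "allocated".
        bits.initialize(64, 4096 - 80);
        assert!(bits.all_free(62));
        assert!(!bits.is_full());
        assert!(bits.is_allocated(62)); // first slot beyond the usable range

        bits.set_bit(0);
        assert!(bits.is_allocated(0));
        assert!(!bits.all_free(62));

        bits.clear_bit(0);
        assert!(bits.all_free(62));
    }
}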

/// # get_offset_for_align - Get the in-page alignment offset for a layout size
///
/// Determines a suitable alignment offset inside the page based on the size of
/// the given `Layout`.
///
/// ## Arguments
///
/// - layout: Layout, the layout for which the alignment offset is computed.
///
/// ## Return value
///
/// - usize: the alignment offset on success.
fn get_offset_for_align(layout: Layout) -> usize {
    match layout.size() {
        0..=8 => 80,
        9..=16 => 80,
        17..=32 => 96,
        33..=64 => 128,
        65..=128 => 128,
        129..=256 => 256,
        257..=512 => 512,
        513..=1024 => 1024,
        1025..=2048 => 2048,
        _ => panic!(),
    }
}
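
// Illustrative check (added for exposition, not part of the original source):
// every offset returned above is a multiple of the (power-of-two) size class it
// serves, so `data_start + idx * size` stays aligned as long as the page itself
// is SIZE-aligned. The 80-byte floor is assumed to correspond to the metadata
// overhead (an 8 x u64 bitfield plus two list pointers on a 64-bit target).
#[cfg(test)]
mod offset_alignment_example {
    use core::alloc::Layout;

    use super::*;

    #[test]
    fn offsets_preserve_alignment() {
        for &(size, align) in &[(8usize, 8usize), (64, 64), (256, 256), (2048, 2048)] {
            let layout = Layout::from_size_align(size, align).unwrap();
            assert_eq!(get_offset_for_align(layout) % align, 0);
        }
    }
}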

/// This trait is used to define a page from which objects are allocated
/// in an `SCAllocator`.
///
/// The implementor of this trait needs to provide access to the page meta-data,
/// which consists of:
/// - A bitfield (to track allocations),
/// - `prev` and `next` pointers to insert the page in free lists
pub trait AllocablePage {
    /// The total size (in bytes) of the page.
    ///
    /// # Note
    /// We also assume that the address of the page will be aligned to `SIZE`.
    const SIZE: usize;

    fn bitfield(&self) -> &[AtomicU64; 8];
    fn bitfield_mut(&mut self) -> &mut [AtomicU64; 8];
    fn prev(&mut self) -> &mut Rawlink<Self>
    where
        Self: core::marker::Sized;
    fn next(&mut self) -> &mut Rawlink<Self>
    where
        Self: core::marker::Sized;

    /// Tries to find a free block within `data` that satisfies `alignment` requirement.
    fn first_fit(&self, layout: Layout) -> Option<(usize, usize)> {
        let base_addr = (self as *const Self as *const u8) as usize;
        self.bitfield().first_fit(base_addr, layout, Self::SIZE)
    }

    /// Tries to allocate an object within this page.
    ///
    /// In case the slab is full, returns a null ptr.
    fn allocate(&mut self, layout: Layout) -> *mut u8 {
        match self.first_fit(layout) {
            Some((idx, addr)) => {
                self.bitfield().set_bit(idx);
                addr as *mut u8
            }
            None => ptr::null_mut(),
        }
    }

    /// Checks if we can still allocate more objects of a given layout within the page.
    fn is_full(&self) -> bool {
        self.bitfield().is_full()
    }

    /// Checks if the page has currently no allocations.
    fn is_empty(&self, relevant_bits: usize) -> bool {
        self.bitfield().all_free(relevant_bits)
    }

    /// Deallocates a memory object within this page.
    fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) -> Result<(), AllocationError> {
        trace!(
            "AllocablePage deallocating ptr = {:p} with {:?}",
            ptr,
            layout
        );
        let align_offset = get_offset_for_align(layout);
        let page_offset = ((ptr.as_ptr() as usize) - align_offset) & (Self::SIZE - 1);
        assert!(page_offset % layout.size() == 0);
        let idx = page_offset / layout.size();
        assert!(
            self.bitfield().is_allocated(idx),
            "{:p} not marked allocated?",
            ptr
        );

        self.bitfield().clear_bit(idx);
        Ok(())
    }
}
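
// Worked example (illustrative, not part of the original source): how
// `deallocate` recovers a slot index from a raw pointer. Assumes a 4 KiB,
// SIZE-aligned page at the hypothetical address 0x1000_0000 holding 64-byte
// objects, whose data area therefore starts at offset 128 (`get_offset_for_align`).
#[cfg(test)]
mod deallocate_index_example {
    #[test]
    fn pointer_to_slot_index() {
        let page_base: usize = 0x1000_0000; // hypothetical, 4 KiB-aligned
        let align_offset = 128; // get_offset_for_align() for a 64-byte layout
        let ptr = page_base + align_offset + 3 * 64; // start of the fourth object

        // Mirrors the arithmetic in `AllocablePage::deallocate`.
        let page_offset = (ptr - align_offset) & (4096 - 1);
        assert_eq!(page_offset % 64, 0);
        assert_eq!(page_offset / 64, 3); // bit 3 is the one cleared in the bitfield
    }
}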

/// Holds allocated data within a 4 KiB page.
///
/// Has a data-section where objects are allocated from
/// and a small amount of meta-data in the form of a bitmap
/// to track allocations at the beginning of the page.
///
/// # Notes
/// An object of this type will be exactly 4 KiB.
/// It is marked `repr(C)` because we rely on a well-defined order of struct
/// members (e.g., dealloc does a cast to find the bitfield).
#[repr(C)]
pub struct ObjectPage<'a> {
    #[allow(dead_code)]
    /// A bit-field to track free/allocated memory within `data`.
    pub(crate) bitfield: [AtomicU64; 8],

    /// Next element in list (used by `PageList`).
    next: Rawlink<ObjectPage<'a>>,
    /// Previous element in list (used by `PageList`).
    prev: Rawlink<ObjectPage<'a>>,

    /// Holds memory objects.
    data: [u8; OBJECT_PAGE_SIZE - OBJECT_PAGE_METADATA_OVERHEAD],
}

impl<'a> ObjectPage<'a> {
    pub fn new() -> Box<ObjectPage<'a>> {
        unsafe { Box::new_uninit().assume_init() }
    }
}

// These need some more work to be really safe...
unsafe impl<'a> Send for ObjectPage<'a> {}
unsafe impl<'a> Sync for ObjectPage<'a> {}

impl<'a> AllocablePage for ObjectPage<'a> {
    const SIZE: usize = OBJECT_PAGE_SIZE;

    fn bitfield(&self) -> &[AtomicU64; 8] {
        &self.bitfield
    }
    fn bitfield_mut(&mut self) -> &mut [AtomicU64; 8] {
        &mut self.bitfield
    }

    fn prev(&mut self) -> &mut Rawlink<Self> {
        &mut self.prev
    }

    fn next(&mut self) -> &mut Rawlink<Self> {
        &mut self.next
    }
}
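
// Sanity check (illustrative, not part of the original source): the struct is
// documented above as being exactly one 4 KiB page; `size_of` makes that explicit.
#[cfg(test)]
mod object_page_size_example {
    use super::*;

    #[test]
    fn object_page_is_exactly_4_kib() {
        assert_eq!(core::mem::size_of::<ObjectPage<'static>>(), 4096);
    }
}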

impl<'a> Default for ObjectPage<'a> {
    fn default() -> ObjectPage<'a> {
        unsafe { mem::MaybeUninit::zeroed().assume_init() }
    }
}

impl<'a> fmt::Debug for ObjectPage<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "ObjectPage")
    }
}

/// A list of pages.
pub(crate) struct PageList<'a, T: AllocablePage> {
    /// Points to the head of the list.
    pub(crate) head: Option<&'a mut T>,
    /// Number of elements in the list.
    pub(crate) elements: usize,
}

impl<'a, T: AllocablePage> PageList<'a, T> {
    #[cfg(feature = "unstable")]
    pub(crate) const fn new() -> PageList<'a, T> {
        PageList {
            head: None,
            elements: 0,
        }
    }

    #[cfg(not(feature = "unstable"))]
    pub(crate) fn new() -> PageList<'a, T> {
        PageList {
            head: None,
            elements: 0,
        }
    }

    pub(crate) fn iter_mut<'b: 'a>(&mut self) -> ObjectPageIterMut<'b, T> {
        let m = match self.head {
            None => Rawlink::none(),
            Some(ref mut m) => Rawlink::some(*m),
        };

        ObjectPageIterMut {
            head: m,
            phantom: core::marker::PhantomData,
        }
    }

    /// Inserts `new_head` at the front of the list.
    pub(crate) fn insert_front<'b>(&'b mut self, mut new_head: &'a mut T) {
        match self.head {
            None => {
                *new_head.prev() = Rawlink::none();
                self.head = Some(new_head);
            }
            Some(ref mut head) => {
                *new_head.prev() = Rawlink::none();
                *head.prev() = Rawlink::some(new_head);
                mem::swap(head, &mut new_head);
                *head.next() = Rawlink::some(new_head);
            }
        }

        self.elements += 1;
    }
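    // Note (illustrative, not part of the original source): in the `Some` arm
    // above, `mem::swap` exchanges the list's head slot with `new_head`, so after
    // the swap `head` refers to the newly inserted page and the local `new_head`
    // refers to the old head; the final line then wires the new page's `next`
    // pointer to the old head.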

    /// Removes `slab_page` from the list.
    pub(crate) fn remove_from_list(&mut self, slab_page: &mut T) {
        unsafe {
            match slab_page.prev().resolve_mut() {
                None => {
                    self.head = slab_page.next().resolve_mut();
                }
                Some(prev) => {
                    *prev.next() = match slab_page.next().resolve_mut() {
                        None => Rawlink::none(),
                        Some(next) => Rawlink::some(next),
                    };
                }
            }

            match slab_page.next().resolve_mut() {
                None => (),
                Some(next) => {
                    *next.prev() = match slab_page.prev().resolve_mut() {
                        None => Rawlink::none(),
                        Some(prev) => Rawlink::some(prev),
                    };
                }
            }
        }

        *slab_page.prev() = Rawlink::none();
        *slab_page.next() = Rawlink::none();
        self.elements -= 1;
    }

    /// Removes the head of the list and returns it, if any.
    #[allow(clippy::manual_inspect)]
    pub(crate) fn pop<'b>(&'b mut self) -> Option<&'a mut T> {
        match self.head {
            None => None,
            Some(ref mut head) => {
                let head_next = head.next();
                let mut new_head = unsafe { head_next.resolve_mut() };
                mem::swap(&mut self.head, &mut new_head);
                let _ = self.head.as_mut().map(|n| {
                    *n.prev() = Rawlink::none();
                });

                self.elements -= 1;
                new_head.map(|node| {
                    *node.prev() = Rawlink::none();
                    *node.next() = Rawlink::none();
                    node
                })
            }
        }
    }

    /// Does the list contain `s`?
    pub(crate) fn contains(&mut self, s: *const T) -> bool {
        for slab_page in self.iter_mut() {
            if core::ptr::eq(slab_page, s) {
                return true;
            }
        }

        false
    }
}
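
// Illustrative sketch (added for exposition, not part of the original source):
// a `PageList` stores its links inside the pages themselves, so inserting,
// querying and popping can be shown directly on stack-allocated `ObjectPage`s.
#[cfg(test)]
mod page_list_example {
    use super::*;

    #[test]
    fn insert_contains_pop() {
        let mut first = ObjectPage::default();
        let mut second = ObjectPage::default();
        let first_ptr: *const ObjectPage = &first;

        let mut list: PageList<ObjectPage> = PageList::new();
        list.insert_front(&mut first);
        list.insert_front(&mut second);
        assert_eq!(list.elements, 2);
        assert!(list.contains(first_ptr));

        // `second` was inserted last, so it is popped first.
        let popped = list.pop().unwrap();
        assert!(!core::ptr::eq(popped, first_ptr));
        assert_eq!(list.elements, 1);
    }
}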

/// Iterate over all the pages inside a slab allocator
pub(crate) struct ObjectPageIterMut<'a, P: AllocablePage> {
    head: Rawlink<P>,
    phantom: core::marker::PhantomData<&'a P>,
}

impl<'a, P: AllocablePage + 'a> Iterator for ObjectPageIterMut<'a, P> {
    type Item = &'a mut P;

    #[inline]
    #[allow(clippy::manual_inspect)]
    fn next(&mut self) -> Option<&'a mut P> {
        unsafe {
            self.head.resolve_mut().map(|next| {
                self.head = match next.next().resolve_mut() {
                    None => Rawlink::none(),
                    Some(ref mut sp) => Rawlink::some(*sp),
                };
                next
            })
        }
    }
}

/// Rawlink is a type like Option<T> but for holding a raw pointer.
///
/// We use it to link AllocablePages together. You probably won't need
/// to use this type if you're not implementing AllocablePage
/// for a custom page-size.
pub struct Rawlink<T> {
    p: *mut T,
}

impl<T> Default for Rawlink<T> {
    fn default() -> Self {
        Rawlink { p: ptr::null_mut() }
    }
}

impl<T> Rawlink<T> {
    /// Like Option::None for Rawlink
    pub(crate) fn none() -> Rawlink<T> {
        Rawlink { p: ptr::null_mut() }
    }

    /// Like Option::Some for Rawlink
    pub(crate) fn some(n: &mut T) -> Rawlink<T> {
        Rawlink { p: n }
    }

    /// Convert the `Rawlink` into an Option value
    ///
    /// **unsafe** because:
    ///
    /// - Dereference of raw pointer.
    /// - Returns reference of arbitrary lifetime.
    #[allow(dead_code)]
    pub(crate) unsafe fn resolve<'a>(&self) -> Option<&'a T> {
        self.p.as_ref()
    }

    /// Convert the `Rawlink` into an Option value
    ///
    /// **unsafe** because:
    ///
    /// - Dereference of raw pointer.
    /// - Returns reference of arbitrary lifetime.
    pub(crate) unsafe fn resolve_mut<'a>(&mut self) -> Option<&'a mut T> {
        self.p.as_mut()
    }

    /// Return the `Rawlink` and replace with `Rawlink::none()`
    #[allow(dead_code)]
    pub(crate) fn take(&mut self) -> Rawlink<T> {
        mem::replace(self, Rawlink::none())
    }
}
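
// Illustrative sketch (added for exposition, not part of the original source):
// `Rawlink` behaves like an `Option<&mut T>` with the borrow erased, which is
// what lets pages point at each other without lifetime entanglement.
#[cfg(test)]
mod rawlink_example {
    use super::*;

    #[test]
    fn none_some_resolve_take() {
        let mut value = 42u32;

        let mut link = Rawlink::some(&mut value);
        assert_eq!(unsafe { link.resolve() }, Some(&42));

        // `take` leaves `Rawlink::none()` behind, like `Option::take`.
        let taken = link.take();
        assert!(unsafe { link.resolve() }.is_none());
        assert_eq!(unsafe { taken.resolve() }, Some(&42));

        assert!(unsafe { Rawlink::<u32>::none().resolve() }.is_none());
    }
}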