use alloc::boxed::Box;

use crate::*;
use core::{
    mem,
    sync::atomic::{AtomicU64, Ordering},
};

/// A trait defining bitfield operations we need for tracking allocated objects within a page.
pub(crate) trait Bitfield {
    fn initialize(&mut self, for_size: usize, capacity: usize);
    fn first_fit(
        &self,
        base_addr: usize,
        layout: Layout,
        page_size: usize,
    ) -> Option<(usize, usize)>;
    fn is_allocated(&self, idx: usize) -> bool;
    fn set_bit(&self, idx: usize);
    fn clear_bit(&self, idx: usize);
    fn is_full(&self) -> bool;
    fn all_free(&self, relevant_bits: usize) -> bool;
}

/// Implementation of bit operations on u64 slices.
///
/// We allow deallocations (i.e., clearing a bit in the field)
/// from any thread. That's why the bitfield is a bunch of AtomicU64.
impl Bitfield for [AtomicU64] {
    /// Initialize the bitfield
    ///
    /// # Arguments
    /// * `for_size`: Object size we want to allocate
    /// * `capacity`: Maximum size of the buffer the bitmap maintains.
    ///
    /// Ensures that we only have free slots for what we can allocate
    /// within the page (by marking everything else allocated).
    fn initialize(&mut self, for_size: usize, capacity: usize) {
        // Set everything to allocated
        for bitmap in self.iter_mut() {
            *bitmap = AtomicU64::new(u64::MAX);
        }

        // Mark actual slots as free
        let relevant_bits = core::cmp::min(capacity / for_size, self.len() * 64);
        for idx in 0..relevant_bits {
            self.clear_bit(idx);
        }
    }

    /// Tries to find a free block of memory that satisfies `alignment` requirement.
    ///
    /// # Notes
    /// * We pass size here to be able to calculate the resulting address within `data`.
    #[inline(always)]
    fn first_fit(
        &self,
        base_addr: usize,
        layout: Layout,
        page_size: usize,
    ) -> Option<(usize, usize)> {
        let start_offset = get_offset_for_align(layout);
        let data_start = base_addr + start_offset;

        for (base_idx, b) in self.iter().enumerate() {
            let bitval = b.load(Ordering::Relaxed);
            if bitval == u64::MAX {
                continue;
            } else {
                let negated = !bitval;
                let first_free = negated.trailing_zeros() as usize;
                let idx: usize = base_idx * 64 + first_free;
                let offset = idx * layout.size();

                // Ensure the object fits within the page's data area
                // (the page meta-data occupies the first bytes of the page).
                let offset_inside_data_area =
                    offset <= (page_size - OBJECT_PAGE_METADATA_OVERHEAD - layout.size());
                if !offset_inside_data_area {
                    return None;
                }

                let addr: usize = data_start + offset;
                let alignment_ok = addr % layout.align() == 0;
                let block_is_free = bitval & (1 << first_free) == 0;
                if alignment_ok && block_is_free {
                    return Some((idx, addr));
                }
            }
        }
        None
    }

    /// Check if the bit `idx` is set.
    #[inline(always)]
    fn is_allocated(&self, idx: usize) -> bool {
        let base_idx = idx / 64;
        let bit_idx = idx % 64;
        (self[base_idx].load(Ordering::Relaxed) & (1 << bit_idx)) > 0
    }

    /// Sets the bit number `idx` in the bit-field.
    #[inline(always)]
    fn set_bit(&self, idx: usize) {
        let base_idx = idx / 64;
        let bit_idx = idx % 64;
        self[base_idx].fetch_or(1 << bit_idx, Ordering::Relaxed);
    }

    /// Clears bit number `idx` in the bit-field.
    #[inline(always)]
    fn clear_bit(&self, idx: usize) {
        let base_idx = idx / 64;
        let bit_idx = idx % 64;
        self[base_idx].fetch_and(!(1 << bit_idx), Ordering::Relaxed);
    }

    /// Checks if we could allocate more objects of a given `alloc_size` within the
    /// `capacity` of the memory allocator.
    ///
    /// # Note
    /// The ObjectPage will make sure to mark the top-most bits as allocated
    /// for large sizes (i.e., a size 512 SCAllocator only really needs a
    /// handful of bits to track its allocated objects). That's why this
    /// function can be simpler than it would need to be in practice.
    #[inline(always)]
    fn is_full(&self) -> bool {
        self.iter()
            .filter(|&x| x.load(Ordering::Relaxed) != u64::MAX)
            .count()
            == 0
    }

    /// Checks if the page has currently no allocations.
    ///
    /// This is called `all_free` rather than `is_empty` because
    /// we already have an `is_empty` fn as part of the slice.
    #[inline(always)]
    fn all_free(&self, relevant_bits: usize) -> bool {
        for (idx, bitmap) in self.iter().enumerate() {
            let checking_bit_range = (idx * 64, (idx + 1) * 64);
            if relevant_bits >= checking_bit_range.0 && relevant_bits < checking_bit_range.1 {
                // Last relevant bitmap; we only have to check that the subset
                // of bits covering real slots is free (the rest are marked
                // as allocated by `initialize`).
                let bits_that_should_be_free = relevant_bits - checking_bit_range.0;
                let free_mask = (1 << bits_that_should_be_free) - 1;
                return (free_mask & bitmap.load(Ordering::Relaxed)) == 0;
            }

            if bitmap.load(Ordering::Relaxed) == 0 {
                continue;
            } else {
                return false;
            }
        }

        true
    }
}
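
// A minimal sketch (as a test) of the bitfield life-cycle above. The 8-byte
// object size and 160-byte capacity are illustrative values, not real page
// parameters; the test assumes the usual `cargo test` (std) harness.
#[cfg(test)]
mod bitfield_tests {
    use super::*;

    #[test]
    fn initialize_trims_capacity_and_tracks_bits() {
        let mut bf = [AtomicU64::new(0), AtomicU64::new(0)];
        // 128 bits are available, but only 160 / 8 = 20 slots fit, so
        // `initialize` must leave bits 20..128 marked as allocated.
        bf.initialize(8, 160);
        assert!(!bf.is_allocated(0));
        assert!(bf.is_allocated(20));
        assert!(bf.all_free(20));
        assert!(!bf.is_full());

        // Allocating and freeing a slot is a set/clear round-trip.
        bf.set_bit(3);
        assert!(bf.is_allocated(3));
        assert!(!bf.all_free(20));
        bf.clear_bit(3);
        assert!(bf.all_free(20));
    }
}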

/// Returns the offset (in bytes) at which the data area for objects of the
/// given `Layout` starts within a page.
///
/// # Arguments
/// * `layout`: The layout we need to compute the data-area offset for.
///
/// # Returns
/// * `usize`: The offset within the page. It is a multiple of the layout's
///   size class and skips the page meta-data at the beginning of the page.
fn get_offset_for_align(layout: Layout) -> usize {
    match layout.size() {
        0..=8 => 80,
        9..=16 => 80,
        17..=32 => 96,
        33..=64 => 128,
        65..=128 => 128,
        129..=256 => 256,
        257..=512 => 512,
        513..=1024 => 1024,
        1025..=2048 => 2048,
        _ => panic!("unsupported allocation size"),
    }
}
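
// A small test sketch of the invariant the table above encodes: every size
// class is mapped to an offset that is a multiple of the class's natural
// alignment, so slots placed at `offset + idx * size` stay aligned.
#[cfg(test)]
mod align_offset_tests {
    use super::*;

    #[test]
    fn offsets_are_aligned_for_their_size_class() {
        for (size, align) in [(8, 8), (16, 16), (32, 32), (64, 64), (256, 256), (2048, 2048)] {
            let layout = Layout::from_size_align(size, align).unwrap();
            assert_eq!(get_offset_for_align(layout) % align, 0);
        }
    }
}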

/// This trait is used to define a page from which objects are allocated
/// in an `SCAllocator`.
///
/// The implementor of this trait needs to provide access to the page meta-data,
/// which consists of:
/// - A bitfield (to track allocations),
/// - `prev` and `next` pointers to insert the page in free lists
pub trait AllocablePage {
    /// The total size (in bytes) of the page.
    ///
    /// # Note
    /// We also assume that the address of the page will be aligned to `SIZE`.
    const SIZE: usize;

    fn bitfield(&self) -> &[AtomicU64; 8];
    fn bitfield_mut(&mut self) -> &mut [AtomicU64; 8];
    fn prev(&mut self) -> &mut Rawlink<Self>
    where
        Self: core::marker::Sized;
    fn next(&mut self) -> &mut Rawlink<Self>
    where
        Self: core::marker::Sized;

    /// Tries to find a free block within `data` that satisfies `alignment` requirement.
    fn first_fit(&self, layout: Layout) -> Option<(usize, usize)> {
        let base_addr = (self as *const Self as *const u8) as usize;
        self.bitfield().first_fit(base_addr, layout, Self::SIZE)
    }

    /// Tries to allocate an object within this page.
    ///
    /// In case the slab is full, returns a null ptr.
    fn allocate(&mut self, layout: Layout) -> *mut u8 {
        match self.first_fit(layout) {
            Some((idx, addr)) => {
                self.bitfield().set_bit(idx);
                addr as *mut u8
            }
            None => ptr::null_mut(),
        }
    }

    /// Checks if we can still allocate more objects of a given layout within the page.
    fn is_full(&self) -> bool {
        self.bitfield().is_full()
    }

    /// Checks if the page has currently no allocations.
    fn is_empty(&self, relevant_bits: usize) -> bool {
        self.bitfield().all_free(relevant_bits)
    }

    /// Deallocates a memory object within this page.
    fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) -> Result<(), AllocationError> {
        trace!(
            "AllocablePage deallocating ptr = {:p} with {:?}",
            ptr,
            layout
        );
        let align_offset = get_offset_for_align(layout);
        let page_offset = ((ptr.as_ptr() as usize) - align_offset) & (Self::SIZE - 1);
        assert!(page_offset % layout.size() == 0);
        let idx = page_offset / layout.size();
        assert!(
            self.bitfield().is_allocated(idx),
            "{:p} not marked allocated?",
            ptr
        );

        self.bitfield().clear_bit(idx);
        Ok(())
    }

    /// Returns how many more objects can still be allocated from this page.
    fn free_obj_count(&self) -> usize {
        // Walk the page's bitfield (the u64 words that track allocations);
        // every zero bit corresponds to a free object slot.
        let mut free_obj_count = 0;

        for b in self.bitfield().iter() {
            let bitval = b.load(Ordering::Relaxed);
            free_obj_count += bitval.count_zeros() as usize;
        }

        free_obj_count
    }
}
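
// A worked example (as a test) of the pointer-to-index arithmetic that
// `deallocate` performs, using a hypothetical SIZE-aligned page base of
// 0x1000_0000; only the arithmetic is exercised, no page is touched.
#[cfg(test)]
mod dealloc_math_tests {
    use super::*;

    #[test]
    fn pointer_maps_back_to_slot_index() {
        let layout = Layout::from_size_align(32, 32).unwrap();
        let base: usize = 0x1000_0000; // assumed to be OBJECT_PAGE_SIZE-aligned
        let data_start = base + get_offset_for_align(layout);

        // Slot 5 lives at data_start + 5 * 32; the subtract-and-mask in
        // `deallocate` must recover index 5 from that raw address.
        let ptr = data_start + 5 * layout.size();
        let page_offset = (ptr - get_offset_for_align(layout)) & (OBJECT_PAGE_SIZE - 1);
        assert_eq!(page_offset % layout.size(), 0);
        assert_eq!(page_offset / layout.size(), 5);
    }
}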

/// Holds allocated data within a 4 KiB page.
///
/// Has a data-section where objects are allocated from
/// and a small amount of meta-data in the form of a bitmap
/// to track allocations at the beginning of the page.
///
/// # Notes
/// An object of this type will be exactly 4 KiB.
/// It is marked `repr(C)` because we rely on a well defined order of struct
/// members (e.g., dealloc does a cast to find the bitfield).
#[repr(C)]
pub struct ObjectPage<'a> {
    #[allow(dead_code)]
    /// A bit-field to track free/allocated memory within `data`.
    pub(crate) bitfield: [AtomicU64; 8],

    /// Next element in list (used by `PageList`).
    next: Rawlink<ObjectPage<'a>>,
    /// Previous element in list (used by `PageList`).
    prev: Rawlink<ObjectPage<'a>>,

    /// Holds memory objects.
    data: [u8; OBJECT_PAGE_SIZE - OBJECT_PAGE_METADATA_OVERHEAD],
}

impl<'a> ObjectPage<'a> {
    pub fn new() -> Box<ObjectPage<'a>> {
        // Hand out a zeroed page: zeroed atomics, null Rawlinks, and zeroed
        // data are all valid, whereas `assume_init` on uninitialized memory
        // would be undefined behavior.
        unsafe { Box::new_zeroed().assume_init() }
    }
}

// These need some more work to be really safe...
unsafe impl<'a> Send for ObjectPage<'a> {}
unsafe impl<'a> Sync for ObjectPage<'a> {}
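
// ObjectPage's doc comment promises an exactly page-sized struct; this
// sketch checks that the meta-data fields and the data array add up,
// assuming OBJECT_PAGE_METADATA_OVERHEAD matches the 80 bytes of meta-data
// (8 * AtomicU64 + two Rawlink pointers) laid out above.
#[cfg(test)]
mod object_page_layout_tests {
    use super::*;

    #[test]
    fn object_page_is_exactly_one_page() {
        assert_eq!(core::mem::size_of::<ObjectPage<'static>>(), OBJECT_PAGE_SIZE);
    }
}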

impl<'a> AllocablePage for ObjectPage<'a> {
    const SIZE: usize = OBJECT_PAGE_SIZE;

    fn bitfield(&self) -> &[AtomicU64; 8] {
        &self.bitfield
    }
    fn bitfield_mut(&mut self) -> &mut [AtomicU64; 8] {
        &mut self.bitfield
    }

    fn prev(&mut self) -> &mut Rawlink<Self> {
        &mut self.prev
    }

    fn next(&mut self) -> &mut Rawlink<Self> {
        &mut self.next
    }
}

impl<'a> Default for ObjectPage<'a> {
    fn default() -> ObjectPage<'a> {
        unsafe { mem::MaybeUninit::zeroed().assume_init() }
    }
}

impl<'a> fmt::Debug for ObjectPage<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "ObjectPage")
    }
}

/// A list of pages.
pub(crate) struct PageList<'a, T: AllocablePage> {
    /// Points to the head of the list.
    pub(crate) head: Option<&'a mut T>,
    /// Number of elements in the list.
    pub(crate) elements: usize,
}

impl<'a, T: AllocablePage> PageList<'a, T> {
    #[cfg(feature = "unstable")]
    pub(crate) const fn new() -> PageList<'a, T> {
        PageList {
            head: None,
            elements: 0,
        }
    }

    #[cfg(not(feature = "unstable"))]
    pub(crate) fn new() -> PageList<'a, T> {
        PageList {
            head: None,
            elements: 0,
        }
    }

    pub(crate) fn iter_mut<'b: 'a>(&mut self) -> ObjectPageIterMut<'b, T> {
        let m = match self.head {
            None => Rawlink::none(),
            Some(ref mut m) => Rawlink::some(*m),
        };

        ObjectPageIterMut {
            head: m,
            phantom: core::marker::PhantomData,
        }
    }

    /// Inserts `new_head` at the front of the list.
    pub(crate) fn insert_front<'b>(&'b mut self, mut new_head: &'a mut T) {
        match self.head {
            None => {
                *new_head.prev() = Rawlink::none();
                self.head = Some(new_head);
            }
            Some(ref mut head) => {
                *new_head.prev() = Rawlink::none();
                *head.prev() = Rawlink::some(new_head);
                mem::swap(head, &mut new_head);
                *head.next() = Rawlink::some(new_head);
            }
        }

        self.elements += 1;
    }

    /// Removes `slab_page` from the list.
    pub(crate) fn remove_from_list(&mut self, slab_page: &mut T) {
        unsafe {
            match slab_page.prev().resolve_mut() {
                None => {
                    self.head = slab_page.next().resolve_mut();
                }
                Some(prev) => {
                    *prev.next() = match slab_page.next().resolve_mut() {
                        None => Rawlink::none(),
                        Some(next) => Rawlink::some(next),
                    };
                }
            }

            match slab_page.next().resolve_mut() {
                None => (),
                Some(next) => {
                    *next.prev() = match slab_page.prev().resolve_mut() {
                        None => Rawlink::none(),
                        Some(prev) => Rawlink::some(prev),
                    };
                }
            }
        }

        *slab_page.prev() = Rawlink::none();
        *slab_page.next() = Rawlink::none();
        self.elements -= 1;
    }

    /// Removes the head of the list and returns it.
    #[allow(clippy::manual_inspect)]
    pub(crate) fn pop<'b>(&'b mut self) -> Option<&'a mut T> {
        match self.head {
            None => None,
            Some(ref mut head) => {
                let head_next = head.next();
                let mut new_head = unsafe { head_next.resolve_mut() };
                mem::swap(&mut self.head, &mut new_head);
                let _ = self.head.as_mut().map(|n| {
                    *n.prev() = Rawlink::none();
                });

                self.elements -= 1;
                new_head.map(|node| {
                    *node.prev() = Rawlink::none();
                    *node.next() = Rawlink::none();
                    node
                })
            }
        }
    }

    /// Does the list contain `s`?
    pub(crate) fn contains(&mut self, s: *const T) -> bool {
        for slab_page in self.iter_mut() {
            if core::ptr::eq(slab_page, s) {
                return true;
            }
        }

        false
    }
}
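
// A minimal sketch of the list operations above: LIFO insert/pop plus
// `contains`, using two heap-allocated pages. The raw pointers are captured
// before insertion because the list mutably borrows its members.
#[cfg(test)]
mod page_list_tests {
    use super::*;

    #[test]
    fn insert_front_then_pop_is_lifo() {
        let mut a = ObjectPage::new();
        let mut b = ObjectPage::new();
        let pa: *const ObjectPage = &*a;
        let pb: *const ObjectPage = &*b;

        let mut list: PageList<ObjectPage> = PageList::new();
        list.insert_front(&mut *a);
        list.insert_front(&mut *b);
        assert_eq!(list.elements, 2);
        assert!(list.contains(pa));
        assert!(list.contains(pb));

        // `b` was inserted last, so it is the head and pops first.
        let popped = list.pop().unwrap();
        assert!(core::ptr::eq(popped as *const ObjectPage, pb));
        assert_eq!(list.elements, 1);
    }
}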

/// Iterate over all the pages inside a slab allocator
pub(crate) struct ObjectPageIterMut<'a, P: AllocablePage> {
    head: Rawlink<P>,
    phantom: core::marker::PhantomData<&'a P>,
}

impl<'a, P: AllocablePage + 'a> Iterator for ObjectPageIterMut<'a, P> {
    type Item = &'a mut P;

    #[inline]
    #[allow(clippy::manual_inspect)]
    fn next(&mut self) -> Option<&'a mut P> {
        unsafe {
            self.head.resolve_mut().map(|next| {
                self.head = match next.next().resolve_mut() {
                    None => Rawlink::none(),
                    Some(ref mut sp) => Rawlink::some(*sp),
                };
                next
            })
        }
    }
}

/// Rawlink is a type like `Option<T>` but for holding a raw pointer.
///
/// We use it to link `AllocablePage`s together. You probably won't need
/// to use this type if you're not implementing `AllocablePage`
/// for a custom page-size.
pub struct Rawlink<T> {
    p: *mut T,
}

impl<T> Default for Rawlink<T> {
    fn default() -> Self {
        Rawlink { p: ptr::null_mut() }
    }
}

impl<T> Rawlink<T> {
    /// Like `Option::None` for Rawlink.
    pub(crate) fn none() -> Rawlink<T> {
        Rawlink { p: ptr::null_mut() }
    }

    /// Like `Option::Some` for Rawlink.
    pub(crate) fn some(n: &mut T) -> Rawlink<T> {
        Rawlink { p: n }
    }

    /// Convert the `Rawlink` into an `Option` value.
    ///
    /// **unsafe** because:
    ///
    /// - Dereference of raw pointer.
    /// - Returns reference of arbitrary lifetime.
    #[allow(dead_code)]
    pub(crate) unsafe fn resolve<'a>(&self) -> Option<&'a T> {
        self.p.as_ref()
    }

    /// Convert the `Rawlink` into an `Option` value.
    ///
    /// **unsafe** because:
    ///
    /// - Dereference of raw pointer.
    /// - Returns reference of arbitrary lifetime.
    pub(crate) unsafe fn resolve_mut<'a>(&mut self) -> Option<&'a mut T> {
        self.p.as_mut()
    }

    /// Return the `Rawlink` and replace it with `Rawlink::none()`.
    #[allow(dead_code)]
    pub(crate) fn take(&mut self) -> Rawlink<T> {
        mem::replace(self, Rawlink::none())
    }
}
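
// A minimal sketch of Rawlink's Option-like behavior on a plain integer;
// `resolve`/`resolve_mut` are unsafe because the link is a raw pointer with
// no lifetime tied to the value it points at.
#[cfg(test)]
mod rawlink_tests {
    use super::*;

    #[test]
    fn take_replaces_link_with_none() {
        let mut value: usize = 42;
        let mut link = Rawlink::some(&mut value);
        assert_eq!(unsafe { link.resolve() }, Some(&42));

        // `take` hands the pointer out and leaves a null link behind.
        let mut taken = link.take();
        assert!(unsafe { link.resolve() }.is_none());
        assert_eq!(unsafe { taken.resolve_mut() }, Some(&mut 42));
    }
}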