1ceeb2e94Slaokengwt use alloc::boxed::Box;
2ceeb2e94Slaokengwt
3ceeb2e94Slaokengwt use crate::*;
4ceeb2e94Slaokengwt use core::{
5ceeb2e94Slaokengwt mem,
6ceeb2e94Slaokengwt sync::atomic::{AtomicU64, Ordering},
7ceeb2e94Slaokengwt };
8ceeb2e94Slaokengwt
9ceeb2e94Slaokengwt /// A trait defining bitfield operations we need for tracking allocated objects within a page.
10ceeb2e94Slaokengwt pub(crate) trait Bitfield {
initialize(&mut self, for_size: usize, capacity: usize)11ceeb2e94Slaokengwt fn initialize(&mut self, for_size: usize, capacity: usize);
first_fit( &self, base_addr: usize, layout: Layout, page_size: usize, ) -> Option<(usize, usize)>12ceeb2e94Slaokengwt fn first_fit(
13ceeb2e94Slaokengwt &self,
14ceeb2e94Slaokengwt base_addr: usize,
15ceeb2e94Slaokengwt layout: Layout,
16ceeb2e94Slaokengwt page_size: usize,
17ceeb2e94Slaokengwt ) -> Option<(usize, usize)>;
is_allocated(&self, idx: usize) -> bool18ceeb2e94Slaokengwt fn is_allocated(&self, idx: usize) -> bool;
set_bit(&self, idx: usize)19ceeb2e94Slaokengwt fn set_bit(&self, idx: usize);
clear_bit(&self, idx: usize)20ceeb2e94Slaokengwt fn clear_bit(&self, idx: usize);
is_full(&self) -> bool21ceeb2e94Slaokengwt fn is_full(&self) -> bool;
all_free(&self, relevant_bits: usize) -> bool22ceeb2e94Slaokengwt fn all_free(&self, relevant_bits: usize) -> bool;
23ceeb2e94Slaokengwt }
24ceeb2e94Slaokengwt
25ceeb2e94Slaokengwt /// Implementation of bit operations on u64 slices.
26ceeb2e94Slaokengwt ///
27ceeb2e94Slaokengwt /// We allow deallocations (i.e. clearning a bit in the field)
28ceeb2e94Slaokengwt /// from any thread. That's why the bitfield is a bunch of AtomicU64.
29ceeb2e94Slaokengwt impl Bitfield for [AtomicU64] {
30ceeb2e94Slaokengwt /// Initialize the bitfield
31ceeb2e94Slaokengwt ///
32ceeb2e94Slaokengwt /// # Arguments
33ceeb2e94Slaokengwt /// * `for_size`: Object size we want to allocate
34ceeb2e94Slaokengwt /// * `capacity`: Maximum size of the buffer the bitmap maintains.
35ceeb2e94Slaokengwt ///
36ceeb2e94Slaokengwt /// Ensures that we only have free slots for what we can allocate
37ceeb2e94Slaokengwt /// within the page (by marking everything else allocated).
initialize(&mut self, for_size: usize, capacity: usize)38ceeb2e94Slaokengwt fn initialize(&mut self, for_size: usize, capacity: usize) {
39ceeb2e94Slaokengwt // Set everything to allocated
40ceeb2e94Slaokengwt for bitmap in self.iter_mut() {
41*bd70d2d1SLoGin *bitmap = AtomicU64::new(u64::MAX);
42ceeb2e94Slaokengwt }
43ceeb2e94Slaokengwt
44ceeb2e94Slaokengwt // Mark actual slots as free
45ceeb2e94Slaokengwt let relevant_bits = core::cmp::min(capacity / for_size, self.len() * 64);
46ceeb2e94Slaokengwt for idx in 0..relevant_bits {
47ceeb2e94Slaokengwt self.clear_bit(idx);
48ceeb2e94Slaokengwt }
49ceeb2e94Slaokengwt }
50ceeb2e94Slaokengwt
51ceeb2e94Slaokengwt /// Tries to find a free block of memory that satisfies `alignment` requirement.
52ceeb2e94Slaokengwt ///
53ceeb2e94Slaokengwt /// # Notes
54ceeb2e94Slaokengwt /// * We pass size here to be able to calculate the resulting address within `data`.
55ceeb2e94Slaokengwt #[inline(always)]
first_fit( &self, base_addr: usize, layout: Layout, page_size: usize, ) -> Option<(usize, usize)>56ceeb2e94Slaokengwt fn first_fit(
57ceeb2e94Slaokengwt &self,
58ceeb2e94Slaokengwt base_addr: usize,
59ceeb2e94Slaokengwt layout: Layout,
60ceeb2e94Slaokengwt page_size: usize,
61ceeb2e94Slaokengwt ) -> Option<(usize, usize)> {
62ceeb2e94Slaokengwt let start_offset = get_offset_for_align(layout);
63ceeb2e94Slaokengwt let data_start = base_addr + start_offset;
64ceeb2e94Slaokengwt
65ceeb2e94Slaokengwt for (base_idx, b) in self.iter().enumerate() {
66ceeb2e94Slaokengwt let bitval = b.load(Ordering::Relaxed);
67*bd70d2d1SLoGin if bitval == u64::MAX {
68ceeb2e94Slaokengwt continue;
69ceeb2e94Slaokengwt } else {
70ceeb2e94Slaokengwt let negated = !bitval;
71ceeb2e94Slaokengwt let first_free = negated.trailing_zeros() as usize;
72ceeb2e94Slaokengwt let idx: usize = base_idx * 64 + first_free;
73ceeb2e94Slaokengwt let offset = idx * layout.size();
74ceeb2e94Slaokengwt
75ceeb2e94Slaokengwt // TODO(bad): psize needs to be passed as arg
76ceeb2e94Slaokengwt let offset_inside_data_area =
77ceeb2e94Slaokengwt offset <= (page_size - OBJECT_PAGE_METADATA_OVERHEAD - layout.size());
78ceeb2e94Slaokengwt if !offset_inside_data_area {
79ceeb2e94Slaokengwt return None;
80ceeb2e94Slaokengwt }
81ceeb2e94Slaokengwt
82ceeb2e94Slaokengwt let addr: usize = data_start + offset;
83ceeb2e94Slaokengwt let alignment_ok = addr % layout.align() == 0;
84ceeb2e94Slaokengwt let block_is_free = bitval & (1 << first_free) == 0;
85ceeb2e94Slaokengwt if alignment_ok && block_is_free {
86ceeb2e94Slaokengwt return Some((idx, addr));
87ceeb2e94Slaokengwt }
88ceeb2e94Slaokengwt }
89ceeb2e94Slaokengwt }
90ceeb2e94Slaokengwt None
91ceeb2e94Slaokengwt }
92ceeb2e94Slaokengwt
93ceeb2e94Slaokengwt /// Check if the bit `idx` is set.
94ceeb2e94Slaokengwt #[inline(always)]
is_allocated(&self, idx: usize) -> bool95ceeb2e94Slaokengwt fn is_allocated(&self, idx: usize) -> bool {
96ceeb2e94Slaokengwt let base_idx = idx / 64;
97ceeb2e94Slaokengwt let bit_idx = idx % 64;
98ceeb2e94Slaokengwt (self[base_idx].load(Ordering::Relaxed) & (1 << bit_idx)) > 0
99ceeb2e94Slaokengwt }
100ceeb2e94Slaokengwt
101ceeb2e94Slaokengwt /// Sets the bit number `idx` in the bit-field.
102ceeb2e94Slaokengwt #[inline(always)]
set_bit(&self, idx: usize)103ceeb2e94Slaokengwt fn set_bit(&self, idx: usize) {
104ceeb2e94Slaokengwt let base_idx = idx / 64;
105ceeb2e94Slaokengwt let bit_idx = idx % 64;
106ceeb2e94Slaokengwt self[base_idx].fetch_or(1 << bit_idx, Ordering::Relaxed);
107ceeb2e94Slaokengwt }
108ceeb2e94Slaokengwt
109ceeb2e94Slaokengwt /// Clears bit number `idx` in the bit-field.
110ceeb2e94Slaokengwt #[inline(always)]
clear_bit(&self, idx: usize)111ceeb2e94Slaokengwt fn clear_bit(&self, idx: usize) {
112ceeb2e94Slaokengwt let base_idx = idx / 64;
113ceeb2e94Slaokengwt let bit_idx = idx % 64;
114ceeb2e94Slaokengwt self[base_idx].fetch_and(!(1 << bit_idx), Ordering::Relaxed);
115ceeb2e94Slaokengwt }
116ceeb2e94Slaokengwt
117ceeb2e94Slaokengwt /// Checks if we could allocate more objects of a given `alloc_size` within the
118ceeb2e94Slaokengwt /// `capacity` of the memory allocator.
119ceeb2e94Slaokengwt ///
120ceeb2e94Slaokengwt /// # Note
121ceeb2e94Slaokengwt /// The ObjectPage will make sure to mark the top-most bits as allocated
122ceeb2e94Slaokengwt /// for large sizes (i.e., a size 512 SCAllocator will only really need 3 bits)
123ceeb2e94Slaokengwt /// to track allocated objects). That's why this function can be simpler
124ceeb2e94Slaokengwt /// than it would need to be in practice.
125ceeb2e94Slaokengwt #[inline(always)]
is_full(&self) -> bool126ceeb2e94Slaokengwt fn is_full(&self) -> bool {
127ceeb2e94Slaokengwt self.iter()
128*bd70d2d1SLoGin .filter(|&x| x.load(Ordering::Relaxed) != u64::MAX)
129ceeb2e94Slaokengwt .count()
130ceeb2e94Slaokengwt == 0
131ceeb2e94Slaokengwt }
132ceeb2e94Slaokengwt
133ceeb2e94Slaokengwt /// Checks if the page has currently no allocations.
134ceeb2e94Slaokengwt ///
135ceeb2e94Slaokengwt /// This is called `all_free` rather than `is_emtpy` because
136ceeb2e94Slaokengwt /// we already have an is_empty fn as part of the slice.
137ceeb2e94Slaokengwt #[inline(always)]
all_free(&self, relevant_bits: usize) -> bool138ceeb2e94Slaokengwt fn all_free(&self, relevant_bits: usize) -> bool {
139ceeb2e94Slaokengwt for (idx, bitmap) in self.iter().enumerate() {
140ceeb2e94Slaokengwt let checking_bit_range = (idx * 64, (idx + 1) * 64);
141ceeb2e94Slaokengwt if relevant_bits >= checking_bit_range.0 && relevant_bits < checking_bit_range.1 {
142ceeb2e94Slaokengwt // Last relevant bitmap, here we only have to check that a subset of bitmap is marked free
143ceeb2e94Slaokengwt // the rest will be marked full
144ceeb2e94Slaokengwt let bits_that_should_be_free = relevant_bits - checking_bit_range.0;
145ceeb2e94Slaokengwt let free_mask = (1 << bits_that_should_be_free) - 1;
146ceeb2e94Slaokengwt return (free_mask & bitmap.load(Ordering::Relaxed)) == 0;
147ceeb2e94Slaokengwt }
148ceeb2e94Slaokengwt
149ceeb2e94Slaokengwt if bitmap.load(Ordering::Relaxed) == 0 {
150ceeb2e94Slaokengwt continue;
151ceeb2e94Slaokengwt } else {
152ceeb2e94Slaokengwt return false;
153ceeb2e94Slaokengwt }
154ceeb2e94Slaokengwt }
155ceeb2e94Slaokengwt
156ceeb2e94Slaokengwt true
157ceeb2e94Slaokengwt }
158ceeb2e94Slaokengwt }
159ceeb2e94Slaokengwt
160ceeb2e94Slaokengwt /// # get_offset_for_align - 根据布局大小获取page内对齐偏移量
161ceeb2e94Slaokengwt ///
162ceeb2e94Slaokengwt /// 这个函数根据给定的`Layout`大小确定一个合适的对齐偏移量。
163ceeb2e94Slaokengwt ///
164ceeb2e94Slaokengwt /// ## 参数
165ceeb2e94Slaokengwt ///
166ceeb2e94Slaokengwt /// - layout: Layout,这是需要计算对齐偏移量的布局参数。
167ceeb2e94Slaokengwt ///
168ceeb2e94Slaokengwt /// ## 返回值
169ceeb2e94Slaokengwt ///
170ceeb2e94Slaokengwt /// - usize: 成功时返回一个usize类型的对齐偏移量。
get_offset_for_align(layout: Layout) -> usize171ceeb2e94Slaokengwt fn get_offset_for_align(layout: Layout) -> usize {
172ceeb2e94Slaokengwt match layout.size() {
173ceeb2e94Slaokengwt 0..=8 => 80,
174ceeb2e94Slaokengwt 9..=16 => 80,
175ceeb2e94Slaokengwt 17..=32 => 96,
176ceeb2e94Slaokengwt 33..=64 => 128,
177ceeb2e94Slaokengwt 65..=128 => 128,
178ceeb2e94Slaokengwt 129..=256 => 256,
179ceeb2e94Slaokengwt 257..=512 => 512,
180ceeb2e94Slaokengwt 513..=1024 => 1024,
181ceeb2e94Slaokengwt 1025..=2048 => 2048,
182ceeb2e94Slaokengwt _ => panic!(),
183ceeb2e94Slaokengwt }
184ceeb2e94Slaokengwt }
185ceeb2e94Slaokengwt
186ceeb2e94Slaokengwt /// This trait is used to define a page from which objects are allocated
187ceeb2e94Slaokengwt /// in an `SCAllocator`.
188ceeb2e94Slaokengwt ///
189ceeb2e94Slaokengwt /// The implementor of this trait needs to provide access to the page meta-data,
190ceeb2e94Slaokengwt /// which consists of:
191ceeb2e94Slaokengwt /// - A bitfield (to track allocations),
192ceeb2e94Slaokengwt /// - `prev` and `next` pointers to insert the page in free lists
193ceeb2e94Slaokengwt pub trait AllocablePage {
194ceeb2e94Slaokengwt /// The total size (in bytes) of the page.
195ceeb2e94Slaokengwt ///
196ceeb2e94Slaokengwt /// # Note
197ceeb2e94Slaokengwt /// We also assume that the address of the page will be aligned to `SIZE`.
198ceeb2e94Slaokengwt const SIZE: usize;
199ceeb2e94Slaokengwt
bitfield(&self) -> &[AtomicU64; 8]200ceeb2e94Slaokengwt fn bitfield(&self) -> &[AtomicU64; 8];
bitfield_mut(&mut self) -> &mut [AtomicU64; 8]201ceeb2e94Slaokengwt fn bitfield_mut(&mut self) -> &mut [AtomicU64; 8];
prev(&mut self) -> &mut Rawlink<Self> where Self: core::marker::Sized202ceeb2e94Slaokengwt fn prev(&mut self) -> &mut Rawlink<Self>
203ceeb2e94Slaokengwt where
204ceeb2e94Slaokengwt Self: core::marker::Sized;
next(&mut self) -> &mut Rawlink<Self> where Self: core::marker::Sized205ceeb2e94Slaokengwt fn next(&mut self) -> &mut Rawlink<Self>
206ceeb2e94Slaokengwt where
207ceeb2e94Slaokengwt Self: core::marker::Sized;
208ceeb2e94Slaokengwt
209ceeb2e94Slaokengwt /// Tries to find a free block within `data` that satisfies `alignment` requirement.
first_fit(&self, layout: Layout) -> Option<(usize, usize)>210ceeb2e94Slaokengwt fn first_fit(&self, layout: Layout) -> Option<(usize, usize)> {
211ceeb2e94Slaokengwt let base_addr = (self as *const Self as *const u8) as usize;
212ceeb2e94Slaokengwt self.bitfield().first_fit(base_addr, layout, Self::SIZE)
213ceeb2e94Slaokengwt }
214ceeb2e94Slaokengwt
215ceeb2e94Slaokengwt /// Tries to allocate an object within this page.
216ceeb2e94Slaokengwt ///
217ceeb2e94Slaokengwt /// In case the slab is full, returns a null ptr.
allocate(&mut self, layout: Layout) -> *mut u8218ceeb2e94Slaokengwt fn allocate(&mut self, layout: Layout) -> *mut u8 {
219ceeb2e94Slaokengwt match self.first_fit(layout) {
220ceeb2e94Slaokengwt Some((idx, addr)) => {
221ceeb2e94Slaokengwt self.bitfield().set_bit(idx);
222ceeb2e94Slaokengwt addr as *mut u8
223ceeb2e94Slaokengwt }
224ceeb2e94Slaokengwt None => ptr::null_mut(),
225ceeb2e94Slaokengwt }
226ceeb2e94Slaokengwt }
227ceeb2e94Slaokengwt
228ceeb2e94Slaokengwt /// Checks if we can still allocate more objects of a given layout within the page.
is_full(&self) -> bool229ceeb2e94Slaokengwt fn is_full(&self) -> bool {
230ceeb2e94Slaokengwt self.bitfield().is_full()
231ceeb2e94Slaokengwt }
232ceeb2e94Slaokengwt
233ceeb2e94Slaokengwt /// Checks if the page has currently no allocations.
is_empty(&self, relevant_bits: usize) -> bool234ceeb2e94Slaokengwt fn is_empty(&self, relevant_bits: usize) -> bool {
235ceeb2e94Slaokengwt self.bitfield().all_free(relevant_bits)
236ceeb2e94Slaokengwt }
237ceeb2e94Slaokengwt
238ceeb2e94Slaokengwt /// Deallocates a memory object within this page.
deallocate(&self, ptr: NonNull<u8>, layout: Layout) -> Result<(), AllocationError>239ceeb2e94Slaokengwt fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) -> Result<(), AllocationError> {
240ceeb2e94Slaokengwt trace!(
241ceeb2e94Slaokengwt "AllocablePage deallocating ptr = {:p} with {:?}",
242ceeb2e94Slaokengwt ptr,
243ceeb2e94Slaokengwt layout
244ceeb2e94Slaokengwt );
245ceeb2e94Slaokengwt let align_offset = get_offset_for_align(layout);
246ceeb2e94Slaokengwt let page_offset = ((ptr.as_ptr() as usize) - align_offset) & (Self::SIZE - 1);
247ceeb2e94Slaokengwt assert!(page_offset % layout.size() == 0);
248ceeb2e94Slaokengwt let idx = page_offset / layout.size();
249ceeb2e94Slaokengwt assert!(
250ceeb2e94Slaokengwt self.bitfield().is_allocated(idx),
251ceeb2e94Slaokengwt "{:p} not marked allocated?",
252ceeb2e94Slaokengwt ptr
253ceeb2e94Slaokengwt );
254ceeb2e94Slaokengwt
255ceeb2e94Slaokengwt self.bitfield().clear_bit(idx);
256ceeb2e94Slaokengwt Ok(())
257ceeb2e94Slaokengwt }
258ceeb2e94Slaokengwt }
259ceeb2e94Slaokengwt
260ceeb2e94Slaokengwt /// Holds allocated data within a 4 KiB page.
261ceeb2e94Slaokengwt ///
262ceeb2e94Slaokengwt /// Has a data-section where objects are allocated from
263ceeb2e94Slaokengwt /// and a small amount of meta-data in form of a bitmap
264ceeb2e94Slaokengwt /// to track allocations at the end of the page.
265ceeb2e94Slaokengwt ///
266ceeb2e94Slaokengwt /// # Notes
267ceeb2e94Slaokengwt /// An object of this type will be exactly 4 KiB.
268ceeb2e94Slaokengwt /// It is marked `repr(C)` because we rely on a well defined order of struct
269ceeb2e94Slaokengwt /// members (e.g., dealloc does a cast to find the bitfield).
270ceeb2e94Slaokengwt #[repr(C)]
271ceeb2e94Slaokengwt pub struct ObjectPage<'a> {
272ceeb2e94Slaokengwt #[allow(dead_code)]
273ceeb2e94Slaokengwt /// A bit-field to track free/allocated memory within `data`.
274ceeb2e94Slaokengwt pub(crate) bitfield: [AtomicU64; 8],
275ceeb2e94Slaokengwt
276ceeb2e94Slaokengwt /// Next element in list (used by `PageList`).
277ceeb2e94Slaokengwt next: Rawlink<ObjectPage<'a>>,
278ceeb2e94Slaokengwt /// Previous element in list (used by `PageList`)
279ceeb2e94Slaokengwt prev: Rawlink<ObjectPage<'a>>,
280ceeb2e94Slaokengwt
281ceeb2e94Slaokengwt /// Holds memory objects.
282ceeb2e94Slaokengwt data: [u8; OBJECT_PAGE_SIZE - OBJECT_PAGE_METADATA_OVERHEAD],
283ceeb2e94Slaokengwt }
284ceeb2e94Slaokengwt
285ceeb2e94Slaokengwt impl<'a> ObjectPage<'a> {
new() -> Box<ObjectPage<'a>>286ceeb2e94Slaokengwt pub fn new() -> Box<ObjectPage<'a>> {
287ceeb2e94Slaokengwt unsafe { Box::new_uninit().assume_init() }
288ceeb2e94Slaokengwt }
289ceeb2e94Slaokengwt }
290ceeb2e94Slaokengwt
291ceeb2e94Slaokengwt // These needs some more work to be really safe...
292ceeb2e94Slaokengwt unsafe impl<'a> Send for ObjectPage<'a> {}
293ceeb2e94Slaokengwt unsafe impl<'a> Sync for ObjectPage<'a> {}
294ceeb2e94Slaokengwt
295ceeb2e94Slaokengwt impl<'a> AllocablePage for ObjectPage<'a> {
296ceeb2e94Slaokengwt const SIZE: usize = OBJECT_PAGE_SIZE;
297ceeb2e94Slaokengwt
bitfield(&self) -> &[AtomicU64; 8]298ceeb2e94Slaokengwt fn bitfield(&self) -> &[AtomicU64; 8] {
299ceeb2e94Slaokengwt &self.bitfield
300ceeb2e94Slaokengwt }
bitfield_mut(&mut self) -> &mut [AtomicU64; 8]301ceeb2e94Slaokengwt fn bitfield_mut(&mut self) -> &mut [AtomicU64; 8] {
302ceeb2e94Slaokengwt &mut self.bitfield
303ceeb2e94Slaokengwt }
304ceeb2e94Slaokengwt
prev(&mut self) -> &mut Rawlink<Self>305ceeb2e94Slaokengwt fn prev(&mut self) -> &mut Rawlink<Self> {
306ceeb2e94Slaokengwt &mut self.prev
307ceeb2e94Slaokengwt }
308ceeb2e94Slaokengwt
next(&mut self) -> &mut Rawlink<Self>309ceeb2e94Slaokengwt fn next(&mut self) -> &mut Rawlink<Self> {
310ceeb2e94Slaokengwt &mut self.next
311ceeb2e94Slaokengwt }
312ceeb2e94Slaokengwt }
313ceeb2e94Slaokengwt
314ceeb2e94Slaokengwt impl<'a> Default for ObjectPage<'a> {
default() -> ObjectPage<'a>315ceeb2e94Slaokengwt fn default() -> ObjectPage<'a> {
316ceeb2e94Slaokengwt unsafe { mem::MaybeUninit::zeroed().assume_init() }
317ceeb2e94Slaokengwt }
318ceeb2e94Slaokengwt }
319ceeb2e94Slaokengwt
320ceeb2e94Slaokengwt impl<'a> fmt::Debug for ObjectPage<'a> {
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result321ceeb2e94Slaokengwt fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
322ceeb2e94Slaokengwt write!(f, "ObjectPage")
323ceeb2e94Slaokengwt }
324ceeb2e94Slaokengwt }
325ceeb2e94Slaokengwt
326ceeb2e94Slaokengwt /// A list of pages.
327ceeb2e94Slaokengwt pub(crate) struct PageList<'a, T: AllocablePage> {
328ceeb2e94Slaokengwt /// Points to the head of the list.
329ceeb2e94Slaokengwt pub(crate) head: Option<&'a mut T>,
330ceeb2e94Slaokengwt /// Number of elements in the list.
331ceeb2e94Slaokengwt pub(crate) elements: usize,
332ceeb2e94Slaokengwt }
333ceeb2e94Slaokengwt
334ceeb2e94Slaokengwt impl<'a, T: AllocablePage> PageList<'a, T> {
335ceeb2e94Slaokengwt #[cfg(feature = "unstable")]
new() -> PageList<'a, T>336ceeb2e94Slaokengwt pub(crate) const fn new() -> PageList<'a, T> {
337ceeb2e94Slaokengwt PageList {
338ceeb2e94Slaokengwt head: None,
339ceeb2e94Slaokengwt elements: 0,
340ceeb2e94Slaokengwt }
341ceeb2e94Slaokengwt }
342ceeb2e94Slaokengwt
343ceeb2e94Slaokengwt #[cfg(not(feature = "unstable"))]
new() -> PageList<'a, T>344ceeb2e94Slaokengwt pub(crate) fn new() -> PageList<'a, T> {
345ceeb2e94Slaokengwt PageList {
346ceeb2e94Slaokengwt head: None,
347ceeb2e94Slaokengwt elements: 0,
348ceeb2e94Slaokengwt }
349ceeb2e94Slaokengwt }
350ceeb2e94Slaokengwt
iter_mut<'b: 'a>(&mut self) -> ObjectPageIterMut<'b, T>351ceeb2e94Slaokengwt pub(crate) fn iter_mut<'b: 'a>(&mut self) -> ObjectPageIterMut<'b, T> {
352ceeb2e94Slaokengwt let m = match self.head {
353ceeb2e94Slaokengwt None => Rawlink::none(),
354ceeb2e94Slaokengwt Some(ref mut m) => Rawlink::some(*m),
355ceeb2e94Slaokengwt };
356ceeb2e94Slaokengwt
357ceeb2e94Slaokengwt ObjectPageIterMut {
358ceeb2e94Slaokengwt head: m,
359ceeb2e94Slaokengwt phantom: core::marker::PhantomData,
360ceeb2e94Slaokengwt }
361ceeb2e94Slaokengwt }
362ceeb2e94Slaokengwt
363ceeb2e94Slaokengwt /// Inserts `new_head` at the front of the list.
insert_front<'b>(&'b mut self, mut new_head: &'a mut T)364ceeb2e94Slaokengwt pub(crate) fn insert_front<'b>(&'b mut self, mut new_head: &'a mut T) {
365ceeb2e94Slaokengwt match self.head {
366ceeb2e94Slaokengwt None => {
367ceeb2e94Slaokengwt *new_head.prev() = Rawlink::none();
368ceeb2e94Slaokengwt self.head = Some(new_head);
369ceeb2e94Slaokengwt }
370ceeb2e94Slaokengwt Some(ref mut head) => {
371ceeb2e94Slaokengwt *new_head.prev() = Rawlink::none();
372ceeb2e94Slaokengwt *head.prev() = Rawlink::some(new_head);
373ceeb2e94Slaokengwt mem::swap(head, &mut new_head);
374ceeb2e94Slaokengwt *head.next() = Rawlink::some(new_head);
375ceeb2e94Slaokengwt }
376ceeb2e94Slaokengwt }
377ceeb2e94Slaokengwt
378ceeb2e94Slaokengwt self.elements += 1;
379ceeb2e94Slaokengwt }
380ceeb2e94Slaokengwt
381ceeb2e94Slaokengwt /// Removes `slab_page` from the list.
remove_from_list(&mut self, slab_page: &mut T)382ceeb2e94Slaokengwt pub(crate) fn remove_from_list(&mut self, slab_page: &mut T) {
383ceeb2e94Slaokengwt unsafe {
384ceeb2e94Slaokengwt match slab_page.prev().resolve_mut() {
385ceeb2e94Slaokengwt None => {
386ceeb2e94Slaokengwt self.head = slab_page.next().resolve_mut();
387ceeb2e94Slaokengwt }
388ceeb2e94Slaokengwt Some(prev) => {
389ceeb2e94Slaokengwt *prev.next() = match slab_page.next().resolve_mut() {
390ceeb2e94Slaokengwt None => Rawlink::none(),
391ceeb2e94Slaokengwt Some(next) => Rawlink::some(next),
392ceeb2e94Slaokengwt };
393ceeb2e94Slaokengwt }
394ceeb2e94Slaokengwt }
395ceeb2e94Slaokengwt
396ceeb2e94Slaokengwt match slab_page.next().resolve_mut() {
397ceeb2e94Slaokengwt None => (),
398ceeb2e94Slaokengwt Some(next) => {
399ceeb2e94Slaokengwt *next.prev() = match slab_page.prev().resolve_mut() {
400ceeb2e94Slaokengwt None => Rawlink::none(),
401ceeb2e94Slaokengwt Some(prev) => Rawlink::some(prev),
402ceeb2e94Slaokengwt };
403ceeb2e94Slaokengwt }
404ceeb2e94Slaokengwt }
405ceeb2e94Slaokengwt }
406ceeb2e94Slaokengwt
407ceeb2e94Slaokengwt *slab_page.prev() = Rawlink::none();
408ceeb2e94Slaokengwt *slab_page.next() = Rawlink::none();
409ceeb2e94Slaokengwt self.elements -= 1;
410ceeb2e94Slaokengwt }
411ceeb2e94Slaokengwt
412ceeb2e94Slaokengwt /// Removes `slab_page` from the list.
413*bd70d2d1SLoGin #[allow(clippy::manual_inspect)]
pop<'b>(&'b mut self) -> Option<&'a mut T>414ceeb2e94Slaokengwt pub(crate) fn pop<'b>(&'b mut self) -> Option<&'a mut T> {
415ceeb2e94Slaokengwt match self.head {
416ceeb2e94Slaokengwt None => None,
417ceeb2e94Slaokengwt Some(ref mut head) => {
418ceeb2e94Slaokengwt let head_next = head.next();
419ceeb2e94Slaokengwt let mut new_head = unsafe { head_next.resolve_mut() };
420ceeb2e94Slaokengwt mem::swap(&mut self.head, &mut new_head);
421ceeb2e94Slaokengwt let _ = self.head.as_mut().map(|n| {
422ceeb2e94Slaokengwt *n.prev() = Rawlink::none();
423ceeb2e94Slaokengwt });
424ceeb2e94Slaokengwt
425ceeb2e94Slaokengwt self.elements -= 1;
426ceeb2e94Slaokengwt new_head.map(|node| {
427ceeb2e94Slaokengwt *node.prev() = Rawlink::none();
428ceeb2e94Slaokengwt *node.next() = Rawlink::none();
429ceeb2e94Slaokengwt node
430ceeb2e94Slaokengwt })
431ceeb2e94Slaokengwt }
432ceeb2e94Slaokengwt }
433ceeb2e94Slaokengwt }
434ceeb2e94Slaokengwt
435ceeb2e94Slaokengwt /// Does the list contain `s`?
contains(&mut self, s: *const T) -> bool436ceeb2e94Slaokengwt pub(crate) fn contains(&mut self, s: *const T) -> bool {
437ceeb2e94Slaokengwt for slab_page in self.iter_mut() {
438ceeb2e94Slaokengwt if core::ptr::eq(slab_page, s) {
439ceeb2e94Slaokengwt return true;
440ceeb2e94Slaokengwt }
441ceeb2e94Slaokengwt }
442ceeb2e94Slaokengwt
443ceeb2e94Slaokengwt false
444ceeb2e94Slaokengwt }
445ceeb2e94Slaokengwt }
446ceeb2e94Slaokengwt
447ceeb2e94Slaokengwt /// Iterate over all the pages inside a slab allocator
448ceeb2e94Slaokengwt pub(crate) struct ObjectPageIterMut<'a, P: AllocablePage> {
449ceeb2e94Slaokengwt head: Rawlink<P>,
450ceeb2e94Slaokengwt phantom: core::marker::PhantomData<&'a P>,
451ceeb2e94Slaokengwt }
452ceeb2e94Slaokengwt
453ceeb2e94Slaokengwt impl<'a, P: AllocablePage + 'a> Iterator for ObjectPageIterMut<'a, P> {
454ceeb2e94Slaokengwt type Item = &'a mut P;
455ceeb2e94Slaokengwt
456ceeb2e94Slaokengwt #[inline]
457*bd70d2d1SLoGin #[allow(clippy::manual_inspect)]
next(&mut self) -> Option<&'a mut P>458ceeb2e94Slaokengwt fn next(&mut self) -> Option<&'a mut P> {
459ceeb2e94Slaokengwt unsafe {
460ceeb2e94Slaokengwt self.head.resolve_mut().map(|next| {
461ceeb2e94Slaokengwt self.head = match next.next().resolve_mut() {
462ceeb2e94Slaokengwt None => Rawlink::none(),
463ceeb2e94Slaokengwt Some(ref mut sp) => Rawlink::some(*sp),
464ceeb2e94Slaokengwt };
465ceeb2e94Slaokengwt next
466ceeb2e94Slaokengwt })
467ceeb2e94Slaokengwt }
468ceeb2e94Slaokengwt }
469ceeb2e94Slaokengwt }
470ceeb2e94Slaokengwt
471ceeb2e94Slaokengwt /// Rawlink is a type like Option<T> but for holding a raw pointer.
472ceeb2e94Slaokengwt ///
473ceeb2e94Slaokengwt /// We use it to link AllocablePages together. You probably won't need
474ceeb2e94Slaokengwt /// to use this type if you're not implementing AllocablePage
475ceeb2e94Slaokengwt /// for a custom page-size.
476ceeb2e94Slaokengwt pub struct Rawlink<T> {
477ceeb2e94Slaokengwt p: *mut T,
478ceeb2e94Slaokengwt }
479ceeb2e94Slaokengwt
480ceeb2e94Slaokengwt impl<T> Default for Rawlink<T> {
default() -> Self481ceeb2e94Slaokengwt fn default() -> Self {
482ceeb2e94Slaokengwt Rawlink { p: ptr::null_mut() }
483ceeb2e94Slaokengwt }
484ceeb2e94Slaokengwt }
485ceeb2e94Slaokengwt
486ceeb2e94Slaokengwt impl<T> Rawlink<T> {
487ceeb2e94Slaokengwt /// Like Option::None for Rawlink
none() -> Rawlink<T>488ceeb2e94Slaokengwt pub(crate) fn none() -> Rawlink<T> {
489ceeb2e94Slaokengwt Rawlink { p: ptr::null_mut() }
490ceeb2e94Slaokengwt }
491ceeb2e94Slaokengwt
492ceeb2e94Slaokengwt /// Like Option::Some for Rawlink
some(n: &mut T) -> Rawlink<T>493ceeb2e94Slaokengwt pub(crate) fn some(n: &mut T) -> Rawlink<T> {
494ceeb2e94Slaokengwt Rawlink { p: n }
495ceeb2e94Slaokengwt }
496ceeb2e94Slaokengwt
497ceeb2e94Slaokengwt /// Convert the `Rawlink` into an Option value
498ceeb2e94Slaokengwt ///
499ceeb2e94Slaokengwt /// **unsafe** because:
500ceeb2e94Slaokengwt ///
501ceeb2e94Slaokengwt /// - Dereference of raw pointer.
502ceeb2e94Slaokengwt /// - Returns reference of arbitrary lifetime.
503ceeb2e94Slaokengwt #[allow(dead_code)]
resolve<'a>(&self) -> Option<&'a T>504ceeb2e94Slaokengwt pub(crate) unsafe fn resolve<'a>(&self) -> Option<&'a T> {
505ceeb2e94Slaokengwt self.p.as_ref()
506ceeb2e94Slaokengwt }
507ceeb2e94Slaokengwt
508ceeb2e94Slaokengwt /// Convert the `Rawlink` into an Option value
509ceeb2e94Slaokengwt ///
510ceeb2e94Slaokengwt /// **unsafe** because:
511ceeb2e94Slaokengwt ///
512ceeb2e94Slaokengwt /// - Dereference of raw pointer.
513ceeb2e94Slaokengwt /// - Returns reference of arbitrary lifetime.
resolve_mut<'a>(&mut self) -> Option<&'a mut T>514ceeb2e94Slaokengwt pub(crate) unsafe fn resolve_mut<'a>(&mut self) -> Option<&'a mut T> {
515ceeb2e94Slaokengwt self.p.as_mut()
516ceeb2e94Slaokengwt }
517ceeb2e94Slaokengwt
518ceeb2e94Slaokengwt /// Return the `Rawlink` and replace with `Rawlink::none()`
519ceeb2e94Slaokengwt #[allow(dead_code)]
take(&mut self) -> Rawlink<T>520ceeb2e94Slaokengwt pub(crate) fn take(&mut self) -> Rawlink<T> {
521ceeb2e94Slaokengwt mem::replace(self, Rawlink::none())
522ceeb2e94Slaokengwt }
523ceeb2e94Slaokengwt }
524