//! An `SCAllocator` that can allocate fixed-size objects.

use core::mem;

use crate::*;

/// A genius(?) const min()
///
/// # What this does
/// * create an array of the two elements you want to choose between
/// * create an arbitrary boolean expression
/// * cast said expression to a usize
/// * use that value to index into the array created above
///
/// # Source
/// <https://stackoverflow.com/questions/53619695/calculating-maximum-value-of-a-set-of-constant-expressions-at-compile-time>
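///
/// # Example (illustrative)
///
/// A quick sketch of how the boolean-indexing trick picks the smaller value:
///
/// ```ignore
/// // (a > b) as usize is 0 when a <= b (index 0, i.e. a)
/// // and 1 when a > b (index 1, i.e. b).
/// assert_eq!([1, 2][(1 > 2) as usize], 1);
/// assert_eq!([5, 2][(5 > 2) as usize], 2);
/// ```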
#[cfg(feature = "unstable")]
const fn cmin(a: usize, b: usize) -> usize {
    [a, b][(a > b) as usize]
}

/// The boring variant of min (not const).
#[cfg(not(feature = "unstable"))]
fn cmin(a: usize, b: usize) -> usize {
    core::cmp::min(a, b)
}

/// A slab allocator allocates elements of a fixed size.
///
/// It maintains three internal lists of objects that implement `AllocablePage`
/// from which it can allocate memory.
///
///  * `empty_slabs`: Pages that the SCAllocator maintains but that have zero
///    allocations in them; these can be given back to a requestor in case
///    of reclamation.
///  * `slabs`: Pages that are partially allocated and still have room for more objects.
///  * `full_slabs`: Pages that are completely allocated.
///
/// On allocation we allocate memory from `slabs`; if that list is empty we try
/// to take a page from `empty_slabs` before we return with an out-of-memory
/// error. If a page becomes full after the allocation we move it from `slabs` to
/// `full_slabs`.
///
/// Similarly, after an object is deallocated a page may move from `full_slabs`
/// to `slabs` or from `slabs` to `empty_slabs` (this rebalancing happens lazily,
/// as part of allocation; see `check_page_assignments`).
///
/// If an allocation returns `OutOfMemory`, a client using SCAllocator can refill
/// it using the `refill` function.
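///
/// # Example (illustrative sketch)
///
/// A minimal sketch of the refill-on-OOM flow described above; `ObjectPage` and
/// the caller-side `allocate_page()` helper are placeholders for whatever page
/// type and page source the client actually uses:
///
/// ```ignore
/// let mut sc: SCAllocator<ObjectPage> = SCAllocator::new(64);
/// let layout = Layout::from_size_align(64, 8).unwrap();
///
/// let ptr = sc.allocate(layout).or_else(|_| {
///     // Out of memory: hand the allocator a fresh, empty page and retry.
///     unsafe { sc.refill(allocate_page()) };
///     sc.allocate(layout)
/// });
/// ```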
pub struct SCAllocator<'a, P: AllocablePage> {
    /// Maximum possible allocation size for this `SCAllocator`.
    pub(crate) size: usize,
    /// Keeps track of successful allocations.
    pub(crate) allocation_count: usize,
    /// Maximum number of objects per page.
    pub(crate) obj_per_page: usize,
    /// List of empty ObjectPages (nothing allocated in these).
    pub(crate) empty_slabs: PageList<'a, P>,
    /// List of partially used ObjectPages (some objects allocated but the pages are not full).
    pub(crate) slabs: PageList<'a, P>,
    /// List of full ObjectPages (everything is allocated in these, so we don't need to search them).
    pub(crate) full_slabs: PageList<'a, P>,
}

/// Creates an instance of an `SCAllocator`. We do this in a macro because we
/// reuse the code in both the const and non-const constructors.
macro_rules! new_sc_allocator {
    ($size:expr) => {
        SCAllocator {
            size: $size,
            allocation_count: 0,
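            // Cap at 8 * 64 objects per page; presumably this matches the number
            // of bits the page's bitfield can track (an assumption, not checked here).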
            obj_per_page: cmin((P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD) / $size, 8 * 64),
            empty_slabs: PageList::new(),
            slabs: PageList::new(),
            full_slabs: PageList::new(),
        }
    };
}

impl<'a, P: AllocablePage> SCAllocator<'a, P> {
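    /// After roughly this many allocations the page lists are rebalanced
    /// (lazily, on the allocation slow path) via `check_page_assignments`.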
    const REBALANCE_COUNT: usize = 64;

    /// Create a new SCAllocator.
    #[cfg(feature = "unstable")]
    pub const fn new(size: usize) -> SCAllocator<'a, P> {
        new_sc_allocator!(size)
    }

    #[cfg(not(feature = "unstable"))]
    pub fn new(size: usize) -> SCAllocator<'a, P> {
        new_sc_allocator!(size)
    }

    /// Returns the maximum supported object size of this allocator.
    pub fn size(&self) -> usize {
        self.size
    }

    /// Add a new ObjectPage.
    fn insert_partial_slab(&mut self, new_head: &'a mut P) {
        self.slabs.insert_front(new_head);
    }

    /// Add page to empty list.
    fn insert_empty(&mut self, new_head: &'a mut P) {
        assert_eq!(
            new_head as *const P as usize % P::SIZE,
            0,
            "Inserted page is not aligned to page-size."
        );
        self.empty_slabs.insert_front(new_head);
    }

    /// Since `dealloc` cannot reassign pages without requiring a lock,
    /// we check slabs and full slabs periodically as part of `alloc`
    /// and move them to the empty or partially allocated slab lists.
    pub(crate) fn check_page_assignments(&mut self) {
        for slab_page in self.full_slabs.iter_mut() {
            if !slab_page.is_full() {
                // We need to move it from self.full_slabs -> self.slabs
                trace!("move {:p} full -> partial", slab_page);
                self.move_full_to_partial(slab_page);
            }
        }

        for slab_page in self.slabs.iter_mut() {
            if slab_page.is_empty(self.obj_per_page) {
                // We need to move it from self.slabs -> self.empty_slabs
                trace!("move {:p} partial -> empty", slab_page);
                self.move_to_empty(slab_page);
            }
        }
    }

    /// Move a page from `slabs` to `empty_slabs`.
    fn move_to_empty(&mut self, page: &'a mut P) {
        let page_ptr = page as *const P;

        debug_assert!(self.slabs.contains(page_ptr));
        debug_assert!(
            !self.empty_slabs.contains(page_ptr),
            "Page {:p} already in empty_slabs",
            page_ptr
        );

        self.slabs.remove_from_list(page);
        self.empty_slabs.insert_front(page);

        debug_assert!(!self.slabs.contains(page_ptr));
        debug_assert!(self.empty_slabs.contains(page_ptr));
    }

    /// Move a page from `slabs` to `full_slabs`.
    fn move_partial_to_full(&mut self, page: &'a mut P) {
        let page_ptr = page as *const P;

        debug_assert!(self.slabs.contains(page_ptr));
        debug_assert!(!self.full_slabs.contains(page_ptr));

        self.slabs.remove_from_list(page);
        self.full_slabs.insert_front(page);

        debug_assert!(!self.slabs.contains(page_ptr));
        debug_assert!(self.full_slabs.contains(page_ptr));
    }

    /// Move a page from `full_slabs` to `slabs`.
    fn move_full_to_partial(&mut self, page: &'a mut P) {
        let page_ptr = page as *const P;

        debug_assert!(!self.slabs.contains(page_ptr));
        debug_assert!(self.full_slabs.contains(page_ptr));

        self.full_slabs.remove_from_list(page);
        self.slabs.insert_front(page);

        debug_assert!(self.slabs.contains(page_ptr));
        debug_assert!(!self.full_slabs.contains(page_ptr));
    }

    /// Tries to allocate a block of memory with respect to the `layout`.
    /// Searches within the already allocated slab pages; if no suitable spot is
    /// found, the caller falls back to a page from the empty page list.
    ///
    /// # Arguments
    ///  * `sc_layout`: This is not the original layout but adjusted for the
    ///     SCAllocator size (>= original).
    fn try_allocate_from_pagelist(&mut self, sc_layout: Layout) -> *mut u8 {
        // TODO: Do we really need to check multiple slab pages (due to alignment)
        // If not we can get away with a singly-linked list and have 8 more bytes
        // for the bitfield in an ObjectPage.

        for slab_page in self.slabs.iter_mut() {
            let ptr = slab_page.allocate(sc_layout);
            if !ptr.is_null() {
                if slab_page.is_full() {
                    trace!("move {:p} partial -> full", slab_page);
                    self.move_partial_to_full(slab_page);
                }
                self.allocation_count += 1;
                return ptr;
            }
        }

        // Periodically rebalance page-lists (since dealloc can't do it for us)
        if self.allocation_count > SCAllocator::<P>::REBALANCE_COUNT {
            self.check_page_assignments();
            self.allocation_count = 0;
        }

        ptr::null_mut()
    }

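    /// Tries to hand back up to `to_reclaim` empty pages to the caller.
    ///
    /// The page lists are rebalanced first; then pages are popped off
    /// `empty_slabs` and passed one by one to the `dealloc` callback. Returns
    /// how many pages were actually reclaimed, which may be fewer than
    /// `to_reclaim` if not enough empty pages are available.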
    pub fn try_reclaim_pages<F>(&mut self, to_reclaim: usize, dealloc: &mut F) -> usize
    where
        F: FnMut(*mut P),
    {
        self.check_page_assignments();
        let mut reclaimed = 0;
        while reclaimed < to_reclaim {
            if let Some(page) = self.empty_slabs.pop() {
                dealloc(page as *mut P);
                reclaimed += 1;
            } else {
                break;
            }
        }

        reclaimed
    }

    /// Refill the SCAllocator.
    ///
    /// # Safety
    /// The given `ObjectPage` must be empty (no live allocations in it) and must
    /// not currently be tracked by this or any other allocator.
    pub unsafe fn refill(&mut self, page: &'a mut P) {
        page.bitfield_mut()
            .initialize(self.size, P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD);
        *page.prev() = Rawlink::none();
        *page.next() = Rawlink::none();
        trace!("adding page to SCAllocator {:p}", page);
        self.insert_empty(page);
    }

    /// Allocates a block of memory described by `layout`.
    ///
    /// Returns a pointer to a valid region of memory or an
    /// `AllocationError`.
    ///
    /// The function may also move around pages between lists
    /// (empty -> partial or partial -> full).
    pub fn allocate(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocationError> {
        trace!(
            "SCAllocator({}) is trying to allocate {:?}",
            self.size,
            layout
        );
        assert!(layout.size() <= self.size);
        assert!(self.size <= (P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD));
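        // Widen the request to this size class (keeping the caller's alignment)
        // so a full slot is handed out.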
        let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
        assert!(new_layout.size() >= layout.size());

        let ptr = {
            // Try to allocate from partial slabs,
            // if we fail check if we have empty pages and allocate from there
            let ptr = self.try_allocate_from_pagelist(new_layout);
            if ptr.is_null() && self.empty_slabs.head.is_some() {
                // Re-try allocation in empty page
                let empty_page = self.empty_slabs.pop().expect("We checked head.is_some()");
                debug_assert!(!self.empty_slabs.contains(empty_page));

                let ptr = empty_page.allocate(layout);
                debug_assert!(!ptr.is_null(), "Allocation must have succeeded here.");

                trace!(
                    "move {:p} empty -> partial empty count {}",
                    empty_page,
                    self.empty_slabs.elements
                );
                // Move empty page to partial pages
                self.insert_partial_slab(empty_page);
                ptr
            } else {
                ptr
            }
        };

        let res = NonNull::new(ptr).ok_or(AllocationError::OutOfMemory);

        if !ptr.is_null() {
            trace!(
                "SCAllocator({}) allocated ptr=0x{:x}",
                self.size,
                ptr as usize
            );
        }

        res
    }

    /// Deallocates a previously allocated `ptr` described by `Layout`.
    ///
    /// May return an error in case an invalid `layout` is provided.
    /// The function may also move internal slab pages between lists
    /// (partial -> empty or full -> partial).
    pub fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) -> Result<(), AllocationError> {
        assert!(layout.size() <= self.size);
        assert!(self.size <= (P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD));
        trace!(
            "SCAllocator({}) is trying to deallocate ptr = {:p} layout={:?} P.size= {}",
            self.size,
            ptr,
            layout,
            P::SIZE
        );

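        // Round the pointer down to the page boundary to find the page that
        // owns this object; this relies on P::SIZE being a power of two and on
        // pages being P::SIZE-aligned (checked when pages are inserted).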
        let page = (ptr.as_ptr() as usize) & !(P::SIZE - 1);

        // Figure out which page we are on and construct a reference to it
        // TODO: The linked list will have another &mut reference
        let slab_page = unsafe { mem::transmute::<VAddr, &'a mut P>(page) };
        let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };

        let ret = slab_page.deallocate(ptr, new_layout);
        debug_assert!(ret.is_ok(), "Slab page deallocate won't fail at the moment");
        ret
    }
}