/* Minimal malloc implementation for dynamic linker and static
   initialization.
   Copyright (C) 1995-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

/* Mark symbols hidden in static PIE for early self relocation to work.
   Note: string.h may have ifuncs which cannot be hidden on i686.  */
#if BUILD_PIE_DEFAULT
# pragma GCC visibility push(hidden)
#endif
#include <assert.h>
#include <string.h>
#include <ldsodefs.h>
#include <malloc/malloc-internal.h>

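/* Bump-allocator state: alloc_ptr is the next free byte, alloc_end is the
   end of the currently mapped region, and alloc_last_block records the
   start of the most recent allocation so free and realloc can handle it.  */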
static void *alloc_ptr, *alloc_end, *alloc_last_block;

/* Allocate an aligned memory block.  */
void *
__minimal_malloc (size_t n)
{
  if (alloc_end == 0)
    {
      /* Consume any unused space in the last page of our data segment.  */
      extern int _end attribute_hidden;
      alloc_ptr = &_end;
      alloc_end = (void *) 0 + (((alloc_ptr - (void *) 0)
                                 + GLRO(dl_pagesize) - 1)
                                & ~(GLRO(dl_pagesize) - 1));
    }

  /* Make sure the allocation pointer is ideally aligned.  */
  alloc_ptr = (void *) 0 + (((alloc_ptr - (void *) 0) + MALLOC_ALIGNMENT - 1)
                            & ~(MALLOC_ALIGNMENT - 1));

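  /* Check whether the request fits in the remaining space; the second test
     guards against alloc_ptr + n wrapping around the address space.  */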
  if (alloc_ptr + n >= alloc_end || n >= -(uintptr_t) alloc_ptr)
    {
      /* Insufficient space left; allocate another page plus one extra
         page to reduce number of mmap calls.  */
      caddr_t page;
      size_t nup = (n + GLRO(dl_pagesize) - 1) & ~(GLRO(dl_pagesize) - 1);
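      /* Rounding n up to a page multiple can wrap to zero for requests
         close to SIZE_MAX; reject those instead of mapping a region that
         is too small.  */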
      if (__glibc_unlikely (nup == 0 && n != 0))
        return NULL;
      nup += GLRO(dl_pagesize);
      page = __mmap (0, nup, PROT_READ|PROT_WRITE,
                     MAP_ANON|MAP_PRIVATE, -1, 0);
      if (page == MAP_FAILED)
        return NULL;
      if (page != alloc_end)
        alloc_ptr = page;
      alloc_end = page + nup;
    }

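  /* Record the start of this block so __minimal_free and __minimal_realloc
     can operate on the most recent allocation.  */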
  alloc_last_block = (void *) alloc_ptr;
  alloc_ptr += n;
  return alloc_last_block;
}

/* We use this function occasionally since the real implementation may
   be optimized when it can assume the memory it returns already is
   set to NUL.  */
void *
__minimal_calloc (size_t nmemb, size_t size)
{
  /* New memory from the trivial malloc above is always already cleared.
     (We make sure that's true in the rare occasion it might not be,
     by clearing memory in free, below.)  */
  size_t bytes = nmemb * size;

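  /* Reject requests where nmemb * size overflows.  If both operands are
     below HALF_SIZE_T the product cannot overflow, so the division test
     is only performed in the unlikely case.  */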
#define HALF_SIZE_T (((size_t) 1) << (8 * sizeof (size_t) / 2))
  if (__builtin_expect ((nmemb | size) >= HALF_SIZE_T, 0)
      && size != 0 && bytes / size != nmemb)
    return NULL;

  return malloc (bytes);
}

/* This will rarely be called.  */
void
__minimal_free (void *ptr)
{
  /* We can free only the last block allocated.  */
  if (ptr == alloc_last_block)
    {
      /* Since this is rare, we clear the freed block here
         so that calloc can presume malloc returns cleared memory.  */
      memset (alloc_last_block, '\0', alloc_ptr - alloc_last_block);
      alloc_ptr = alloc_last_block;
    }
}

/* This is only called with the most recent block returned by malloc.  */
void *
__minimal_realloc (void *ptr, size_t n)
{
  if (ptr == NULL)
    return malloc (n);
  assert (ptr == alloc_last_block);
  size_t old_size = alloc_ptr - alloc_last_block;
  alloc_ptr = alloc_last_block;
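  /* Re-allocate from the rolled-back pointer; if malloc hands back the same
     address the old contents are already in place, otherwise copy them into
     the new block.  */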
  void *new = malloc (n);
  return new != ptr ? memcpy (new, ptr, old_size) : new;
}