#ifndef _LINUX_SLAB_DEF_H
#define	_LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */
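
/*
 * For example, kmalloc(64, GFP_KERNEL) with a compile-time constant size
 * reduces to a kmem_cache_alloc() from the matching general cache,
 * skipping the runtime size lookup in __kmalloc().  (Illustrative; the
 * cache actually chosen depends on <linux/kmalloc_sizes.h>.)
 */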

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

#include <trace/events/kmem.h>

/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, in which case they are
 * BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * ARCH_KMALLOC_MINALIGN allows that.
 * Note that increasing this value may disable some debug features.
 */
#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
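
/*
 * Illustrative sketch: an arch with non-coherent DMA might define, in
 * its asm/cache.h (hypothetical placement and value),
 *
 *	#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 *
 * so that a buffer handed to DMA never shares a cache line with
 * unrelated data.
 */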

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible, do not enable this flag for CONFIG_DEBUG_SLAB, as it
 * disables some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif

/*
 * struct kmem_cache
 *
 * Manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
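	/*
	 * reciprocal_buffer_size caches reciprocal_value(buffer_size) so
	 * that object-index computations (obj_to_index() in mm/slab.c) can
	 * use reciprocal_divide(), a multiply, instead of a division on
	 * hot paths.
	 */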
/* 3) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *obj);

/* 5) cache creation/removal */
	const char *name;
	struct list_head next;

/* 6) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the
	 * total object size including these internal fields; the following
	 * two variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif /* CONFIG_DEBUG_SLAB */

	/*
	 * We put nodelists[] at the end of kmem_cache, because we want to
	 * size this array to nr_node_ids slots instead of MAX_NUMNODES
	 * (see kmem_cache_init()).
	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of nodes.
	 */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	/*
	 * Do not add fields after nodelists[]
	 */
};
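
/*
 * Minimal usage sketch (illustrative only; struct my_obj is a
 * hypothetical caller-defined type, and the create/free/destroy
 * prototypes live in <linux/slab.h>, not in this header):
 *
 *	struct kmem_cache *cp;
 *
 *	cp = kmem_cache_create("my_objs", sizeof(struct my_obj),
 *			       0, SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(cp, GFP_KERNEL);
 *	...
 *	kmem_cache_free(cp, obj);
 *	kmem_cache_destroy(cp);
 */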

/* Size description struct for general caches. */
struct cache_sizes {
	size_t		 	cs_size;
	struct kmem_cache	*cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];
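
/*
 * malloc_sizes[] is built from the CACHE(x) entries in
 * <linux/kmalloc_sizes.h>, in ascending size order.  The compile-time
 * loops in kmalloc()/kmalloc_node() below expand the same list, so the
 * index they compute selects the smallest general cache whose cs_size
 * can hold the request.
 */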

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(size_t size,
				    struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags);
}
static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return 0;
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_trace(size, cachep, flags);

		return ret;
	}
	return __kmalloc(size, flags);
}
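
/*
 * Illustrative: with a constant size the branch chain above folds to a
 * single cache, so
 *
 *	buf = kmalloc(128, GFP_KERNEL);
 *
 * becomes, roughly,
 *
 *	buf = kmem_cache_alloc(malloc_sizes[i].cs_cachep, GFP_KERNEL);
 *
 * for the matching index i, while a runtime-sized kmalloc(len, flags)
 * falls through to __kmalloc().
 */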

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(size_t size,
					 struct kmem_cache *cachep,
					 gfp_t flags,
					 int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(size_t size,
			    struct kmem_cache *cachep,
			    gfp_t flags,
			    int nodeid)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		return kmem_cache_alloc_node_trace(size, cachep, flags, node);
	}
	return __kmalloc_node(size, flags, node);
}
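
/*
 * Illustrative: kmalloc_node(size, GFP_KERNEL, nid) takes the same
 * constant-size fast path as kmalloc() above, but allocates from the
 * caches of NUMA node nid via kmem_cache_alloc_node().
 */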

#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */