/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct memory_group;
struct resource;
struct vmem_altmap;
struct dev_pagemap;

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, a new pgdat has to be allocated.
 *
 * If an arch uses the generic style of NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but that depends on the
 * architecture.
 *
 * In general, generic_alloc_nodedata() is used; architectures that need
 * something different select CONFIG_HAVE_ARCH_NODEDATA_EXTENSION and
 * provide these two hooks themselves.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
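
/*
 * Illustrative sketch (not part of this header's contract) of how a node
 * hot-add path is expected to use the hooks above: allocate the pgdat,
 * then publish it so that NODE_DATA(nid) works. Initialization and error
 * handling are omitted.
 *
 *	pg_data_t *pgdat = arch_alloc_nodedata(nid);
 *
 *	if (!pgdat)
 *		return -ENOMEM;
 *	arch_refresh_nodedata(nid, pgdat);
 */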

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)

#ifdef CONFIG_NUMA
/*
 * XXX: node-aware allocation can't be used to get the new node's memory
 * at this point, because the pgdat for the new node has not been
 * allocated/initialized yet. Using the new node's own memory will
 * require more consideration.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	memblock_alloc(sizeof(pg_data_t), SMP_CACHE_BYTES);	\
})
/*
 * This definition is only for the error path of node hot-add.
 * For node hot-remove, it has to be replaced.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);

/* Types that control the zone of memory being onlined or offlined */
enum {
	/* Offline the memory. */
	MMOP_OFFLINE = 0,
	/* Online the memory. Zone depends, see default_zone_for_pfn(). */
	MMOP_ONLINE,
	/* Online the memory to ZONE_NORMAL. */
	MMOP_ONLINE_KERNEL,
	/* Online the memory to ZONE_MOVABLE. */
	MMOP_ONLINE_MOVABLE,
};
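
/*
 * These types correspond to the memory block "state" attribute in sysfs.
 * For example (the block number is illustrative):
 *
 *	echo online_movable > /sys/devices/system/memory/memory32/state
 *
 * requests MMOP_ONLINE_MOVABLE for that block.
 */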

/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;

/* No special request */
#define MHP_NONE		((__force mhp_t)0)
/*
 * Allow merging of the added System RAM resource with adjacent,
 * mergeable resources. After a successful call to add_memory_resource()
 * with this flag set, the resource pointer must no longer be used as it
 * might be stale, or the resource might have changed.
 */
#define MHP_MERGE_RESOURCE	((__force mhp_t)BIT(0))
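
/*
 * Illustrative sketch: after a successful call with MHP_MERGE_RESOURCE,
 * forget the resource pointer rather than dereferencing it again.
 *
 *	rc = add_memory_resource(nid, res, MHP_MERGE_RESOURCE);
 *	if (!rc)
 *		res = NULL;	(pointer may now be stale)
 */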

/*
 * We want the memmap (struct page array) to be self-contained.
 * To do so, we will use the beginning of the hot-added range to build
 * the page tables for the memmap array that describes the entire range.
 * Only selected architectures support it with SPARSE_VMEMMAP.
 */
#define MHP_MEMMAP_ON_MEMORY	((__force mhp_t)BIT(1))
/*
 * The nid field specifies a memory group id (mgid) instead. The memory group
 * implies the node id (nid).
 */
#define MHP_NID_IS_MGID		((__force mhp_t)BIT(2))

/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *	(required)
 * pgmap: device page map for ZONE_DEVICE memory, if any (optional)
 */
struct mhp_params {
	struct vmem_altmap *altmap;
	pgprot_t pgprot;
	struct dev_pagemap *pgmap;
};
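
/*
 * Minimal sketch of a typical caller; PAGE_KERNEL is the usual
 * protection for ordinary System RAM:
 *
 *	struct mhp_params params = { .pgprot = PAGE_KERNEL };
 *
 *	rc = arch_add_memory(nid, start, size, &params);
 */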

bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);
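
/*
 * Illustrative range check for a hot-add request that needs a linear
 * mapping (mhp_range_allowed() wraps this kind of test):
 *
 *	struct range r = mhp_get_pluggable_range(true);
 *
 *	if (start < r.start || start + size - 1 > r.end)
 *		return -ERANGE;
 */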

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone must hold both pgdat_resize_lock()
 * and zone_span_writelock(). This ensures the size of a zone can't be
 * changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
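
/*
 * Sketch of the intended lockless reader pattern for the seqlock
 * helpers above (the sampled fields are illustrative):
 *
 *	unsigned int seq;
 *	unsigned long start_pfn, nr_pages;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		nr_pages = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 */
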
extern void adjust_present_page_count(struct page *page,
				      struct memory_group *group,
				      long nr_pages);
/* VM interface that may be used by firmware interfaces */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
				     struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			struct zone *zone, struct memory_group *group);
extern void __offline_isolated_pages(unsigned long start_pfn,
				     unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
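
/*
 * Sketch of a driver-private callback (the function names are
 * illustrative); a ballooning driver, for instance, may decide per page
 * whether to hand it to the page allocator. generic_online_page() is
 * the default behaviour:
 *
 *	static void my_online_page(struct page *page, unsigned int order)
 *	{
 *		if (my_driver_wants_page(page))
 *			generic_online_page(page, order);
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */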

extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			   struct mhp_params *params);
extern u64 max_mem_size;

extern int mhp_online_type_from_str(const char *str);

/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int mhp_default_online_type;
/* Set if the movable_node boot option was specified. */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
			    unsigned long nr_pages, struct mhp_params *params)
{
	return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);
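
/*
 * Usage sketch: code that must keep memory from being onlined/offlined
 * underneath it takes the reader side; hotplug operations themselves
 * run with mem_hotplug_begin()/mem_hotplug_done():
 *
 *	get_online_mems();
 *	...walk pfns/sections safely...
 *	put_online_mems();
 */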

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

/*
 * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
 * platforms might override and use arch_get_mappable_range()
 * for internal, non-memory-hotplug purposes.
 */
struct range arch_get_mappable_range(void);

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
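
/*
 * Usage sketch: the flags word records the caller's interrupt state,
 * so the same caller must pass it to the matching unlock:
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	...update the node's span...
 *	pgdat_resize_unlock(pgdat, &flags);
 */
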
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
			 struct zone *zone, struct memory_group *group);
extern int remove_memory(u64 start, u64 size);
extern void __remove_memory(u64 start, u64 size);
extern int offline_and_remove_memory(u64 start, u64 size);
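
/*
 * Sketch of a driver-initiated unplug, as done by drivers such as
 * virtio-mem; start and size must cover whole memory blocks:
 *
 *	rc = offline_and_remove_memory(start, size);
 *	if (rc)
 *		...range is still in use, try again later...
 */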

#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
				struct zone *zone, struct memory_group *group)
{
	return -EINVAL;
}

static inline int remove_memory(u64 start, u64 size)
{
	return -EBUSY;
}

static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#ifdef CONFIG_MEMORY_HOTPLUG
extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory_resource(int nid, struct resource *resource,
			       mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
				     const char *resource_name,
				     mhp_t mhp_flags);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap);
extern void sparse_remove_section(struct mem_section *ms,
		unsigned long pfn, unsigned long nr_pages,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
		struct memory_group *group, unsigned long start_pfn,
		unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
				      struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
extern bool mhp_supports_memmap_on_memory(unsigned long size);
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MHP_MEMMAP_ON_MEMORY
bool mhp_memmap_on_memory(void);
#else
static inline bool mhp_memmap_on_memory(void)
{
	return false;
}
#endif

#endif /* __LINUX_MEMORY_HOTPLUG_H */