/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_group;
struct resource;
struct vmem_altmap;
struct dev_pagemap;

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add we have to allocate a new pgdat.
 *
 * If an arch has a generic-style NODE_DATA(),
 * node_data[nid] = kzalloc() works, but this depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)

#ifdef CONFIG_NUMA
/*
 * XXX: node-aware allocation can't be used to get the new node's memory
 * at this point, because the pgdat for the new node has not been
 * allocated/initialized yet itself. Using the new node's own memory will
 * need further work.
 *
 * Note: the caller must have <linux/memblock.h> available.
 */
#define generic_alloc_nodedata(nid)					\
({									\
	memblock_alloc(sizeof(pg_data_t), SMP_CACHE_BYTES);		\
})

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
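
/*
 * Illustrative sketch (not part of this header): a node hot-add path is
 * expected to allocate the pgdat and then publish it, roughly:
 *
 *	pg_data_t *pgdat = arch_alloc_nodedata(nid);
 *
 *	if (!pgdat)
 *		return NULL;
 *	arch_refresh_nodedata(nid, pgdat);
 *
 * The surrounding function and its error handling are assumptions made
 * for the example.
 */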

#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);
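
/*
 * A minimal caller-side sketch (the variables are assumptions): walk a
 * PFN range and only touch pages that are online:
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page)
 *			continue;
 *		... operate on an online page ...
 *	}
 */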

/* Types to control the zone type of onlined and offlined memory */
enum {
	/* Offline the memory. */
	MMOP_OFFLINE = 0,
	/* Online the memory. Zone depends, see default_zone_for_pfn(). */
	MMOP_ONLINE,
	/* Online the memory to ZONE_NORMAL. */
	MMOP_ONLINE_KERNEL,
	/* Online the memory to ZONE_MOVABLE. */
	MMOP_ONLINE_MOVABLE,
};
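
/*
 * For orientation (an assumption about the sysfs interface; see also
 * mhp_online_type_from_str() below), these correspond to the memory
 * block "state"/"online_type" strings:
 *
 *	MMOP_OFFLINE        <-> "offline"
 *	MMOP_ONLINE         <-> "online"
 *	MMOP_ONLINE_KERNEL  <-> "online_kernel"
 *	MMOP_ONLINE_MOVABLE <-> "online_movable"
 */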

/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;

/* No special request */
#define MHP_NONE		((__force mhp_t)0)
/*
 * Allow merging of the added System RAM resource with adjacent,
 * mergeable resources. After a successful call to add_memory_resource()
 * with this flag set, the resource pointer must no longer be used as it
 * might be stale, or the resource might have changed.
 */
#define MHP_MERGE_RESOURCE	((__force mhp_t)BIT(0))

/*
 * We want the memmap (struct page array) to be self-contained.
 * To do so, we will use the beginning of the hot-added range to build
 * the page tables for the memmap array that describes the entire range.
 * Only selected architectures support it with SPARSE_VMEMMAP.
 */
#define MHP_MEMMAP_ON_MEMORY	((__force mhp_t)BIT(1))
/*
 * The nid field specifies a memory group id (mgid) instead. The memory group
 * implies the node id (nid).
 */
#define MHP_NID_IS_MGID		((__force mhp_t)BIT(2))
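
/*
 * Illustrative sketch (assumptions, not from this header): a driver
 * hot-adding device-managed memory might combine these flags like
 *
 *	rc = add_memory_driver_managed(nid, start, size,
 *				       "System RAM (example)",
 *				       MHP_MERGE_RESOURCE |
 *				       MHP_MEMMAP_ON_MEMORY);
 *
 * where the resource name is hypothetical, and MHP_MEMMAP_ON_MEMORY
 * should only be passed when mhp_supports_memmap_on_memory() reports
 * support.
 */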

/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *	(required)
 * pgmap: corresponding dev_pagemap describing the range, if any (optional)
 */
struct mhp_params {
	struct vmem_altmap *altmap;
	pgprot_t pgprot;
	struct dev_pagemap *pgmap;
};
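
/*
 * A minimal initialization sketch (PAGE_KERNEL as the protection is an
 * assumption; callers may need a different pgprot):
 *
 *	struct mhp_params params = {
 *		.pgprot = PAGE_KERNEL,
 *	};
 *
 *	rc = arch_add_memory(nid, start, size, &params);
 */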

bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone should have both pgdat_resize_lock()
 * and zone_span_writelock() held. This ensures the size of a zone
 * can't be changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
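
/*
 * Reader-side sketch: sampling a zone's span consistently against a
 * concurrent resize (which fields are read is just an example):
 *
 *	unsigned int seq;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		nr_pages = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 */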
extern void adjust_present_page_count(struct page *page,
				      struct memory_group *group,
				      long nr_pages);
/* VM interface that may be used by firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
				     struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			struct zone *zone, struct memory_group *group);
extern void __offline_isolated_pages(unsigned long start_pfn,
				     unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			   struct mhp_params *params);
extern u64 max_mem_size;

extern int mhp_online_type_from_str(const char *str);

/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int mhp_default_online_type;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
			    unsigned long nr_pages, struct mhp_params *params)
{
	return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);
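
/*
 * Usage sketch (the reader body is an assumption): code that must keep
 * memory from being onlined/offlined underneath it brackets the access:
 *
 *	get_online_mems();
 *	... inspect online memory ...
 *	put_online_mems();
 *
 * Paths that actually change memory state instead run between
 * mem_hotplug_begin() and mem_hotplug_done().
 */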

/* See kswapd_is_running() */
static inline void pgdat_kswapd_lock(pg_data_t *pgdat)
{
	mutex_lock(&pgdat->kswapd_lock);
}

static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)
{
	mutex_unlock(&pgdat->kswapd_lock);
}

static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat)
{
	mutex_init(&pgdat->kswapd_lock);
}

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}

static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {}
static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {}
static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

/*
 * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
 * platforms might override and use arch_get_mappable_range()
 * for internal, non-memory-hotplug purposes.
 */
struct range arch_get_mappable_range(void);

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
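
/*
 * A minimal, IRQ-safe usage sketch for the pgdat resize lock (the body
 * is an example, not a prescribed caller):
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	... update pgdat->node_start_pfn / pgdat->node_spanned_pages ...
 *	pgdat_resize_unlock(pgdat, &flags);
 */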

#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
			 struct zone *zone, struct memory_group *group);
extern int remove_memory(u64 start, u64 size);
extern void __remove_memory(u64 start, u64 size);
extern int offline_and_remove_memory(u64 start, u64 size);

#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
				struct zone *zone, struct memory_group *group)
{
	return -EINVAL;
}

static inline int remove_memory(u64 start, u64 size)
{
	return -EBUSY;
}

static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
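
/*
 * Teardown sketch (with assumptions about the caller's own bookkeeping):
 * a driver that added memory earlier can try to take it back with
 *
 *	rc = offline_and_remove_memory(start, size);
 *
 * which fails (e.g. -EBUSY) if the range could not be offlined, in which
 * case the memory stays online.
 */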

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#ifdef CONFIG_MEMORY_HOTPLUG
extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory_resource(int nid, struct resource *resource,
			       mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
				     const char *resource_name,
				     mhp_t mhp_flags);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern int sparse_add_section(int nid, unsigned long pfn,
			      unsigned long nr_pages, struct vmem_altmap *altmap,
			      struct dev_pagemap *pgmap);
extern void sparse_remove_section(struct mem_section *ms,
				  unsigned long pfn, unsigned long nr_pages,
				  unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
				       struct memory_group *group, unsigned long start_pfn,
				       unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
				      struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
extern bool mhp_supports_memmap_on_memory(unsigned long size);
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* __LINUX_MEMORY_HOTPLUG_H */