// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/sched/clock.h>

#include "internal.h"

/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

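/*
 * Per-page metadata kept in each page's page_ext area: the allocation
 * order and gfp mask, stack depot handles for the allocation and free
 * stack traces, allocation/free timestamps, and the allocating task's
 * pid, tgid and comm.
 */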
struct page_owner {
	unsigned short order;
	short last_migrate_reason;
	gfp_t gfp_mask;
	depot_stack_handle_t handle;
	depot_stack_handle_t free_handle;
	u64 ts_nsec;
	u64 free_ts_nsec;
	char comm[TASK_COMM_LEN];
	pid_t pid;
	pid_t tgid;
};

static bool page_owner_enabled __initdata;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

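/*
 * page_owner is enabled with the "page_owner=on" kernel command line
 * parameter. When enabled, ask stack depot to initialize early so that
 * stack traces can be stored before the regular allocators are up.
 */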
static int __init early_page_owner_param(char *buf)
{
	int ret = kstrtobool(buf, &page_owner_enabled);

	if (page_owner_enabled)
		stack_depot_want_early_init();

	return ret;
}
early_param("page_owner", early_page_owner_param);

static __init bool need_page_owner(void)
{
	return page_owner_enabled;
}

static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
	unsigned long entries[4];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
	dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
	failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
	early_handle = create_dummy_stack();
}

static __init void init_page_owner(void)
{
	if (!page_owner_enabled)
		return;

	register_dummy_stack();
	register_failure_stack();
	register_early_stack();
	static_branch_enable(&page_owner_inited);
	init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
	.size = sizeof(struct page_owner),
	.need = need_page_owner,
	.init = init_page_owner,
};

static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
	return (void *)page_ext + page_owner_ops.offset;
}

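/*
 * Capture the current stack trace and store it in the stack depot.
 * current->in_page_owner guards against recursion when saving the trace
 * itself needs memory; in that case the pre-registered dummy_handle is
 * returned. If the depot cannot store the trace, failure_handle is
 * returned instead.
 */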
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	depot_stack_handle_t handle;
	unsigned int nr_entries;

	/*
	 * Avoid recursion.
	 *
	 * Sometimes page metadata allocation tracking requires more
	 * memory to be allocated:
	 * - when new stack trace is saved to stack depot
	 * - when backtrace itself is calculated (ia64)
	 */
	if (current->in_page_owner)
		return dummy_handle;
	current->in_page_owner = 1;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
	handle = stack_depot_save(entries, nr_entries, flags);
	if (!handle)
		handle = failure_handle;

	current->in_page_owner = 0;
	return handle;
}

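/*
 * Called when a block of pages is freed: record the freeing stack trace
 * and timestamp in each page's page_owner and clear the "allocated" bit,
 * so the last free of the page can be reported later.
 */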
void __reset_page_owner(struct page *page, unsigned short order)
{
	int i;
	struct page_ext *page_ext;
	depot_stack_handle_t handle;
	struct page_owner *page_owner;
	u64 free_ts_nsec = local_clock();

	page_ext = page_ext_get(page);
	if (unlikely(!page_ext))
		return;

	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
	for (i = 0; i < (1 << order); i++) {
		__clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
		page_owner = get_page_owner(page_ext);
		page_owner->free_handle = handle;
		page_owner->free_ts_nsec = free_ts_nsec;
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

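/*
 * Fill in the page_owner data for every base page of a newly allocated
 * block: stack handle, order, gfp mask, allocating task and timestamp,
 * and set the PAGE_EXT_OWNER and PAGE_EXT_OWNER_ALLOCATED bits.
 */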
static inline void __set_page_owner_handle(struct page_ext *page_ext,
					depot_stack_handle_t handle,
					unsigned short order, gfp_t gfp_mask)
{
	struct page_owner *page_owner;
	int i;

	for (i = 0; i < (1 << order); i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->handle = handle;
		page_owner->order = order;
		page_owner->gfp_mask = gfp_mask;
		page_owner->last_migrate_reason = -1;
		page_owner->pid = current->pid;
		page_owner->tgid = current->tgid;
		page_owner->ts_nsec = local_clock();
		strscpy(page_owner->comm, current->comm,
			sizeof(page_owner->comm));
		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);

		page_ext = page_ext_next(page_ext);
	}
}

noinline void __set_page_owner(struct page *page, unsigned short order,
					gfp_t gfp_mask)
{
	struct page_ext *page_ext;
	depot_stack_handle_t handle;

	handle = save_stack(gfp_mask);

	page_ext = page_ext_get(page);
	if (unlikely(!page_ext))
		return;
	__set_page_owner_handle(page_ext, handle, order, gfp_mask);
	page_ext_put(page_ext);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->last_migrate_reason = reason;
	page_ext_put(page_ext);
}

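/*
 * When a high-order page is split, reset the recorded order of each of
 * the nr constituent pages to 0 so they are reported as individual
 * order-0 allocations from now on.
 */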
void __split_page_owner(struct page *page, unsigned int nr)
{
	int i;
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	for (i = 0; i < nr; i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->order = 0;
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

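/*
 * Copy the page_owner data from a folio that is about to be migrated to
 * its newly allocated replacement, so the new folio keeps reporting the
 * original allocation context.
 */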
void __folio_copy_owner(struct folio *newfolio, struct folio *old)
{
	struct page_ext *old_ext;
	struct page_ext *new_ext;
	struct page_owner *old_page_owner, *new_page_owner;

	old_ext = page_ext_get(&old->page);
	if (unlikely(!old_ext))
		return;

	new_ext = page_ext_get(&newfolio->page);
	if (unlikely(!new_ext)) {
		page_ext_put(old_ext);
		return;
	}

	old_page_owner = get_page_owner(old_ext);
	new_page_owner = get_page_owner(new_ext);
	new_page_owner->order = old_page_owner->order;
	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
	new_page_owner->last_migrate_reason =
		old_page_owner->last_migrate_reason;
	new_page_owner->handle = old_page_owner->handle;
	new_page_owner->pid = old_page_owner->pid;
	new_page_owner->tgid = old_page_owner->tgid;
	new_page_owner->ts_nsec = old_page_owner->ts_nsec;
	new_page_owner->free_ts_nsec = old_page_owner->ts_nsec;
	strcpy(new_page_owner->comm, old_page_owner->comm);

	/*
	 * We don't clear the bit on the old folio as it's going to be freed
	 * after migration. Until then, the info can be useful in case of
	 * a bug, and the overall stats will be off a bit only temporarily.
	 * Also, migrate_misplaced_transhuge_page() can still fail the
	 * migration and then we want the old folio to retain the info. But
	 * in that case we also don't need to explicitly clear the info from
	 * the new page, which will be freed.
	 */
	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
	__set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
	page_ext_put(new_ext);
	page_ext_put(old_ext);
}

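/*
 * Walk a zone pageblock by pageblock and count, per pageblock migratetype,
 * how many blocks contain at least one allocated page whose allocation
 * migratetype differs from the block's. Used to print the per-zone
 * mixed-block counts in /proc/pagetypeinfo.
 */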
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
				       pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	unsigned long pfn, block_end_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		page = pfn_to_online_page(pfn);
		if (!page) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = pageblock_end_pfn(pfn);
		block_end_pfn = min(block_end_pfn, end_pfn);

		pageblock_mt = get_pageblock_migratetype(page);

		for (; pfn < block_end_pfn; pfn++) {
			/* The pageblock is online, no need to recheck. */
			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			if (PageBuddy(page)) {
				unsigned long freepage_order;

				freepage_order = buddy_order_unsafe(page);
				if (freepage_order < MAX_ORDER)
					pfn += (1UL << freepage_order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
				goto ext_put_continue;

			page_owner = get_page_owner(page_ext);
			page_mt = gfp_migratetype(page_owner->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				page_ext_put(page_ext);
				break;
			}
			pfn += (1UL << page_owner->order) - 1;
ext_put_continue:
			page_ext_put(page_ext);
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}

/*
 * Look up the page's memcg information and print it out.
 */
static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret,
					 struct page *page)
{
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
	struct mem_cgroup *memcg;
	bool online;
	char name[80];

	rcu_read_lock();
	memcg_data = READ_ONCE(page->memcg_data);
	if (!memcg_data)
		goto out_unlock;

	if (memcg_data & MEMCG_DATA_OBJCGS)
		ret += scnprintf(kbuf + ret, count - ret,
				"Slab cache page\n");

	memcg = page_memcg_check(page);
	if (!memcg)
		goto out_unlock;

	online = (memcg->css.flags & CSS_ONLINE);
	cgroup_name(memcg->css.cgroup, name, sizeof(name));
	ret += scnprintf(kbuf + ret, count - ret,
			"Charged %sto %smemcg %s\n",
			PageMemcgKmem(page) ? "(via objcg) " : "",
			online ? "" : "offline ",
			name);
out_unlock:
	rcu_read_unlock();
#endif /* CONFIG_MEMCG */

	return ret;
}

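/*
 * Format one page_owner record into a kernel buffer and copy it to the
 * user buffer: allocation order, gfp mask, owning task, timestamps,
 * migratetype of the page and its pageblock, the saved allocation stack,
 * the last migrate reason and memcg details. Returns the number of bytes
 * written, or a negative errno if the record does not fit in one read or
 * the copy to user space fails.
 */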
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int ret, pageblock_mt, page_mt;
	char *kbuf;

	count = min_t(size_t, count, PAGE_SIZE);
	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = scnprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns, free_ts %llu ns\n",
			page_owner->order, page_owner->gfp_mask,
			&page_owner->gfp_mask, page_owner->pid,
			page_owner->tgid, page_owner->comm,
			page_owner->ts_nsec, page_owner->free_ts_nsec);

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt = gfp_migratetype(page_owner->gfp_mask);
	ret += scnprintf(kbuf + ret, count - ret,
			"PFN %lu type %s Block %lu type %s Flags %pGp\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			&page->flags);

	ret += stack_depot_snprint(handle, kbuf + ret, count - ret, 0);
	if (ret >= count)
		goto err;

	if (page_owner->last_migrate_reason != -1) {
		ret += scnprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
	}

	ret = print_page_owner_memcg(kbuf, count, ret, page);

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

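/*
 * Dump the page_owner state of a single page to the kernel log, including
 * whether the page is currently tracked as allocated or freed and the
 * saved allocation and free stack traces. Called from the page dumping
 * code when debugging bad pages.
 */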
void __dump_page_owner(const struct page *page)
{
	struct page_ext *page_ext = page_ext_get((void *)page);
	struct page_owner *page_owner;
	depot_stack_handle_t handle;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is no page extension available.\n");
		return;
	}

	page_owner = get_page_owner(page_ext);
	gfp_mask = page_owner->gfp_mask;
	mt = gfp_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not present (never set?)\n");
		page_ext_put(page_ext);
		return;
	}

	if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
		pr_alert("page_owner tracks the page as allocated\n");
	else
		pr_alert("page_owner tracks the page as freed\n");

	pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu, free_ts %llu\n",
		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
		 page_owner->pid, page_owner->tgid, page_owner->comm,
		 page_owner->ts_nsec, page_owner->free_ts_nsec);

	handle = READ_ONCE(page_owner->handle);
	if (!handle)
		pr_alert("page_owner allocation stack trace missing\n");
	else
		stack_depot_print(handle);

	handle = READ_ONCE(page_owner->free_handle);
	if (!handle) {
		pr_alert("page_owner free stack trace missing\n");
	} else {
		pr_alert("page last free stack trace:\n");
		stack_depot_print(handle);
	}

	if (page_owner->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
	page_ext_put(page_ext);
}

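/*
 * Read handler for the debugfs "page_owner" file (typically found at
 * /sys/kernel/debug/page_owner). The file offset is used as a PFN cursor:
 * each read scans forward from *ppos for the next page that is currently
 * tracked as allocated and emits one record for it. A read returning 0
 * means the end of physical memory was reached.
 */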
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	if (*ppos == 0)
		pfn = min_low_pfn;
	else
		pfn = *ppos;
	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * A temporary page_owner is required so that we can avoid
		 * context switches while holding the RCU lock and copying
		 * the page owner information to userspace via copy_to_user()
		 * or doing GFP_KERNEL allocations.
		 */
		struct page_owner page_owner_tmp;

		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = page_ext_get(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			goto ext_put_continue;

		/*
		 * Although we do have the info about past allocation of free
		 * pages, it's not relevant for current memory usage.
		 */
		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
			goto ext_put_continue;

		page_owner = get_page_owner(page_ext);

		/*
		 * Don't print "tail" pages of high-order allocations as that
		 * would inflate the stats.
		 */
		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
			goto ext_put_continue;

		/*
		 * Access to page_ext->handle isn't synchronous so we should
		 * be careful to access it.
		 */
		handle = READ_ONCE(page_owner->handle);
		if (!handle)
			goto ext_put_continue;

		/* Record the next PFN to read in the file offset */
		*ppos = pfn + 1;

		page_owner_tmp = *page_owner;
		page_ext_put(page_ext);
		return print_page_owner(buf, count, pfn, page,
				&page_owner_tmp, handle);
ext_put_continue:
		page_ext_put(page_ext);
	}

	return 0;
}

static loff_t lseek_page_owner(struct file *file, loff_t offset, int orig)
{
	switch (orig) {
	case SEEK_SET:
		file->f_pos = offset;
		break;
	case SEEK_CUR:
		file->f_pos += offset;
		break;
	default:
		return -EINVAL;
	}
	return file->f_pos;
}

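/*
 * Pages allocated before page_owner was initialized (early boot
 * allocations) have no owner information. Walk the zone and mark every
 * such page with the pre-registered early_handle so that later dumps do
 * not mistake them for untracked pages.
 */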
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count = 0;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		unsigned long block_end_pfn;

		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = pageblock_end_pfn(pfn);
		block_end_pfn = min(block_end_pfn, end_pfn);

		for (; pfn < block_end_pfn; pfn++) {
			struct page *page = pfn_to_page(pfn);
			struct page_ext *page_ext;

			if (page_zone(page) != zone)
				continue;

			/*
			 * To avoid having to grab zone->lock, be a little
			 * careful when reading buddy page order. The only
			 * danger is that we skip too much and potentially miss
			 * some early allocated pages, which is better than
			 * heavy lock contention.
			 */
			if (PageBuddy(page)) {
				unsigned long order = buddy_order_unsafe(page);

				if (order > 0 && order < MAX_ORDER)
					pfn += (1UL << order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				goto ext_put_continue;

			/* Found early allocated page */
			__set_page_owner_handle(page_ext, early_handle,
						0, 0);
			count++;
ext_put_continue:
			page_ext_put(page_ext);
		}
		cond_resched();
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		init_pages_in_zone(pgdat, zone);
	}
}

static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
	.read		= read_page_owner,
	.llseek		= lseek_page_owner,
};

static int __init pageowner_init(void)
{
	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	debugfs_create_file("page_owner", 0400, NULL, NULL,
			    &proc_page_owner_operations);

	return 0;
}
late_initcall(pageowner_init)