// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>
#include <asm/hugetlb.h>

#include <mm/mmu_decl.h>

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the page that
 * represents the start of the subsection it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static struct page * __meminit vmemmap_subsection_start(unsigned long vmemmap_addr)
{
	unsigned long start_pfn;
	unsigned long offset = vmemmap_addr - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the subsection. */
	start_pfn = (offset / sizeof(struct page)) & PAGE_SUBSECTION_MASK;
	return pfn_to_page(start_pfn);
}

/*
 * Since memory is added in sub-section chunks, before creating a new vmemmap
 * mapping, the kernel should check whether there is an existing memmap mapping
 * covering the new subsection added. This is needed because the kernel can map
 * the vmemmap area using 16MB pages, which will cover a memory range of 16GB.
 * Such a range covers multiple subsections (2MB each).
 *
 * If any subsection in the 16GB range mapped by vmemmap is valid, we consider
 * the vmemmap populated (there is a page table entry already present). We
 * can't do a page table lookup here because with hash translation we don't
 * keep vmemmap details in the Linux page table.
 */
static int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size)
{
	struct page *start;
	unsigned long vmemmap_end = vmemmap_addr + vmemmap_map_size;

	start = vmemmap_subsection_start(vmemmap_addr);

	for (; (unsigned long)start < vmemmap_end; start += PAGES_PER_SUBSECTION)
		/*
		 * The pfn_valid() check here is intended to check whether
		 * we have any subsection already initialized in this range.
		 */
		if (pfn_valid(page_to_pfn(start)))
			return 1;

	return 0;
}

/*
 * vmemmap virtual address space management does not have a traditional page
 * table to track which virtual struct pages are backed by a physical mapping.
 * The virtual to physical mappings are tracked in a simple linked list
 * format.  'vmemmap_list' maintains the entire vmemmap physical mapping at
 * all times, whereas the 'next' pointer maintains the available
 * vmemmap_backing structures which have been deleted from the
 * 'vmemmap_list' during system runtime (memory hotplug remove
 * operation).  The freed 'vmemmap_backing' structures are reused later when
 * new requests come in without allocating fresh memory.  This pointer also
 * tracks the allocated 'vmemmap_backing' structures as we allocate one
 * full page of memory at a time when we don't have any.
 */
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;

/*
 * The same pointer 'next' tracks individual chunks inside the allocated
 * full page during boot time and again tracks the freed nodes during
 * runtime.  This is racy, but the two uses never overlap because they are
 * separated by the boot process.  It would be a problem if we somehow had a
 * memory hotplug operation during boot!
 */
static int num_left;
static int num_freed;
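
/*
 * Hand out one vmemmap_backing structure, reusing a freed entry when one is
 * available and otherwise carving chunks out of a freshly allocated page.
 */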
static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;

	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}
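
/*
 * Record the physical backing of one vmemmap chunk by pushing a new entry
 * onto the head of 'vmemmap_list'.
 */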
static __meminit int vmemmap_list_populate(unsigned long phys,
					   unsigned long start,
					   int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		pr_debug("vmemmap list allocation failed\n");
		return -ENOMEM;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
	return 0;
}
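
/*
 * Return true if the pfn range described by this vmemmap chunk is not fully
 * contained within the altmap's pfn range, in which case the backing pages
 * must come from regular system memory instead.
 */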
static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
				  unsigned long page_size)
{
	unsigned long nr_pfn = page_size / sizeof(struct page);
	unsigned long start_pfn = page_to_pfn((struct page *)start);

	if ((start_pfn + nr_pfn) > altmap->end_pfn)
		return true;

	if (start_pfn < altmap->base_pfn)
		return true;

	return false;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	bool altmap_alloc;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p = NULL;
		int rc;

		/*
		 * This vmemmap range is backing different subsections. If any
		 * of those subsections is marked valid, that means we already
		 * have initialized a page table covering this range and hence
		 * the vmemmap range is populated.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		/*
		 * Allocate from the altmap first if we have one. This may
		 * fail due to alignment issues when using 16MB hugepages, so
		 * fall back to system memory if the altmap allocation fails.
		 */
		if (altmap && !altmap_cross_boundary(altmap, start, page_size)) {
			p = vmemmap_alloc_block_buf(page_size, node, altmap);
			if (!p)
				pr_debug("altmap block allocation failed, falling back to system memory\n");
			else
				altmap_alloc = true;
		}
		if (!p) {
			p = vmemmap_alloc_block_buf(page_size, node, NULL);
			altmap_alloc = false;
		}
		if (!p)
			return -ENOMEM;

		if (vmemmap_list_populate(__pa(p), start, node)) {
			/*
			 * If we can't populate the vmemmap list, we don't
			 * have the ability to free the allocated vmemmap
			 * pages in section_deactivate. Hence free them
			 * here.
			 */
			int nr_pfns = page_size >> PAGE_SHIFT;
			unsigned long page_order = get_order(page_size);

			if (altmap_alloc)
				vmem_altmap_free(altmap, nr_pfns);
			else
				free_pages((unsigned long)p, page_order);
			return -ENOMEM;
		}

		pr_debug(" * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warn("%s: Unable to create vmemmap mapping: %d\n",
				__func__, rc);
			return -EFAULT;
		}
	}

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
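/*
 * Look up the vmemmap_list entry for this virtual address, unlink it and
 * move it to the free list headed by 'next', returning the physical address
 * it recorded (or 0 if no entry is found).
 */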
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it, remembering the previous entry */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back))
		return 0;

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* make 'next' point to this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

void __ref vmemmap_free(unsigned long start, unsigned long end,
			struct vmem_altmap *altmap)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long page_order = get_order(page_size);
	unsigned long alt_start = ~0, alt_end = ~0;
	unsigned long base_pfn;

	start = ALIGN_DOWN(start, page_size);
	if (altmap) {
		alt_start = altmap->base_pfn;
		alt_end = altmap->base_pfn + altmap->reserve +
			  altmap->free + altmap->alloc + altmap->align;
	}

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long nr_pages, addr;
		struct page *page;

		/*
		 * We have already marked the subsection we are trying to
		 * remove invalid. So if we want to remove the vmemmap range,
		 * we need to make sure there is no subsection marked valid
		 * in this range.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (!addr)
			continue;

		page = pfn_to_page(addr >> PAGE_SHIFT);
		nr_pages = 1 << page_order;
		base_pfn = PHYS_PFN(addr);

		if (base_pfn >= alt_start && base_pfn < alt_end) {
			vmem_altmap_free(altmap, nr_pages);
		} else if (PageReserved(page)) {
			/* allocated from bootmem */
			if (page_size < PAGE_SIZE) {
				/*
				 * this shouldn't happen, but if it is
				 * the case, leave the memory there
				 */
				WARN_ON_ONCE(1);
			} else {
				while (nr_pages--)
					free_reserved_page(page++);
			}
		} else {
			free_pages((unsigned long)(__va(addr)), page_order);
		}

		vmemmap_remove_mapping(start, page_size);
	}
}
#endif

void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}

#endif /* CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_PPC_BOOK3S_64
unsigned int mmu_lpid_bits;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
EXPORT_SYMBOL_GPL(mmu_lpid_bits);
#endif
unsigned int mmu_pid_bits;

static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
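
/*
 * Parse the "disable_radix" kernel command line option.  Passing it with no
 * value, or with a true value, requests that radix be disabled so the kernel
 * falls back to the hash MMU.
 */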
static int __init parse_disable_radix(char *p)
{
	bool val;

	if (!p)
		val = true;
	else if (kstrtobool(p, &val))
		return -EINVAL;

	disable_radix = val;

	return 0;
}
early_param("disable_radix", parse_disable_radix);

/*
 * If we're running under a hypervisor, we need to check the contents of
 * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do
 * radix.  If not, we clear the radix feature bit so we fall back to hash.
 */
static void __init early_check_vec5(void)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;
	u8 mmu_supported;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}

	/* Check for supported configuration */
	mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
			OV5_FEAT(OV5_MMU_SUPPORT);
	if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
		/* Hypervisor only supports radix - check enabled && GTSE */
		if (!early_radix_enabled()) {
			pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
		}
		if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
		      OV5_FEAT(OV5_RADIX_GTSE))) {
			cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE;
		} else
			cur_cpu_spec->mmu_features |= MMU_FTR_GTSE;
		/* Do radix anyway - the hypervisor said we had to */
		cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	} else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
		/* Hypervisor only supports hash - disable radix */
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE;
	}
}
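
/*
 * Flattened device tree callback: read the LPID/PID register widths from the
 * "ibm,mmu-lpid-bits" and "ibm,mmu-pid-bits" properties of the first "cpu"
 * node that provides them.
 */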
static int __init dt_scan_mmu_pid_width(unsigned long node,
					const char *uname, int depth,
					void *data)
{
	int size = 0;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Find MMU LPID, PID register size */
	prop = of_get_flat_dt_prop(node, "ibm,mmu-lpid-bits", &size);
	if (prop && size == 4)
		mmu_lpid_bits = be32_to_cpup(prop);

	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
	if (prop && size == 4)
		mmu_pid_bits = be32_to_cpup(prop);

	if (!mmu_pid_bits && !mmu_lpid_bits)
		return 0;

	return 1;
}
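
/*
 * Decide, early in boot, whether to use the hash or radix MMU based on the
 * command line, the device tree and (when running as a guest) what the
 * hypervisor supports, then perform the MMU-specific early device tree setup.
 */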
void __init mmu_early_init_devtree(void)
{
	bool hvmode = !!(mfmsr() & MSR_HV);

	/* Disable radix mode based on kernel command line. */
	if (disable_radix) {
		if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
			cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		else
			pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
	}

	of_scan_flat_dt(dt_scan_mmu_pid_width, NULL);
	if (hvmode && !mmu_lpid_bits) {
		if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
			mmu_lpid_bits = 12; /* POWER8-10 */
		else
			mmu_lpid_bits = 10; /* POWER7 */
	}
	if (!mmu_pid_bits) {
		if (early_cpu_has_feature(CPU_FTR_ARCH_300))
			mmu_pid_bits = 20; /* POWER9-10 */
	}

	/*
	 * Check /chosen/ibm,architecture-vec-5 if running as a guest.
	 * When running bare-metal, we can use radix if we like
	 * even though the ibm,architecture-vec-5 property created by
	 * skiboot doesn't have the necessary bits set.
	 */
	if (!hvmode)
		early_check_vec5();

	if (early_radix_enabled()) {
		radix__early_init_devtree();

		/*
		 * We have finalized the translation we are going to use by now.
		 * Radix mode is not limited by RMA / VRMA addressing.
		 * Hence don't limit memblock allocations.
		 */
		ppc64_rma_size = ULONG_MAX;
		memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
	} else
		hash__early_init_devtree();

	if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
		hugetlbpage_init_defaultsize();

	if (!(cur_cpu_spec->mmu_features & MMU_FTR_HPTE_TABLE) &&
	    !(cur_cpu_spec->mmu_features & MMU_FTR_TYPE_RADIX))
		panic("kernel does not support any MMU type offered by platform");
}
#endif /* CONFIG_PPC_BOOK3S_64 */