// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/mem_detect.h>
#include <asm/sparsemem.h>
#include "decompressor.h"
#include "boot.h"

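/* Shared with the decompressed kernel via the .boot.data section (__bootdata). */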
struct mem_detect_info __bootdata(mem_detect);

/* up to 256 storage elements, 1020 subincrements each */
#define ENTRIES_EXTENDED_MAX						       \
	(256 * (1020 / 2) * sizeof(struct mem_detect_block))

/*
 * To avoid corrupting old kernel memory during dump, find the lowest memory
 * chunk possible, either right after the end of the (decompressed) kernel or
 * after the initrd (if it is present and there is no hole between the end of
 * the kernel and the initrd).
 */
static void *mem_detect_alloc_extended(void)
{
	unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
	    initrd_data.start < offset + ENTRIES_EXTENDED_MAX)
		offset = ALIGN(initrd_data.start + initrd_data.size, sizeof(u64));

	return (void *)offset;
}

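/*
 * Return a pointer to the n-th mem_detect block. The first
 * MEM_INLINED_ENTRIES blocks live directly in mem_detect; anything beyond
 * that goes into the extended area, which is allocated lazily on first use.
 */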
static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
{
	if (n < MEM_INLINED_ENTRIES)
		return &mem_detect.entries[n];
	if (unlikely(!mem_detect.entries_extended))
		mem_detect.entries_extended = mem_detect_alloc_extended();
	return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
}

/*
 * Sequential calls to add_mem_detect_block() with adjacent memory areas
 * are merged together into a single memory block.
 */
void add_mem_detect_block(u64 start, u64 end)
{
	struct mem_detect_block *block;

	if (mem_detect.count) {
		block = __get_mem_detect_block_ptr(mem_detect.count - 1);
		if (block->end == start) {
			block->end = end;
			return;
		}
	}

	block = __get_mem_detect_block_ptr(mem_detect.count);
	block->start = start;
	block->end = end;
	mem_detect.count++;
}

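/*
 * Issue diagnose 0x260 with subcode 0x10 ("storage configuration") to let
 * the hypervisor store the storage extent list at rx1/rx2. A temporary
 * program check new PSW pointing to label 1 makes a program check during
 * the diagnose (e.g. when diag 0x260 is not provided) fall through with rc
 * left at -1. Returns the value of ry on success, -1 otherwise.
 */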
static int __diag260(unsigned long rx1, unsigned long rx2)
{
	unsigned long reg1, reg2, ry;
	union register_pair rx;
	psw_t old;
	int rc;

	rx.even = rx1;
	rx.odd	= rx2;
	ry = 0x10; /* storage configuration */
	rc = -1;   /* fail */
	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	diag	%[rx],%[ry],0x260\n"
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  [ry] "+&d" (ry),
		  "+Q" (S390_lowcore.program_new_psw),
		  "=Q" (old)
		: [rx] "d" (rx.pair),
		  [psw_old] "a" (&old),
		  [psw_pgm] "a" (&S390_lowcore.program_new_psw)
		: "cc", "memory");
	return rc == 0 ? ry : -1;
}

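/*
 * Query the storage extents known to the hypervisor via diag 0x260 and
 * register each of them. The reported end address is the last byte of the
 * extent, hence the "+ 1" to convert it to an exclusive end.
 */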
static int diag260(void)
{
	int rc, i;

	struct {
		unsigned long start;
		unsigned long end;
	} storage_extents[8] __aligned(16); /* VM supports up to 8 extents */

	memset(storage_extents, 0, sizeof(storage_extents));
	rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
	if (rc == -1)
		return -1;

	for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
		add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
	return 0;
}

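/*
 * Probe whether storage at addr is accessible by issuing TEST PROTECTION.
 * An addressing exception (i.e. addr is beyond the end of memory) is caught
 * by the temporary program check new PSW and leaves rc at -EFAULT;
 * otherwise the instruction's condition code (0..3) is returned.
 */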
static int tprot(unsigned long addr)
{
	unsigned long reg1, reg2;
	int rc = -EFAULT;
	psw_t old;

	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	tprot	0(%[addr]),0\n"
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  "+Q" (S390_lowcore.program_new_psw),
		  "=Q" (old)
		: [psw_old] "a" (&old),
		  [psw_pgm] "a" (&S390_lowcore.program_new_psw),
		  [addr] "a" (addr)
		: "cc", "memory");
	return rc;
}

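/*
 * Binary search for the end of online memory: probe 1 MB blocks with
 * tprot(), starting from the theoretical maximum of MAX_PHYSMEM_BITS, and
 * register the result as a single block starting at 0.
 */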
static void search_mem_end(void)
{
	unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
	unsigned long offset = 0;
	unsigned long pivot;

	while (range > 1) {
		range >>= 1;
		pivot = offset + range;
		if (!tprot(pivot << 20))
			offset = pivot;
	}

	add_mem_detect_block(0, (offset + 1) << 20);
}

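/*
 * Detect the online memory layout, trying the mechanisms in order of
 * preference: SCLP storage info, diag 0x260, the SCLP-reported maximum
 * memory size, and finally a tprot() binary search. Records the used source
 * in mem_detect.info_source and returns the detected memory end.
 */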
unsigned long detect_memory(void)
{
	unsigned long max_physmem_end = 0;

	sclp_early_get_memsize(&max_physmem_end);

	if (!sclp_early_read_storage_info()) {
		mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
		return max_physmem_end;
	}

	if (!diag260()) {
		mem_detect.info_source = MEM_DETECT_DIAG260;
		return max_physmem_end;
	}

	if (max_physmem_end) {
		add_mem_detect_block(0, max_physmem_end);
		mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
		return max_physmem_end;
	}

	search_mem_end();
	mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
	return get_mem_detect_end();
}