// SPDX-License-Identifier: GPL-2.0
#include <linux/processor.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/physmem_info.h>
#include <asm/stacktrace.h>
#include <asm/boot_data.h>
#include <asm/sparsemem.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sclp.h>
#include <asm/uv.h>
#include "decompressor.h"
#include "boot.h"

struct physmem_info __bootdata(physmem_info);
static unsigned int physmem_alloc_ranges;
static unsigned long physmem_alloc_pos;

/* up to 256 storage elements, 1020 subincrements each */
#define ENTRIES_EXTENDED_MAX						       \
	(256 * (1020 / 2) * sizeof(struct physmem_range))

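/*
 * Return a pointer to the nth online memory range. The first
 * MEM_INLINED_ENTRIES ranges are stored inline in physmem_info; any further
 * ranges go into a lazily allocated extended array.
 */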
static struct physmem_range *__get_physmem_range_ptr(u32 n)
{
	if (n < MEM_INLINED_ENTRIES)
		return &physmem_info.online[n];
	if (unlikely(!physmem_info.online_extended)) {
		physmem_info.online_extended = (struct physmem_range *)physmem_alloc_range(
			RR_MEM_DETECT_EXTENDED, ENTRIES_EXTENDED_MAX, sizeof(long), 0,
			physmem_alloc_pos, true);
	}
	return &physmem_info.online_extended[n - MEM_INLINED_ENTRIES];
}

/*
 * Sequential calls to add_physmem_online_range() with adjacent memory ranges
 * are merged together into a single memory range.
 */
void add_physmem_online_range(u64 start, u64 end)
{
	struct physmem_range *range;

	if (physmem_info.range_count) {
		range = __get_physmem_range_ptr(physmem_info.range_count - 1);
		if (range->end == start) {
			range->end = end;
			return;
		}
	}

	range = __get_physmem_range_ptr(physmem_info.range_count);
	range->start = start;
	range->end = end;
	physmem_info.range_count++;
}

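/*
 * Issue diagnose 0x260 subcode 0x10 ("storage configuration") to query the
 * storage layout from the hypervisor. A temporary program check handler is
 * installed around the diagnose, so -1 is returned instead of taking an
 * unhandled program check when the diagnose is not supported.
 */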
static int __diag260(unsigned long rx1, unsigned long rx2)
{
	unsigned long reg1, reg2, ry;
	union register_pair rx;
	psw_t old;
	int rc;

	rx.even = rx1;
	rx.odd	= rx2;
	ry = 0x10; /* storage configuration */
	rc = -1;   /* fail */
	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	diag	%[rx],%[ry],0x260\n"
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  [ry] "+&d" (ry),
		  "+Q" (S390_lowcore.program_new_psw),
		  "=Q" (old)
		: [rx] "d" (rx.pair),
		  [psw_old] "a" (&old),
		  [psw_pgm] "a" (&S390_lowcore.program_new_psw)
		: "cc", "memory");
	return rc == 0 ? ry : -1;
}

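/*
 * Use diagnose 0x260 to obtain the storage extents provided by the
 * hypervisor and register each of them as an online memory range.
 */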
static int diag260(void)
{
	int rc, i;

	struct {
		unsigned long start;
		unsigned long end;
	} storage_extents[8] __aligned(16); /* VM supports up to 8 extents */

	memset(storage_extents, 0, sizeof(storage_extents));
	rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
	if (rc == -1)
		return -1;

	for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
		add_physmem_online_range(storage_extents[i].start, storage_extents[i].end + 1);
	return 0;
}

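/*
 * Test whether the storage at @addr is accessible by executing TEST
 * PROTECTION under a temporary program check handler. Returns the tprot
 * condition code, or -EFAULT if the access caused a program check.
 */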
static int tprot(unsigned long addr)
{
	unsigned long reg1, reg2;
	int rc = -EFAULT;
	psw_t old;

	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	tprot	0(%[addr]),0\n"
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  "=Q" (S390_lowcore.program_new_psw.addr),
		  "=Q" (old)
		: [psw_old] "a" (&old),
		  [psw_pgm] "a" (&S390_lowcore.program_new_psw),
		  [addr] "a" (addr)
		: "cc", "memory");
	return rc;
}

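/*
 * Find the end of physical memory by binary search over 1MB blocks,
 * using tprot() to probe whether a block is accessible.
 */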
static unsigned long search_mem_end(void)
{
	unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
	unsigned long offset = 0;
	unsigned long pivot;

	while (range > 1) {
		range >>= 1;
		pivot = offset + range;
		if (!tprot(pivot << 20))
			offset = pivot;
	}
	return (offset + 1) << 20;
}

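/*
 * Determine the maximum physical memory address, preferably via SCLP read
 * info, with a tprot() based binary search as fallback.
 */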
unsigned long detect_max_physmem_end(void)
{
	unsigned long max_physmem_end = 0;

	if (!sclp_early_get_memsize(&max_physmem_end)) {
		physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
	} else {
		max_physmem_end = search_mem_end();
		physmem_info.info_source = MEM_DETECT_BIN_SEARCH;
	}
	return max_physmem_end;
}

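/*
 * Fill in the list of online memory ranges: try SCLP storage info first,
 * then diagnose 0x260, and finally fall back to a single range covering
 * [0, max_physmem_end).
 */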
void detect_physmem_online_ranges(unsigned long max_physmem_end)
{
	if (!sclp_early_read_storage_info()) {
		physmem_info.info_source = MEM_DETECT_SCLP_STOR_INFO;
	} else if (!diag260()) {
		physmem_info.info_source = MEM_DETECT_DIAG260;
	} else if (max_physmem_end) {
		add_physmem_online_range(0, max_physmem_end);
	}
}

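/* Set the usable memory limit and start allocating right below it */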
void physmem_set_usable_limit(unsigned long limit)
{
	physmem_info.usable = limit;
	physmem_alloc_pos = limit;
}

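/*
 * Print the failed allocation request together with an overview of the
 * reserved and usable online memory ranges, then halt the system.
 */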
static void die_oom(unsigned long size, unsigned long align, unsigned long min, unsigned long max)
{
	unsigned long start, end, total_mem = 0, total_reserved_mem = 0;
	struct reserved_range *range;
	enum reserved_range_type t;
	int i;

	decompressor_printk("Linux version %s\n", kernel_version);
	if (!is_prot_virt_guest() && early_command_line[0])
		decompressor_printk("Kernel command line: %s\n", early_command_line);
	decompressor_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n",
			    size, align, min, max);
	decompressor_printk("Reserved memory ranges:\n");
	for_each_physmem_reserved_range(t, range, &start, &end) {
		decompressor_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
		total_reserved_mem += end - start;
	}
	decompressor_printk("Usable online memory ranges (info source: %s [%x]):\n",
			    get_physmem_info_source(), physmem_info.info_source);
	for_each_physmem_usable_range(i, &start, &end) {
		decompressor_printk("%016lx %016lx\n", start, end);
		total_mem += end - start;
	}
	decompressor_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n",
			    total_mem, total_reserved_mem,
			    total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
	print_stacktrace(current_frame_address());
	sclp_early_printk("\n\n -- System halted\n");
	disabled_wait();
}

void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
{
	physmem_info.reserved[type].start = addr;
	physmem_info.reserved[type].end = addr + size;
}

void physmem_free(enum reserved_range_type type)
{
	physmem_info.reserved[type].start = 0;
	physmem_info.reserved[type].end = 0;
}

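/*
 * Check whether [addr, addr + size) overlaps a reserved range or the ipl
 * report certificates. On overlap, report the start of the intersecting
 * area so the caller can retry below it.
 */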
static bool __physmem_alloc_intersects(unsigned long addr, unsigned long size,
				       unsigned long *intersection_start)
{
	unsigned long res_addr, res_size;
	int t;

	for (t = 0; t < RR_MAX; t++) {
		if (!get_physmem_reserved(t, &res_addr, &res_size))
			continue;
		if (intersects(addr, size, res_addr, res_size)) {
			*intersection_start = res_addr;
			return true;
		}
	}
	return ipl_report_certs_intersects(addr, size, intersection_start);
}

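/*
 * Top-down allocator core: walk the online memory ranges from the highest
 * downwards and find an aligned area of @size bytes within [min, max] that
 * does not intersect any reserved range. Returns the address, or 0 if no
 * fit exists (unless die_on_oom is set).
 */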
static unsigned long __physmem_alloc_range(unsigned long size, unsigned long align,
					   unsigned long min, unsigned long max,
					   unsigned int from_ranges, unsigned int *ranges_left,
					   bool die_on_oom)
{
	unsigned int nranges = from_ranges ?: physmem_info.range_count;
	unsigned long range_start, range_end;
	unsigned long intersection_start;
	unsigned long addr, pos = max;

	align = max(align, 8UL);
	while (nranges) {
		__get_physmem_range(nranges - 1, &range_start, &range_end, false);
		pos = min(range_end, pos);

		if (round_up(min, align) + size > pos)
			break;
		addr = round_down(pos - size, align);
		if (range_start > addr) {
			nranges--;
			continue;
		}
		if (__physmem_alloc_intersects(addr, size, &intersection_start)) {
			pos = intersection_start;
			continue;
		}

		if (ranges_left)
			*ranges_left = nranges;
		return addr;
	}
	if (die_on_oom)
		die_oom(size, align, min, max);
	return 0;
}

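/*
 * Allocate @size bytes within [min, max] (additionally capped at the
 * current allocation position) and record the result as a reserved range
 * of the given @type.
 */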
unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size,
				  unsigned long align, unsigned long min, unsigned long max,
				  bool die_on_oom)
{
	unsigned long addr;

	max = min(max, physmem_alloc_pos);
	addr = __physmem_alloc_range(size, align, min, max, 0, NULL, die_on_oom);
	if (addr)
		physmem_reserve(type, addr, size);
	return addr;
}

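/*
 * Allocate @size bytes top-down, starting below the previous allocation
 * position. Physically consecutive allocations of the same @type extend the
 * existing reserved range downwards; otherwise the previous range of that
 * type is preserved in a freshly allocated struct reserved_range linked via
 * its chain pointer.
 */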
unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size,
				     unsigned long align)
{
	struct reserved_range *range = &physmem_info.reserved[type];
	struct reserved_range *new_range;
	unsigned int ranges_left;
	unsigned long addr;

	addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos, physmem_alloc_ranges,
				     &ranges_left, true);
	/* if not a consecutive allocation of the same type or first allocation */
	if (range->start != addr + size) {
		if (range->end) {
			physmem_alloc_pos = __physmem_alloc_range(
				sizeof(struct reserved_range), 0, 0, physmem_alloc_pos,
				physmem_alloc_ranges, &ranges_left, true);
			new_range = (struct reserved_range *)physmem_alloc_pos;
			*new_range = *range;
			range->chain = new_range;
			addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos,
						     ranges_left, &ranges_left, true);
		}
		range->end = addr + size;
	}
	range->start = addr;
	physmem_alloc_pos = addr;
	physmem_alloc_ranges = ranges_left;
	return addr;
}

unsigned long get_physmem_alloc_pos(void)
{
	return physmem_alloc_pos;
}