/*
 * Copyright IBM Corp. 2008, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>

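/* 2 GB boundary, used to cap addresses and sizes on 31-bit kernels. */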
#define ADDR2G (1ULL << 31)

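/*
 * Scan storage in steps of the storage increment size and merge
 * consecutive increments with the same tprot() result into chunks.
 */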
static void find_memory_chunks(struct mem_chunk chunk[])
{
	unsigned long long memsize, rnmax, rzm;
	unsigned long addr = 0, size;
	int i = 0, type;

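	/*
	 * Ask SCLP for the storage increment size (rzm) and the maximum
	 * number of increments (rnmax). Fall back to 128 KB increments if
	 * no increment size is reported; cap everything at 2 GB on 31 bit.
	 */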
	rzm = sclp_get_rzm();
	rnmax = sclp_get_rnmax();
	memsize = rzm * rnmax;
	if (!rzm)
		rzm = 1ULL << 17;
	if (sizeof(long) == 4) {
		rzm = min(ADDR2G, rzm);
		memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
	}
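	/*
	 * Walk through storage one increment at a time. The inner loop
	 * grows the current chunk as long as tprot() returns the same
	 * result for the next increment.
	 */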
	do {
		size = 0;
		type = tprot(addr);
		do {
			size += rzm;
			if (memsize && addr + size >= memsize)
				break;
		} while (type == tprot(addr + size));
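		/* Record only accessible (read-write or read-only) chunks. */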
		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
			chunk[i].addr = addr;
			chunk[i].size = size;
			chunk[i].type = type;
			i++;
		}
		addr += size;
	} while (addr < memsize && i < MEMORY_CHUNKS);
}

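/*
 * Detect the memory layout and store it in the chunk array, which must
 * provide room for MEMORY_CHUNKS entries.
 */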
void detect_memory_layout(struct mem_chunk chunk[])
{
	unsigned long flags, cr0;

	memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
	/* Disable IRQs, DAT and low address protection so tprot does the
	 * right thing and we don't get scheduled away with low address
	 * protection disabled.
	 */
	flags = __arch_local_irq_stnsm(0xf8);
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);
	find_memory_chunks(chunk);
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}
EXPORT_SYMBOL(detect_memory_layout);