/*
 *
 * Procedures for interfacing to Open Firmware.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/types.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/bitops.h>
#include <asm/udbg.h>

extern unsigned long klimit;
extern unsigned long reloc_offset(void);


static long lmb_add_region(struct lmb_region *, unsigned long, unsigned long, unsigned long);

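/*
 * The single, statically-allocated LMB descriptor.  lmb.memory tracks
 * the physical memory reported by the firmware; lmb.reserved tracks
 * the ranges handed out by lmb_alloc()/lmb_reserve().
 */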
struct lmb lmb = {
	0, 0,
	{0,0,0,0,{{0,0,0}}},
	{0,0,0,0,{{0,0,0}}}
};

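/* Fold region r2 into r1 and slide the remaining entries down. */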
/* Assumption: base addr of region 1 < base addr of region 2 */
static void
lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
{
	unsigned long i;

	rgn->region[r1].size += rgn->region[r2].size;
	for (i=r2; i < rgn->cnt-1 ;i++) {
		rgn->region[i].base = rgn->region[i+1].base;
		rgn->region[i].physbase = rgn->region[i+1].physbase;
		rgn->region[i].size = rgn->region[i+1].size;
		rgn->region[i].type = rgn->region[i+1].type;
	}
	rgn->cnt--;
}

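/*
 * Note: this runs before the kernel has been relocated, so the static
 * lmb above must be accessed through PTRRELOC() using the offset
 * returned by reloc_offset().
 */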
/* This routine called with relocation disabled. */
void
lmb_init(void)
{
	unsigned long offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);

	/* Create a dummy zero size LMB which will get coalesced away later.
	 * This simplifies the lmb_add() code below...
	 */
	_lmb->memory.region[0].base = 0;
	_lmb->memory.region[0].size = 0;
	_lmb->memory.region[0].type = LMB_MEMORY_AREA;
	_lmb->memory.cnt = 1;

	/* Ditto. */
	_lmb->reserved.region[0].base = 0;
	_lmb->reserved.region[0].size = 0;
	_lmb->reserved.region[0].type = LMB_MEMORY_AREA;
	_lmb->reserved.cnt = 1;
}

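/*
 * Walk the memory list, assign physbase values, and total up the
 * memory (and, with CONFIG_MSCHUNKS, I/O) sizes.  lcd_size ends up as
 * the largest power of two that divides every region size.
 */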
/* This routine called with relocation disabled. */
void
lmb_analyze(void)
{
	unsigned long i;
	unsigned long mem_size = 0;
	unsigned long io_size = 0;
	unsigned long size_mask = 0;
	unsigned long offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
#ifdef CONFIG_MSCHUNKS
	unsigned long physbase = 0;
#endif

	for (i=0; i < _lmb->memory.cnt ;i++) {
		unsigned long lmb_type = _lmb->memory.region[i].type;
		unsigned long lmb_size;

		if ( lmb_type != LMB_MEMORY_AREA )
			continue;

		lmb_size = _lmb->memory.region[i].size;

#ifdef CONFIG_MSCHUNKS
		_lmb->memory.region[i].physbase = physbase;
		physbase += lmb_size;
#else
		_lmb->memory.region[i].physbase = _lmb->memory.region[i].base;
#endif
		mem_size += lmb_size;
		size_mask |= lmb_size;
	}

#ifdef CONFIG_MSCHUNKS
	for (i=0; i < _lmb->memory.cnt ;i++) {
		unsigned long lmb_type = _lmb->memory.region[i].type;
		unsigned long lmb_size;

		if ( lmb_type != LMB_IO_AREA )
			continue;

		lmb_size = _lmb->memory.region[i].size;

		_lmb->memory.region[i].physbase = physbase;
		physbase += lmb_size;
		io_size += lmb_size;
		size_mask |= lmb_size;
	}
#endif /* CONFIG_MSCHUNKS */

	_lmb->memory.size = mem_size;
	_lmb->memory.iosize = io_size;
	_lmb->memory.lcd_size = (1UL << cnt_trailing_zeros(size_mask));
}

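/*
 * Record a range of physical memory.  Returns the lmb_add_region()
 * result: > 0 if the range was coalesced into an existing region,
 * 0 if a new region was added, -1 if the region table is full.
 */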
/* This routine called with relocation disabled. */
long
lmb_add(unsigned long base, unsigned long size)
{
	unsigned long offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
	struct lmb_region *_rgn = &(_lmb->memory);

	/* On pSeries LPAR systems, the first LMB is our RMO region. */
	if ( base == 0 )
		_lmb->rmo_size = size;

	return lmb_add_region(_rgn, base, size, LMB_MEMORY_AREA);
}

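/*
 * Record a range of firmware I/O space.  I/O areas share the memory
 * region table but are tagged LMB_IO_AREA, and exist only when
 * CONFIG_MSCHUNKS is enabled.
 */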
#ifdef CONFIG_MSCHUNKS
/* This routine called with relocation disabled. */
long
lmb_add_io(unsigned long base, unsigned long size)
{
	unsigned long offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
	struct lmb_region *_rgn = &(_lmb->memory);

	return lmb_add_region(_rgn, base, size, LMB_IO_AREA);
}
#endif /* CONFIG_MSCHUNKS */

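/*
 * Mark [base, base+size) as reserved so lmb_alloc() will never hand
 * it out.
 */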
long
lmb_reserve(unsigned long base, unsigned long size)
{
	unsigned long offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
	struct lmb_region *_rgn = &(_lmb->reserved);

	return lmb_add_region(_rgn, base, size, LMB_MEMORY_AREA);
}

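/*
 * Insert a range into a sorted region table.  First try to coalesce
 * it with an adjacent region of the same type (and then with that
 * region's neighbour); otherwise shift higher entries up and insert
 * it in base-address order.
 */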
/* This routine called with relocation disabled. */
static long
lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size,
		unsigned long type)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	/* First try and coalesce this LMB with another. */
	for (i=0; i < rgn->cnt ;i++) {
		unsigned long rgnbase = rgn->region[i].base;
		unsigned long rgnsize = rgn->region[i].size;
		unsigned long rgntype = rgn->region[i].type;

		if ( rgntype != type )
			continue;

		adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
		if ( adjacent > 0 ) {
			/* New range butts up against the bottom of region i. */
			rgn->region[i].base -= size;
			rgn->region[i].physbase -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
		else if ( adjacent < 0 ) {
			/* New range butts up against the top of region i. */
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1) ) {
		lmb_coalesce_regions(rgn, i, i+1);
		coalesced++;
	}

	if ( coalesced ) {
		return coalesced;
	} else if ( rgn->cnt >= MAX_LMB_REGIONS ) {
		return -1;
	}

	/* Couldn't coalesce the LMB, so add it to the sorted table.
	 * Note: i must be signed, or the loop condition below would
	 * always be true and the loop could underflow the table.
	 */
	for (i=rgn->cnt-1; i >= 0 ;i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i+1].base = rgn->region[i].base;
			rgn->region[i+1].physbase = rgn->region[i].physbase;
			rgn->region[i+1].size = rgn->region[i].size;
			rgn->region[i+1].type = rgn->region[i].type;
		} else {
			rgn->region[i+1].base = base;
			rgn->region[i+1].physbase = lmb_abs_to_phys(base);
			rgn->region[i+1].size = size;
			rgn->region[i+1].type = type;
			break;
		}
	}
	if (i < 0) {
		/* The new region sorts below every existing one. */
		rgn->region[0].base = base;
		rgn->region[0].physbase = lmb_abs_to_phys(base);
		rgn->region[0].size = size;
		rgn->region[0].type = type;
	}
	rgn->cnt++;

	return 0;
}

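/*
 * Return the index of the first region in rgn that overlaps
 * [base, base+size), or -1 if there is no overlap.
 */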
long
lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
{
	unsigned long i;

	for (i=0; i < rgn->cnt ;i++) {
		unsigned long rgnbase = rgn->region[i].base;
		unsigned long rgnsize = rgn->region[i].size;
		if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
			break;
		}
	}

	return (i < rgn->cnt) ? i : -1;
}

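/*
 * Allocate size bytes with the given alignment from anywhere in
 * memory, and reserve the range.  Returns the base address, or 0 on
 * failure.  A sketch of a hypothetical early-boot caller:
 *
 *	unsigned long paddr = lmb_alloc(PAGE_SIZE, PAGE_SIZE);
 *	if ( !paddr )
 *		udbg_printf("lmb_alloc of early page failed\n");
 */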
unsigned long
lmb_alloc(unsigned long size, unsigned long align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

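/*
 * Allocate top-down: walk the memory regions from highest to lowest,
 * align the candidate base down, and step below any reserved region
 * it collides with until the range fits or falls off the bottom of
 * the region.
 */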
unsigned long
lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
{
	long i, j;
	unsigned long base = 0;
	unsigned long offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
	struct lmb_region *_mem = &(_lmb->memory);
	struct lmb_region *_rsv = &(_lmb->reserved);

	for (i=_mem->cnt-1; i >= 0 ;i--) {
		unsigned long lmbbase = _mem->region[i].base;
		unsigned long lmbsize = _mem->region[i].size;
		unsigned long lmbtype = _mem->region[i].type;

		if ( lmbtype != LMB_MEMORY_AREA )
			continue;

		if ( max_addr == LMB_ALLOC_ANYWHERE )
			base = _ALIGN_DOWN(lmbbase+lmbsize-size, align);
		else if ( lmbbase < max_addr )
			base = _ALIGN_DOWN(min(lmbbase+lmbsize,max_addr)-size, align);
		else
			continue;

		while ( (lmbbase <= base) &&
			((j = lmb_overlaps_region(_rsv,base,size)) >= 0) ) {
			base = _ALIGN_DOWN(_rsv->region[j].base-size, align);
		}

		if ( (base != 0) && (lmbbase <= base) )
			break;
	}

	if ( i < 0 )
		return 0;

	lmb_add_region(_rsv, base, size, LMB_MEMORY_AREA);

	return base;
}

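/*
 * Total physical memory.  Without CONFIG_MSCHUNKS this is computed as
 * physbase+size of the last region, which assumes physical memory is
 * contiguous from 0.
 */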
unsigned long
lmb_phys_mem_size(void)
{
	unsigned long offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
#ifdef CONFIG_MSCHUNKS
	return _lmb->memory.size;
#else
	struct lmb_region *_mem = &(_lmb->memory);
	unsigned long idx = _mem->cnt-1;
	unsigned long lastbase = _mem->region[idx].physbase;
	unsigned long lastsize = _mem->region[idx].size;

	return (lastbase + lastsize);
#endif /* CONFIG_MSCHUNKS */
}

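/*
 * Return the first address beyond the highest memory region; I/O
 * areas are skipped.
 */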
unsigned long
lmb_end_of_DRAM(void)
{
	unsigned long offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
	struct lmb_region *_mem = &(_lmb->memory);
	long idx;	/* signed: idx >= 0 is always true for an unsigned idx */

	for(idx=_mem->cnt-1; idx >= 0 ;idx--) {
		if ( _mem->region[idx].type != LMB_MEMORY_AREA )
			continue;
#ifdef CONFIG_MSCHUNKS
		return (_mem->region[idx].physbase + _mem->region[idx].size);
#else
		return (_mem->region[idx].base + _mem->region[idx].size);
#endif /* CONFIG_MSCHUNKS */
	}

	return 0;
}

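/*
 * Translate an absolute address into a physical address by looking up
 * the region containing it.  Addresses outside every region are
 * returned unchanged.
 */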
unsigned long
lmb_abs_to_phys(unsigned long aa)
{
	unsigned long i, pa = aa;
	unsigned long offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
	struct lmb_region *_mem = &(_lmb->memory);

	for (i=0; i < _mem->cnt ;i++) {
		unsigned long lmbbase = _mem->region[i].base;
		unsigned long lmbsize = _mem->region[i].size;
		if ( lmb_addrs_overlap(aa,1,lmbbase,lmbsize) ) {
			pa = _mem->region[i].physbase + (aa - lmbbase);
			break;
		}
	}

	return pa;
}

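/* Dump both region lists to the udbg console for early debugging. */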
void
lmb_dump(char *str)
{
	unsigned long i;

	udbg_printf("\nlmb_dump: %s\n", str);
	udbg_printf("    debug                       = %s\n",
		(lmb.debug) ? "TRUE" : "FALSE");
	udbg_printf("    memory.cnt                  = %lu\n",
		lmb.memory.cnt);
	udbg_printf("    memory.size                 = 0x%lx\n",
		lmb.memory.size);
	udbg_printf("    memory.lcd_size             = 0x%lx\n",
		lmb.memory.lcd_size);
	for (i=0; i < lmb.memory.cnt ;i++) {
		udbg_printf("    memory.region[%lu].base      = 0x%lx\n",
			i, lmb.memory.region[i].base);
		udbg_printf("                      .physbase = 0x%lx\n",
			lmb.memory.region[i].physbase);
		udbg_printf("                      .size     = 0x%lx\n",
			lmb.memory.region[i].size);
		udbg_printf("                      .type     = 0x%lx\n",
			lmb.memory.region[i].type);
	}

	udbg_printf("\n");
	udbg_printf("    reserved.cnt                = %lu\n",
		lmb.reserved.cnt);
	udbg_printf("    reserved.size               = 0x%lx\n",
		lmb.reserved.size);
	udbg_printf("    reserved.lcd_size           = 0x%lx\n",
		lmb.reserved.lcd_size);
	for (i=0; i < lmb.reserved.cnt ;i++) {
		udbg_printf("    reserved.region[%lu].base    = 0x%lx\n",
			i, lmb.reserved.region[i].base);
		udbg_printf("                      .physbase = 0x%lx\n",
			lmb.reserved.region[i].physbase);
		udbg_printf("                      .size     = 0x%lx\n",
			lmb.reserved.region[i].size);
		udbg_printf("                      .type     = 0x%lx\n",
			lmb.reserved.region[i].type);
	}
}