/*
 * Copyright 2004-2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/delay.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/pfn.h>

#ifdef CONFIG_MTD_UCLINUX
#include <linux/mtd/map.h>
#include <linux/ext2_fs.h>
#include <linux/cramfs_fs.h>
#include <linux/romfs_fs.h>
#endif

#include <asm/cplb.h>
#include <asm/cacheflush.h>
#include <asm/blackfin.h>
#include <asm/cplbinit.h>
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/fixed_code.h>
#include <asm/early_printk.h>
#include <asm/irq_handler.h>
#include <asm/pda.h>

u16 _bfin_swrst;
EXPORT_SYMBOL(_bfin_swrst);

unsigned long memory_start, memory_end, physical_mem_end;
unsigned long _rambase, _ramstart, _ramend;
unsigned long reserved_mem_dcache_on;
unsigned long reserved_mem_icache_on;
EXPORT_SYMBOL(memory_start);
EXPORT_SYMBOL(memory_end);
EXPORT_SYMBOL(physical_mem_end);
EXPORT_SYMBOL(_ramend);
EXPORT_SYMBOL(reserved_mem_dcache_on);

#ifdef CONFIG_MTD_UCLINUX
extern struct map_info uclinux_ram_map;
unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
unsigned long _ebss;
EXPORT_SYMBOL(memory_mtd_end);
EXPORT_SYMBOL(memory_mtd_start);
EXPORT_SYMBOL(mtd_size);
#endif

char __initdata command_line[COMMAND_LINE_SIZE];
struct blackfin_initial_pda __initdata initial_pda;

/* boot memmap, for parsing "memmap=" */
#define BFIN_MEMMAP_MAX		128 /* number of entries in bfin_memmap */
#define BFIN_MEMMAP_RAM		1
#define BFIN_MEMMAP_RESERVED	2
static struct bfin_memmap {
	int nr_map;
	struct bfin_memmap_entry {
		unsigned long long addr; /* start of memory segment */
		unsigned long long size;
		unsigned long type;
	} map[BFIN_MEMMAP_MAX];
} bfin_memmap __initdata;

/* for memmap sanitization */
struct change_member {
	struct bfin_memmap_entry *pentry; /* pointer to original entry */
	unsigned long long addr; /* address for this change point */
};
static struct change_member change_point_list[2*BFIN_MEMMAP_MAX] __initdata;
static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;

DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);

static int early_init_clkin_hz(char *buf);

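/*
 * CPLBs (Cacheability Protection Lookaside Buffers) are the Blackfin
 * analogue of TLB entries: each one describes the cacheability and
 * protection attributes of a memory region.  The tables generated here
 * are what bfin_setup_caches() later loads on each CPU.
 */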
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
void __init generate_cplb_tables(void)
{
	unsigned int cpu;

	generate_cplb_tables_all();
	/* Generate per-CPU I&D CPLB tables */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		generate_cplb_tables_cpu(cpu);
}
#endif

void __cpuinit bfin_setup_caches(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
	bfin_icache_init(icplb_tbl[cpu]);
#endif

#ifdef CONFIG_BFIN_DCACHE
	bfin_dcache_init(dcplb_tbl[cpu]);
#endif

	bfin_setup_cpudata(cpu);

	/*
	 * In cache coherence emulation mode, we need to have the
	 * D-cache enabled before running any atomic operation which
	 * might involve cache invalidation (i.e. spinlock, rwlock).
	 * So printk's are deferred until then.
	 */
#ifdef CONFIG_BFIN_ICACHE
	printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO "  External memory:"
# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	       " cacheable"
# else
	       " uncacheable"
# endif
	       " in instruction cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO "  L2 SRAM        :"
# ifdef CONFIG_BFIN_L2_ICACHEABLE
		       " cacheable"
# else
		       " uncacheable"
# endif
		       " in instruction cache\n");

#else
	printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
#endif

#ifdef CONFIG_BFIN_DCACHE
	printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO "  External memory:"
# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
	       " cacheable (write-back)"
# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
	       " cacheable (write-through)"
# else
	       " uncacheable"
# endif
	       " in data cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO "  L2 SRAM        :"
# if defined CONFIG_BFIN_L2_WRITEBACK
		       " cacheable (write-back)"
# elif defined CONFIG_BFIN_L2_WRITETHROUGH
		       " cacheable (write-through)"
# else
		       " uncacheable"
# endif
		       " in data cache\n");
#else
	printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
#endif
}

void __cpuinit bfin_setup_cpudata(unsigned int cpu)
{
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);

	cpudata->imemctl = bfin_read_IMEM_CONTROL();
	cpudata->dmemctl = bfin_read_DMEM_CONTROL();
}

void __init bfin_cache_init(void)
{
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
	generate_cplb_tables();
#endif
	bfin_setup_caches(0);
}

void __init bfin_relocate_l1_mem(void)
{
	unsigned long text_l1_len = (unsigned long)_text_l1_len;
	unsigned long data_l1_len = (unsigned long)_data_l1_len;
	unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
	unsigned long l2_len = (unsigned long)_l2_len;

	early_shadow_stamp();

	/*
	 * Due to the ALIGN(4) in arch/blackfin/kernel/vmlinux.lds.S, we
	 * know that everything about L1 text/data is nicely aligned, so
	 * copy in 4 byte chunks and don't worry about overlapping
	 * src/dest.
	 *
	 * We can't use the dma_memcpy functions, since they can call
	 * scheduler functions which might themselves live in L1 :( and
	 * core writes into L1 instruction SRAM cause bad access errors.
	 * So we are stuck: we are required to use DMA, but can't use the
	 * common dma functions.  We can't use memcpy either, since it
	 * might end up in the relocated L1 as well.
	 */

	blackfin_dma_early_init();

	/* if necessary, copy L1 text to L1 instruction SRAM */
	if (L1_CODE_LENGTH && text_l1_len)
		early_dma_memcpy(_stext_l1, _text_l1_lma, text_l1_len);

	/* if necessary, copy L1 data to L1 data bank A SRAM */
	if (L1_DATA_A_LENGTH && data_l1_len)
		early_dma_memcpy(_sdata_l1, _data_l1_lma, data_l1_len);

	/* if necessary, copy L1 data B to L1 data bank B SRAM */
	if (L1_DATA_B_LENGTH && data_b_l1_len)
		early_dma_memcpy(_sdata_b_l1, _data_b_l1_lma, data_b_l1_len);

	early_dma_memcpy_done();

#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
	blackfin_iflush_l1_entry[0] = (unsigned long)blackfin_icache_flush_range_l1;
#endif

	/* if necessary, copy L2 text/data to L2 SRAM */
	if (L2_LENGTH && l2_len)
		memcpy(_stext_l2, _l2_lma, l2_len);
}

#ifdef CONFIG_SMP
void __init bfin_relocate_coreb_l1_mem(void)
{
	unsigned long text_l1_len = (unsigned long)_text_l1_len;
	unsigned long data_l1_len = (unsigned long)_data_l1_len;
	unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;

	blackfin_dma_early_init();

	/* if necessary, copy L1 text to L1 instruction SRAM */
	if (L1_CODE_LENGTH && text_l1_len)
		early_dma_memcpy((void *)COREB_L1_CODE_START, _text_l1_lma,
				text_l1_len);

	/* if necessary, copy L1 data to L1 data bank A SRAM */
	if (L1_DATA_A_LENGTH && data_l1_len)
		early_dma_memcpy((void *)COREB_L1_DATA_A_START, _data_l1_lma,
				data_l1_len);

	/* if necessary, copy L1 data B to L1 data bank B SRAM */
	if (L1_DATA_B_LENGTH && data_b_l1_len)
		early_dma_memcpy((void *)COREB_L1_DATA_B_START, _data_b_l1_lma,
				data_b_l1_len);

	early_dma_memcpy_done();

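	/*
	 * Re-base the L1 icache-flush routine for Core B: take its offset
	 * within Core A's L1 code image and add Core B's L1 code base.
	 */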
#ifdef CONFIG_ICACHE_FLUSH_L1
	blackfin_iflush_l1_entry[1] = (unsigned long)blackfin_icache_flush_range_l1 -
			(unsigned long)_stext_l1 + COREB_L1_CODE_START;
#endif
}
#endif

#ifdef CONFIG_ROMKERNEL
void __init bfin_relocate_xip_data(void)
{
	early_shadow_stamp();

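	/*
	 * Copy .data from its load address, skipping the init task's stack
	 * (only its thread_info at the bottom is preserved) -- our reading
	 * of the THREAD_SIZE adjustment below -- then copy the init data.
	 */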
	memcpy(_sdata, _data_lma, (unsigned long)_data_len - THREAD_SIZE + sizeof(struct thread_info));
	memcpy(_sinitdata, _init_data_lma, (unsigned long)_init_data_len);
}
#endif

/* add_memory_region to memmap */
static void __init add_memory_region(unsigned long long start,
			      unsigned long long size, int type)
{
	int i;

	i = bfin_memmap.nr_map;

	if (i == BFIN_MEMMAP_MAX) {
		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
		return;
	}

	bfin_memmap.map[i].addr = start;
	bfin_memmap.map[i].size = size;
	bfin_memmap.map[i].type = type;
	bfin_memmap.nr_map++;
}

/*
 * Sanitize the boot memmap, removing overlaps.
 */
static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
{
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/*
		Visually we're performing the following (1,2,3,4 = memory types)

		Sample memory map (w/overlaps):
		   ____22__________________
		   ______________________4_
		   ____1111________________
		   _44_____________________
		   11111111________________
		   ____________________33__
		   ___________44___________
		   __________33333_________
		   ______________22________
		   ___________________2222_
		   _________111111111______
		   _____________________11_
		   _________________4______

		Sanitized equivalent (no overlap):
		   1_______________________
		   _44_____________________
		   ___1____________________
		   ____22__________________
		   ______11________________
		   _________1______________
		   __________3_____________
		   ___________44___________
		   _____________33_________
		   _______________2________
		   ________________1_______
		   _________________4______
		   ___________________2____
		   ____________________33__
		   ______________________4_
	*/
	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in memmap */
	for (i = 0; i < old_nr; i++)
		if (map[i].addr + map[i].size < map[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2*old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (map[i].size != 0) {
			change_point[chgidx]->addr = map[i].addr;
			change_point[chgidx++]->pentry = &map[i];
			change_point[chgidx]->addr = map[i].addr + map[i].size;
			change_point[chgidx++]->pentry = &map[i];
		}
	}
	chg_nr = chgidx;	/* true number of change-points */

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			/* if <current_addr> > <last_addr>, swap */
			/* or, if current=<start_addr> & last=<end_addr>, swap */
			if ((change_point[i]->addr < change_point[i-1]->addr) ||
				((change_point[i]->addr == change_point[i-1]->addr) &&
				 (change_point[i]->addr == change_point[i]->pentry->addr) &&
				 (change_point[i-1]->addr != change_point[i-1]->pentry->addr))
			   ) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}

	/* create a new memmap, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_entry = 0;		/* index for creating new memmap entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */
	/* loop through change-points, determining the effect on the new memmap */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping memmap entries */
		if (change_point[chgidx]->addr == change_point[chgidx]->pentry->addr) {
			/* add map entry to overlap list (> 1 entry implies an overlap) */
			overlap_list[overlap_entries++] = change_point[chgidx]->pentry;
		} else {
			/* remove entry from list (order independent, so swap with last) */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] == change_point[chgidx]->pentry)
					overlap_list[i] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/* if there are overlapping entries, decide which "type" to use */
		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/* continue building up new memmap based on this information */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_map[new_entry].size =
					change_point[chgidx]->addr - last_addr;
				/* move forward only if the new size was non-zero */
				if (new_map[new_entry].size != 0)
					if (++new_entry >= BFIN_MEMMAP_MAX)
						break;	/* no more space left for new entries */
			}
			if (current_type != 0) {
				new_map[new_entry].addr = change_point[chgidx]->addr;
				new_map[new_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	new_nr = new_entry;	/* retain count for new entries */

	/* copy new mapping into original location */
	memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
	*pnr_map = new_nr;

	return 0;
}

static void __init print_memory_map(char *who)
{
	int i;

	for (i = 0; i < bfin_memmap.nr_map; i++) {
		printk(KERN_DEBUG " %s: %016Lx - %016Lx ", who,
			bfin_memmap.map[i].addr,
			bfin_memmap.map[i].addr + bfin_memmap.map[i].size);
		switch (bfin_memmap.map[i].type) {
		case BFIN_MEMMAP_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BFIN_MEMMAP_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", bfin_memmap.map[i].type);
			break;
		}
	}
}

static __init int parse_memmap(char *arg)
{
	unsigned long long start_at, mem_size;

	if (!arg)
		return -EINVAL;

	mem_size = memparse(arg, &arg);
	if (*arg == '@') {
		start_at = memparse(arg+1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RAM);
	} else if (*arg == '$') {
		start_at = memparse(arg+1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RESERVED);
	}

	return 0;
}
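
/*
 * For example (illustrative values): "memmap=16M@0x1000000" adds a 16MB
 * RAM region at 16MB, while "memmap=1M$0x2000000" reserves 1MB at 32MB.
 */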

/*
 * Initial parsing of the command line.  Currently, we support:
 *  - Controlling the linux memory size: mem=xxx[KMG]
 *  - Controlling the physical memory size: max_mem=xxx[KMG][$][#]
 *       $ -> reserved memory is dcacheable
 *       # -> reserved memory is icacheable
 *  - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region
 *       @ from <start> to <start>+<mem>, type RAM
 *       $ from <start> to <start>+<mem>, type RESERVED
 */
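/*
 * E.g. a boot line of "mem=32M max_mem=64M$ clkin_hz=25000000" (values
 * purely illustrative) caps the kernel-managed memory at 32MB and the
 * physical end at 64MB with the reserved region dcacheable, and sets a
 * 25MHz input clock.
 */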
static __init void parse_cmdline_early(char *cmdline_p)
{
	char c = ' ', *to = cmdline_p;
	unsigned int memsize;
	for (;;) {
		if (c == ' ') {
			if (!memcmp(to, "mem=", 4)) {
				to += 4;
				memsize = memparse(to, &to);
				if (memsize)
					_ramend = memsize;

			} else if (!memcmp(to, "max_mem=", 8)) {
				to += 8;
				memsize = memparse(to, &to);
				if (memsize) {
					physical_mem_end = memsize;
					if (*to != ' ') {
						if (*to == '$'
						    || *(to + 1) == '$')
							reserved_mem_dcache_on = 1;
						if (*to == '#'
						    || *(to + 1) == '#')
							reserved_mem_icache_on = 1;
					}
				}
			} else if (!memcmp(to, "clkin_hz=", 9)) {
				to += 9;
				early_init_clkin_hz(to);
#ifdef CONFIG_EARLY_PRINTK
			} else if (!memcmp(to, "earlyprintk=", 12)) {
				to += 12;
				setup_early_printk(to);
#endif
			} else if (!memcmp(to, "memmap=", 7)) {
				to += 7;
				parse_memmap(to);
			}
		}
		c = *(to++);
		if (!c)
			break;
	}
}

/*
 * Setup memory defaults from user config.
 * The physical memory layout looks like:
 *
 *  [_rambase, _ramstart]:		kernel image
 *  [memory_start, memory_end]:		dynamic memory managed by kernel
 *  [memory_end, _ramend]:		reserved memory
 *  	[memory_mtd_start(memory_end),
 *  		memory_mtd_start + mtd_size]:	rootfs (if any)
 *	[_ramend - DMA_UNCACHED_REGION,
 *		_ramend]:			uncached DMA region
 *  [_ramend, physical_mem_end]:	memory not managed by kernel
 */
static __init void memory_setup(void)
{
#ifdef CONFIG_MTD_UCLINUX
	unsigned long mtd_phys = 0;
	unsigned long n;
#endif
	unsigned long max_mem;

	_rambase = CONFIG_BOOT_LOAD;
	_ramstart = (unsigned long)_end;

	if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
		console_init();
		panic("DMA region exceeds memory limit: %lu.",
			_ramend - _ramstart);
	}
	max_mem = memory_end = _ramend - DMA_UNCACHED_REGION;

#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
	/* Due to a hardware anomaly, we need to limit the size of usable
	 * instruction memory to at most 60MB (56MB if HUNT_FOR_ZERO is on):
	 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
	 */
# if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
	if (max_mem >= 56 * 1024 * 1024)
		max_mem = 56 * 1024 * 1024;
# else
	if (max_mem >= 60 * 1024 * 1024)
		max_mem = 60 * 1024 * 1024;
# endif				/* CONFIG_DEBUG_HUNT_FOR_ZERO */
#endif				/* ANOMALY_05000263 */

#ifdef CONFIG_MPU
	/* Round up to multiple of 4MB */
	memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
#else
	memory_start = PAGE_ALIGN(_ramstart);
#endif

#if defined(CONFIG_MTD_UCLINUX)
	/* generic memory mapped MTD driver */
	memory_mtd_end = memory_end;

	mtd_phys = _ramstart;
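	/* The word at offset 8 past the kernel image appears to correspond
	 * to where a ROMFS image keeps its size field; start with that
	 * guess, and let the filesystem-specific probes below refine it.
	 */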
	mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));

# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
	n = ext2_image_size((void *)(mtd_phys + 0x400));
	if (n)
		mtd_size = PAGE_ALIGN(n * 1024);
# endif

# if defined(CONFIG_CRAMFS)
	if (*((unsigned long *)(mtd_phys)) == CRAMFS_MAGIC)
		mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x4)));
# endif

# if defined(CONFIG_ROMFS_FS)
	if (((unsigned long *)mtd_phys)[0] == ROMSB_WORD0
	    && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1) {
		mtd_size =
		    PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));

		/* ROM_FS is XIP, so if we found it, we need to limit memory */
		if (memory_end > max_mem) {
			pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n", max_mem >> 20);
			memory_end = max_mem;
		}
	}
# endif				/* CONFIG_ROMFS_FS */

	/* Since the default MTD_UCLINUX has no magic number, we just blindly
	 * read the 8 bytes past the end of the kernel image and look at them.
	 * When no image is attached, mtd_size ends up as a random number, so
	 * do some basic sanity checks before operating on things.
	 */
	if (mtd_size == 0 || memory_end <= mtd_size) {
		pr_emerg("Could not find valid ram mtd attached.\n");
	} else {
		memory_end -= mtd_size;

		/* Relocate MTD image to the top of memory after the uncached memory area */
		uclinux_ram_map.phys = memory_mtd_start = memory_end;
		uclinux_ram_map.size = mtd_size;
		pr_info("Found mtd partition at 0x%p, (len=0x%lx), moving to 0x%p\n",
			_end, mtd_size, (void *)memory_mtd_start);
		dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
	}
#endif				/* CONFIG_MTD_UCLINUX */

	/* We need to limit memory, since everything could have a text section
	 * of userspace in it, and expose anomaly 05000263. If the anomaly
	 * doesn't exist, or we don't need to - then don't.
	 */
	if (memory_end > max_mem) {
		pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n", max_mem >> 20);
		memory_end = max_mem;
	}

#ifdef CONFIG_MPU
#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
	page_mask_nelts = (((_ramend + ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE -
					ASYNC_BANK0_BASE) >> PAGE_SHIFT) + 31) / 32;
#else
	page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
#endif
	page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
#endif

	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long)_etext;
	init_mm.end_data = (unsigned long)_edata;
	init_mm.brk = (unsigned long)0;

	printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20);
	printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20);

	printk(KERN_INFO "Memory map:\n"
	       "  fixedcode = 0x%p-0x%p\n"
	       "  text      = 0x%p-0x%p\n"
	       "  rodata    = 0x%p-0x%p\n"
	       "  bss       = 0x%p-0x%p\n"
	       "  data      = 0x%p-0x%p\n"
	       "    stack   = 0x%p-0x%p\n"
	       "  init      = 0x%p-0x%p\n"
	       "  available = 0x%p-0x%p\n"
#ifdef CONFIG_MTD_UCLINUX
	       "  rootfs    = 0x%p-0x%p\n"
#endif
#if DMA_UNCACHED_REGION > 0
	       "  DMA Zone  = 0x%p-0x%p\n"
#endif
		, (void *)FIXED_CODE_START, (void *)FIXED_CODE_END,
		_stext, _etext,
		__start_rodata, __end_rodata,
		__bss_start, __bss_stop,
		_sdata, _edata,
		(void *)&init_thread_union,
		(void *)((int)(&init_thread_union) + THREAD_SIZE),
		__init_begin, __init_end,
		(void *)_ramstart, (void *)memory_end
#ifdef CONFIG_MTD_UCLINUX
		, (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
#endif
#if DMA_UNCACHED_REGION > 0
		, (void *)(_ramend - DMA_UNCACHED_REGION), (void *)(_ramend)
#endif
		);
}

/*
 * Find the lowest, highest page frame number we have available
 */
void __init find_min_max_pfn(void)
{
	int i;

	max_pfn = 0;
	min_low_pfn = memory_end;

	for (i = 0; i < bfin_memmap.nr_map; i++) {
		unsigned long start, end;
		/* RAM? */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		start = PFN_UP(bfin_memmap.map[i].addr);
		end = PFN_DOWN(bfin_memmap.map[i].addr +
				bfin_memmap.map[i].size);
		if (start >= end)
			continue;
		if (end > max_pfn)
			max_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
	}
}

static __init void setup_bootmem_allocator(void)
{
	int bootmap_size;
	int i;
	unsigned long start_pfn, end_pfn;
	unsigned long curr_pfn, last_pfn, size;

	/* mark memory between memory_start and memory_end usable */
	add_memory_region(memory_start,
		memory_end - memory_start, BFIN_MEMMAP_RAM);
	/* sanity check for overlap */
	sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map);
	print_memory_map("boot memmap");

	/* initialize globals in linux/bootmem.h */
	find_min_max_pfn();
	/* pfn of the last usable page frame */
	if (max_pfn > memory_end >> PAGE_SHIFT)
		max_pfn = memory_end >> PAGE_SHIFT;
	/* pfn of last page frame directly mapped by kernel */
	max_low_pfn = max_pfn;
	/* pfn of the first usable page frame after kernel image */
	if (min_low_pfn < memory_start >> PAGE_SHIFT)
		min_low_pfn = memory_start >> PAGE_SHIFT;

	start_pfn = PAGE_OFFSET >> PAGE_SHIFT;
	end_pfn = memory_end >> PAGE_SHIFT;

	/*
	 * give all the memory to the bootmap allocator, tell it to put the
	 * boot mem_map at the start of memory.
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0),
			memory_start >> PAGE_SHIFT,	/* map goes here */
			start_pfn, end_pfn);

	/* register the memmap regions with the bootmem allocator */
	for (i = 0; i < bfin_memmap.nr_map; i++) {
		/*
		 * Only free usable RAM regions
		 */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(bfin_memmap.map[i].addr);
		if (curr_pfn >= end_pfn)
			continue;
		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(bfin_memmap.map[i].addr +
					 bfin_memmap.map[i].size);

		if (last_pfn > end_pfn)
			last_pfn = end_pfn;

		/*
		 * .. finally, did all the rounding and playing
		 * around just make the area go away?
		 */
		if (last_pfn <= curr_pfn)
			continue;

		size = last_pfn - curr_pfn;
		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
	}

	/* reserve memory before memory_start, including bootmap */
	reserve_bootmem(PAGE_OFFSET,
		memory_start + bootmap_size + PAGE_SIZE - 1 - PAGE_OFFSET,
		BOOTMEM_DEFAULT);
}

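/*
 * Decode a 4-bit EBSZ (SDRAM bank size) field from EBIU_SDBCTL into
 * megabytes; encodings not listed decode to 0.
 */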
#define EBSZ_TO_MEG(ebsz) \
({ \
	int meg = 0; \
	switch (ebsz & 0xf) { \
		case 0x1: meg =  16; break; \
		case 0x3: meg =  32; break; \
		case 0x5: meg =  64; break; \
		case 0x7: meg = 128; break; \
		case 0x9: meg = 256; break; \
		case 0xb: meg = 512; break; \
	} \
	meg; \
})
static inline int __init get_mem_size(void)
{
#if defined(EBIU_SDBCTL)
# if defined(BF561_FAMILY)
	int ret = 0;
	u32 sdbctl = bfin_read_EBIU_SDBCTL();
	ret += EBSZ_TO_MEG(sdbctl >>  0);
	ret += EBSZ_TO_MEG(sdbctl >>  8);
	ret += EBSZ_TO_MEG(sdbctl >> 16);
	ret += EBSZ_TO_MEG(sdbctl >> 24);
	return ret;
# else
	return EBSZ_TO_MEG(bfin_read_EBIU_SDBCTL());
# endif
#elif defined(EBIU_DDRCTL1)
	u32 ddrctl = bfin_read_EBIU_DDRCTL1();
	int ret = 0;
	switch (ddrctl & 0xc0000) {
	case DEVSZ_64:
		ret = 64 / 8;
		break;
	case DEVSZ_128:
		ret = 128 / 8;
		break;
	case DEVSZ_256:
		ret = 256 / 8;
		break;
	case DEVSZ_512:
		ret = 512 / 8;
		break;
	}
	switch (ddrctl & 0x30000) {
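		/* Deliberate fallthrough: each halving of the device width
		 * doubles the number of devices, and thus the total size.
		 */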
		case DEVWD_4:  ret *= 2;
		case DEVWD_8:  ret *= 2;
		case DEVWD_16: break;
	}
	if ((ddrctl & 0xc000) == 0x4000)
		ret *= 2;
	return ret;
#endif
	BUG();
}

__attribute__((weak))
void __init native_machine_early_platform_add_devices(void)
{
}

void __init setup_arch(char **cmdline_p)
{
	u32 mmr;
	unsigned long sclk, cclk;

	native_machine_early_platform_add_devices();

	enable_shadow_console();

	/* Check to make sure we are running on the right processor */
	if (unlikely(CPUID != bfin_cpuid()))
		printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
			CPU, bfin_cpuid(), bfin_revid());

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

#if defined(CONFIG_CMDLINE_BOOL)
	strncpy(&command_line[0], CONFIG_CMDLINE, sizeof(command_line));
	command_line[sizeof(command_line) - 1] = 0;
#endif

	/* Keep a copy of command line */
	*cmdline_p = &command_line[0];
	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';

	memset(&bfin_memmap, 0, sizeof(bfin_memmap));

	/* If the user does not specify things on the command line, use
	 * what the bootloader set things up as
	 */
	physical_mem_end = 0;
	parse_cmdline_early(&command_line[0]);

	if (_ramend == 0)
		_ramend = get_mem_size() * 1024 * 1024;

	if (physical_mem_end == 0)
		physical_mem_end = _ramend;

	memory_setup();

	/* Initialize Async memory banks */
	bfin_write_EBIU_AMBCTL0(AMBCTL0VAL);
	bfin_write_EBIU_AMBCTL1(AMBCTL1VAL);
	bfin_write_EBIU_AMGCTL(AMGCTLVAL);
#ifdef CONFIG_EBIU_MBSCTLVAL
	bfin_write_EBIU_MBSCTL(CONFIG_EBIU_MBSCTLVAL);
	bfin_write_EBIU_MODE(CONFIG_EBIU_MODEVAL);
	bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
#endif
#ifdef CONFIG_BFIN_HYSTERESIS_CONTROL
	bfin_write_PORTF_HYSTERESIS(HYST_PORTF_0_15);
	bfin_write_PORTG_HYSTERESIS(HYST_PORTG_0_15);
	bfin_write_PORTH_HYSTERESIS(HYST_PORTH_0_15);
	bfin_write_MISCPORT_HYSTERESIS((bfin_read_MISCPORT_HYSTERESIS() &
					~HYST_NONEGPIO_MASK) | HYST_NONEGPIO);
#endif

	cclk = get_cclk();
	sclk = get_sclk();

	if ((ANOMALY_05000273 || ANOMALY_05000274) && (cclk >> 1) < sclk)
		panic("ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK");

#ifdef BF561_FAMILY
	if (ANOMALY_05000266) {
		bfin_read_IMDMA_D0_IRQ_STATUS();
		bfin_read_IMDMA_D1_IRQ_STATUS();
	}
#endif

	mmr = bfin_read_TBUFCTL();
	printk(KERN_INFO "Hardware Trace %s and %sabled\n",
		(mmr & 0x1) ? "active" : "off",
		(mmr & 0x2) ? "en" : "dis");

	mmr = bfin_read_SYSCR();
	printk(KERN_INFO "Boot Mode: %i\n", mmr & 0xF);

	/* Newer parts mirror SWRST bits in SYSCR */
#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \
    defined(CONFIG_BF538) || defined(CONFIG_BF539)
	_bfin_swrst = bfin_read_SWRST();
#else
	/* Clear boot mode field */
	_bfin_swrst = mmr & ~0xf;
#endif

#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
	bfin_write_SWRST(_bfin_swrst & ~DOUBLE_FAULT);
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_RESET
	bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
#endif

#ifdef CONFIG_SMP
	if (_bfin_swrst & SWRST_DBL_FAULT_A) {
#else
	if (_bfin_swrst & RESET_DOUBLE) {
#endif
		printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		/* We assume the crashing kernel, and the current symbol table match */
		printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n",
			initial_pda.seqstat_doublefault & SEQSTAT_EXCAUSE,
			initial_pda.retx_doublefault);
		printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n",
			initial_pda.dcplb_doublefault_addr);
		printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n",
			initial_pda.icplb_doublefault_addr);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			initial_pda.retx);
	} else if (_bfin_swrst & RESET_WDOG)
		printk(KERN_INFO "Recovering from Watchdog event\n");
	else if (_bfin_swrst & RESET_SOFTWARE)
		printk(KERN_NOTICE "Reset caused by Software reset\n");

	printk(KERN_INFO "Blackfin support (C) 2004-2010 Analog Devices, Inc.\n");
	if (bfin_compiled_revid() == 0xffff)
		printk(KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n", CPU, bfin_revid());
	else if (bfin_compiled_revid() == -1)
		printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU);
	else
		printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());

	if (likely(CPUID == bfin_cpuid())) {
		if (bfin_revid() != bfin_compiled_revid()) {
			if (bfin_compiled_revid() == -1)
				printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
				       bfin_revid());
			else if (bfin_compiled_revid() != 0xffff) {
				printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
				       bfin_compiled_revid(), bfin_revid());
				if (bfin_compiled_revid() > bfin_revid())
					panic("Error: you are missing anomaly workarounds for this rev");
			}
		}
		if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
			printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
			       CPU, bfin_revid());
	}

	printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");

	printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
	       cclk / 1000000, sclk / 1000000);

	setup_bootmem_allocator();

	paging_init();

	/* Copy atomic sequences to their fixed location, and sanity check that
	   these locations are the ones that we advertise to userspace.  */
	memcpy((void *)FIXED_CODE_START, &fixed_code_start,
	       FIXED_CODE_END - FIXED_CODE_START);
	BUG_ON((char *)&sigreturn_stub - (char *)&fixed_code_start
	       != SIGRETURN_STUB - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xchg32 - (char *)&fixed_code_start
	       != ATOMIC_XCHG32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_cas32 - (char *)&fixed_code_start
	       != ATOMIC_CAS32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_add32 - (char *)&fixed_code_start
	       != ATOMIC_ADD32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_sub32 - (char *)&fixed_code_start
	       != ATOMIC_SUB32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_ior32 - (char *)&fixed_code_start
	       != ATOMIC_IOR32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_and32 - (char *)&fixed_code_start
	       != ATOMIC_AND32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
	       != ATOMIC_XOR32 - FIXED_CODE_START);
	BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
		!= SAFE_USER_INSTRUCTION - FIXED_CODE_START);

#ifdef CONFIG_SMP
	platform_init_cpus();
#endif
	init_exception_vectors();
	bfin_cache_init();	/* Initialize caches for the boot CPU */
}

static int __init topology_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
	}

	return 0;
}

subsys_initcall(topology_init);

/* Get the input clock frequency */
static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
static u_long get_clkin_hz(void)
{
	return cached_clkin_hz;
}
static int __init early_init_clkin_hz(char *buf)
{
	cached_clkin_hz = simple_strtoul(buf, NULL, 0);
#ifdef BFIN_KERNEL_CLOCK
	if (cached_clkin_hz != CONFIG_CLKIN_HZ)
		panic("cannot change clkin_hz when reprogramming clocks");
#endif
	return 1;
}
early_param("clkin_hz=", early_init_clkin_hz);

/* Get the voltage input multiplier */
static u_long get_vco(void)
{
	static u_long cached_vco;
	u_long msel, pll_ctl;

	/* The assumption here is that VCO never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_vco)
		return cached_vco;

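	/* VCO = (CLKIN >> DF) * MSEL, where DF is PLL_CTL bit 0 and MSEL
	 * is PLL_CTL bits 14:9 (an MSEL of 0 encodes the maximum
	 * multiplier, 64).
	 */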
	pll_ctl = bfin_read_PLL_CTL();
	msel = (pll_ctl >> 9) & 0x3F;
	if (0 == msel)
		msel = 64;

	cached_vco = get_clkin_hz();
	cached_vco >>= (1 & pll_ctl);	/* DF bit */
	cached_vco *= msel;
	return cached_vco;
}

/* Get the Core clock */
u_long get_cclk(void)
{
	static u_long cached_cclk_pll_div, cached_cclk;
	u_long csel, ssel;

	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV();
	if (ssel == cached_cclk_pll_div)
		return cached_cclk;
	else
		cached_cclk_pll_div = ssel;

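	/* In PLL_DIV, CSEL (bits 5:4) selects the core clock divider
	 * (VCO >> CSEL) and SSEL (bits 3:0) the system clock divider
	 * (VCO / SSEL).
	 */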
	csel = ((ssel >> 4) & 0x03);
	ssel &= 0xf;
	if (ssel && ssel < (1 << csel))	/* SCLK > CCLK */
		cached_cclk = get_vco() / ssel;
	else
		cached_cclk = get_vco() >> csel;
	return cached_cclk;
}
EXPORT_SYMBOL(get_cclk);

/* Get the System clock */
u_long get_sclk(void)
{
	static u_long cached_sclk;
	u_long ssel;

	/* The assumption here is that SCLK never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_sclk)
		return cached_sclk;

	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV() & 0xf;
	if (0 == ssel) {
		printk(KERN_WARNING "Invalid System Clock\n");
		ssel = 1;
	}

	cached_sclk = get_vco() / ssel;
	return cached_sclk;
}
EXPORT_SYMBOL(get_sclk);

unsigned long sclk_to_usecs(unsigned long sclk)
{
	u64 tmp = USEC_PER_SEC * (u64)sclk;
	do_div(tmp, get_sclk());
	return tmp;
}
EXPORT_SYMBOL(sclk_to_usecs);

unsigned long usecs_to_sclk(unsigned long usecs)
{
	u64 tmp = get_sclk() * (u64)usecs;
	do_div(tmp, USEC_PER_SEC);
	return tmp;
}
EXPORT_SYMBOL(usecs_to_sclk);
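
/*
 * These helpers convert between SCLK cycle counts and microseconds:
 * with a (hypothetical) 100MHz SCLK, sclk_to_usecs(50000000) returns
 * 500000, and usecs_to_sclk(500000) returns 50000000.
 */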

/*
 *	Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	char *cpu, *mmu, *fpu, *vendor, *cache;
	uint32_t revid;
	int cpu_num = *(unsigned int *)v;
	u_long sclk, cclk;
	u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num);

	cpu = CPU;
	mmu = "none";
	fpu = "none";
	revid = bfin_revid();

	sclk = get_sclk();
	cclk = get_cclk();

	switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
	case 0xca:
		vendor = "Analog Devices";
		break;
	default:
		vendor = "unknown";
		break;
	}

	seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor);

	if (CPUID == bfin_cpuid())
		seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
	else
		seq_printf(m, "cpu family\t: Compiled for:0x%04x, running on:0x%04x\n",
			CPUID, bfin_cpuid());

	seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
		"stepping\t: %d ",
		cpu, cclk/1000000, sclk/1000000,
#ifdef CONFIG_MPU
		"mpu on",
#else
		"mpu off",
#endif
		revid);

	if (bfin_revid() != bfin_compiled_revid()) {
		if (bfin_compiled_revid() == -1)
			seq_printf(m, "(Compiled for Rev none)");
		else if (bfin_compiled_revid() == 0xffff)
			seq_printf(m, "(Compiled for Rev any)");
		else
			seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
	}

	seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
		cclk/1000000, cclk%1000000,
		sclk/1000000, sclk%1000000);
	seq_printf(m, "bogomips\t: %lu.%02lu\n"
		"Calibration\t: %lu loops\n",
		(loops_per_jiffy * HZ) / 500000,
		((loops_per_jiffy * HZ) / 5000) % 100,
		(loops_per_jiffy * HZ));

	/* Check cache configuration */
	switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
	case ACACHE_BSRAM:
		cache = "dbank-A/B\t: cache/sram";
		dcache_size = 16;
		dsup_banks = 1;
		break;
	case ACACHE_BCACHE:
		cache = "dbank-A/B\t: cache/cache";
		dcache_size = 32;
		dsup_banks = 2;
		break;
	case ASRAM_BSRAM:
		cache = "dbank-A/B\t: sram/sram";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	default:
		cache = "unknown";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	}

	/* Is it turned on? */
	if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
		dcache_size = 0;

	if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
		icache_size = 0;

	seq_printf(m, "cache size\t: %d KB(L1 icache) "
		"%d KB(L1 dcache) %d KB(L2 cache)\n",
		icache_size, dcache_size, 0);
	seq_printf(m, "%s\n", cache);
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
		   "cacheable"
#else
		   "uncacheable"
#endif
		   " in instruction cache\n");
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
		      "cacheable (write-back)"
#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
		      "cacheable (write-through)"
#else
		      "uncacheable"
#endif
		      " in data cache\n");

	if (icache_size)
		seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
			   BFIN_ISUBBANKS, BFIN_IWAYS, BFIN_ILINES);
	else
		seq_printf(m, "icache setup\t: off\n");

	seq_printf(m,
		   "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
		   dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
		   BFIN_DLINES);
#ifdef __ARCH_SYNC_CORE_DCACHE
	seq_printf(m, "dcache flushes\t: %lu\n", dcache_invld_count[cpu_num]);
#endif
#ifdef __ARCH_SYNC_CORE_ICACHE
	seq_printf(m, "icache flushes\t: %lu\n", icache_invld_count[cpu_num]);
#endif

	seq_printf(m, "\n");

	if (cpu_num != num_possible_cpus() - 1)
		return 0;

	if (L2_LENGTH) {
		seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_ICACHEABLE)
			      "cacheable"
#else
			      "uncacheable"
#endif
			      " in instruction cache\n");
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_WRITEBACK)
			      "cacheable (write-back)"
#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
			      "cacheable (write-through)"
#else
			      "uncacheable"
#endif
			      " in data cache\n");
	}
	seq_printf(m, "board name\t: %s\n", bfin_board_name);
	seq_printf(m, "board memory\t: %ld kB (0x%08lx -> 0x%08lx)\n",
		physical_mem_end >> 10, 0ul, physical_mem_end);
	seq_printf(m, "kernel memory\t: %d kB (0x%08lx -> 0x%08lx)\n",
		((int)memory_end - (int)_rambase) >> 10,
		_rambase, memory_end);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)
		*pos = cpumask_first(cpu_online_mask);
	if (*pos >= num_online_cpus())
		return NULL;

	return pos;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = cpumask_next(*pos, cpu_online_mask);

	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};

void __init cmdline_init(const char *r0)
{
	early_shadow_stamp();
	if (r0)
		strncpy(command_line, r0, COMMAND_LINE_SIZE);
}