1/* $Id: head.S,v 1.86 2001/12/05 01:02:16 davem Exp $
2 * head.S: Initial boot code for the Sparc64 port of Linux.
3 *
4 * Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 David Sitsky (David.Sitsky@anu.edu.au)
6 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 * Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx)
8 */
9
10#include <linux/config.h>
11#include <linux/version.h>
12#include <linux/errno.h>
13#include <asm/asm_offsets.h>
14#include <asm/asi.h>
15#include <asm/pstate.h>
16#include <asm/ptrace.h>
17#include <asm/spitfire.h>
18#include <asm/page.h>
19#include <asm/pgtable.h>
20#include <asm/errno.h>
21#include <asm/signal.h>
22#include <asm/processor.h>
23#include <asm/lsu.h>
24#include <asm/dcr.h>
25#include <asm/dcu.h>
26#include <asm/head.h>
27#include <asm/ttable.h>
28
/* This section from _start to sparc64_boot_end should fit into
30 * 0x0000.0000.0040.4000 to 0x0000.0000.0040.8000 and will be sharing space
31 * with bootup_user_stack, which is from 0x0000.0000.0040.4000 to
32 * 0x0000.0000.0040.6000 and empty_bad_page, which is from
33 * 0x0000.0000.0040.6000 to 0x0000.0000.0040.8000.
34 */
35
	.text
	.globl	start, _start, stext, _stext
_start:
start:
_stext:
stext:
bootup_user_stack:
! 0x0000000000404000
	/* The first two instructions at the load address: branch to the
	 * real boot entry point while the delay slot flushes all register
	 * windows to memory.  This area doubles as bootup_user_stack
	 * (see the comment at the top of the file).
	 */
	b	sparc64_boot
	 flushw					/* Flush register file.      */
46
/* This stuff has to be in sync with SILO and other potential boot loaders
 * Fields should be kept upward compatible and whenever any change is made,
 * HdrS version should be incremented.
 */
        .global root_flags, ram_flags, root_dev
        .global sparc_ramdisk_image, sparc_ramdisk_size
	.global sparc_ramdisk_image64

	/* "HdrS" magic followed by the kernel version code lets the
	 * boot loader identify and validate this header.
	 */
        .ascii  "HdrS"
        .word   LINUX_VERSION_CODE

	/* History:
	 *
	 * 0x0300 : Supports being located at other than 0x4000
	 * 0x0202 : Supports kernel params string
	 * 0x0201 : Supports reboot_command
	 */
	.half   0x0301          /* HdrS version */

	/* The fields below are presumably patched in place by the boot
	 * loader (SILO); the values assembled here are only defaults.
	 */
root_flags:
        .half   1
root_dev:
        .half   0
ram_flags:
        .half   0
sparc_ramdisk_image:
        .word   0
sparc_ramdisk_size:
        .word   0
        .xword  reboot_command
	.xword	bootstr_info
sparc_ramdisk_image64:
	.xword	0
	.word	_end
81
	/* We must be careful, 32-bit OpenBOOT will get confused if it
	 * tries to save away a register window to a 64-bit kernel
	 * stack address.  Flush all windows, disable interrupts,
	 * remap if necessary, jump onto kernel trap table, then kernel
	 * stack, or else we die.
	 *
	 * PROM entry point is on %o4
	 */
sparc64_boot:
	/* Dispatch on CPU implementation; Spitfire is the fall-through
	 * default if neither Cheetah test matches.
	 */
	BRANCH_IF_CHEETAH_BASE(g1,g5,cheetah_boot)
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g5,cheetah_plus_boot)
	ba,pt	%xcc, spitfire_boot
	 nop
95
cheetah_plus_boot:
	/* Preserve OBP chosen DCU and DCR register settings
	 * (unlike plain Cheetah below, which reprograms them).
	 */
	ba,pt	%xcc, cheetah_generic_boot
	 nop
100
cheetah_boot:
	/* Program the dispatch control register (%asr18) with the
	 * DCR_* bits we want (see asm/dcr.h for bit definitions).
	 */
	mov	DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
	wr	%g1, %asr18

	/* Build the full 64-bit DCU control value (high DCU_* bits in
	 * the upper word, cache/MMU enables in the lower) and write it
	 * via the DCU control register ASI.
	 */
	sethi	%uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	or	%g5, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	sllx	%g5, 32, %g5
	or	%g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5
	stxa	%g5, [%g0] ASI_DCU_CONTROL_REG
	membar	#Sync
111
cheetah_generic_boot:
	/* Zero the TSB extension registers for the primary, secondary,
	 * and nucleus contexts in both MMUs before touching the TLBs.
	 */
	mov	TSB_EXTENSION_P, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync

	mov	TSB_EXTENSION_S, %g3
	stxa	%g0, [%g3] ASI_DMMU
	membar	#Sync

	mov	TSB_EXTENSION_N, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync

	/* Get into a known %pstate (see asm/pstate.h) and clear %fprs. */
	wrpr    %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
	wr	%g0, 0, %fprs

	/* Just like for Spitfire, we probe itlb-2 for a mapping which
	 * matches our current %pc.  We take the physical address in
	 * that mapping and use it to make our own.
	 */

	/* %g5 holds the tlb data */
        sethi   %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
        sllx    %g5, 32, %g5
        or      %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5

	/* Put PADDR tlb data mask into %g3. */
	sethi	%uhi(_PAGE_PADDR), %g3
	or	%g3, %ulo(_PAGE_PADDR), %g3
	sllx	%g3, 32, %g3
	sethi	%hi(_PAGE_PADDR), %g7
	or	%g7, %lo(_PAGE_PADDR), %g7
	or	%g3, %g7, %g3

	set	2 << 16, %l0		/* TLB entry walker. */
	set	0x1fff, %l2		/* Page mask. */
	rd	%pc, %l3
	andn	%l3, %l2, %g2		/* vaddr comparator */

	/* Walk all 128 entries of the large ITLB (tlb-2, selected by the
	 * "2 << 16" in the walker address) looking for a tag matching
	 * the page our %pc is currently executing from.
	 */
1:	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
	membar	#Sync
	andn	%g1, %l2, %g1
	cmp	%g1, %g2
	be,pn	%xcc, cheetah_got_tlbentry
	 nop
	and	%l0, (127 << 3), %g1
	cmp	%g1, (127 << 3)
	blu,pt	%xcc, 1b
	 add	%l0, (1 << 3), %l0

	/* Search the small TLB.  OBP never maps us like that but
	 * newer SILO can.
	 */
	clr	%l0

1:	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
	membar	#Sync
	andn	%g1, %l2, %g1
	cmp	%g1, %g2
	be,pn	%xcc, cheetah_got_tlbentry
	 nop
	cmp	%l0, (15 << 3)
	blu,pt	%xcc, 1b
	 add	%l0, (1 << 3), %l0

	/* BUG() if we get here... */
	ta	0x5
181
cheetah_got_tlbentry:
	/* Read the matching entry's TLB data; note the first load
	 * discards into %g0 -- presumably a required dummy read,
	 * mirroring the nops needed on Spitfire below.
	 */
	ldxa	[%l0] ASI_ITLB_DATA_ACCESS, %g0
	ldxa	[%l0] ASI_ITLB_DATA_ACCESS, %g1
	membar	#Sync
	/* Mask down to the physical address bits, drop the in-page
	 * offset bits, and merge the phys base into the TTE template
	 * being built in %g5.
	 */
	and	%g1, %g3, %g1
	set	0x5fff, %l0
	andn	%g1, %l0, %g1
	or	%g5, %g1, %g5

	/* Clear out any KERNBASE area entries. */
	set	2 << 16, %l0
	sethi	%hi(KERNBASE), %g3
	sethi	%hi(KERNBASE<<1), %g7
	mov	TLB_TAG_ACCESS, %l7

	/* First, check ITLB */
1:	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
	membar	#Sync
	andn	%g1, %l2, %g1
	cmp	%g1, %g3
	blu,pn	%xcc, 2f
	 cmp	%g1, %g7
	bgeu,pn	%xcc, 2f
	 nop
	/* Entry maps within [KERNBASE, KERNBASE<<1): zap it. */
	stxa	%g0, [%l7] ASI_IMMU
	membar	#Sync
	stxa	%g0, [%l0] ASI_ITLB_DATA_ACCESS
	membar	#Sync

2:	and	%l0, (127 << 3), %g1
	cmp	%g1, (127 << 3)
	blu,pt	%xcc, 1b
	 add	%l0, (1 << 3), %l0

	/* Next, check DTLB */
	set	2 << 16, %l0
1:	ldxa	[%l0] ASI_DTLB_TAG_READ, %g1
	membar	#Sync
	andn	%g1, %l2, %g1
	cmp	%g1, %g3
	blu,pn	%xcc, 2f
	 cmp	%g1, %g7
	bgeu,pn	%xcc, 2f
	 nop
	stxa	%g0, [%l7] ASI_DMMU
	membar	#Sync
	stxa	%g0, [%l0] ASI_DTLB_DATA_ACCESS
	membar	#Sync

2:	and	%l0, (511 << 3), %g1
	cmp	%g1, (511 << 3)
	blu,pt	%xcc, 1b
	 add	%l0, (1 << 3), %l0

	/* On Cheetah+, have to check second DTLB.  */
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,l0,2f)
	ba,pt	%xcc, 9f
	 nop

	/* Same cleanout loop over the second DTLB ("3 << 16"). */
2:	set	3 << 16, %l0
1:	ldxa	[%l0] ASI_DTLB_TAG_READ, %g1
	membar	#Sync
	andn	%g1, %l2, %g1
	cmp	%g1, %g3
	blu,pn	%xcc, 2f
	 cmp	%g1, %g7
	bgeu,pn	%xcc, 2f
	 nop
	stxa	%g0, [%l7] ASI_DMMU
	membar	#Sync
	stxa	%g0, [%l0] ASI_DTLB_DATA_ACCESS
	membar	#Sync

2:	and	%l0, (511 << 3), %g1
	cmp	%g1, (511 << 3)
	blu,pt	%xcc, 1b
	 add	%l0, (1 << 3), %l0

9:
261
	/* Now lock the TTE we created into ITLB-0 and DTLB-0,
	 * entry 15 (and maybe 14 too).
	 */
	sethi	%hi(KERNBASE), %g3
	set	(0 << 16) | (15 << 3), %g7
	/* Write the tag (KERNBASE) via TLB_TAG_ACCESS (%l7), then the
	 * TTE data (%g5) directly into entry 15 of each TLB.
	 */
	stxa	%g3, [%l7] ASI_DMMU
	membar	#Sync
	stxa	%g5, [%g7] ASI_DTLB_DATA_ACCESS
	membar	#Sync
	stxa	%g3, [%l7] ASI_IMMU
	membar	#Sync
	stxa	%g5, [%g7] ASI_ITLB_DATA_ACCESS
	membar	#Sync
	flush	%g3
	membar	#Sync
	sethi	%hi(_end), %g3			/* Check for bigkernel case */
	or	%g3, %lo(_end), %g3
	srl	%g3, 23, %g3			/* Check if _end > 8M */
	brz,pt	%g3, 1f
	 sethi	%hi(KERNBASE), %g3		/* Restore for fixup code below */
	/* Bigkernel: the image does not fit under the 8M boundary, so
	 * also lock a second 4MB mapping for KERNBASE+4MB into entry 14
	 * of both TLBs (with the global bit cleared).
	 */
	sethi	%hi(0x400000), %g3
	or	%g3, %lo(0x400000), %g3
	add	%g5, %g3, %g5			/* New tte data */
	andn	%g5, (_PAGE_G), %g5
	sethi	%hi(KERNBASE+0x400000), %g3
	or	%g3, %lo(KERNBASE+0x400000), %g3
	set	(0 << 16) | (14 << 3), %g7
	stxa	%g3, [%l7] ASI_DMMU
	membar	#Sync
	stxa	%g5, [%g7] ASI_DTLB_DATA_ACCESS
	membar	#Sync
	stxa	%g3, [%l7] ASI_IMMU
	membar	#Sync
	stxa	%g5, [%g7] ASI_ITLB_DATA_ACCESS
	membar	#Sync
	flush	%g3
	membar	#Sync
	sethi	%hi(KERNBASE), %g3		/* Restore for fixup code below */
	ba,pt	%xcc, 1f
	 nop

	/* Jump to the common post-mapping initialization. */
1:	set	sun4u_init, %g2
	jmpl    %g2 + %g0, %g0
	 nop
306
spitfire_boot:
	/* Typically PROM has already enabled both MMU's and both on-chip
	 * caches, but we do it here anyway just to be paranoid.
	 */
	/* LSU control: I/D cache and I/D MMU enable bits (asm/lsu.h). */
	mov	(LSU_CONTROL_IC|LSU_CONTROL_DC|LSU_CONTROL_IM|LSU_CONTROL_DM), %g1
	stxa	%g1, [%g0] ASI_LSU_CONTROL
	membar	#Sync

	/*
	 * Make sure we are in privileged mode, have address masking,
         * using the ordinary globals and have enabled floating
         * point.
	 *
	 * Again, typically PROM has left %pil at 13 or similar, and
	 * (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE) in %pstate.
         */
	wrpr    %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
	wr	%g0, 0, %fprs
325
spitfire_create_mappings:
	/* %g5 holds the tlb data */
	/* Build a locked (_PAGE_L), writable, cacheable, global 4MB TTE
	 * template; the physical base is merged in below.
	 */
        sethi   %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
        sllx    %g5, 32, %g5
        or      %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5

	/* Base of physical memory cannot reliably be assumed to be
	 * at 0x0!  Figure out where it happens to be. -DaveM
	 */

	/* Put PADDR tlb data mask into %g3. */
	sethi	%uhi(_PAGE_PADDR_SF), %g3
	or	%g3, %ulo(_PAGE_PADDR_SF), %g3
	sllx	%g3, 32, %g3
	sethi	%hi(_PAGE_PADDR_SF), %g7
	or	%g7, %lo(_PAGE_PADDR_SF), %g7
	or	%g3, %g7, %g3

	/* Walk through entire ITLB, looking for entry which maps
	 * our %pc currently, stick PADDR from there into %g5 tlb data.
	 */
	clr	%l0			/* TLB entry walker. */
	set	0x1fff, %l2		/* Page mask. */
	rd	%pc, %l3
	andn	%l3, %l2, %g2		/* vaddr comparator */
1:
	/* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
	nop
	nop
	nop
	andn	%g1, %l2, %g1		/* Get vaddr */
	cmp	%g1, %g2
	be,a,pn	%xcc, spitfire_got_tlbentry
	 ldxa	[%l0] ASI_ITLB_DATA_ACCESS, %g1
	cmp	%l0, (63 << 3)		/* 64 entries in the Spitfire ITLB. */
	blu,pt	%xcc, 1b
	 add	%l0, (1 << 3), %l0

	/* BUG() if we get here... */
	ta	0x5
367
spitfire_got_tlbentry:
	/* Nops here again, perhaps Cheetah/Blackbird are better behaved... */
	nop
	nop
	nop
	and	%g1, %g3, %g1		/* Mask to just get paddr bits.       */
	set	0x5fff, %l3		/* Mask offset to get phys base.      */
	andn	%g1, %l3, %g1

	/* NOTE: We hold on to %g1 paddr base as we need it below to lock
	 * NOTE: the PROM cif code into the TLB.
	 */

	or	%g5, %g1, %g5		/* Or it into TAG being built.        */

	/* Flush any pre-existing mappings in the KERNBASE..KERNBASE<<1
	 * range from the ITLB, then from the DTLB, so our own locked
	 * entries (installed below) cannot conflict.
	 */
	clr	%l0			/* TLB entry walker. */
	sethi	%hi(KERNBASE), %g3	/* 4M lower limit */
	sethi	%hi(KERNBASE<<1), %g7	/* 8M upper limit */
	mov	TLB_TAG_ACCESS, %l7
1:
	/* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
	nop
	nop
	nop
	andn	%g1, %l2, %g1		/* Get vaddr */
	cmp	%g1, %g3
	blu,pn	%xcc, 2f
	 cmp	%g1, %g7
	bgeu,pn	%xcc, 2f
	 nop
	stxa	%g0, [%l7] ASI_IMMU
	stxa	%g0, [%l0] ASI_ITLB_DATA_ACCESS
	membar	#Sync
2:
	cmp	%l0, (63 << 3)
	blu,pt	%xcc, 1b
	 add	%l0, (1 << 3), %l0

	nop; nop; nop

	clr	%l0			/* TLB entry walker. */
1:
	/* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
	ldxa	[%l0] ASI_DTLB_TAG_READ, %g1
	nop
	nop
	nop
	andn	%g1, %l2, %g1		/* Get vaddr */
	cmp	%g1, %g3
	blu,pn	%xcc, 2f
	 cmp	%g1, %g7
	bgeu,pn	%xcc, 2f
	 nop
	stxa	%g0, [%l7] ASI_DMMU
	stxa	%g0, [%l0] ASI_DTLB_DATA_ACCESS
	membar	#Sync
2:
	cmp	%l0, (63 << 3)
	blu,pt	%xcc, 1b
	 add	%l0, (1 << 3), %l0

	nop; nop; nop
431
432
	/* PROM never puts any TLB entries into the MMU with the lock bit
	 * set.  So we gladly use tlb entry 63 for KERNBASE. And maybe 62 too.
	 */

	sethi	%hi(KERNBASE), %g3
	mov	(63 << 3), %g7
	stxa	%g3, [%l7] ASI_DMMU		/* KERNBASE into TLB TAG	*/
	stxa	%g5, [%g7] ASI_DTLB_DATA_ACCESS	/* TTE into TLB DATA		*/
	membar	#Sync
	stxa	%g3, [%l7] ASI_IMMU		/* KERNBASE into TLB TAG	*/
	stxa	%g5, [%g7] ASI_ITLB_DATA_ACCESS	/* TTE into TLB DATA		*/
	membar	#Sync
	flush	%g3
	membar	#Sync
	sethi	%hi(_end), %g3			/* Check for bigkernel case */
	or	%g3, %lo(_end), %g3
	srl	%g3, 23, %g3			/* Check if _end > 8M */
	brz,pt	%g3, 2f
	 sethi	%hi(KERNBASE), %g3		/* Restore for fixup code below */
	/* Bigkernel: image crosses the 8M boundary, so also lock a
	 * second 4MB mapping for KERNBASE+4MB into entry 62 of both
	 * TLBs (global bit cleared), just like the Cheetah path above.
	 */
	sethi	%hi(0x400000), %g3
	or	%g3, %lo(0x400000), %g3
	add	%g5, %g3, %g5			/* New tte data */
	andn	%g5, (_PAGE_G), %g5
	sethi	%hi(KERNBASE+0x400000), %g3
	or	%g3, %lo(KERNBASE+0x400000), %g3
	mov	(62 << 3), %g7
	stxa	%g3, [%l7] ASI_DMMU
	stxa	%g5, [%g7] ASI_DTLB_DATA_ACCESS
	membar	#Sync
	stxa	%g3, [%l7] ASI_IMMU
	stxa	%g5, [%g7] ASI_ITLB_DATA_ACCESS
	membar	#Sync
	flush	%g3
	membar	#Sync
	sethi	%hi(KERNBASE), %g3		/* Restore for fixup code below */
2:	ba,pt	%xcc, 1f
	 nop
1:
	/* Continue at the common post-mapping initialization. */
	set	sun4u_init, %g2
	jmpl    %g2 + %g0, %g0
	 nop
474
sun4u_init:
	/* Set ctx 0 */
	mov	PRIMARY_CONTEXT, %g7
	stxa	%g0, [%g7] ASI_DMMU
	membar	#Sync

	mov	SECONDARY_CONTEXT, %g7
	stxa	%g0, [%g7] ASI_DMMU
	membar	#Sync

	/* %g4 = PAGE_OFFSET. */
	sethi	%uhi(PAGE_OFFSET), %g4
	sllx	%g4, 32, %g4

	/* We are now safely (we hope) in Nucleus context (0), rewrite
	 * the KERNBASE TTE's so they no longer have the global bit set.
	 * Don't forget to setup TAG_ACCESS first 8-)
	 */
	/* %g3 still holds KERNBASE from the boot path above. */
	mov	TLB_TAG_ACCESS, %g2
	stxa	%g3, [%g2] ASI_IMMU
	stxa	%g3, [%g2] ASI_DMMU
	membar	#Sync

	BRANCH_IF_ANY_CHEETAH(g1,g5,cheetah_tlb_fixup)

	ba,pt	%xcc, spitfire_tlb_fixup
	 nop
501
cheetah_tlb_fixup:
	/* Re-read the locked KERNBASE entry (slot 15 of TLB-0), clear
	 * its global bit, and write it back -- first ITLB, then DTLB.
	 * The first ldxa into %g0 is a dummy read, as in the boot path.
	 */
	set	(0 << 16) | (15 << 3), %g7
	ldxa	[%g7] ASI_ITLB_DATA_ACCESS, %g0
	ldxa	[%g7] ASI_ITLB_DATA_ACCESS, %g1
	andn	%g1, (_PAGE_G), %g1
	stxa	%g1, [%g7] ASI_ITLB_DATA_ACCESS
	membar	#Sync

	ldxa	[%g7] ASI_DTLB_DATA_ACCESS, %g0
	ldxa	[%g7] ASI_DTLB_DATA_ACCESS, %g1
	andn	%g1, (_PAGE_G), %g1
	stxa	%g1, [%g7] ASI_DTLB_DATA_ACCESS
	membar	#Sync

	/* Kill instruction prefetch queues. */
	flush	%g3
	membar	#Sync

	mov	2, %g2		/* Set TLB type to cheetah+. */
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g5,g7,1f)

	mov	1, %g2		/* Set TLB type to cheetah. */

	/* Record the TLB type for the rest of the kernel to consult. */
1:	sethi	%hi(tlb_type), %g5
	stw	%g2, [%g5 + %lo(tlb_type)]

	/* Patch copy/page operations to cheetah optimized versions. */
	call	cheetah_patch_copyops
	 nop
	call	cheetah_patch_pgcopyops
	 nop
	call	cheetah_patch_cachetlbops
	 nop

	ba,pt	%xcc, tlb_fixup_done
	 nop
538
spitfire_tlb_fixup:
	/* Clear the global bit on the locked KERNBASE entry (slot 63)
	 * in both TLBs, mirroring cheetah_tlb_fixup above.
	 */
	mov	(63 << 3), %g7
	ldxa	[%g7] ASI_ITLB_DATA_ACCESS, %g1
	andn	%g1, (_PAGE_G), %g1
	stxa	%g1, [%g7] ASI_ITLB_DATA_ACCESS
	membar	#Sync

	ldxa	[%g7] ASI_DTLB_DATA_ACCESS, %g1
	andn	%g1, (_PAGE_G), %g1
	stxa	%g1, [%g7] ASI_DTLB_DATA_ACCESS
	membar	#Sync

	/* Kill instruction prefetch queues. */
	flush	%g3
	membar	#Sync

	/* Set TLB type to spitfire. */
	mov	0, %g2
	sethi	%hi(tlb_type), %g5
	stw	%g2, [%g5 + %lo(tlb_type)]
559
tlb_fixup_done:
	/* %g6 = init_task_union; stash PROM's stack pointer in %l6 and
	 * its cif handler (%o4, per the boot contract above) in %l7 for
	 * the prom_init call below.
	 */
	sethi	%hi(init_task_union), %g6
	or	%g6, %lo(init_task_union), %g6
	mov	%sp, %l6
	mov	%o4, %l7

#if 0	/* We don't do it like this anymore, but for historical hack value
	 * I leave this snippet here to show how crazy we can be sometimes. 8-)
	 */

	/* Setup "Linux Current Register", thanks Sun 8-) */
	wr	%g0, 0x1, %pcr

	/* Blackbird errata workaround.  See commentary in
	 * smp.c:smp_percpu_timer_interrupt() for more
	 * information.
	 */
	ba,pt	%xcc, 99f
	 nop
	.align	64
99:	wr	%g6, %g0, %pic
	rd	%pic, %g0
#endif

	/* Build the initial kernel stack at the top of the init task
	 * union: sp = %g6 + (1 << THREAD_SHIFT) - (STACKFRAME_SZ + STACK_BIAS).
	 */
	wr	%g0, ASI_P, %asi
	mov	1, %g5
	sllx	%g5, THREAD_SHIFT, %g5
	sub	%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
	add	%g6, %g5, %sp
	mov	0, %fp

	wrpr	%g0, 0, %wstate
	wrpr	%g0, 0x0, %tl

	/* Clear the bss */
	sethi	%hi(__bss_start), %o0
	or	%o0, %lo(__bss_start), %o0
	sethi	%hi(_end), %o1
	or	%o1, %lo(_end), %o1
	call	__bzero
	 sub	%o1, %o0, %o1

	mov	%l6, %o1			! OpenPROM stack
	call	prom_init
	 mov	%l7, %o0			! OpenPROM cif handler

	/* Off we go.... */
	call	start_kernel
	 nop
	/* Not reached... */
610
/* IMPORTANT NOTE: Whenever making changes here, check
 * trampoline.S as well. -jj */
	.globl	setup_tba
setup_tba:	/* i0 = is_starfire */
	save	%sp, -160, %sp

	/* Save PROM's trap table base into prom_tba so we can restore
	 * it later if needed.
	 */
	rdpr	%tba, %g7
	sethi	%hi(prom_tba), %o1
	or	%o1, %lo(prom_tba), %o1
	stx	%g7, [%o1]

	/* Setup "Linux" globals 8-) */
	rdpr	%pstate, %o1
	mov	%g6, %o2
	/* NOTE: wrpr XORs its two source operands, so this toggles into
	 * the alternate-globals set relative to the saved %pstate (%o1).
	 */
	wrpr	%o1, (PSTATE_AG|PSTATE_IE), %pstate
	sethi	%hi(sparc64_ttable_tl0), %g5
	wrpr	%g5, %tba
	mov	%o2, %g6

	/* Set up MMU globals */
	wrpr	%o1, (PSTATE_MG|PSTATE_IE), %pstate

	/* Set fixed globals used by dTLB miss handler. */
#define KERN_HIGHBITS		((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
#define KERN_LOWBITS		(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)

	mov	TSB_REG, %g1
	stxa	%g0, [%g1] ASI_DMMU
	membar	#Sync
	mov	TLB_SFSR, %g1
	sethi	%uhi(KERN_HIGHBITS), %g2
	or	%g2, %ulo(KERN_HIGHBITS), %g2
	sllx	%g2, 32, %g2
	or	%g2, KERN_LOWBITS, %g2

	/* %g3 gets the VPTE base, which differs by CPU type. */
	BRANCH_IF_ANY_CHEETAH(g3,g7,cheetah_vpte_base)
	ba,pt	%xcc, spitfire_vpte_base
	 nop

cheetah_vpte_base:
	sethi		%uhi(VPTE_BASE_CHEETAH), %g3
	or		%g3, %ulo(VPTE_BASE_CHEETAH), %g3
	ba,pt		%xcc, 2f
	 sllx		%g3, 32, %g3

spitfire_vpte_base:
	sethi		%uhi(VPTE_BASE_SPITFIRE), %g3
	or		%g3, %ulo(VPTE_BASE_SPITFIRE), %g3
	sllx		%g3, 32, %g3

2:
	clr	%g7
#undef KERN_HIGHBITS
#undef KERN_LOWBITS

	/* Setup Interrupt globals */
	wrpr	%o1, (PSTATE_IG|PSTATE_IE), %pstate
#ifndef CONFIG_SMP
	sethi	%hi(__up_workvec), %g5
	or	%g5, %lo(__up_workvec), %g6
#else
	/* By definition of where we are, this is boot_cpu.
	 * Compute this CPU's id into %g1 so we can index cpu_data.
	 */
	brz,pt	%i0, not_starfire
	 sethi	%hi(0x1fff4000), %g1
	/* Starfire: read the CPU id from a fixed physical address. */
	or	%g1, %lo(0x1fff4000), %g1
	sllx	%g1, 12, %g1
	or	%g1, 0xd0, %g1
	lduwa	[%g1] ASI_PHYS_BYPASS_EC_E, %g1
	b,pt	%xcc, set_worklist
	 nop

not_starfire:
	BRANCH_IF_JALAPENO(g1,g5,is_jalapeno)
	BRANCH_IF_ANY_CHEETAH(g1,g5,is_cheetah)

	ba,pt	%xcc, not_cheetah
	 nop

is_jalapeno:
	ldxa		[%g0] ASI_JBUS_CONFIG, %g1
	srlx		%g1, 17, %g1
	ba,pt		%xcc, set_worklist
	 and		%g1, 0x1f, %g1		! 5bit JBUS ID

is_cheetah:
	ldxa		[%g0] ASI_SAFARI_CONFIG, %g1
	srlx		%g1, 17, %g1
	ba,pt		%xcc, set_worklist
	 and		%g1, 0x3ff, %g1		! 10bit Safari Agent ID

not_cheetah:
	/* Spitfire: CPU id from the UPA config register. */
	ldxa	[%g0] ASI_UPA_CONFIG, %g1
	srlx	%g1, 17, %g1
	and	%g1, 0x1f, %g1

	/* In theory this is: &(cpu_data[boot_cpu_id].irq_worklists[0]) */
set_worklist:
	sethi	%hi(cpu_data), %g5
	or	%g5, %lo(cpu_data), %g5
	sllx	%g1, 7, %g1		! stride of 128 bytes per cpu_data entry, presumably
	add	%g5, %g1, %g5
	add	%g5, 64, %g6
#endif

	/* Kill PROM timer */
	sethi	%hi(0x80000000), %g1
	sllx	%g1, 32, %g1
	wr	%g1, 0, %tick_cmpr

	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	ba,pt	%xcc, 2f
	 nop

	/* Disable STICK_INT interrupts. */
1:
	sethi	%hi(0x80000000), %g1
	sllx	%g1, 32, %g1
	wr	%g1, %asr25

	/* Ok, we're done setting up all the state our trap mechanism needs,
	 * now get back into normal globals and let the PROM know what is up.
	 */
2:
	wrpr	%g0, %g0, %wstate
	wrpr	%o1, PSTATE_IE, %pstate

	sethi	%hi(sparc64_ttable_tl0), %g5
	call	prom_set_trap_table
	 mov	%g5, %o0

	/* Re-enable interrupts: set PSTATE_IE in the saved %pstate
	 * and write it back (second operand 0, so no bits toggle).
	 */
	rdpr	%pstate, %o1
	or	%o1, PSTATE_IE, %o1
	wrpr	%o1, 0, %pstate

	ret
	 restore
748
749/*
750 * The following skips make sure the trap table in ttable.S is aligned
751 * on a 32K boundary as required by the v9 specs for TBA register.
752 */
753sparc64_boot_end:
754	.skip	0x2000 + _start - sparc64_boot_end
755bootup_user_stack_end:
756	.skip	0x2000
757
#ifdef CONFIG_SBUS
/* This is just a hack to fool make depend config.h discovering
   strategy: As the .S files below need config.h, but
   make depend does not find it for them, we include config.h
   in head.S */
#endif

! 0x0000000000408000

/* The trap table and system call tables are assembled directly after
 * the boot code so they land at the 32K-aligned address ensured above.
 */
#include "ttable.S"
#include "systbls.S"

	.align	1024
	.globl	swapper_pg_dir
swapper_pg_dir:
	.word	0

#include "etrap.S"
#include "rtrap.S"
#include "winfixup.S"
#include "entry.S"

	/* This is just anal retentiveness on my part... */
	.align	16384

	.data
	.align	8
	.globl	prom_tba, tlb_type
prom_tba:	.xword	0	/* Saved OBP %tba; written by setup_tba. */
tlb_type:	.word	0	/* Must NOT end up in BSS */
	.section	".fixup",#alloc,#execinstr
	.globl	__ret_efault
__ret_efault:
	/* Common fixup-section exit: return -EFAULT to the caller. */
	ret
	 restore %g0, -EFAULT, %o0