1/* This file is subject to the terms and conditions of the GNU General Public
2 * License.  See the file "COPYING" in the main directory of this archive
3 * for more details.
4 *
5 * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
6 * Copyright 1999 SuSE GmbH (Philipp Rumpf)
7 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
8 * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
9 * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
10 * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
11 *
12 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
13 */
14
15#include <asm/asm-offsets.h>
16#include <asm/psw.h>
17#include <asm/pdc.h>
18
19#include <asm/assembly.h>
20#include <asm/pgtable.h>
21
22#include <linux/linkage.h>
23#include <linux/init.h>
24
25	.level	LEVEL
26
27	__INITDATA
/* Storage for the four 32-bit arguments the boot loader (palo) hands us
 * in %arg0-%arg3; stext copies them here before they are clobbered. */
ENTRY(boot_args)
	.word 0 /* arg0 */
	.word 0 /* arg1 */
	.word 0 /* arg2 */
	.word 0 /* arg3 */
END(boot_args)
34
	__HEAD

	.align	4
	.import init_thread_union,data
	.import fault_vector_20,code    /* IVA parisc 2.0 32 bit */
#ifndef CONFIG_64BIT
        .import fault_vector_11,code    /* IVA parisc 1.1 32 bit */
	.import	$global$		/* forward declaration */
#endif /*!CONFIG_64BIT*/
	.export _stext,data		/* Kernel wants it this way! */
_stext:
ENTRY(stext)
	.proc
	.callinfo

	/* Make sure sr4-sr7 are set to zero for the kernel address space */
	mtsp	%r0,%sr4
	mtsp	%r0,%sr5
	mtsp	%r0,%sr6
	mtsp	%r0,%sr7

	/* Clear BSS (shouldn't the boot loader do this?) */

	.import __bss_start,data
	.import __bss_stop,data

	load32		PA(__bss_start),%r3
	load32		PA(__bss_stop),%r4
$bss_loop:
	/* Word-at-a-time clear: the stw,ma in the delay slot zeroes *%r3 and
	 * post-increments %r3 by 4; the ,n completer nullifies it once the
	 * branch falls through, so we never store past __bss_stop. */
	cmpb,<<,n       %r3,%r4,$bss_loop
	stw,ma          %r0,4(%r3)

	/* Save away the arguments the boot loader passed in (32 bit args) */
	load32		PA(boot_args),%r1
	stw,ma          %arg0,4(%r1)
	stw,ma          %arg1,4(%r1)
	stw,ma          %arg2,4(%r1)
	stw,ma          %arg3,4(%r1)

	/* Initialize startup VM. Just map first 8/16 MB of memory */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#if PT_NLEVELS == 3
	/* Set pmd in pgd: store the (shifted, flagged) physical address of
	 * pmd0 into the pgd slot for the kernel mapping, then point %r4 at
	 * the first pmd entry to fill below. */
	load32		PA(pmd0),%r5
	shrd            %r5,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
	stw		%r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
	ldo		ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
#else
	/* 2-level page table, so pmd == pgd */
	ldo		ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif

	/* Fill in pmd with enough pte directories */
	load32		PA(pg0),%r1
	SHRREG		%r1,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3

	ldi		ASM_PT_INITIAL,%r1	/* number of pte pages to wire in */

1:
	/* Store one pmd/pgd entry, advance the entry value by one pte page
	 * worth of PFNs, and (in the delay slot) step to the next slot. */
	stw		%r3,0(%r4)
	ldo		(PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
	addib,>		-1,%r1,1b
#if PT_NLEVELS == 3
	ldo             ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
	ldo             ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif


	/* Now initialize the PTEs themselves: identity-map physical pages
	 * starting at 0 with kernel protections. */
	ldo		0+_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
	ldi		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
	load32		PA(pg0),%r1

$pgt_fill_loop:
	STREGM          %r3,ASM_PTE_ENTRY_SIZE(%r1)	/* store pte, advance %r1 */
	ldo		(1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
	addib,>		-1,%r11,$pgt_fill_loop
	nop

	/* Load the return address...er...crash 'n burn */
	copy		%r0,%r2

	/* And the RFI Target address too */
	load32		start_parisc,%r11

	/* And the initial task pointer */
	load32		init_thread_union,%r6
	mtctl           %r6,%cr30

	/* And the stack pointer too (stack grows up on parisc) */
	ldo             THREAD_SZ_ALGN(%r6),%sp
132
#ifdef CONFIG_SMP
	/* Set the smp rendezvous address into page zero.
	** It would be safer to do this in init_smp_config() but
	** it's just way easier to deal with here because
	** of 64-bit function ptrs and the address is local to this file.
	*/
	load32		PA(smp_slave_stext),%r10
	stw		%r10,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI - assume addr < 4GB */

	/* Monarch CPU falls straight through into common_stext below. */
	/* FALLTHROUGH */
	.procend

	/*
	** Code Common to both Monarch and Slave processors.
	** Entry:
	**
	**  1.1:
	**    %r11 must contain RFI target address.
	**    %r25/%r26 args to pass to target function
	**    %r2  in case rfi target decides it didn't like something
	**
	**  2.0w:
	**    %r3  PDCE_PROC address
	**    %r11 RFI target address
	**
	** Caller must init: SR4-7, %sp, %r10, %cr24/25,
	*/
common_stext:
	.proc
	.callinfo
#else
	/* UP build: clear PDC entry point - we won't use it */
	stw		%r0,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI */
#endif /*CONFIG_SMP*/
169
#ifdef CONFIG_64BIT
	tophys_r1	%sp		/* we run physical until the rfi below */

	/* Save the rfi target address in the task's pt_regs, since the
	 * firmware call below is free to clobber %r11. */
	ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
	tophys_r1       %r10
	std             %r11,  TASK_PT_GR11(%r10)
	/* Switch to wide mode Superdome doesn't support narrow PDC
	** calls.
	*/
1:	mfia            %rp             /* clear upper part of pcoq */
	ldo             2f-1b(%rp),%rp	/* %rp = physical address of 2f */
	depdi           0,31,32,%rp	/* zero upper 32 bits of target */
	bv              (%rp)
	ssm             PSW_SM_W,%r0	/* set PSW W-bit in the delay slot */

        /* Set Wide mode as the "Default" (eg for traps)
        ** First trap occurs *right* after (or part of) rfi for slave CPUs.
        ** Someday, palo might not do this for the Monarch either.
        */
2:
	/* Page-zero locations holding the two halves of the PDC entry point. */
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
	ldw             MEM_PDC_LO(%r0),%r3
	ldw             MEM_PDC_HI(%r0),%r6
	depd            %r6, 31, 32, %r3        /* move to upper word */

	/* PDC_PSW/PDC_PSW_SET_DEFAULTS: ask firmware to default to wide mode. */
	ldo             PDC_PSW(%r0),%arg0              /* 21 */
	ldo             PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
	ldo             PDC_PSW_WIDE_BIT(%r0),%arg2     /* 2 */
	load32          PA(stext_pdc_ret), %rp
	bv              (%r3)
	copy            %r0,%arg3

stext_pdc_ret:
	/* restore rfi target address*/
	ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
	tophys_r1       %r10
	ldd             TASK_PT_GR11(%r10), %r11
	tovirt_r1       %sp
#endif
211
	/* PARANOID: clear user scratch/user space SR's */
	mtsp	%r0,%sr0
	mtsp	%r0,%sr1
	mtsp	%r0,%sr2
	mtsp	%r0,%sr3

	/* Initialize Protection Registers */
	mtctl	%r0,%cr8
	mtctl	%r0,%cr9
	mtctl	%r0,%cr12
	mtctl	%r0,%cr13

	/* Initialize the global data pointer */
	loadgp

	/* Set up our interrupt table.  HPMCs might not work after this!
	 *
	 * We need to install the correct iva for PA1.1 or PA2.0. The
	 * following short sequence of instructions can determine this
	 * (without being illegal on a PA1.1 machine).
	 */
#ifndef CONFIG_64BIT
	/* Width probe: write 32 into %cr11 (SAR) and read it back.  A
	 * narrower PA1.1 SAR truncates the value to 0, a PA2.0 SAR keeps
	 * it nonzero - presumably the 5- vs 6-bit SAR width; the mfctl,w
	 * form stays legal on 1.1.  NOTE(review): confirm against the
	 * PA-RISC 2.0 architecture manual. */
	ldi		32,%r10
	mtctl		%r10,%cr11
	.level 2.0
	mfctl,w		%cr11,%r10
	.level 1.1
	comib,<>,n	0,%r10,$is_pa20
	ldil		L%PA(fault_vector_11),%r10	/* PA1.1: use 1.1 vector */
	b		$install_iva
	ldo		R%PA(fault_vector_11)(%r10),%r10

$is_pa20:
	.level		LEVEL /* restore 1.1 || 2.0w */
#endif /*!CONFIG_64BIT*/
	load32		PA(fault_vector_20),%r10

$install_iva:
	mtctl		%r10,%cr14	/* %cr14 is the IVA */

	b		aligned_rfi  /* Prepare to RFI! Man all the cannons! */
	nop

	.align 128
aligned_rfi:
	pcxt_ssm_bug

	rsm		PSW_SM_QUIET,%r0	/* off troublesome PSW bits */
	/* Don't need NOPs, have 8 compliant insn before rfi */

	/* Two writes to %cr17: the IIA space queue shifts, so this sets
	 * both tail and head entries. */
	mtctl		%r0,%cr17	/* Clear IIASQ tail */
	mtctl		%r0,%cr17	/* Clear IIASQ head */

	/* Load RFI target into PC queue (%cr18 is also a two-deep queue) */
	mtctl		%r11,%cr18	/* IIAOQ head */
	ldo		4(%r11),%r11
	mtctl		%r11,%cr18	/* IIAOQ tail */

	load32		KERNEL_PSW,%r10
	mtctl		%r10,%ipsw	/* PSW to be restored by the rfi */

	/* Jump through hyperspace to Virt Mode */
	rfi
	nop

	.procend
278
#ifdef CONFIG_SMP

	.import smp_init_current_idle_task,data
	.import	smp_callin,code

#ifndef CONFIG_64BIT
/* RFI *return* target for slaves on 32-bit: smp_callin() must never
 * return, so landing here is a fatal error. */
smp_callin_rtn:
        .proc
	.callinfo
	break	1,1		/*  Break if returned from start_secondary */
	nop
	nop
        .procend
#endif /*!CONFIG_64BIT*/

/***************************************************************************
* smp_slave_stext is executed by all non-monarch Processors when the Monarch
* pokes the slave CPUs in smp.c:smp_boot_cpus().
*
* Once here, registers values are initialized in order to branch to virtual
* mode. Once all available/eligible CPUs are in virtual mode, all are
* released and start out by executing their own idle task.
*****************************************************************************/
smp_slave_stext:
        .proc
	.callinfo

	/*
	** Initialize Space registers
	*/
	mtsp	   %r0,%sr4
	mtsp	   %r0,%sr5
	mtsp	   %r0,%sr6
	mtsp	   %r0,%sr7

	/*  Initialize the SP - monarch sets up smp_init_current_idle_task */
	load32		PA(smp_init_current_idle_task),%sp
	LDREG		0(%sp),%sp	/* load task address */
	tophys_r1	%sp
	LDREG		TASK_THREAD_INFO(%sp),%sp
	mtctl           %sp,%cr30       /* store in cr30 */
	ldo             THREAD_SZ_ALGN(%sp),%sp

	/* point CPU to kernel page tables */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#ifdef CONFIG_64BIT
	/* Setup PDCE_PROC entry (common_stext expects it in %r3) */
	copy            %arg0,%r3
#else
	/* Load RFI *return* address in case smp_callin bails */
	load32		smp_callin_rtn,%r2
#endif

	/* Load RFI target address.  */
	load32		smp_callin,%r11

	/* ok...common code can handle the rest */
	b		common_stext
	nop

	.procend
#endif /* CONFIG_SMP */

ENDPROC(stext)
346
#ifndef CONFIG_64BIT
	.section .data..read_mostly

	.align	4
	/* 32-bit global data pointer anchor ($global$ is the linker-known
	 * base that loadgp establishes in %dp/%r27). */
	.export	$global$,data

	.type	$global$,@object
	.size	$global$,4
$global$:
	.word 0
#endif /*!CONFIG_64BIT*/
358