/*
 * (C) Copyright 2007
 * Texas Instruments
 * Karthik Dasu <karthik-dp@ti.com>
 *
 * (C) Copyright 2004
 * Texas Instruments, <www.ti.com>
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <plat/sram.h>
#include <mach/io.h>

#include "cm2xxx_3xxx.h"
#include "prm2xxx_3xxx.h"
#include "sdrc.h"
#include "control.h"
/*
 * Register access definitions
 */
#define SDRC_SCRATCHPAD_SEM_OFFS	0xc
#define SDRC_SCRATCHPAD_SEM_V	OMAP343X_SCRATCHPAD_REGADDR\
					(SDRC_SCRATCHPAD_SEM_OFFS)
#define PM_PREPWSTST_CORE_P	OMAP3430_PRM_BASE + CORE_MOD +\
					OMAP3430_PM_PREPWSTST
#define PM_PWSTCTRL_MPU_P	OMAP3430_PRM_BASE + MPU_MOD + OMAP2_PM_PWSTCTRL
#define CM_IDLEST1_CORE_V	OMAP34XX_CM_REGADDR(CORE_MOD, CM_IDLEST1)
#define CM_IDLEST_CKGEN_V	OMAP34XX_CM_REGADDR(PLL_MOD, CM_IDLEST)
#define SRAM_BASE_P		OMAP3_SRAM_PA
#define CONTROL_STAT		OMAP343X_CTRL_BASE + OMAP343X_CONTROL_STATUS
#define CONTROL_MEM_RTA_CTRL	(OMAP343X_CTRL_BASE +\
					OMAP36XX_CONTROL_MEM_RTA_CTRL)

/* Move this when a correct place becomes available */
#define SCRATCHPAD_MEM_OFFS	0x310
#define SCRATCHPAD_BASE_P	(OMAP343X_CTRL_BASE +\
					OMAP343X_CONTROL_MEM_WKUP +\
					SCRATCHPAD_MEM_OFFS)
#define SDRC_POWER_V		OMAP34XX_SDRC_REGADDR(SDRC_POWER)
#define SDRC_SYSCONFIG_P	(OMAP343X_SDRC_BASE + SDRC_SYSCONFIG)
#define SDRC_MR_0_P		(OMAP343X_SDRC_BASE + SDRC_MR_0)
#define SDRC_EMR2_0_P		(OMAP343X_SDRC_BASE + SDRC_EMR2_0)
#define SDRC_MANUAL_0_P		(OMAP343X_SDRC_BASE + SDRC_MANUAL_0)
#define SDRC_MR_1_P		(OMAP343X_SDRC_BASE + SDRC_MR_1)
#define SDRC_EMR2_1_P		(OMAP343X_SDRC_BASE + SDRC_EMR2_1)
#define SDRC_MANUAL_1_P		(OMAP343X_SDRC_BASE + SDRC_MANUAL_1)
#define SDRC_DLLA_STATUS_V	OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS)
#define SDRC_DLLA_CTRL_V	OMAP34XX_SDRC_REGADDR(SDRC_DLLA_CTRL)

/*
 * This file needs to be built unconditionally as ARM to interoperate correctly
 * with non-Thumb-2-capable firmware.
 */
	.arm

/*
 * API functions
 */

/*
 * The "get_*restore_pointer" functions are used to provide a
 * physical restore address where the ROM code jumps while waking
 * up from MPU OFF/OSWR state.
 * The restore pointer is stored into the scratchpad.
 */

	.text
/* Function call to get the restore pointer for resume from OFF */
ENTRY(get_restore_pointer)
	stmfd	sp!, {lr}	@ save registers on stack
	adr	r0, restore
	ldmfd	sp!, {pc}	@ restore regs and return
ENDPROC(get_restore_pointer)
	.align
ENTRY(get_restore_pointer_sz)
	.word	. - get_restore_pointer

	.text
/* Function call to get the restore pointer for 3630 resume from OFF */
ENTRY(get_omap3630_restore_pointer)
	stmfd	sp!, {lr}	@ save registers on stack
	adr	r0, restore_3630
	ldmfd	sp!, {pc}	@ restore regs and return
ENDPROC(get_omap3630_restore_pointer)
	.align
ENTRY(get_omap3630_restore_pointer_sz)
	.word	. - get_omap3630_restore_pointer

	.text
/* Function call to get the restore pointer for ES3 to resume from OFF */
ENTRY(get_es3_restore_pointer)
	stmfd	sp!, {lr}	@ save registers on stack
	adr	r0, restore_es3
	ldmfd	sp!, {pc}	@ restore regs and return
ENDPROC(get_es3_restore_pointer)
	.align
ENTRY(get_es3_restore_pointer_sz)
	.word	. - get_es3_restore_pointer

	.text
/*
 * L2 cache needs to be toggled for stable OFF mode functionality on 3630.
 * This function sets up a flag that will allow for this toggling to take
 * place on 3630. Hopefully some version in the future may not need this.
 */
ENTRY(enable_omap3630_toggle_l2_on_restore)
	stmfd	sp!, {lr}	@ save registers on stack
	/* Setup so that we will disable and enable l2 */
	mov	r1, #0x1
	adrl	r2, l2dis_3630	@ may be too distant for plain adr
	str	r1, [r2]
	ldmfd	sp!, {pc}	@ restore regs and return
ENDPROC(enable_omap3630_toggle_l2_on_restore)

	.text
/* Function that calls the ROM code to save the secure RAM context */
	.align	3
ENTRY(save_secure_ram_context)
	stmfd	sp!, {r1-r12, lr}	@ save registers on stack
	adr	r3, api_params		@ r3 points to parameters
	str	r0, [r3,#0x4]		@ r0 has sdram address
	ldr	r12, high_mask
	and	r3, r3, r12
	ldr	r12, sram_phy_addr_mask
	orr	r3, r3, r12
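	/*
	 * r3 now holds the SRAM-physical address of api_params: this code
	 * is expected to run from SRAM, so keeping the low 16 bits of the
	 * runtime address and merging in the SRAM physical base converts
	 * it into the physical address the ROM code needs.
	 */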
	mov	r0, #25			@ set service ID for PPA
	mov	r12, r0			@ copy secure service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	dsb				@ data synchronization barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
	nop
	nop
	nop
	nop
	ldmfd	sp!, {r1-r12, pc}
	.align
sram_phy_addr_mask:
	.word	SRAM_BASE_P
high_mask:
	.word	0xffff
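	/*
	 * Parameters handed to the ROM code.  The word at offset 0x4 is
	 * patched at runtime with the SDRAM buffer address (see the
	 * "str r0, [r3,#0x4]" above); the remaining words appear to be
	 * fixed values expected by the secure service.
	 */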
api_params:
	.word	0x4, 0x0, 0x0, 0x1, 0x1
ENDPROC(save_secure_ram_context)
ENTRY(save_secure_ram_context_sz)
	.word	. - save_secure_ram_context

/*
 * ======================
 * == Idle entry point ==
 * ======================
 */

/*
 * Forces OMAP into idle state
 *
 * omap34xx_cpu_suspend() - This bit of code saves the CPU context if needed
 * and executes the WFI instruction. Calling WFI effectively changes the
 * power domain states to the desired target power states.
 *
 * Notes:
 * - this code gets copied to internal SRAM at boot and after wake-up
 *   from OFF mode. The execution pointer in SRAM is _omap_sram_idle.
 * - when the OMAP wakes up it continues at different execution points
 *   depending on the low power mode (non-OFF vs OFF modes),
 *   cf. 'Resume path for xxx mode' comments.
 */
	.align	3
ENTRY(omap34xx_cpu_suspend)
	stmfd	sp!, {r0-r12, lr}	@ save registers on stack

	/*
	 * r0 contains CPU context save/restore pointer in sdram
	 * r1 contains information about saving context:
	 *   0 - No context lost
	 *   1 - Only L1 and logic lost
	 *   2 - Only L2 lost (even if L1 is retained, we clean it along with L2)
	 *   3 - Both L1 and L2 lost and logic lost
	 */

	/* Jump directly to WFI if the context save is not required */
	cmp	r1, #0x0
	beq	omap3_do_wfi

	/* Otherwise fall through to the context save code */
save_context_wfi:
	mov	r8, r0			@ Store SDRAM address in r8
	mrc	p15, 0, r5, c1, c0, 1	@ Read Auxiliary Control Register
	mov	r4, #0x1		@ Number of parameters for restore call
	stmia	r8!, {r4-r5}		@ Push parameters for restore call
	mrc	p15, 1, r5, c9, c0, 2	@ Read L2 AUX ctrl register
	stmia	r8!, {r4-r5}		@ Push parameters for restore call

	/* Check what the target sleep state is from r1 */
	cmp	r1, #0x2		@ Only L2 lost, no need to save context
	beq	clean_caches

l1_logic_lost:
	mov	r4, sp			@ Store sp
	mrs	r5, spsr		@ Store spsr
	mov	r6, lr			@ Store lr
	stmia	r8!, {r4-r6}

	mrc	p15, 0, r4, c1, c0, 2	@ Coprocessor access control register
	mrc	p15, 0, r5, c2, c0, 0	@ TTBR0
	mrc	p15, 0, r6, c2, c0, 1	@ TTBR1
	mrc	p15, 0, r7, c2, c0, 2	@ TTBCR
	stmia	r8!, {r4-r7}

	mrc	p15, 0, r4, c3, c0, 0	@ Domain access Control Register
	mrc	p15, 0, r5, c10, c2, 0	@ PRRR
	mrc	p15, 0, r6, c10, c2, 1	@ NMRR
	stmia	r8!, {r4-r6}

	mrc	p15, 0, r4, c13, c0, 1	@ Context ID
	mrc	p15, 0, r5, c13, c0, 2	@ User r/w thread and process ID
	mrc	p15, 0, r6, c12, c0, 0	@ Secure or NS vector base address
	mrs	r7, cpsr		@ Store current cpsr
	stmia	r8!, {r4-r7}

	mrc	p15, 0, r4, c1, c0, 0	@ save control register
	stmia	r8!, {r4}

clean_caches:
	/*
	 * Jump out to the kernel flush routine.
	 *  - reusing that code is better than duplicating it here
	 *  - it executes in a cached space so it is faster than refetching
	 *    per block
	 *  - it will track any changes in the kernel
	 *  - the routine's address 'might' have to be copied, loaded and
	 *    jumped to, as is done here
	 * Flush all data from the L1 data cache before disabling the
	 * SCTLR.C bit.
	 */
	ldr	r1, kernel_flush
	mov	lr, pc
	bx	r1

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C makes all data accesses
	 * strongly ordered, so they no longer hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)	@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Invalidate the L1 data cache. Even though only an invalidate is
	 * necessary, the exported flush API is used here. A clean on an
	 * already clean cache is almost a NOP.
	 */
	ldr	r1, kernel_flush
	blx	r1
	/*
	 * The kernel doesn't interwork: v7_flush_dcache_all in particular will
	 * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
	 * This sequence switches back to ARM.  Note that .align may insert a
	 * nop: bx pc needs to be word-aligned in order to work.
	 */
 THUMB(	.thumb		)
 THUMB(	.align		)
 THUMB(	bx	pc	)
 THUMB(	nop		)
	.arm

omap3_do_wfi:
	ldr	r4, sdrc_power		@ read the SDRC_POWER register
	ldr	r5, [r4]		@ read the contents of SDRC_POWER
	orr	r5, r5, #0x40		@ enable self refresh on idle req
	str	r5, [r4]		@ write back to SDRC_POWER register

	/* Data synchronization barrier and data memory barrier */
	dsb
	dmb

/*
 * ===================================
 * == WFI instruction => Enter idle ==
 * ===================================
 */
	wfi				@ wait for interrupt

/*
 * ===================================
 * == Resume path for non-OFF modes ==
 * ===================================
 */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	bl	wait_sdrc_ok

	mrc	p15, 0, r0, c1, c0, 0
	tst	r0, #(1 << 2)		@ Check C bit enabled?
	orreq	r0, r0, #(1 << 2)	@ Enable the C bit if cleared
	mcreq	p15, 0, r0, c1, c0, 0
	isb

/*
 * ===================================
 * == Exit point from non-OFF modes ==
 * ===================================
 */
	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return


/*
 * ==============================
 * == Resume path for OFF mode ==
 * ==============================
 */

/*
 * The restore_* functions are called by the ROM code
 *  when back from WFI in OFF mode.
 * Cf. the get_*restore_pointer functions.
 *
 *  restore_es3: applies to 34xx >= ES3.0
 *  restore_3630: applies to 36xx
 *  restore: common code for 3xxx
 */
restore_es3:
	ldr	r5, pm_prepwstst_core_p
	ldr	r4, [r5]
	and	r4, r4, #0x3
	cmp	r4, #0x0	@ Check if previous power state of CORE is OFF
	bne	restore
	adr	r0, es3_sdrc_fix
	ldr	r1, sram_base
	ldr	r2, es3_sdrc_fix_sz
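	/*
	 * Convert the copy size from bytes to words: the size is a multiple
	 * of 4, so a rotate right by two is equivalent to a divide by four.
	 */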
	mov	r2, r2, ror #2
copy_to_sram:
	ldmia	r0!, {r3}	@ val = *src
	stmia	r1!, {r3}	@ *dst = val
	subs	r2, r2, #0x1	@ num_words--
	bne	copy_to_sram
	ldr	r1, sram_base
	blx	r1
	b	restore

restore_3630:
	ldr	r1, pm_prepwstst_core_p
	ldr	r2, [r1]
	and	r2, r2, #0x3
	cmp	r2, #0x0	@ Check if previous power state of CORE is OFF
	bne	restore
	/* Disable RTA before giving control */
	ldr	r1, control_mem_rta
	mov	r2, #OMAP36XX_RTA_DISABLE
	str	r2, [r1]

	/* Fall through to common code for the remaining logic */

restore:
	/*
	 * Read the reason for the MPU reset and store it in r9:
	 *  0 - No context lost
	 *  1 - Only L1 and logic lost
	 *  2 - Only L2 lost - in this case, we won't be here
	 *  3 - Both L1 and L2 lost
	 */
	ldr	r1, pm_pwstctrl_mpu
	ldr	r2, [r1]
	and	r2, r2, #0x3
	cmp	r2, #0x0	@ Check if target power state was OFF or RET
	moveq	r9, #0x3	@ MPU OFF => L1 and L2 lost
	movne	r9, #0x1	@ Only L1 and logic lost => avoid L2 invalidation
	bne	logic_l1_restore

	ldr	r0, l2dis_3630
	cmp	r0, #0x1	@ should we disable L2 on 3630?
	bne	skipl2dis
	mrc	p15, 0, r0, c1, c0, 1
	bic	r0, r0, #2	@ disable L2 cache
	mcr	p15, 0, r0, c1, c0, 1
skipl2dis:
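	/*
	 * Check the device type: bits [10:8] of CONTROL_STATUS hold the
	 * device type, and 0x3 is assumed to mean a general-purpose (GP)
	 * device, which takes the l2_inv_gp path below.
	 */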
	ldr	r0, control_stat
	ldr	r1, [r0]
	and	r1, #0x700
	cmp	r1, #0x300
	beq	l2_inv_gp
	mov	r0, #40			@ set service ID for PPA
	mov	r12, r0			@ copy secure Service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	adr	r3, l2_inv_api_params	@ r3 points to dummy parameters
	dsb				@ data synchronization barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
	/* Write to Aux control register to set some bits */
	mov	r0, #42			@ set service ID for PPA
	mov	r12, r0			@ copy secure Service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]		@ r3 points to parameters
	dsb				@ data synchronization barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)

#ifdef CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE
	/* Restore L2 aux control register */
					@ set service ID for PPA
	mov	r0, #CONFIG_OMAP3_L2_AUX_SECURE_SERVICE_SET_ID
	mov	r12, r0			@ copy service ID in r12
	mov	r1, #0			@ set task ID for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]
	adds	r3, r3, #8		@ r3 points to parameters
	dsb				@ data synchronization barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
#endif
	b	logic_l1_restore

	.align
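	/* Presumably {number of parameters, value}, consumed by the PPA */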
l2_inv_api_params:
	.word	0x1, 0x00
l2_inv_gp:
	/* Execute smi to invalidate L2 cache */
	mov	r12, #0x1		@ set up to invalidate L2
	smc	#0			@ Call SMI monitor (smieq)
	/* Write to Aux control register to set some bits */
	ldr	r4, scratchpad_base
	ldr	r3, [r4,#0xBC]
	ldr	r0, [r3,#4]
	mov	r12, #0x3
	smc	#0			@ Call SMI monitor (smieq)
	ldr	r4, scratchpad_base
	ldr	r3, [r4,#0xBC]
	ldr	r0, [r3,#12]
	mov	r12, #0x2
	smc	#0			@ Call SMI monitor (smieq)
logic_l1_restore:
	ldr	r1, l2dis_3630
	cmp	r1, #0x1		@ Test if L2 re-enable needed on 3630
	bne	skipl2reen
	mrc	p15, 0, r1, c1, c0, 1
	orr	r1, r1, #2		@ re-enable L2 cache
	mcr	p15, 0, r1, c1, c0, 1
skipl2reen:
	mov	r1, #0
	/*
	 * Invalidate all instruction caches to PoU
	 * and flush branch target cache
	 */
	mcr	p15, 0, r1, c7, c5, 0

	ldr	r4, scratchpad_base
	ldr	r3, [r4,#0xBC]
	adds	r3, r3, #16

	ldmia	r3!, {r4-r6}
	mov	sp, r4			@ Restore sp
	msr	spsr_cxsf, r5		@ Restore spsr
	mov	lr, r6			@ Restore lr

	ldmia	r3!, {r4-r7}
	mcr	p15, 0, r4, c1, c0, 2	@ Coprocessor access Control Register
	mcr	p15, 0, r5, c2, c0, 0	@ TTBR0
	mcr	p15, 0, r6, c2, c0, 1	@ TTBR1
	mcr	p15, 0, r7, c2, c0, 2	@ TTBCR

	ldmia	r3!, {r4-r6}
	mcr	p15, 0, r4, c3, c0, 0	@ Domain access Control Register
	mcr	p15, 0, r5, c10, c2, 0	@ PRRR
	mcr	p15, 0, r6, c10, c2, 1	@ NMRR

	ldmia	r3!, {r4-r7}
	mcr	p15, 0, r4, c13, c0, 1	@ Context ID
	mcr	p15, 0, r5, c13, c0, 2	@ User r/w thread and process ID
	mcr	p15, 0, r6, c12, c0, 0	@ Secure or NS vector base address
	msr	cpsr, r7		@ Restore cpsr

	/* Enable the MMU */
	mrc	p15, 0, r7, c2, c0, 2	@ Read TTBCR
	/* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
	and	r7, #0x7
	cmp	r7, #0x0
	beq	usettbr0
ttbr_error:
	/*
	 * More work needs to be done to support an N[0:2] value other than 0.
	 * Loop here so that the error can be detected.
	 */
	b	ttbr_error
usettbr0:
	mrc	p15, 0, r2, c2, c0, 0
	ldr	r5, ttbrbit_mask
	and	r2, r5
	mov	r4, pc
	ldr	r5, table_index_mask
	and	r4, r5			@ r4 = 31 to 20 bits of pc
	/* Extract the value to be written to the table entry */
	ldr	r1, table_entry
	/* r1 has the value to be written to the table entry */
	add	r1, r1, r4
	/* Get the address of the table entry to modify */
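	/*
	 * (pc >> 20) indexes the 1MB sections; each first-level entry is
	 * 4 bytes, so (pc >> 18) is the byte offset of the entry within
	 * the translation table.
	 */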
	lsr	r4, #18
	/* r2 has the location which needs to be modified */
	add	r2, r4
	/* Store the previous entry of the location being modified */
	ldr	r5, scratchpad_base
	ldr	r4, [r2]
	str	r4, [r5, #0xC0]
	/* Modify the table entry */
	str	r1, [r2]
	/*
	 * Store the address of the entry being modified
	 * - it will be restored after enabling the MMU
	 */
	ldr	r5, scratchpad_base
	str	r2, [r5, #0xC4]

	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 4	@ Flush prefetch buffer
	mcr	p15, 0, r0, c7, c5, 6	@ Invalidate branch predictor array
	mcr	p15, 0, r0, c8, c5, 0	@ Invalidate instruction TLB
	mcr	p15, 0, r0, c8, c6, 0	@ Invalidate data TLB
	/*
	 * Restore the control register. This enables the MMU.
	 * The caches and prediction are not enabled here; they
	 * will be enabled after restoring the MMU table entry.
	 */
	ldmia	r3!, {r4}
	/* Store the previous value of the control register in the scratchpad */
	str	r4, [r5, #0xC8]
	ldr	r2, cache_pred_disable_mask
	and	r4, r2
	mcr	p15, 0, r4, c1, c0, 0
	dsb
	isb
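	/*
	 * Branch to the link-time (virtual) address of restoremmu_on: the
	 * MMU is on again, and the flat-mapped section entry patched above
	 * is what keeps this code reachable during the switch.
	 */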
	ldr	r0, =restoremmu_on
	bx	r0

/*
 * ==============================
 * == Exit point from OFF mode ==
 * ==============================
 */
restoremmu_on:
	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return

/*
 * Internal functions
 */

/* This function implements the erratum ID i443 workaround; applies to 34xx >= ES3.0 */
	.text
	.align	3
ENTRY(es3_sdrc_fix)
	ldr	r4, sdrc_syscfg		@ get config addr
	ldr	r5, [r4]		@ get value
	tst	r5, #0x100		@ is part access blocked
	it	eq
	biceq	r5, r5, #0x100		@ clear bit if set
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_mr_0		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_emr2_0		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_manual_0	@ get config addr
	mov	r5, #0x2		@ autorefresh command
	str	r5, [r4]		@ kick off refreshes
	ldr	r4, sdrc_mr_1		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_emr2_1		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_manual_1	@ get config addr
	mov	r5, #0x2		@ autorefresh command
	str	r5, [r4]		@ kick off refreshes
	bx	lr

	.align
sdrc_syscfg:
	.word	SDRC_SYSCONFIG_P
sdrc_mr_0:
	.word	SDRC_MR_0_P
sdrc_emr2_0:
	.word	SDRC_EMR2_0_P
sdrc_manual_0:
	.word	SDRC_MANUAL_0_P
sdrc_mr_1:
	.word	SDRC_MR_1_P
sdrc_emr2_1:
	.word	SDRC_EMR2_1_P
sdrc_manual_1:
	.word	SDRC_MANUAL_1_P
ENDPROC(es3_sdrc_fix)
ENTRY(es3_sdrc_fix_sz)
	.word	. - es3_sdrc_fix


/*
 * This function implements the erratum ID i581 WA:
 *  SDRC state restore before accessing the SDRAM
 *
 * Only used at return from non-OFF mode. For OFF
 * mode the ROM code configures the SDRC and
 * the DPLL before calling the restore code directly
 * from DDR.
 */

/* Make sure SDRC accesses are ok */
wait_sdrc_ok:

/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
	ldr	r4, cm_idlest_ckgen
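	/* Bit 0 of CM_IDLEST_CKGEN is assumed to be the DPLL3 lock status */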
wait_dpll3_lock:
	ldr	r5, [r4]
	tst	r5, #1
	beq	wait_dpll3_lock

	ldr	r4, cm_idlest1_core
wait_sdrc_ready:
	ldr	r5, [r4]
	tst	r5, #0x2
	bne	wait_sdrc_ready
	/* allow DLL powerdown upon hw idle req */
	ldr	r4, sdrc_power
	ldr	r5, [r4]
	bic	r5, r5, #0x40
	str	r5, [r4]

/*
 * PC-relative stores lead to undefined behaviour in Thumb-2: use r7 as a
 * base instead.
 * Be careful not to clobber r7 when maintaining this code.
 */

is_dll_in_lock_mode:
	/* Is dll in lock mode? */
	ldr	r4, sdrc_dlla_ctrl
	ldr	r5, [r4]
	tst	r5, #0x4
	bxne	lr			@ Return if locked
	/* wait till dll locks */
	adr	r7, kick_counter
wait_dll_lock_timed:
	ldr	r4, wait_dll_lock_counter
	add	r4, r4, #1
	str	r4, [r7, #wait_dll_lock_counter - kick_counter]
	ldr	r4, sdrc_dlla_status
	/* Wait 20uS for lock */
	mov	r6, #8
wait_dll_lock:
	subs	r6, r6, #0x1
	beq	kick_dll
	ldr	r5, [r4]
	and	r5, r5, #0x4
	cmp	r5, #0x4
	bne	wait_dll_lock
	bx	lr			@ Return when locked

	/* disable/reenable DLL if not locked */
kick_dll:
	ldr	r4, sdrc_dlla_ctrl
	ldr	r5, [r4]
	mov	r6, r5
	bic	r6, #(1<<3)		@ disable dll
	str	r6, [r4]
	dsb
	orr	r6, r6, #(1<<3)		@ enable dll
	str	r6, [r4]
	dsb
	ldr	r4, kick_counter
	add	r4, r4, #1
	str	r4, [r7]		@ kick_counter
	b	wait_dll_lock_timed

	.align
cm_idlest1_core:
	.word	CM_IDLEST1_CORE_V
cm_idlest_ckgen:
	.word	CM_IDLEST_CKGEN_V
sdrc_dlla_status:
	.word	SDRC_DLLA_STATUS_V
sdrc_dlla_ctrl:
	.word	SDRC_DLLA_CTRL_V
pm_prepwstst_core_p:
	.word	PM_PREPWSTST_CORE_P
pm_pwstctrl_mpu:
	.word	PM_PWSTCTRL_MPU_P
scratchpad_base:
	.word	SCRATCHPAD_BASE_P
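	/*
	 * Copy target for es3_sdrc_fix; the 0x8000 offset presumably keeps
	 * it clear of the suspend code copied to the start of SRAM.
	 */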
sram_base:
	.word	SRAM_BASE_P + 0x8000
sdrc_power:
	.word	SDRC_POWER_V
ttbrbit_mask:
	.word	0xFFFFC000
table_index_mask:
	.word	0xFFF00000
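	/*
	 * 1MB section first-level descriptor: assumed decoding is AP=0b11
	 * (full access), domain 0, non-cacheable, non-bufferable.
	 */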
table_entry:
	.word	0x00000C02
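	/*
	 * Mask that clears SCTLR.C (bit 2), SCTLR.Z (bit 11) and SCTLR.I
	 * (bit 12), so caches and branch prediction stay disabled until the
	 * patched table entry is restored.
	 */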
cache_pred_disable_mask:
	.word	0xFFFFE7FB
control_stat:
	.word	CONTROL_STAT
control_mem_rta:
	.word	CONTROL_MEM_RTA_CTRL
kernel_flush:
	.word	v7_flush_dcache_all
l2dis_3630:
	.word	0
	/*
	 * When exporting to userspace while the counters are in SRAM,
	 * these two words need to be at the end to facilitate retrieval!
	 */
kick_counter:
	.word	0
wait_dll_lock_counter:
	.word	0
ENDPROC(omap34xx_cpu_suspend)

ENTRY(omap34xx_cpu_suspend_sz)
	.word	. - omap34xx_cpu_suspend
