/*
 * Low-level Power Management code.
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <mach/pm.h>

#include "pm.h"
#include "sdramc.h"

/* Same as 0xfff00000 but fits in a 21-bit signed immediate */
#define PM_BASE	-0x100000

	.section .bss, "wa", @nobits
	.global	disable_idle_sleep
	.type	disable_idle_sleep, @object
disable_idle_sleep:
	.int	4
	.size	disable_idle_sleep, . - disable_idle_sleep

	/* Keep this close to the irq handlers */
	.section .irq.text, "ax", @progbits

	/*
	 * void cpu_enter_idle(void)
	 *
	 * Put the CPU into "idle" mode, in which it will consume
	 * significantly less power.
	 *
	 * If an interrupt comes along in the window between
	 * unmask_interrupts and the sleep instruction below, the
	 * interrupt code will adjust the return address so that we
	 * never execute the sleep instruction. This is required
	 * because the AP7000 doesn't unmask interrupts when entering
	 * sleep modes; later CPUs may not need this workaround.
	 */
	.global	cpu_enter_idle
	.type	cpu_enter_idle, @function
cpu_enter_idle:
	mask_interrupts
	get_thread_info r8
	ld.w	r9, r8[TI_flags]
	bld	r9, TIF_NEED_RESCHED
	brcs	.Lret_from_sleep		/* reschedule pending, don't sleep */
	sbr	r9, TIF_CPU_GOING_TO_SLEEP	/* tell the irq code we're about to sleep */
	st.w	r8[TI_flags], r9
	unmask_interrupts
	sleep	CPU_SLEEP_IDLE
	.size	cpu_enter_idle, . - cpu_enter_idle

	/*
	 * Common return path for PM functions that don't run from
	 * SRAM.
	 */
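	/*
	 * Control ends up here both when the interrupt entry code
	 * decides to skip the sleep instruction above and when the
	 * CPU wakes up again after sleeping (the code above simply
	 * falls through). In both cases r8 is expected to still point
	 * to thread_info, so that TIF_CPU_GOING_TO_SLEEP can be
	 * cleared before returning to the caller of cpu_enter_idle().
	 */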
	.global cpu_idle_skip_sleep
	.type	cpu_idle_skip_sleep, @function
cpu_idle_skip_sleep:
	mask_interrupts
	ld.w	r9, r8[TI_flags]
	cbr	r9, TIF_CPU_GOING_TO_SLEEP
	st.w	r8[TI_flags], r9
.Lret_from_sleep:
	unmask_interrupts
	retal	r12
	.size	cpu_idle_skip_sleep, . - cpu_idle_skip_sleep

#ifdef CONFIG_PM
	.section .init.text, "ax", @progbits

	.global	pm_exception
	.type	pm_exception, @function
pm_exception:
	/*
	 * Exceptions are masked when we switch to this handler, so
	 * we'll only get "unrecoverable" exceptions (offset 0).
	 */
	sub	r12, pc, . - .Lpanic_msg	/* r12 = address of the message (first argument) */
	lddpc	pc, .Lpanic_addr		/* tail-call panic() */

	.align	2
.Lpanic_addr:
	.long	panic
.Lpanic_msg:
	.asciz	"Unrecoverable exception during suspend\n"
	.size	pm_exception, . - pm_exception

	.global	pm_irq0
	.type	pm_irq0, @function
pm_irq0:
	/* Disable interrupts and return after the sleep instruction */
	mfsr	r9, SYSREG_RSR_INT0
	mtsr	SYSREG_RAR_INT0, r8	/* return to the label after "sleep" */
	sbr	r9, SYSREG_GM_OFFSET	/* ...with interrupts globally masked */
	mtsr	SYSREG_RSR_INT0, r9
	rete
	.size	pm_irq0, . - pm_irq0
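	/*
	 * Note that pm_irq0 does not return to the interrupted
	 * instruction: it returns to whatever address the sleeping
	 * function left in r8 (the local label right after "sleep" in
	 * pm_standby/pm_suspend_to_ram below), with interrupts
	 * globally masked, so the wakeup is handled entirely by the
	 * code following the sleep instruction.
	 */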

	/*
	 * void pm_standby(unsigned long sdramc_base)
	 *
	 * Enter PM_SUSPEND_STANDBY mode. At this point, all drivers
	 * are suspended and interrupts are disabled. Interrupts
	 * marked as 'wakeup' event sources may still come along and
	 * get us out of here.
	 *
	 * The SDRAM will be put into self-refresh mode (which does
	 * not require a clock from the CPU), and the CPU will be put
	 * into "frozen" mode (HSB bus stopped). The SDRAM controller
	 * will automatically bring the SDRAM into normal mode on the
	 * first access, and the power manager will automatically
	 * start the HSB and CPU clocks upon a wakeup event.
	 *
	 * This code uses the same "skip sleep" technique as above.
	 * It is very important that the return address we pass to the
	 * interrupt handler in r8 points right after the sleep
	 * instruction, since that's where we'll end up if the handler
	 * decides that we need to skip the sleep instruction.
	 */
	.global	pm_standby
	.type	pm_standby, @function
pm_standby:
	/*
	 * Interrupts are already masked at this point, and EVBA
	 * points to pm_exception above.
	 */
	ld.w	r10, r12[SDRAMC_LPR]
	sub	r8, pc, . - 1f		/* return address for irq handler */
	mov	r11, SDRAMC_LPR_LPCB_SELF_RFR
	bfins	r10, r11, 0, 2		/* LPCB <- self refresh */
	sync	0			/* flush write buffer */
	st.w	r12[SDRAMC_LPR], r10	/* put SDRAM in self-refresh mode */
	ld.w	r11, r12[SDRAMC_LPR]	/* read back to make sure the write got through */
	unmask_interrupts
	sleep	CPU_SLEEP_FROZEN
1:	mask_interrupts
	retal	r12
	.size	pm_standby, . - pm_standby

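	/*
	 * void pm_suspend_to_ram(unsigned long sdramc_base)
	 *
	 * Same as pm_standby above, but the CPU is put into "stop"
	 * mode instead of "frozen" mode; all dirty data cache lines
	 * are cleaned and the write buffer is flushed before the
	 * SDRAM enters self-refresh. The wake-up path is the same:
	 * pm_irq0 returns to the local label right after the sleep
	 * instruction.
	 */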
	.global	pm_suspend_to_ram
	.type	pm_suspend_to_ram, @function
pm_suspend_to_ram:
	/*
	 * Interrupts are already masked at this point, and EVBA
	 * points to pm_exception above.
	 */
	mov	r11, 0
	cache	r11[2], 8		/* clean all dcache lines */
	sync	0			/* flush write buffer */
	ld.w	r10, r12[SDRAMC_LPR]
	sub	r8, pc, . - 1f		/* return address for irq handler */
	mov	r11, SDRAMC_LPR_LPCB_SELF_RFR
	bfins	r10, r11, 0, 2		/* LPCB <- self refresh */
	st.w	r12[SDRAMC_LPR], r10	/* put SDRAM in self-refresh mode */
	ld.w	r11, r12[SDRAMC_LPR]	/* read back to make sure the write got through */

	unmask_interrupts
	sleep	CPU_SLEEP_STOP
1:	mask_interrupts

	retal	r12
	.size	pm_suspend_to_ram, . - pm_suspend_to_ram

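	/*
	 * End marker: everything from pm_exception up to this point is
	 * presumably copied to SRAM by the platform suspend code so
	 * that it can run while the SDRAM is in self-refresh (compare
	 * the "don't run from SRAM" note above cpu_idle_skip_sleep).
	 */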
	.global	pm_sram_end
	.type	pm_sram_end, @function
pm_sram_end:
	.size	pm_sram_end, 0

#endif /* CONFIG_PM */