#ifndef __ASM_I386_SAVE_STATE_H
#define __ASM_I386_SAVE_STATE_H

/*
 * Copyright 2001-2002 Pavel Machek <pavel@suse.cz>
 * Based on code
 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
 */
#include <asm/desc.h>
#include <asm/i387.h>

/* image of the saved processor state */
struct saved_context {
	u32 eax, ebx, ecx, edx;
	u32 esp, ebp, esi, edi;
	u16 es, fs, gs, ss;
	u32 cr0, cr2, cr3, cr4;
	u16 gdt_pad;
	u16 gdt_limit;
	u32 gdt_base;
	u16 idt_pad;
	u16 idt_limit;
	u32 idt_base;
	u16 ldt;
	u16 tss;
	u32 tr;
	u32 safety;
	u32 return_address;
	u32 eflags;
} __attribute__((packed));
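
/*
 * Layout note: the pad/limit/base triples above exist because sgdt/sidt
 * store a 6-byte operand (a 16-bit limit followed by a 32-bit base).
 * The asm below points those instructions at &saved_context.gdt_limit /
 * &saved_context.idt_limit, so limit and base must be contiguous; the
 * pad words merely keep the base fields 32-bit aligned.  A sketch of a
 * compile-time layout check (not in the original, illustration only):
 *
 *	#include <linux/stddef.h>
 *	extern char gdt_layout_is_wrong
 *		[(offsetof(struct saved_context, gdt_base) -
 *		  offsetof(struct saved_context, gdt_limit)) == 2 ? 1 : -1];
 */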

static struct saved_context saved_context;

#define loaddebug(thread, register) \
	__asm__("movl %0,%%db" #register \
		: /* no output */ \
		: "r" ((thread)->debugreg[register]))
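
/*
 * Example expansion (illustrative only): loaddebug(&current->thread, 7)
 * token-pastes the literal index into both the instruction template and
 * the array subscript, producing
 *
 *	__asm__("movl %0,%%db7" : : "r" (current->thread.debugreg[7]));
 *
 * which is also why the index must be a literal constant, never a
 * variable.
 */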

/*
 * save_processor_context
 *
 * Save the state of the processor before we go to sleep.
 *
 * return_stack is the value of the stack pointer (%esp) as the caller sees it.
 * We could not find a good way to obtain it from here (we don't want to make
 * _too_ many assumptions about the layout of the stack this far down).  Also,
 * the handy little __builtin_frame_address(level) with level > 0 is blatantly
 * buggy: it returns the value stored at the proper stack location, not the
 * location itself, as it should (as of gcc 2.91.66).
 *
 * Note that the context and timing of this function is pretty critical.
 * With a minimal amount of things going on in the caller and in here, gcc
 * does a good job of being just a dumb compiler.  Watch the assembly output
 * if anything changes, though, and make sure everything is going in the right
 * place.
 */
static inline void save_processor_context(void)
{
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	asm volatile ("sgdt %0" : "=m" (saved_context.gdt_limit));
	asm volatile ("sidt %0" : "=m" (saved_context.idt_limit));
	asm volatile ("sldt %0" : "=m" (saved_context.ldt));
	asm volatile ("str %0"  : "=m" (saved_context.tr));

	/*
	 * save the general registers.
	 * note that gcc has constructs to specify output of certain registers,
	 * but they're not used here, because it assumes that you want to
	 * modify those registers, so it tries to be smart and save them
	 * beforehand.  That's really not necessary, and kind of fishy (check
	 * the assembly output), so it's avoided.
	 */
	asm volatile ("movl %%esp, %0" : "=m" (saved_context.esp));
	asm volatile ("movl %%eax, %0" : "=m" (saved_context.eax));
	asm volatile ("movl %%ebx, %0" : "=m" (saved_context.ebx));
	asm volatile ("movl %%ecx, %0" : "=m" (saved_context.ecx));
	asm volatile ("movl %%edx, %0" : "=m" (saved_context.edx));
	asm volatile ("movl %%ebp, %0" : "=m" (saved_context.ebp));
	asm volatile ("movl %%esi, %0" : "=m" (saved_context.esi));
	asm volatile ("movl %%edi, %0" : "=m" (saved_context.edi));

	/*
	 * segment registers
	 */
	asm volatile ("movw %%es, %0" : "=r" (saved_context.es));
	asm volatile ("movw %%fs, %0" : "=r" (saved_context.fs));
	asm volatile ("movw %%gs, %0" : "=r" (saved_context.gs));
	asm volatile ("movw %%ss, %0" : "=r" (saved_context.ss));

	/*
	 * control registers
	 */
	asm volatile ("movl %%cr0, %0" : "=r" (saved_context.cr0));
	asm volatile ("movl %%cr2, %0" : "=r" (saved_context.cr2));
	asm volatile ("movl %%cr3, %0" : "=r" (saved_context.cr3));
	asm volatile ("movl %%cr4, %0" : "=r" (saved_context.cr4));

	/*
	 * eflags
	 */
	asm volatile ("pushfl ; popl %0" : "=m" (saved_context.eflags));
}
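
/*
 * A hypothetical caller pairs the two halves around the actual sleep,
 * roughly like this (sketch only -- the real entry points live in the
 * suspend code, not in this header, and enter_sleep_state() is a
 * made-up name):
 *
 *	save_processor_context();
 *	enter_sleep_state();		<- control returns here on wakeup
 *	restore_processor_context();	<- regs, tables and FPU come back
 *
 * Anything between the two calls has to be careful with the stack,
 * because restore_processor_context() reloads %esp from saved_context.
 */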

static void fix_processor_context(void)
{
	int nr = smp_processor_id();
	struct tss_struct *t = &init_tss[nr];

	/*
	 * This just modifies memory and should not be necessary -- but it
	 * is, because 386 hardware has the concept of a busy TSS or some
	 * similar stupidity.
	 */
	set_tss_desc(nr, t);
	gdt_table[__TSS(nr)].b &= 0xfffffdff;
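	/*
	 * The mask above clears bit 9 in the high word of the TSS
	 * descriptor, turning type 0xB ("busy 32-bit TSS") back into
	 * type 0x9 ("available 32-bit TSS"); otherwise the ltr in
	 * load_TR() below would fault on an already-busy TSS.
	 */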

	load_TR(nr);		/* This does ltr */
	__load_LDT(nr);		/* This does lldt */

	/*
	 * Now maybe reload the debug registers
	 */
	if (current->thread.debugreg[7]) {
		loaddebug(&current->thread, 0);
		loaddebug(&current->thread, 1);
		loaddebug(&current->thread, 2);
		loaddebug(&current->thread, 3);
		/* no 4 and 5 */
		loaddebug(&current->thread, 6);
		loaddebug(&current->thread, 7);
	}
}

static void do_fpu_end(void)
{
	/*
	 * Restore FPU registers if necessary.  Done out of line so that
	 * gcc does not move the cr0 load to some stupid place.
	 */
	kernel_fpu_end();
}
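
/*
 * Roughly (see <asm/i387.h>): kernel_fpu_begin() in
 * save_processor_context() saves any live FPU state for us, and
 * kernel_fpu_end() sets the TS bit in %cr0 again -- that cr0 access is
 * exactly what the comment above wants kept out of line.
 */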

/*
 * restore_processor_context
 *
 * Restore the processor context as it was before we went to sleep:
 * - descriptor tables
 * - control registers
 * - segment registers
 * - flags
 *
 * Note that it is critical that this function is declared inline.
 * It was separated out from restore_state to make that function
 * a little clearer, but it needs to be inlined because we won't have a
 * stack when we get here (so we can't push a return address).
 */
static inline void restore_processor_context(void)
{
	/*
	 * first restore %ds, so we can access our data properly
	 */
	asm volatile (".align 4");
	asm volatile ("movw %0, %%ds" :: "r" ((u16)__KERNEL_DS));

	/*
	 * control registers
	 */
	asm volatile ("movl %0, %%cr4" :: "r" (saved_context.cr4));
	asm volatile ("movl %0, %%cr3" :: "r" (saved_context.cr3));
	asm volatile ("movl %0, %%cr2" :: "r" (saved_context.cr2));
	asm volatile ("movl %0, %%cr0" :: "r" (saved_context.cr0));

	/*
	 * segment registers
	 */
	asm volatile ("movw %0, %%es" :: "r" (saved_context.es));
	asm volatile ("movw %0, %%fs" :: "r" (saved_context.fs));
	asm volatile ("movw %0, %%gs" :: "r" (saved_context.gs));
	asm volatile ("movw %0, %%ss" :: "r" (saved_context.ss));

	/*
	 * the other general registers
	 *
	 * note that even though gcc has constructs to specify memory
	 * input into certain registers, it will try to be too smart
	 * and save them at the beginning of the function.  This is
	 * especially bad since we don't have a stack set up when we
	 * enter, and we want to preserve the values on exit.  So, we
	 * set them manually.
	 */
	asm volatile ("movl %0, %%esp" :: "m" (saved_context.esp));
	asm volatile ("movl %0, %%ebp" :: "m" (saved_context.ebp));
	asm volatile ("movl %0, %%eax" :: "m" (saved_context.eax));
	asm volatile ("movl %0, %%ebx" :: "m" (saved_context.ebx));
	asm volatile ("movl %0, %%ecx" :: "m" (saved_context.ecx));
	asm volatile ("movl %0, %%edx" :: "m" (saved_context.edx));
	asm volatile ("movl %0, %%esi" :: "m" (saved_context.esi));
	asm volatile ("movl %0, %%edi" :: "m" (saved_context.edi));
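	/*
	 * From the %esp load above onward nothing here may be
	 * stack-relative: every operand is the static saved_context,
	 * which is also why this function has no local variables.
	 */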

	/*
	 * now restore the descriptor tables to their proper values
	 */
	asm volatile ("lgdt %0" :: "m" (saved_context.gdt_limit));
	asm volatile ("lidt %0" :: "m" (saved_context.idt_limit));
	asm volatile ("lldt %0" :: "m" (saved_context.ldt));

	fix_processor_context();
	do_fpu_end();
}
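
/*
 * Note that saved_context.eflags is captured by save_processor_context()
 * but not popped back above; if a caller wants the flags restored at
 * this point, a minimal sketch (assuming it is safe to bring back
 * whatever IF state was saved) would be:
 *
 *	asm volatile ("pushl %0 ; popfl" :: "m" (saved_context.eflags));
 */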

#endif /* __ASM_I386_SAVE_STATE_H */