/*
 * IA32 helper functions
 *
 * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
 * Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 2001-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 thread context
 * 02/19/01	D. Mosberger	dropped tssd; it's not needed
 * 09/14/01	D. Mosberger	fixed memory management for gdt/tss page
 * 09/29/01	D. Mosberger	added ia32_load_segment_descriptors()
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/sched.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/ia32.h>

extern void die_if_kernel (char *str, struct pt_regs *regs, long err);

struct exec_domain ia32_exec_domain;
struct page *ia32_shared_page[(2*IA32_PAGE_SIZE + PAGE_SIZE - 1)/PAGE_SIZE];
unsigned long *ia32_gdt;
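
/*
 * Look up the descriptor for SELECTOR in the IA-32 LDT or GDT (selected by
 * the selector's table-indicator bit) and return it via IA32_SEG_UNSCRAMBLE.
 * A null selector or an out-of-range index yields 0.
 */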
static unsigned long
load_desc (u16 selector)
{
        unsigned long *table, limit, index;

        if (!selector)
                return 0;
        if (selector & IA32_SEGSEL_TI) {
                table = (unsigned long *) IA32_LDT_OFFSET;
                limit = IA32_LDT_ENTRIES;
        } else {
                table = ia32_gdt;
                limit = IA32_PAGE_SIZE / sizeof(ia32_gdt[0]);
        }
        index = selector >> IA32_SEGSEL_INDEX_SHIFT;
        if (index >= limit)
                return 0;
        return IA32_SEG_UNSCRAMBLE(table[index]);
}
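
/*
 * Load the segment descriptors for the IA-32 selectors cached in r16 (DS,
 * ES, FS, GS) and r17 (CS, SS) into the registers used by the IA-32
 * emulation.
 */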
void
ia32_load_segment_descriptors (struct task_struct *task)
{
        struct pt_regs *regs = ia64_task_regs(task);

        /* Setup the segment descriptors */
        regs->r24 = load_desc(regs->r16 >> 16);         /* ESD */
        regs->r27 = load_desc(regs->r16 >>  0);         /* DSD */
        regs->r28 = load_desc(regs->r16 >> 32);         /* FSD */
        regs->r29 = load_desc(regs->r16 >> 48);         /* GSD */
        regs->ar_csd = load_desc(regs->r17 >>  0);      /* CSD */
        regs->ar_ssd = load_desc(regs->r17 >> 16);      /* SSD */
}
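
/*
 * Save the IA-32 EFLAG and floating-point application registers into the
 * thread structure and restore the IO_BASE and TSSD kernel registers that
 * ia32_load_state() stashed away.
 */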
void
ia32_save_state (struct task_struct *t)
{
        unsigned long eflag, fsr, fcr, fir, fdr;

        asm ("mov %0=ar.eflag;"
             "mov %1=ar.fsr;"
             "mov %2=ar.fcr;"
             "mov %3=ar.fir;"
             "mov %4=ar.fdr;"
             : "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr));
        t->thread.eflag = eflag;
        t->thread.fsr = fsr;
        t->thread.fcr = fcr;
        t->thread.fir = fir;
        t->thread.fdr = fdr;
        ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
        ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
}
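
/*
 * Reload the IA-32 application registers from the thread structure, save
 * the current IO_BASE and TSSD kernel registers for ia32_save_state(), and
 * install the per-CPU TSS/LDT selectors and LDT descriptor.
 */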
void
ia32_load_state (struct task_struct *t)
{
        unsigned long eflag, fsr, fcr, fir, fdr, tssd;
        struct pt_regs *regs = ia64_task_regs(t);
        int nr = smp_processor_id();    /* LDT and TSS depend on CPU number: */

        eflag = t->thread.eflag;
        fsr = t->thread.fsr;
        fcr = t->thread.fcr;
        fir = t->thread.fir;
        fdr = t->thread.fdr;
        tssd = load_desc(_TSS(nr));     /* TSSD */

        asm volatile ("mov ar.eflag=%0;"
                      "mov ar.fsr=%1;"
                      "mov ar.fcr=%2;"
                      "mov ar.fir=%3;"
                      "mov ar.fdr=%4;"
                      :: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr));
        current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
        current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
        ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
        ia64_set_kr(IA64_KR_TSSD, tssd);

        regs->r17 = (_TSS(nr) << 48) | (_LDT(nr) << 32) | (__u32) regs->r17;
        regs->r30 = load_desc(_LDT(nr));        /* LDTD */
}
/*
 * Setup IA32 GDT and TSS
 */
void
ia32_gdt_init (void)
{
        unsigned long *tss;
        unsigned long ldt_size;
        int nr;

        ia32_shared_page[0] = alloc_page(GFP_KERNEL);
        ia32_gdt = page_address(ia32_shared_page[0]);
        tss = ia32_gdt + IA32_PAGE_SIZE/sizeof(ia32_gdt[0]);

        if (IA32_PAGE_SIZE == PAGE_SIZE) {
                ia32_shared_page[1] = alloc_page(GFP_KERNEL);
                tss = page_address(ia32_shared_page[1]);
        }

        /* CS descriptor in IA-32 (scrambled) format */
        ia32_gdt[__USER_CS >> 3] = IA32_SEG_DESCRIPTOR(0, (IA32_PAGE_OFFSET-1) >> IA32_PAGE_SHIFT,
                                                       0xb, 1, 3, 1, 1, 1, 1);

        /* DS descriptor in IA-32 (scrambled) format */
        ia32_gdt[__USER_DS >> 3] = IA32_SEG_DESCRIPTOR(0, (IA32_PAGE_OFFSET-1) >> IA32_PAGE_SHIFT,
                                                       0x3, 1, 3, 1, 1, 1, 1);

        /* We never change the TSS and LDT descriptors, so we can share them across all CPUs. */
        ldt_size = PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
        for (nr = 0; nr < NR_CPUS; ++nr) {
                ia32_gdt[_TSS(nr) >> IA32_SEGSEL_INDEX_SHIFT]
                        = IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235,
                                              0xb, 0, 3, 1, 1, 1, 0);
                ia32_gdt[_LDT(nr) >> IA32_SEGSEL_INDEX_SHIFT]
                        = IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1,
                                              0x2, 0, 3, 1, 1, 1, 0);
        }
}
/*
 * Handle bad IA32 interrupt via syscall
 */
void
ia32_bad_interrupt (unsigned long int_num, struct pt_regs *regs)
{
        siginfo_t siginfo;

        die_if_kernel("Bad IA-32 interrupt", regs, int_num);

        siginfo.si_signo = SIGTRAP;
        siginfo.si_errno = int_num;     /* XXX is it OK to abuse si_errno like this? */
        siginfo.si_flags = 0;
        siginfo.si_isr = 0;
        siginfo.si_addr = 0;
        siginfo.si_imm = 0;
        siginfo.si_code = TRAP_BRKPT;
        force_sig_info(SIGTRAP, &siginfo, current);
}
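
/*
 * Register the "Linux/x86" execution domain for the PER_LINUX32
 * personality; it reuses the default signal maps.
 */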
static int __init
ia32_init (void)
{
        ia32_exec_domain.name = "Linux/x86";
        ia32_exec_domain.handler = NULL;
        ia32_exec_domain.pers_low = PER_LINUX32;
        ia32_exec_domain.pers_high = PER_LINUX32;
        ia32_exec_domain.signal_map = default_exec_domain.signal_map;
        ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
        register_exec_domain(&ia32_exec_domain);
        return 0;
}

__initcall(ia32_init);