/*
 * include/asm-x86_64/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_X86_64_PROCESSOR_H
#define __ASM_X86_64_PROCESSOR_H

#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <linux/config.h>
#include <linux/threads.h>
#include <asm/msr.h>
#include <asm/current.h>
#include <asm/system.h>
#include <asm/cpufeature.h>

#define TF_MASK   0x00000100
#define IF_MASK   0x00000200
#define IOPL_MASK 0x00003000
#define NT_MASK   0x00004000
#define VM_MASK   0x00020000
#define AC_MASK   0x00040000
#define VIF_MASK  0x00080000 /* virtual interrupt flag */
#define VIP_MASK  0x00100000 /* virtual interrupt pending */
#define ID_MASK   0x00200000

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 */

struct cpuinfo_x86 {
        __u8    x86;            /* CPU family */
        __u8    x86_vendor;     /* CPU vendor */
        __u8    x86_model;
        __u8    x86_mask;
        int     cpuid_level;    /* Maximum supported CPUID level, -1=no CPUID */
        __u32   x86_capability[NCAPINTS];
        char    x86_vendor_id[16];
        char    x86_model_id[64];
        int     x86_cache_size; /* in KB - valid for CPUs which support this call */
        int     x86_clflush_size;
        int     x86_tlbsize;    /* number of 4K pages in DTLB/ITLB combined */
        __u8    x86_virt_bits, x86_phys_bits;
        __u32   x86_power;
        unsigned long loops_per_jiffy;
} ____cacheline_aligned;

#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
#define X86_VENDOR_NEXGEN 4
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_RISE 6
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_UNKNOWN 0xff

extern struct cpuinfo_x86 boot_cpu_data;

#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif

extern char ignore_irq13;

extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void dodgy_tsc(void);

/*
 * EFLAGS bits
 */
#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */

/*
 * Generic CPUID function
 * FIXME: This really belongs to msr.h
 */
extern inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
{
        __asm__("cpuid"
                : "=a" (*eax),
                  "=b" (*ebx),
                  "=c" (*ecx),
                  "=d" (*edx)
                : "0" (op));
}
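
/*
 * Usage sketch (illustrative only, not part of this header's interface):
 * reading the 12-byte vendor string with CPUID leaf 0.  The EBX:EDX:ECX
 * ordering of the string follows the CPUID specification.
 *
 *      unsigned int eax, ebx, ecx, edx;
 *      char vendor[13];
 *
 *      cpuid(0, &eax, &ebx, &ecx, &edx);
 *      memcpy(vendor + 0, &ebx, 4);
 *      memcpy(vendor + 4, &edx, 4);
 *      memcpy(vendor + 8, &ecx, 4);
 *      vendor[12] = '\0';
 */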

/*
 * CPUID functions returning a single datum
 */
extern inline unsigned int cpuid_eax(unsigned int op)
{
        unsigned int eax;

        __asm__("cpuid"
                : "=a" (eax)
                : "0" (op)
                : "bx", "cx", "dx");
        return eax;
}
extern inline unsigned int cpuid_ebx(unsigned int op)
{
        unsigned int eax, ebx;

        __asm__("cpuid"
                : "=a" (eax), "=b" (ebx)
                : "0" (op)
                : "cx", "dx" );
        return ebx;
}
extern inline unsigned int cpuid_ecx(unsigned int op)
{
        unsigned int eax, ecx;

        __asm__("cpuid"
                : "=a" (eax), "=c" (ecx)
                : "0" (op)
                : "bx", "dx" );
        return ecx;
}
extern inline unsigned int cpuid_edx(unsigned int op)
{
        unsigned int eax, edx;

        __asm__("cpuid"
                : "=a" (eax), "=d" (edx)
                : "0" (op)
                : "bx", "cx");
        return edx;
}

/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
#define X86_CR4_DE 0x0008 /* enable debugging extensions */
#define X86_CR4_PSE 0x0010 /* enable page size extensions */
#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
#define X86_CR4_MCE 0x0040 /* Machine check enable */
#define X86_CR4_PGE 0x0080 /* enable global pages */
#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */

/*
 * Save the cr4 feature set we're using (ie
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPUs that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4 (unsigned long mask)
{
        mmu_cr4_features |= mask;
        __asm__("movq %%cr4,%%rax\n\t"
                "orq %0,%%rax\n\t"
                "movq %%rax,%%cr4\n"
                : : "irg" (mask)
                :"ax");
}

static inline void clear_in_cr4 (unsigned long mask)
{
        mmu_cr4_features &= ~mask;
        __asm__("movq %%cr4,%%rax\n\t"
                "andq %0,%%rax\n\t"
                "movq %%rax,%%cr4\n"
                : : "irg" (~mask)
                :"ax");
}
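
/*
 * Usage sketch (the call site is an assumption, shown for illustration
 * only): boot code that wants global pages enabled can do
 *
 *      set_in_cr4(X86_CR4_PGE);
 *
 * mmu_cr4_features then carries the bit forward so that CPUs brought up
 * later load the same CR4 settings.
 */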

/*
 * Cyrix CPU configuration register indexes
 */
#define CX86_CCR0 0xc0
#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
#define CX86_CCR6 0xea
#define CX86_CCR7 0xeb
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc

/*
 * Cyrix CPU indexed register access macros
 */

#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })

#define setCx86(reg, data) do { \
        outb((reg), 0x22); \
        outb((data), 0x23); \
} while (0)
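
/*
 * Usage sketch: a read-modify-write of a Cyrix configuration register goes
 * through the index/data port pair 0x22/0x23 that the macros above wrap,
 * e.g.
 *
 *      setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x08);
 *
 * (the 0x08 bit value here is purely illustrative).
 */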

/*
 * Bus types
 */
#define EISA_bus 0
#define MCA_bus 0
#define MCA_bus__is_a_macro


/*
 * User space process size: 512GB - 1GB (default).
 */
#define TASK_SIZE (0x0000007fc0000000)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */

#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
#define TASK_UNMAPPED_32 (IA32_PAGE_OFFSET / 3)
#define TASK_UNMAPPED_64 (TASK_SIZE/3)
#define TASK_UNMAPPED_BASE \
        ((current->thread.flags & THREAD_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)

/*
 * Size of io_bitmap in longwords: 32 is ports 0-0x3ff.
 */
#define IO_BITMAP_SIZE 32
#define IO_BITMAP_BYTES (IO_BITMAP_SIZE * sizeof(u32))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000
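
/*
 * Arithmetic behind the numbers above: 32 longwords * 32 bits = 1024 bits,
 * one bit per I/O port, which covers ports 0-0x3ff; IO_BITMAP_BYTES is
 * therefore 128.
 */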

struct i387_fxsave_struct {
        u16     cwd;
        u16     swd;
        u16     twd;
        u16     fop;
        u64     rip;
        u64     rdp;
        u32     mxcsr;
        u32     mxcsr_mask;
        u32     st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
        u32     xmm_space[64];  /* 16*16 bytes for each XMM-reg = 256 bytes */
        u32     padding[24];
} __attribute__ ((aligned (16)));

union i387_union {
        struct i387_fxsave_struct fxsave;
};

typedef struct {
        unsigned long seg;
} mm_segment_t;

struct tss_struct {
        u32 reserved1;
        u64 rsp0;
        u64 rsp1;
        u64 rsp2;
        u64 reserved2;
        u64 ist[7];
        u32 reserved3;
        u32 reserved4;
        u16 reserved5;
        u16 io_map_base;
        u32 io_bitmap[IO_BITMAP_SIZE];
} __attribute__((packed)) ____cacheline_aligned;

extern struct tss_struct init_tss[NR_CPUS];

struct thread_struct {
        unsigned long rsp0;
        unsigned long rip;
        unsigned long rsp;
        unsigned long userrsp;  /* Copy from PDA */
        unsigned long fs;
        unsigned long gs;
        unsigned short es, ds, fsindex, gsindex;
        enum {
                THREAD_IA32 = 0x0001,
        } flags;
        /* Hardware debugging registers */
        unsigned long debugreg[8];      /* %%db0-7 debug registers */
        /* fault info */
        unsigned long cr2, trap_no, error_code;
        /* floating point info */
        union i387_union i387;
        u32 *io_bitmap_ptr;
};

#define INIT_THREAD { \
}

#define INIT_MMAP \
{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }

#define DOUBLEFAULT_STACK 1
#define NMI_STACK 2
#define N_EXCEPTION_STACKS 2 /* hw limit: 7 */
#define EXCEPTION_STKSZ PAGE_SIZE
#define EXCEPTION_STK_ORDER 0

extern void load_gs_index(unsigned);

#define start_thread(regs,new_rip,new_rsp) do { \
        asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
        load_gs_index(0); \
        (regs)->rip = (new_rip); \
        (regs)->rsp = (new_rsp); \
        write_pda(oldrsp, (new_rsp)); \
        (regs)->cs = __USER_CS; \
        (regs)->ss = __USER_DS; \
        (regs)->eflags = 0x200; \
        set_fs(USER_DS); \
} while(0)
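
/*
 * Usage sketch (names are illustrative, in the style of the ELF loader):
 * a binary format handler calls this once the new image is mapped,
 * roughly
 *
 *      start_thread(regs, elf_entry, bprm->p);
 *
 * which points the user-mode registers at the new entry point and stack
 * with interrupts enabled (eflags = 0x200, i.e. IF set).
 */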

struct task_struct;
struct mm_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
/*
 * create a kernel thread without removing it from tasklists
 */
extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
extern int arch_kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

/* Copy and release all segment info associated with a VM */
extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
extern void release_segments(struct mm_struct * mm);

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */

extern inline unsigned long thread_saved_pc(struct thread_struct *t)
{
        return *(unsigned long *)(t->rsp - 8);
}

extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) \
        (((struct pt_regs *)(tsk->thread.rsp0 - sizeof(struct pt_regs)))->rip)
#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */

/* Note: most of the infrastructure to separate stack and task_struct
   is already there. When you run out of stack try this first. */
#define alloc_task_struct() \
        ((struct task_struct *) __get_free_pages(GFP_KERNEL,THREAD_ORDER))
#define free_task_struct(p) free_pages((unsigned long) (p), 1)
#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)

#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
extern inline void rep_nop(void)
{
        __asm__ __volatile__("rep;nop":::"memory");
}

/* Avoid speculative execution by the CPU */
extern inline void sync_core(void)
{
        int tmp;
        asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
}
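
/*
 * Note: CPUID (leaf 1 is used above) is a serializing instruction, so all
 * previously issued instructions complete before execution continues; that
 * is what makes sync_core() effective against speculative execution.
 */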

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_MK8
#define ARCH_HAS_PREFETCHW
#define prefetchw(x) __builtin_prefetch((x),1)
#define spin_lock_prefetch(x) prefetchw(x)
#else
#define spin_lock_prefetch(x) prefetch(x)
#endif

#define prefetch(x) __builtin_prefetch((x),0)

#define cpu_relax() rep_nop()


static __inline__ void __monitor(const void *eax, unsigned long ecx,
                unsigned long edx)
{
        /* "monitor %eax,%ecx,%edx;" */
        asm volatile(
                ".byte 0x0f,0x01,0xc8;"
                : :"a" (eax), "c" (ecx), "d"(edx));
}

static __inline__ void __mwait(unsigned long eax, unsigned long ecx)
{
        /* "mwait %eax,%ecx;" */
        asm volatile(
                ".byte 0x0f,0x01,0xc9;"
                : :"a" (eax), "c" (ecx));
}
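
/*
 * Usage sketch of the MONITOR/MWAIT pair in an idle loop (illustrative
 * only; current_thread_info() and need_resched() are assumed from the
 * scheduler side):
 *
 *      __monitor(&current_thread_info()->flags, 0, 0);
 *      if (!need_resched())
 *              __mwait(0, 0);
 *
 * MONITOR arms a watch on the given address range, MWAIT then idles until
 * that range is written or an interrupt arrives.
 */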

#define ARCH_HAS_SMP_BALANCE 1

#endif /* __ASM_X86_64_PROCESSOR_H */