#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/irqflags.h>

/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}
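/*
 * The asm above loads the address of the local label "1:" (the instruction
 * following the mov) into pc.  Illustrative use only, e.g.
 * printk("%p\n", current_text_addr()) to log roughly where we are executing.
 */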

#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		16
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8		x86;		/* CPU family */
	__u8		x86_vendor;	/* CPU vendor */
	__u8		x86_model;
	__u8		x86_mask;
#ifdef CONFIG_X86_32
	char		wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char		hlt_works_ok;
	char		hard_math;
	char		rfu;
	char		fdiv_bug;
	char		f00f_bug;
	char		coma_bug;
	char		pad0;
#else
	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
	int		x86_tlbsize;
#endif
	__u8		x86_virt_bits;
	__u8		x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8		x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32		extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int		cpuid_level;
	__u32		x86_capability[NCAPINTS];
	char		x86_vendor_id[16];
	char		x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	int		x86_cache_size;
	int		x86_cache_alignment;	/* In bytes */
	int		x86_power;
	unsigned long	loops_per_jiffy;
	/* cpuid returned max cores value: */
	u16		x86_max_cores;
	u16		apicid;
	u16		initial_apicid;
	u16		x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16		booted_cores;
	/* Physical processor id: */
	u16		phys_proc_id;
	/* Core id: */
	u16		cpu_core_id;
	/* Compute unit id */
	u8		compute_unit_id;
	/* Index into per_cpu list: */
	u16		cpu_index;
	u32		microcode;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS];
extern __u32			cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif
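/*
 * Illustrative use (not from this file), assuming a valid cpu number
 * obtained elsewhere:
 *
 *	struct cpuinfo_x86 *c = &cpu_data(cpu);
 *	pr_info("family %d model %d vendor %s\n",
 *		c->x86, c->x86_model, c->x86_vendor_id);
 */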

extern const struct seq_operations cpuinfo_op;

static inline int hlt_works(int cpu)
{
#ifdef CONFIG_X86_32
	return cpu_data(cpu).hlt_works_ok;
#else
	return 1;
#endif
}

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

extern struct pt_regs *idle_regs(struct pt_regs *);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}
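/*
 * Illustrative sketch only: fetching the vendor string with leaf 0, which
 * returns the 12-byte vendor id in EBX, EDX, ECX (in that order).  Note that
 * both eax and ecx are inputs to the asm above, so initialize them:
 *
 *	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
 *	char vendor[13];
 *
 *	native_cpuid(&eax, &ebx, &ecx, &edx);
 *	memcpy(vendor, &ebx, 4);
 *	memcpy(vendor + 4, &edx, 4);
 *	memcpy(vendor + 8, &ecx, 4);
 *	vendor[12] = '\0';
 */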

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;
	/* ss1 caches MSR_IA32_SYSENTER_CS: */
	unsigned short		ss1, __ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000
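/*
 * For reference: 65536 bits is 8192 bytes, i.e. 2048 longs on 32-bit and
 * 1024 longs on 64-bit, enough for one bit per port of the 64K I/O space.
 */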

struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * .. and then another 0x100 bytes for the emergency kernel stack:
	 */
	unsigned long		stack[64];

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

#define MXCSR_DEFAULT		0x1f80
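/*
 * 0x1f80 sets bits 7-12, the six SIMD exception mask bits, and leaves the
 * exception flags, DAZ/FTZ and the rounding control at 0 (round to nearest).
 */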

struct i387_fsave_struct {
	u32			cwd;	/* FPU Control Word		*/
	u32			swd;	/* FPU Status Word		*/
	u32			twd;	/* FPU Tag Word			*/
	u32			fip;	/* FPU IP Offset		*/
	u32			fcs;	/* FPU IP Selector		*/
	u32			foo;	/* FPU Operand Pointer Offset	*/
	u32			fos;	/* FPU Operand Pointer Selector	*/

	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];

	/* Software status information [not touched by FSAVE]: */
	u32			status;
};

struct i387_fxsave_struct {
	u16			cwd; /* Control Word			*/
	u16			swd; /* Status Word			*/
	u16			twd; /* Tag Word			*/
	u16			fop; /* Last Instruction Opcode		*/
	union {
		struct {
			u64	rip; /* Instruction Pointer		*/
			u64	rdp; /* Data Pointer			*/
		};
		struct {
			u32	fip; /* FPU IP Offset			*/
			u32	fcs; /* FPU IP Selector			*/
			u32	foo; /* FPU Operand Offset		*/
			u32	fos; /* FPU Operand Selector		*/
		};
	};
	u32			mxcsr;		/* MXCSR Register State */
	u32			mxcsr_mask;	/* MXCSR Mask		*/

	/* 8*16 bytes for each FP-reg = 128 bytes: */
	u32			st_space[32];

	/* 16*16 bytes for each XMM-reg = 256 bytes: */
	u32			xmm_space[64];

	u32			padding[12];

	union {
		u32		padding1[12];
		u32		sw_reserved[12];
	};

} __attribute__((aligned(16)));

struct i387_soft_struct {
	u32			cwd;
	u32			swd;
	u32			twd;
	u32			fip;
	u32			fcs;
	u32			foo;
	u32			fos;
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];
	u8			ftop;
	u8			changed;
	u8			lookahead;
	u8			no_update;
	u8			rm;
	u8			alimit;
	struct math_emu_info	*info;
	u32			entry_eip;
};

struct ymmh_struct {
	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
	u32 ymmh_space[64];
};

struct xsave_hdr_struct {
	u64 xstate_bv;
	u64 reserved1[2];
	u64 reserved2[5];
} __attribute__((packed));

struct xsave_struct {
	struct i387_fxsave_struct i387;
	struct xsave_hdr_struct xsave_hdr;
	struct ymmh_struct ymmh;
	/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));

union thread_xstate {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
	struct xsave_struct		xsave;
};

struct fpu {
	unsigned int last_cpu;
	unsigned int has_fpu;
	union thread_xstate *state;
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40. Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern unsigned long kernel_eflags;
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure the stack canary segment base is cache-line aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
#endif	/* X86_64 */

extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;

struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned long		usersp;	/* Copy from PDA */
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
#ifdef CONFIG_X86_32
	unsigned long		ip;
#endif
#ifdef CONFIG_X86_64
	unsigned long		fs;
#endif
	unsigned long		gs;
	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long		ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
	/* floating point and extended processor state */
	struct fpu		fpu;
#ifdef CONFIG_X86_32
	/* Virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags;
	unsigned long		v86mask;
	unsigned long		saved_sp0;
	unsigned int		saved_fs;
	unsigned int		saved_gs;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;
};

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}
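/*
 * IOPL lives in bits 12-13 of EFLAGS (X86_EFLAGS_IOPL == 0x3000), so the
 * sequence above clears those two bits and ORs in the caller's mask.
 * On 64-bit the body compiles away; only the X86_32 path does anything here.
 */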

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask		native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Save the cr4 feature set we're using (i.e. the Pentium 4MB enable and
 * the PPro Global page enable), so that any CPUs that boot up after us
 * can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
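/*
 * Typical use (illustrative): set_in_cr4(X86_CR4_PGE) to enable global
 * pages, or clear_in_cr4(X86_CR4_TSD) to allow RDTSC from user space.
 * Recording the bit in mmu_cr4_features first lets CPUs that boot later
 * pick up the same setting.
 */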

typedef struct {
	unsigned long		seg;
} mm_segment_t;


/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function.
 * Clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
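/*
 * Illustrative: leaves indexed by a sub-leaf in ECX, e.g. the deterministic
 * cache parameters leaf 4, are queried as
 *
 *	cpuid_count(4, index, &eax, &ebx, &ecx, &edx);
 *
 * where index selects which cache level to describe.
 */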

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}
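/*
 * Example, for illustration only: CPUID leaf 1 reports basic feature flags
 * in EDX, so cpuid_edx(1) & (1 << 25) tests the SSE bit.  Kernel code
 * normally uses the cpu_has()/boot_cpu_has() helpers instead of raw CPUID.
 */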

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}
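/*
 * Illustrative busy-wait pattern:
 *
 *	while (!spin_condition)
 *		cpu_relax();
 *
 * PAUSE hints to the CPU that this is a spin loop, which saves power and
 * helps SMT siblings.
 */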

/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
	int tmp;

#if defined(CONFIG_M386) || defined(CONFIG_M486)
	if (boot_cpu_data.x86 < 5)
		/* There is no speculative execution.
		 * jmp is a barrier to prefetching. */
		asm volatile("jmp 1f\n1:\n" ::: "memory");
	else
#endif
		/* cpuid is a barrier to speculative execution.
		 * Prefetched instructions are automatically
		 * invalidated when modified. */
		asm volatile("cpuid" : "=a" (tmp) : "0" (1)
			     : "ebx", "ecx", "edx", "memory");
}

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	trace_hardirqs_on();
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
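/*
 * Rough usage sketch (see the idle code for the real thing): arm the monitor
 * on a cache line, re-check the wakeup condition, then mwait:
 *
 *	__monitor(&flag, 0, 0);
 *	if (!flag)
 *		__mwait(hint, 0);
 *
 * The raw opcode bytes are used so this assembles even with binutils that
 * predate MONITOR/MWAIT.
 */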

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_amd_e400_c1e_mask(void);

extern unsigned long		boot_option_idle_override;
extern bool			amd_e400_c1e_detected;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL, IDLE_FORCE_MWAIT};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

/*
 * from system description table in BIOS. Mostly for MCA use, but
 * others may find it useful:
 */
extern unsigned int		machine_id;
extern unsigned int		machine_submodel_id;
extern unsigned int		BIOS_revision;

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 (%1)"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {						\
	.sp0		= sizeof(init_stack) + (long)&init_stack, \
	.vm86_info	= NULL,					\
	.sysenter_cs	= __KERNEL_CS,				\
	.io_bitmap_ptr	= NULL,					\
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {						\
	.x86_tss = {						\
		.sp0		= sizeof(init_stack) + (long)&init_stack, \
		.ss0		= __KERNEL_DS,			\
		.ss1		= __KERNEL_CS,			\
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,	\
	 },							\
	.io_bitmap	= { [0 ... IO_BITMAP_LONGS] = ~0 },	\
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)						\
({								\
	unsigned long *__ptr = (unsigned long *)(info);		\
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);		\
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same privilege ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)						\
({									\
	struct pt_regs *__regs__;					\
	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
	__regs__ - 1;							\
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size: 47 bits minus one guard page.
 */
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);

/*
 * User space RSP while inside the SYSCALL fast path
 */
DECLARE_PER_CPU(unsigned long, old_rsp);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

extern int amd_get_nb_id(int cpu);

struct aperfmperf {
	u64 aperf, mperf;
};

static inline void get_aperfmperf(struct aperfmperf *am)
{
	WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));

	rdmsrl(MSR_IA32_APERF, am->aperf);
	rdmsrl(MSR_IA32_MPERF, am->mperf);
}

#define APERFMPERF_SHIFT 10

static inline
unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
				    struct aperfmperf *new)
{
	u64 aperf = new->aperf - old->aperf;
	u64 mperf = new->mperf - old->mperf;
	unsigned long ratio = aperf;

	mperf >>= APERFMPERF_SHIFT;
	if (mperf)
		ratio = div64_u64(aperf, mperf);

	return ratio;
}
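/*
 * The result is the APERF/MPERF ratio scaled by 2^APERFMPERF_SHIFT (1024).
 * For illustration, aperf == 2 * mperf over the sample window yields a
 * ratio of about 2048, i.e. the CPU ran at roughly twice the reference
 * frequency.
 */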

/*
 * AMD errata checking
 */
#ifdef CONFIG_CPU_SUP_AMD
extern const int amd_erratum_383[];
extern const int amd_erratum_400[];
extern bool cpu_has_amd_erratum(const int *);

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
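/*
 * For illustration, AMD_MODEL_RANGE(0x10, 0x0, 0x0, 0xff, 0xf) encodes
 * "family 0x10, from model 0 stepping 0 up to model 0xff stepping 0xf":
 * (0x10 << 24) | (0x0 << 16) | (0x0 << 12) | (0xff << 4) | 0xf == 0x10000fff.
 */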

#else
#define cpu_has_amd_erratum(x)	(false)
#endif /* CONFIG_CPU_SUP_AMD */

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
bool set_pm_idle_to_default(void);

void stop_this_cpu(void *dummy);

#endif /* _ASM_X86_PROCESSOR_H */