#ifndef __ASM_SMP_H
#define __ASM_SMP_H

/*
 * We need the APIC definitions automatically as part of 'smp.h'
 */
#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <linux/threads.h>
#include <linux/ptrace.h>
#endif

#ifdef CONFIG_X86_LOCAL_APIC
#ifndef __ASSEMBLY__
#include <asm/fixmap.h>
#include <asm/bitops.h>
#include <asm/mpspec.h>
#ifdef CONFIG_X86_IO_APIC
#include <asm/io_apic.h>
#endif
#include <asm/apic.h>
#endif
#endif

#ifdef CONFIG_SMP
#ifndef __ASSEMBLY__

/*
 * Private routines/data
 */

extern void smp_alloc_memory(void);
extern unsigned long phys_cpu_present_map;		/* bitmap of CPUs present in the system */
extern unsigned long cpu_online_map;			/* bitmap of CPUs that have been brought online */
extern volatile unsigned long smp_invalidate_needed;
extern int pic_mode;
extern int smp_num_siblings;				/* sibling (hyperthread) CPUs per physical package */
extern int cpu_sibling_map[];				/* for each CPU, the CPU number of its sibling */

extern void smp_flush_tlb(void);
extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
extern void fastcall smp_send_reschedule(int cpu);
extern void smp_invalidate_rcv(void);		/* Process an NMI */
extern void (*mtrr_hook) (void);
extern void zap_low_mappings (void);

/*
 * On x86 all CPUs are mapped 1:1 to the APIC space.
 * This simplifies scheduling and IPI sending and
 * compresses data structures.
 */
static inline int cpu_logical_map(int cpu)
{
	return cpu;
}
static inline int cpu_number_map(int cpu)
{
	return cpu;
}
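
/*
 * Since both mappings are the identity on x86, cpu_number_map() and
 * cpu_logical_map() are exact inverses of each other.  Illustrative
 * sketch of the common iteration idiom (assumes the smp_num_cpus
 * counter from <linux/smp.h>); on x86 the translation is a no-op:
 *
 *	int i;
 *	for (i = 0; i < smp_num_cpus; i++) {
 *		int cpu = cpu_logical_map(i);
 *		...
 *	}
 */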

/*
 * Some lowlevel functions might want to know about
 * the real APIC ID <-> CPU # mapping.
 */
#define MAX_APICID 256
extern volatile int cpu_to_physical_apicid[NR_CPUS];
extern volatile int physical_apicid_to_cpu[MAX_APICID];
extern volatile int cpu_to_logical_apicid[NR_CPUS];
extern volatile int logical_apicid_to_cpu[MAX_APICID];
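
/*
 * Illustrative sketch of how these tables are meant to be read (the
 * logical variants work the same way for logical-destination IDs):
 * cpu_to_physical_apicid[] maps a kernel CPU number to the APIC ID the
 * hardware uses for it, and physical_apicid_to_cpu[] is the reverse
 * lookup:
 *
 *	int apicid = cpu_to_physical_apicid[smp_processor_id()];
 *	int cpu    = physical_apicid_to_cpu[apicid];
 */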

/*
 * General functions that each host system must provide.
 */

extern void smp_boot_cpus(void);
extern void smp_store_cpu_info(int id);		/* Store per CPU info (like the initial udelay numbers) */

/*
 * This function is needed by all SMP systems. It must _always_ be valid
 * from the initial startup. We map APIC_BASE very early in page_setup(),
 * so this is correct in the x86 case.
 */

#define smp_processor_id() (current->processor)
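
/*
 * A minimal sketch of typical use: the result is the kernel's logical
 * CPU number, suitable for indexing per-CPU arrays such as cpu_data[]
 * from <asm/processor.h> (used here purely for illustration):
 *
 *	struct cpuinfo_x86 *c = &cpu_data[smp_processor_id()];
 */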

static __inline int hard_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
}

static __inline int logical_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
}
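
/*
 * Note that, unlike smp_processor_id() above, these two helpers read
 * the ID straight from the local APIC registers through the fixmapped
 * APIC_BASE window, so they do not depend on the per-task bookkeeping
 * in current->processor having been set up yet.
 */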

#endif /* !__ASSEMBLY__ */

#define NO_PROC_ID		0xFF		/* No processor magic marker */

/*
 *	This magic constant controls our willingness to transfer
 *	a process across CPUs. Such a transfer incurs misses on the L1
 *	cache and, on a P6 or P5 with multiple L2 caches, on the L2 as
 *	well. My gut feeling is that this will vary in value from board
 *	to board. For a board with a separate L2 cache per CPU it
 *	probably also depends on the RSS, and for a board with a shared
 *	L2 cache it ought to decay quickly as other processes are run.
 */

#define PROC_CHANGE_PENALTY	15		/* Schedule penalty */
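
/*
 * Rough sketch of how the scheduler is expected to apply this penalty:
 * when ranking runnable tasks, one that last ran on the selecting CPU
 * gets PROC_CHANGE_PENALTY added to its goodness, biasing the choice
 * toward cache-warm tasks (names follow the 2.4 goodness() code and
 * are shown for illustration only):
 *
 *	if (p->processor == this_cpu)
 *		weight += PROC_CHANGE_PENALTY;
 */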

#endif /* CONFIG_SMP */
#endif /* __ASM_SMP_H */