/* smp.h: Sparc64 specific SMP stuff.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC64_SMP_H
#define _SPARC64_SMP_H

#include <linux/config.h>
#include <linux/threads.h>
#include <linux/cache.h>
#include <asm/asi.h>
#include <asm/starfire.h>
#include <asm/spitfire.h>

#ifndef __ASSEMBLY__
/* PROM provided per-processor information we need
 * to start them all up.
 */

struct prom_cpuinfo {
	int prom_node;
	int mid;
};

extern int linux_num_cpus;	/* number of CPUs probed */
extern struct prom_cpuinfo linux_cpus[64];
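
/* A minimal sketch (not part of this header) of how boot code might
 * walk the probed PROM data, assuming the prom probe has already
 * filled in linux_num_cpus and linux_cpus[]:
 *
 *	int i;
 *
 *	for (i = 0; i < linux_num_cpus; i++)
 *		prom_printf("cpu%d: node %08x mid %d\n",
 *			    i, linux_cpus[i].prom_node, linux_cpus[i].mid);
 */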

#endif /* !(__ASSEMBLY__) */

#ifdef CONFIG_SMP

#ifndef __ASSEMBLY__

/* Per processor Sparc parameters we need. */

/* Keep this a multiple of 64-bytes for cache reasons. */
typedef struct {
	/* Dcache line 1 */
	unsigned int	__pad0;		/* bh_count moved to irq_stat for consistency. KAO */
	unsigned int	multiplier;
	unsigned int	counter;
	unsigned int	idle_volume;
	unsigned long	clock_tick;	/* %tick increments per second */
	unsigned long	udelay_val;

	/* Dcache line 2 */
	unsigned int	pgcache_size;
	unsigned int	pgdcache_size;
	unsigned long	*pte_cache[2];
	unsigned long	*pgd_cache;

	/* Dcache lines 3 and 4 */
	unsigned int	irq_worklists[16];
} ____cacheline_aligned cpuinfo_sparc;
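
/* Size accounting for the layout above, assuming 32-byte L1 D-cache
 * lines (Spitfire) and the usual sparc64 sizes (int = 4, long and
 * pointer = 8): line 1 is 4*4 + 2*8 = 32 bytes, line 2 is
 * 2*4 + 2*8 + 8 = 32 bytes, and lines 3 and 4 are 16*4 = 64 bytes,
 * for 128 bytes total, a multiple of 64 as the comment demands.
 */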

extern cpuinfo_sparc cpu_data[NR_CPUS];
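
/* A minimal sketch of typical per-cpu access, using only names from
 * this header; local_tick_rate() itself is hypothetical:
 *
 *	static __inline__ unsigned long local_tick_rate(void)
 *	{
 *		return cpu_data[smp_processor_id()].clock_tick;
 *	}
 */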

/*
 *	Private routines/data
 */

extern unsigned long cpu_present_map;
#define cpu_online_map cpu_present_map

/*
 *	General functions that each host system must provide.
 */

extern void smp_callin(void);
extern void smp_boot_cpus(void);
extern void smp_store_cpu_info(int id);

extern __volatile__ int __cpu_number_map[NR_CPUS];
extern __volatile__ int __cpu_logical_map[NR_CPUS];

extern __inline__ int cpu_logical_map(int cpu)
{
	return __cpu_logical_map[cpu];
}
extern __inline__ int cpu_number_map(int cpu)
{
	return __cpu_number_map[cpu];
}
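
/* As set up by the sparc64 boot code, the two arrays behind these
 * helpers are intended to be inverses of one another: cpu_logical_map()
 * takes a logical cpu number to its physical module ID (mid), and
 * cpu_number_map() takes a mid back to the logical number, so for an
 * online cpu i, cpu_number_map(cpu_logical_map(i)) == i.
 */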

extern __inline__ int hard_smp_processor_id(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long cfg, ver;

		/* Read %ver to tell JBUS parts from Safari parts. */
		__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32) == 0x003e0016) {
			/* UltraSPARC-IIIi (Jalapeno): 5-bit module ID
			 * in JBUS_CONFIG<21:17>.
			 */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (cfg)
					     : "i" (ASI_JBUS_CONFIG));
			return ((cfg >> 17) & 0x1f);
		} else {
			/* Other cheetah parts: 10-bit module ID in
			 * SAFARI_CONFIG<26:17>.
			 */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (cfg)
					     : "i" (ASI_SAFARI_CONFIG));
			return ((cfg >> 17) & 0x3ff);
		}
	} else if (this_is_starfire != 0) {
		/* Starfire remaps processor IDs; ask the platform code. */
		return starfire_hard_smp_processor_id();
	} else {
		/* Spitfire: 5-bit MID in UPA_CONFIG<21:17>. */
		unsigned long upaconfig;

		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (upaconfig)
				     : "i" (ASI_UPA_CONFIG));
		return ((upaconfig >> 17) & 0x1f);
	}
}

#define smp_processor_id() (current->processor)
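
/* Sanity invariant (assuming the maps above are set up as described):
 * on the running cpu, cpu_number_map(hard_smp_processor_id()) should
 * equal smp_processor_id().
 */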

/* This needn't do anything as we do not sleep the cpu
 * inside of the idler task, so an interrupt is not needed
 * to get a clean fast response.
 *
 * Addendum: We do want it to do something for the signal
 *           delivery case.  We detect that case by checking
 *           whether the target is an idler: idle_volume is
 *           driven up by the idle loop, so a zero value means
 *           the cpu is off running a real task and needs a
 *           poke to notice the pending work.
 */
extern __inline__ void smp_send_reschedule(int cpu)
{
	extern void smp_receive_signal(int);

	if (cpu_data[cpu].idle_volume == 0)
		smp_receive_signal(cpu);
}

/* This is a nop as well because we capture all other cpus
 * anyways when making the PROM active.
 */
extern __inline__ void smp_send_stop(void) { }

#endif /* !(__ASSEMBLY__) */

/* Scheduler bonus for keeping a task on the cpu it last ran on. */
#define PROC_CHANGE_PENALTY	20

#endif /* !(CONFIG_SMP) */

#define NO_PROC_ID		0xFF

#endif /* !(_SPARC64_SMP_H) */