/*
 * Copyright IBM Corp. 2007
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/cpuset.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static int topology_enabled = 1;
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
static struct timer_list topology_timer;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);

static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];

#ifdef CONFIG_SCHED_BOOK
static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
unsigned char cpu_book_id[NR_CPUS];
#endif

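/*
 * Return the cpumask of the topology group in *info (core or book list)
 * that contains @cpu. If topology is disabled or the cpu is not found in
 * any group, fall back to a mask containing just that cpu.
 */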
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpus_clear(mask);
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
		cpumask_copy(&mask, cpumask_of(cpu));
		return mask;
	}
	while (info) {
		if (cpu_isset(cpu, info->mask)) {
			mask = info->mask;
			break;
		}
		info = info->next;
	}
	if (cpus_empty(mask))
		mask = cpumask_of_cpu(cpu);
	return mask;
}

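/*
 * Translate the cpu bits of a topology CPU entry into logical cpu numbers:
 * the physical cpu address is the entry's origin plus the bit position,
 * counted from the most significant bit. For every matching logical cpu
 * record its core (and book) id and its polarization.
 */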
static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
			     struct mask_info *book, struct mask_info *core)
{
	unsigned int cpu;

	for (cpu = find_first_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS);
	     cpu < TOPOLOGY_CPU_BITS;
	     cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
	{
		unsigned int rcpu, lcpu;

		rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
		for_each_present_cpu(lcpu) {
			if (cpu_logical_map(lcpu) != rcpu)
				continue;
#ifdef CONFIG_SCHED_BOOK
			cpu_set(lcpu, book->mask);
			cpu_book_id[lcpu] = book->id;
#endif
			cpu_set(lcpu, core->mask);
			cpu_core_id[lcpu] = core->id;
			smp_cpu_polarization[lcpu] = tl_cpu->pp;
		}
	}
}

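/* Clear all cpumasks in the core (and book) lists; called with topology_lock held. */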
static void clear_masks(void)
{
	struct mask_info *info;

	info = &core_info;
	while (info) {
		cpus_clear(info->mask);
		info = info->next;
	}
#ifdef CONFIG_SCHED_BOOK
	info = &book_info;
	while (info) {
		cpus_clear(info->mask);
		info = info->next;
	}
#endif
}

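/*
 * Advance to the next topology list entry. CPU entries (nl == 0) and
 * container entries have different sizes.
 */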
static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_cpu *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

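/*
 * Rebuild the core (and book) mask lists from a SYSIB 15.1.x topology
 * block: container entries select the next mask_info in the respective
 * list, CPU entries fill in the masks. An unexpected nesting level
 * discards everything again.
 */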
static void tl_to_cores(struct sysinfo_15_1_x *info)
{
#ifdef CONFIG_SCHED_BOOK
	struct mask_info *book = &book_info;
#else
	struct mask_info *book = NULL;
#endif
	struct mask_info *core = &core_info;
	union topology_entry *tle, *end;

	spin_lock_irq(&topology_lock);
	clear_masks();
	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
#ifdef CONFIG_SCHED_BOOK
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
#endif
		case 1:
			core = core->next;
			core->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, book, core);
			break;
		default:
			clear_masks();
			goto out;
		}
		tle = next_tle(tle);
	}
out:
	spin_unlock_irq(&topology_lock);
}

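/* Without topology information every cpu is reported as horizontally polarized. */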
static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
	mutex_unlock(&smp_cpu_state_mutex);
}

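/*
 * Issue the Perform Topology Function (PTF) instruction with function
 * code @fc and return its condition code.
 */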
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}

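/*
 * Switch the machine between horizontal (fc == 0) and vertical (fc != 0)
 * cpu polarization. The polarization of all cpus is marked unknown until
 * the next topology update arrives.
 */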
int topology_set_cpu_management(int fc)
{
	int cpu;
	int rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
	return rc;
}

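/* Recompute cpu_core_map (and cpu_book_map) for all possible cpus. */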
static void update_cpu_core_map(void)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&topology_lock, flags);
	for_each_possible_cpu(cpu) {
		cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
#ifdef CONFIG_SCHED_BOOK
		cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
#endif
	}
	spin_unlock_irqrestore(&topology_lock, flags);
}

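/*
 * Fetch the topology information block: try SYSIB 15.1.3 (book level)
 * first and fall back to 15.1.2 if that nesting level is not supported.
 */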
void store_topology(struct sysinfo_15_1_x *info)
{
#ifdef CONFIG_SCHED_BOOK
	int rc;

	rc = stsi(info, 15, 1, 3);
	if (rc != -ENOSYS)
		return;
#endif
	stsi(info, 15, 1, 2);
}

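/*
 * Refresh the topology masks and send a change uevent for every online
 * cpu. Returns 0 if the machine provides no topology information, 1
 * otherwise.
 */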
int arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	struct sys_device *sysdev;
	int cpu;

	if (!MACHINE_HAS_TOPOLOGY) {
		update_cpu_core_map();
		topology_update_polarization_simple();
		return 0;
	}
	store_topology(info);
	tl_to_cores(info);
	update_cpu_core_map();
	for_each_online_cpu(cpu) {
		sysdev = get_cpu_sysdev(cpu);
		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
	}
	return 1;
}

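/* Topology changes are handled in process context: rebuild the scheduler domains. */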
static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

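/*
 * Poll for topology changes with the PTF check function once a minute
 * and schedule a sched domain rebuild if anything changed.
 */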
static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static void set_topology_timer(void)
{
	topology_timer.function = topology_timer_fn;
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	add_timer(&topology_timer);
}

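/* Handle the "topology=off" early kernel parameter. */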
static int __init early_parse_topology(char *p)
{
	if (strncmp(p, "off", 3))
		return 0;
	topology_enabled = 0;
	return 0;
}
early_param("topology", early_parse_topology);

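/*
 * Start the deferrable topology polling timer if the machine supports
 * topology, otherwise just report simple horizontal polarization.
 */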
static int __init init_topology_update(void)
{
	int rc;

	rc = 0;
	if (!MACHINE_HAS_TOPOLOGY) {
		topology_update_polarization_simple();
		goto out;
	}
	init_timer_deferrable(&topology_timer);
	set_topology_timer();
out:
	update_cpu_core_map();
	return rc;
}
__initcall(init_topology_update);

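/*
 * Allocate the mask_info list for one topology nesting level (offset);
 * the upper bound on the number of containers at that level is derived
 * from the magnitude values reported in SYSIB 15.1.x.
 */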
static void alloc_masks(struct sysinfo_15_1_x *info, struct mask_info *mask,
			int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = alloc_bootmem(sizeof(struct mask_info));
		mask = mask->next;
	}
}

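/*
 * Early setup: read the topology block into a bootmem page, print the
 * machine's topology magnitudes and allocate the core (and book) mask
 * lists.
 */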
void __init s390_init_cpu_topology(void)
{
	struct sysinfo_15_1_x *info;
	int i;

	if (!MACHINE_HAS_TOPOLOGY)
		return;
	tl_info = alloc_bootmem_pages(PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is:");
	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
		printk(" %d", info->mag[i]);
	printk(" / %d\n", info->mnest);
	alloc_masks(info, &core_info, 2);
#ifdef CONFIG_SCHED_BOOK
	alloc_masks(info, &book_info, 3);
#endif
}