// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/thermal_pressure.h>

static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
static DEFINE_PER_CPU(u32, freq_factor) = 1;

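/* Check whether every CPU in @cpus has a counter-based scale-freq source registered. */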
static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
	return cpumask_subset(cpus, &scale_freq_counters_mask);
}

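/*
 * Frequency invariance is available when either cpufreq provides it or
 * counter-based scale-freq sources cover all online CPUs.
 */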
bool topology_scale_freq_invariant(void)
{
	return cpufreq_supports_freq_invariance() ||
	       supports_scale_freq_counters(cpu_online_mask);
}

static void update_scale_freq_invariant(bool status)
{
	if (scale_freq_invariant == status)
		return;

	/*
	 * Task scheduler behavior depends on frequency invariance support,
	 * either cpufreq or counter driven. If the support status changes as
	 * a result of counter initialisation and use, retrigger the build of
	 * scheduling domains to ensure the information is propagated properly.
	 */
	if (topology_scale_freq_invariant() == status) {
		scale_freq_invariant = status;
		rebuild_sched_domains_energy();
	}
}

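/*
 * Register @data as the scale-freq source for @cpus. ARCH-provided counters
 * take precedence and are never overwritten by other sources.
 */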
void topology_set_scale_freq_source(struct scale_freq_data *data,
				    const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	/*
	 * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
	 * supported by cpufreq.
	 */
	if (cpumask_empty(&scale_freq_counters_mask))
		scale_freq_invariant = topology_scale_freq_invariant();

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		/* Use ARCH provided counters whenever possible */
		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), data);
			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	update_scale_freq_invariant(true);
}
EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);

void topology_clear_scale_freq_source(enum scale_freq_source source,
				      const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		if (sfd && sfd->source == source) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	/*
	 * Make sure all references to previous sft_data are dropped to avoid
	 * use-after-free races.
	 */
	synchronize_rcu();

	update_scale_freq_invariant(false);
}
EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);

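/*
 * Tick-time hook: let the registered scale-freq source update this CPU's
 * frequency scale factor. Relies on being called from a non-preemptible
 * (tick) context, hence rcu_dereference_sched().
 */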
void topology_scale_freq_tick(void)
{
	struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));

	if (sfd)
		sfd->set_freq_scale();
}

DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);

void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
			     unsigned long max_freq)
{
	unsigned long scale;
	int i;

	if (WARN_ON_ONCE(!cur_freq || !max_freq))
		return;

	/*
	 * If the use of counters for FIE is enabled, just return as we don't
	 * want to update the scale factor with information from CPUFREQ.
	 * Instead the scale factor will be updated from arch_scale_freq_tick.
	 */
	if (supports_scale_freq_counters(cpus))
		return;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(arch_freq_scale, i) = scale;
}

DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

DEFINE_PER_CPU(unsigned long, thermal_pressure);

/**
 * topology_update_thermal_pressure() - Update thermal pressure for CPUs
 * @cpus        : The related CPUs for which capacity has been reduced
 * @capped_freq : The maximum allowed frequency that CPUs can run at
 *
 * Update the value of thermal pressure for all @cpus in the mask. The
 * cpumask should include all (online+offline) affected CPUs, to avoid
 * operating on stale data when hot-plug is used for some CPUs. The
 * @capped_freq reflects the currently allowed maximum CPU frequency due to
 * thermal capping. It might also be a boost frequency value, which is bigger
 * than the internal 'freq_factor' max frequency. In such a case the pressure
 * value should simply be removed, since this is an indication that there is
 * no thermal throttling. The @capped_freq must be provided in kHz.
 */
void topology_update_thermal_pressure(const struct cpumask *cpus,
				      unsigned long capped_freq)
{
	unsigned long max_capacity, capacity, th_pressure;
	u32 max_freq;
	int cpu;

	cpu = cpumask_first(cpus);
	max_capacity = arch_scale_cpu_capacity(cpu);
	max_freq = per_cpu(freq_factor, cpu);

	/* Convert to MHz scale which is used in 'freq_factor' */
	capped_freq /= 1000;

	/*
	 * Handle properly the boost frequencies, which should simply clean
	 * the thermal pressure value.
	 */
	if (max_freq <= capped_freq)
		capacity = max_capacity;
	else
		capacity = mult_frac(max_capacity, capped_freq, max_freq);

	th_pressure = max_capacity - capacity;

	trace_thermal_pressure_update(cpu, th_pressure);

	for_each_cpu(cpu, cpus)
		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
EXPORT_SYMBOL_GPL(topology_update_thermal_pressure);

static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

static int update_topology;

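/*
 * Non-zero only while update_topology_flags_workfn() below is rebuilding the
 * sched domains; queried by the scheduler through this helper.
 */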
int topology_update_cpu_topology(void)
{
	return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}

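/*
 * Scale each CPU's raw capacity by its boot-time frequency (freq_factor) and
 * normalize so the biggest CPU ends up at SCHED_CAPACITY_SCALE (1024).
 */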
void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	u64 capacity_scale;
	int cpu;

	if (!raw_capacity)
		return;

	capacity_scale = 1;
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity_scale = max(capacity, capacity_scale);
	}

	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}
}

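/*
 * Parse the optional "capacity-dmips-mhz" DT property for @cpu into
 * raw_capacity[]. Returns true when a value was found and recorded.
 */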
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	struct clk *cpu_clk;
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			 cpu_node, raw_capacity[cpu]);

		/*
		 * Update freq_factor for calculating early boot CPU capacities.
		 * For non-clk CPU DVFS mechanisms there's no way to get the
		 * frequency value now, so assume the CPUs are running at the
		 * same frequency (by keeping the initial freq_factor value).
		 */
		cpu_clk = of_clk_get(cpu_node, 0);
		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
			per_cpu(freq_factor, cpu) =
				clk_get_rate(cpu_clk) / 1000;
			clk_put(cpu_clk);
		}
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
			       cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}

#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>

void topology_init_cpu_capacity_cppc(void)
{
	struct cppc_perf_caps perf_caps;
	int cpu;

	if (likely(acpi_disabled || !acpi_cpc_valid()))
		return;

	raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
			       GFP_KERNEL);
	if (!raw_capacity)
		return;

	for_each_possible_cpu(cpu) {
		if (!cppc_get_perf_caps(cpu, &perf_caps) &&
		    (perf_caps.highest_perf >= perf_caps.nominal_perf) &&
		    (perf_caps.highest_perf >= perf_caps.lowest_perf)) {
			raw_capacity[cpu] = perf_caps.highest_perf;
			pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
				 cpu, raw_capacity[cpu]);
			continue;
		}

		pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
		pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		goto exit;
	}

	topology_normalize_cpu_scale();
	schedule_work(&update_topology_flags_work);
	pr_debug("cpu_capacity: cpu_capacity initialization done\n");

exit:
	free_raw_capacity();
}
#endif

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

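/*
 * cpufreq policy notifier: record each new policy's max frequency in
 * freq_factor and, once every possible CPU has been visited, normalize the
 * capacities and queue the sched-domain flag update.
 */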
static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (!raw_capacity)
		return 0;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;

	if (cpumask_empty(cpus_to_visit)) {
		topology_normalize_cpu_scale();
		schedule_work(&update_topology_flags_work);
		free_raw_capacity();
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * On ACPI-based systems skip registering the cpufreq notifier, as
	 * cpufreq information is not needed for CPU capacity initialization.
	 */
	if (!acpi_disabled || !raw_capacity)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
 * This function returns the logical CPU number of the given node.
 * There are basically three kinds of return values:
 * (1) logical CPU number, which is >= 0.
 * (2) -ENODEV when the device tree (DT) node is valid and found in the DT but
 *     there is no possible logical CPU in the kernel to match. This happens
 *     when CONFIG_NR_CPUS is configured to be smaller than the number of
 *     CPU nodes in the DT. We need to just ignore this case.
 * (3) -1 if the node does not exist in the device tree.
 */
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
			cpu_node, cpumask_pr_args(cpu_possible_mask));

	of_node_put(cpu_node);
	return cpu;
}

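/*
 * Parse a cpu-map "core" node: record package/core/thread IDs for each
 * "threadN" child, or for the core itself when it has no thread children.
 */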
static int __init parse_core(struct device_node *core, int package_id,
			     int core_id)
{
	char name[20];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].package_id = package_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else if (cpu != -ENODEV) {
				pr_err("%pOF: Can't get CPU for thread\n", t);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf && cpu != -ENODEV) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}

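/*
 * Recursively parse a cpu-map "cluster" node. Nested clusters are flattened:
 * each leaf cluster gets its own package_id, and its "coreN" children are
 * handed to parse_core().
 */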
static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[20];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int package_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%pOF: cpu-map children should be clusters\n",
				       c);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, package_id, core_id++);
			} else {
				pr_err("%pOF: Non-leaf cluster with core %s\n",
				       cluster, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	if (leaf)
		package_id++;

	return 0;
}

static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided, cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}
#endif

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

	/* Find the smaller of NUMA, core or LLC siblings */
	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
		/* not NUMA in package, let's use the package siblings */
		core_mask = &cpu_topology[cpu].core_sibling;
	}
	if (cpu_topology[cpu].llc_id != -1) {
		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
			core_mask = &cpu_topology[cpu].llc_sibling;
	}

	/*
	 * For systems with no shared cpu-side LLC but with clusters defined,
	 * extend core_mask to cluster_siblings. The sched domain builder will
	 * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
	 */
	if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
	    cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
		core_mask = &cpu_topology[cpu].cluster_sibling;

	return core_mask;
}

const struct cpumask *cpu_clustergroup_mask(int cpu)
{
	return &cpu_topology[cpu].cluster_sibling;
}

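/*
 * Cross-link @cpuid into the llc/cluster/core/thread sibling masks of every
 * online CPU that shares the corresponding topology IDs (and vice versa).
 */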
void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_online_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpu_topo->llc_id != -1 && cpuid_topo->llc_id == cpu_topo->llc_id) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		if (cpuid_topo->cluster_id == cpu_topo->cluster_id &&
		    cpuid_topo->cluster_id != -1) {
			cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
		}

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

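/* Reset @cpu's sibling masks so that each mask contains only @cpu itself. */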
static void clear_cpu_topology(int cpu)
{
	struct cpu_topology *cpu_topo = &cpu_topology[cpu];

	cpumask_clear(&cpu_topo->llc_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

	cpumask_clear(&cpu_topo->cluster_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);

	cpumask_clear(&cpu_topo->core_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
	cpumask_clear(&cpu_topo->thread_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->cluster_id = -1;
		cpu_topo->package_id = -1;
		cpu_topo->llc_id = -1;

		clear_cpu_topology(cpu);
	}
}

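/*
 * Called when @cpu goes away: remove it from all of its siblings' masks,
 * then reset its own topology masks.
 */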
void remove_cpu_topology(unsigned int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, topology_cluster_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
	for_each_cpu(sibling, topology_llc_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

	clear_cpu_topology(cpu);
}

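/* Weak stub; overridden by architectures that can parse topology from ACPI. */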
__weak int __init parse_acpi_topology(void)
{
	return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (parse_acpi_topology())
		reset_cpu_topology();
	else if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
}
#endif