// SPDX-License-Identifier: GPL-2.0+
/*
 * Hygon Processor Support for Linux
 *
 * Copyright (C) 2018 Chengdu Haiguang IC Design Co., Ltd.
 *
 * Author: Pu Wen <puwen@hygon.cn>
 */
#include <linux/io.h>

#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/numa.h>
#include <asm/cacheinfo.h>
#include <asm/spec-ctrl.h>
#include <asm/delay.h>

#include "cpu.h"

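/*
 * Socket ID is ApicId[6] for processors with model <= 0x3 when running
 * on host; see hygon_get_topology().
 */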
#define APICID_SOCKET_ID_BIT 6

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;

#ifdef CONFIG_NUMA
/*
 * Work around a broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

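/*
 * CPUID Fn8000_001E EBX[15:8] reports ThreadsPerCore - 1 (the same layout
 * as AMD's topology extensions leaf); use it to seed smp_num_siblings
 * early in boot.
 */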
static void hygon_get_topology_early(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_TOPOEXT))
		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
}

/*
 * Fixup core topology information for
 * (1) Hygon multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) Hygon processors supporting compute units
 */
static void hygon_get_topology(struct cpuinfo_x86 *c)
{
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int err;
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

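		/*
		 * ECX[7:0] is the node (die) ID and EBX[7:0] the core ID,
		 * mirroring AMD's topology extensions leaf.
		 */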
		c->cpu_die_id  = ecx & 0xff;

		c->cpu_core_id = ebx & 0xff;

		if (smp_num_siblings > 1)
			c->x86_max_cores /= smp_num_siblings;

		/*
		 * In case leaf B is available, use it to derive
		 * topology information.
		 */
		err = detect_extended_topology(c);
		if (!err)
			c->x86_coreid_bits = get_count_order(c->x86_max_cores);

		/*
		 * Socket ID is ApicId[6] for the processors with model <= 0x3
		 * when running on host.
		 */
		if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
			c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;

		cacheinfo_hygon_init_llc_id(c, cpu);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

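		/*
		 * Without TOPOEXT, fall back to the NodeId MSR:
		 * bits [2:0] hold this CPU's node ID.
		 */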
		rdmsrl(MSR_FAM10H_NODE_ID, value);
		c->cpu_die_id = value & 7;

		per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
	} else
		return;

	if (nodes_per_socket > 1)
		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
}

/*
 * On Hygon, the lower bits of the APIC ID distinguish the cores.
 * Assumes the number of cores is a power of two.
 */
static void hygon_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned int bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
}

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned int apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On a multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fix up some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs.
		 *   Assume they are all increased by a constant offset, but
		 *   in the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node, fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming a certain relationship
		 * between APIC ID, HT node ID and NUMA topology.  As going
		 * through the CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_hygon_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned int bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void bsp_init_hygon(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
		u64 val;

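		/*
		 * HWCR bit 24 is TscFreqSel in the AMD-compatible layout:
		 * when set, the TSC counts at the P0 frequency.
		 */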
		rdmsrl(MSR_K7_HWCR, val);
		if (!(val & BIT(24)))
			pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		__max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

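		/* NodeId MSR bits [5:3] give the number of nodes per processor - 1. */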
		rdmsrl(MSR_FAM10H_NODE_ID, value);
		__max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
	}

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
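			/* Bit 10 of LS_CFG is the SSBD control bit, as on AMD family 17h. */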
			x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;
		}
	}
}

static void early_init_hygon(struct cpuinfo_x86 *c)
{
	u32 dummy;

	early_init_hygon_mc(c);

	set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is CPUID 8000_0007 EDX. Bit 8 indicates that the TSC
	 * runs at a constant rate across P/T states and does not stop in
	 * deep C-states.
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

	/* Bit 14 indicates the Runtime Average Power Limit interface. */
	if (c->x86_power & BIT(14))
		set_cpu_cap(c, X86_FEATURE_RAPL);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#endif

#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * The APIC ID can always be treated as an 8-bit value on Hygon APICs,
	 * so we can safely set X86_FEATURE_EXTD_APICID unconditionally.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC))
		set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL.  VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	hygon_get_topology_early(c);
}

static void init_hygon(struct cpuinfo_x86 *c)
{
	early_init_hygon(c);

	/*
	 * Bit 31 in the standard CPUID leaf is used for a nonstandard 3DNow ID;
	 * 3DNow is identified by bit 31 in the extended CPUID leaf (1*32+31) anyway.
	 */
	clear_cpu_cap(c, 0*32+31);

	set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = read_apic_id();

	/*
	 * XXX someone from Hygon needs to confirm this DTRT
	 *
	init_spectral_chicken(c);
	 */

	set_cpu_cap(c, X86_FEATURE_ZEN);
	set_cpu_cap(c, X86_FEATURE_CPB);

	cpu_detect_cache_sizes(c);

	hygon_detect_cmp(c);
	hygon_get_topology(c);
	srat_detect_node(c);

	init_hygon_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization.  On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_AMD64_DE_CFG,
			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/*
	 * Hygon processors keep the APIC timer running in deep C-states.
	 */
	set_cpu_cap(c, X86_FEATURE_ARAT);

	/* Hygon CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_feature_enabled(X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	check_null_seg_clears_base(c);

	/* Hygon CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
}

static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->extended_cpuid_level < 0x80000006)
		return;

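	/*
	 * CPUID Fn8000_0006 reports the L2 TLB sizes: EBX holds the 4K
	 * entries (ITLB in [11:0], DTLB in [27:16]), EAX the 2M/4M entries
	 * in the same positions; each count is a 12-bit field, hence the mask.
	 */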
	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		tlb_lli_2m[ENTRIES] = eax & 0xff;
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev hygon_cpu_dev = {
	.c_vendor	= "Hygon",
	.c_ident	= { "HygonGenuine" },
	.c_early_init	= early_init_hygon,
	.c_detect_tlb	= cpu_detect_tlb_hygon,
	.c_bsp_init	= bsp_init_hygon,
	.c_init		= init_hygon,
	.c_x86_vendor	= X86_VENDOR_HYGON,
};

cpu_dev_register(hygon_cpu_dev);