/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV architectural definitions
 *
 * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
 * Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved.
 */

#ifndef _ASM_X86_UV_UV_HUB_H
#define _ASM_X86_UV_UV_HUB_H

#ifdef CONFIG_X86_64
#include <linux/numa.h>
#include <linux/percpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/topology.h>
#include <asm/types.h>
#include <asm/percpu.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/bios.h>
#include <asm/irq_vectors.h>
#include <asm/io_apic.h>


/*
 * Addressing Terminology
 *
 *	M       - The low M bits of a physical address represent the offset
 *		  into the blade local memory. RAM on a blade is physically
 *		  contiguous (although various IO spaces may punch holes in
 *		  it).
 *
 *	N	- Number of bits in the node portion of a socket physical
 *		  address.
 *
 *	NASID   - network ID of a router, Mbrick or Cbrick. Nasid values of
 *		  routers always have low bit of 1, C/MBricks have low bit
 *		  equal to 0. Most addressing macros that target UV hub chips
 *		  right shift the NASID by 1 to exclude the always-zero bit.
 *		  NASIDs contain up to 15 bits.
 *
 *	GNODE   - NASID right shifted by 1 bit. Most MMRs contain gnodes instead
 *		  of nasids.
 *
 *	PNODE   - the low N bits of the GNODE. The PNODE is the most useful variant
 *		  of the nasid for socket usage.
 *
 *	GPA	- (global physical address) a socket physical address converted
 *		  so that it can be used by the GRU as a global address. Socket
 *		  physical addresses 1) need additional NASID (node) bits added
 *		  to the high end of the address, and 2) need to be unaliased if
 *		  the partition does not have a physical address 0. In addition,
 *		  on UV2 rev 1, GPAs need the gnode left shifted to bits 39 or 40.
 *
 *
 *  NumaLink Global Physical Address Format:
 *  +--------------------------------+---------------------+
 *  |00..000|      GNODE             |      NodeOffset     |
 *  +--------------------------------+---------------------+
 *          |<-------53 - M bits --->|<--------M bits ----->
 *
 *	M - number of node offset bits (35 .. 40)
 *
 *
 *  Memory/UV-HUB Processor Socket Address Format:
 *  +----------------+---------------+---------------------+
 *  |00..000000000000|   PNODE       |      NodeOffset     |
 *  +----------------+---------------+---------------------+
 *                   <--- N bits --->|<--------M bits ----->
 *
 *	M - number of node offset bits (35 .. 40)
 *	N - number of PNODE bits (0 .. 10)
 *
 *		Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64).
 *		The actual values are configuration dependent and are set at
 *		boot time. M & N values are set by the hardware/BIOS at boot.
 *
 *
 * APICID format
 *	NOTE!!!!!! This is the current format of the APICID. However, code
 *	should assume that this will change in the future. Use functions
 *	in this file for all APICID bit manipulations and conversion.
 *
 *		1111110000000000
 *		5432109876543210
 *		pppppppppplc0cch	Nehalem-EX (12 bits in hdw reg)
 *		ppppppppplcc0cch	Westmere-EX (12 bits in hdw reg)
 *		pppppppppppcccch	SandyBridge (15 bits in hdw reg)
 *		sssssssssss
 *
 *			p  = pnode bits
 *			l =  socket number on board
 *			c  = core
 *			h  = hyperthread
 *			s  = bits that are in the SOCKET_ID CSR
 *
 *	Note: Processor may support fewer bits in the APICID register. The ACPI
 *	      tables hold all 16 bits. Software needs to be aware of this.
 *
 *	      Unless otherwise specified, all references to APICID refer to
 *	      the FULL value contained in ACPI tables, not the subset in the
 *	      processor APICID register.
 */
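
/*
 * Illustrative example (hypothetical values): a C/MBrick with NASID 0x6
 * has GNODE 0x3 (the NASID right shifted by 1, per the definition above);
 * with a pnode_mask of 0x3f its PNODE is also 0x3.  Use the conversion
 * helpers and macros below rather than open-coding these shifts.
 */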

/*
 * Maximum number of bricks in all partitions and in all coherency domains.
 * This is the total number of bricks accessible in the numalink fabric. It
 * includes all C & M bricks. Routers are NOT included.
 *
 * This is also the maximum number of non-router NASIDs in the numalink
 * fabric.
 *
 * NOTE: a brick may contain 1 or 2 OS nodes. Don't get these confused.
 */
#define UV_MAX_NUMALINK_BLADES	16384

/*
 * Maximum number of C/Mbricks within a software SSI (hardware may support
 * more).
 */
#define UV_MAX_SSI_BLADES	256

/*
 * The largest possible NASID of a C or M brick (+ 2)
 */
#define UV_MAX_NASID_VALUE	(UV_MAX_NUMALINK_BLADES * 2)

/* GAM (globally addressed memory) range table */
struct uv_gam_range_s {
	u32	limit;		/* PA bits 56:26 (GAM_RANGE_SHFT) */
	u16	nasid;		/* NASID of the node owning this range */
	s8	base;		/* entry index of node's base addr */
	u8	reserved;
};
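/* Entries are kept in ascending "limit" order; see uv_gam_range() below for the lookup. */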

/*
 * The following defines attributes of the HUB chip. These attributes are
 * frequently referenced and are kept in a common per hub struct.
 * After setup, the struct is read only, so it should be readily
 * available in the L3 cache on the cpu socket for the node.
 */
struct uv_hub_info_s {
	unsigned int		hub_type;
	unsigned char		hub_revision;
	unsigned long		global_mmr_base;
	unsigned long		global_mmr_shift;
	unsigned long		gpa_mask;
	unsigned short		*socket_to_node;
	unsigned short		*socket_to_pnode;
	unsigned short		*pnode_to_socket;
	struct uv_gam_range_s	*gr_table;
	unsigned short		min_socket;
	unsigned short		min_pnode;
	unsigned char		m_val;
	unsigned char		n_val;
	unsigned char		gr_table_len;
	unsigned char		apic_pnode_shift;
	unsigned char		gpa_shift;
	unsigned char		nasid_shift;
	unsigned char		m_shift;
	unsigned char		n_lshift;
	unsigned int		gnode_extra;
	unsigned long		gnode_upper;
	unsigned long		lowmem_remap_top;
	unsigned long		lowmem_remap_base;
	unsigned long		global_gru_base;
	unsigned long		global_gru_shift;
	unsigned short		pnode;
	unsigned short		pnode_mask;
	unsigned short		coherency_domain_number;
	unsigned short		numa_blade_id;
	unsigned short		nr_possible_cpus;
	unsigned short		nr_online_cpus;
	short			memory_nid;
	unsigned short		*node_to_socket;
};

/* CPU specific info with a pointer to the hub common info struct */
struct uv_cpu_info_s {
	void			*p_uv_hub_info;
	unsigned char		blade_cpu_id;
	void			*reserved;
};
DECLARE_PER_CPU(struct uv_cpu_info_s, __uv_cpu_info);

#define uv_cpu_info		this_cpu_ptr(&__uv_cpu_info)
#define uv_cpu_info_per(cpu)	(&per_cpu(__uv_cpu_info, cpu))

/* Node specific hub common info struct */
extern void **__uv_hub_info_list;
static inline struct uv_hub_info_s *uv_hub_info_list(int node)
{
	return (struct uv_hub_info_s *)__uv_hub_info_list[node];
}

static inline struct uv_hub_info_s *_uv_hub_info(void)
{
	return (struct uv_hub_info_s *)uv_cpu_info->p_uv_hub_info;
}
#define	uv_hub_info	_uv_hub_info()

static inline struct uv_hub_info_s *uv_cpu_hub_info(int cpu)
{
	return (struct uv_hub_info_s *)uv_cpu_info_per(cpu)->p_uv_hub_info;
}

static inline int uv_hub_type(void)
{
	return uv_hub_info->hub_type;
}

static inline __init void uv_hub_type_set(int uvmask)
{
	uv_hub_info->hub_type = uvmask;
}


/*
 * HUB revision ranges for each UV HUB architecture.
 * This is a software convention - NOT the hardware revision numbers in
 * the hub chip.
 */
#define UV2_HUB_REVISION_BASE		3
#define UV3_HUB_REVISION_BASE		5
#define UV4_HUB_REVISION_BASE		7
#define UV4A_HUB_REVISION_BASE		8	/* UV4 (fixed) rev 2 */
#define UV5_HUB_REVISION_BASE		9

static inline int is_uv(int uvmask) { return uv_hub_type() & uvmask; }
static inline int is_uv1_hub(void) { return 0; }
static inline int is_uv2_hub(void) { return is_uv(UV2); }
static inline int is_uv3_hub(void) { return is_uv(UV3); }
static inline int is_uv4a_hub(void) { return is_uv(UV4A); }
static inline int is_uv4_hub(void) { return is_uv(UV4); }
static inline int is_uv5_hub(void) { return is_uv(UV5); }

/*
 * UV4A is a revision of UV4.  So on UV4A, both is_uv4_hub() and
 * is_uv4a_hub() return true, while on UV4 only is_uv4_hub()
 * returns true.  To distinguish the two, test for UV4A first,
 * then for UV4.
 */
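/*
 * For example (illustrative only):
 *
 *	if (is_uv4a_hub())
 *		...			(UV4A-specific handling)
 *	else if (is_uv4_hub())
 *		...			(handling for other UV4 revisions)
 */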

/* UVX class: UV2,3,4 */
static inline int is_uvx_hub(void) { return is_uv(UVX); }

/* UVY class: UV5,..? */
static inline int is_uvy_hub(void) { return is_uv(UVY); }

/* Any UV Hubbed System */
static inline int is_uv_hub(void) { return is_uv(UV_ANY); }

union uvh_apicid {
    unsigned long       v;
    struct uvh_apicid_s {
        unsigned long   local_apic_mask  : 24;
        unsigned long   local_apic_shift :  5;
        unsigned long   unused1          :  3;
        unsigned long   pnode_mask       : 24;
        unsigned long   pnode_shift      :  5;
        unsigned long   unused2          :  3;
    } s;
};

/*
 * Local & Global MMR space macros.
 *	Note: macros are intended to be used ONLY by inline functions
 *	in this file - not by other kernel code.
 *		n -  NASID (full 15-bit global nasid)
 *		g -  GNODE (full 15-bit global nasid, right shifted 1)
 *		p -  PNODE (local part of nasids, right shifted 1)
 */
#define UV_NASID_TO_PNODE(n)		\
		(((n) >> uv_hub_info->nasid_shift) & uv_hub_info->pnode_mask)
#define UV_PNODE_TO_GNODE(p)		((p) | uv_hub_info->gnode_extra)
#define UV_PNODE_TO_NASID(p)		\
		(UV_PNODE_TO_GNODE(p) << uv_hub_info->nasid_shift)

#define UV2_LOCAL_MMR_BASE		0xfa000000UL
#define UV2_GLOBAL_MMR32_BASE		0xfc000000UL
#define UV2_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
#define UV2_GLOBAL_MMR32_SIZE		(32UL * 1024 * 1024)

#define UV3_LOCAL_MMR_BASE		0xfa000000UL
#define UV3_GLOBAL_MMR32_BASE		0xfc000000UL
#define UV3_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
#define UV3_GLOBAL_MMR32_SIZE		(32UL * 1024 * 1024)

#define UV4_LOCAL_MMR_BASE		0xfa000000UL
#define UV4_GLOBAL_MMR32_BASE		0
#define UV4_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
#define UV4_GLOBAL_MMR32_SIZE		0

#define UV5_LOCAL_MMR_BASE		0xfa000000UL
#define UV5_GLOBAL_MMR32_BASE		0
#define UV5_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
#define UV5_GLOBAL_MMR32_SIZE		0

#define UV_LOCAL_MMR_BASE		(				\
					is_uv(UV2) ? UV2_LOCAL_MMR_BASE : \
					is_uv(UV3) ? UV3_LOCAL_MMR_BASE : \
					is_uv(UV4) ? UV4_LOCAL_MMR_BASE : \
					is_uv(UV5) ? UV5_LOCAL_MMR_BASE : \
					0)

#define UV_GLOBAL_MMR32_BASE		(				\
					is_uv(UV2) ? UV2_GLOBAL_MMR32_BASE : \
					is_uv(UV3) ? UV3_GLOBAL_MMR32_BASE : \
					is_uv(UV4) ? UV4_GLOBAL_MMR32_BASE : \
					is_uv(UV5) ? UV5_GLOBAL_MMR32_BASE : \
					0)

#define UV_LOCAL_MMR_SIZE		(				\
					is_uv(UV2) ? UV2_LOCAL_MMR_SIZE : \
					is_uv(UV3) ? UV3_LOCAL_MMR_SIZE : \
					is_uv(UV4) ? UV4_LOCAL_MMR_SIZE : \
					is_uv(UV5) ? UV5_LOCAL_MMR_SIZE : \
					0)

#define UV_GLOBAL_MMR32_SIZE		(				\
					is_uv(UV2) ? UV2_GLOBAL_MMR32_SIZE : \
					is_uv(UV3) ? UV3_GLOBAL_MMR32_SIZE : \
					is_uv(UV4) ? UV4_GLOBAL_MMR32_SIZE : \
					is_uv(UV5) ? UV5_GLOBAL_MMR32_SIZE : \
					0)

#define UV_GLOBAL_MMR64_BASE		(uv_hub_info->global_mmr_base)

#define UV_GLOBAL_GRU_MMR_BASE		0x4000000

#define UV_GLOBAL_MMR32_PNODE_SHIFT	15
#define _UV_GLOBAL_MMR64_PNODE_SHIFT	26
#define UV_GLOBAL_MMR64_PNODE_SHIFT	(uv_hub_info->global_mmr_shift)

#define UV_GLOBAL_MMR32_PNODE_BITS(p)	((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))

#define UV_GLOBAL_MMR64_PNODE_BITS(p)					\
	(((unsigned long)(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)

#define UVH_APICID		0x002D0E00L
#define UV_APIC_PNODE_SHIFT	6

/* Local Bus from cpu's perspective */
#define LOCAL_BUS_BASE		0x1c00000
#define LOCAL_BUS_SIZE		(4 * 1024 * 1024)

/*
 * System Controller Interface Reg
 *
 * Note there are NO leds on a UV system.  This register is only
 * used by the system controller to monitor system-wide operation.
 * There are 64 regs per node.  With Nehalem cpus (2 sockets per node,
 * 8 cores per socket, 2 threads per core) there are 32 cpu threads on
 * a node.
 *
 * The window is located at top of ACPI MMR space
 */
#define SCIR_WINDOW_COUNT	64
#define SCIR_LOCAL_MMR_BASE	(LOCAL_BUS_BASE + \
				 LOCAL_BUS_SIZE - \
				 SCIR_WINDOW_COUNT)

#define SCIR_CPU_HEARTBEAT	0x01	/* timer interrupt */
#define SCIR_CPU_ACTIVITY	0x02	/* not idle */
#define SCIR_CPU_HB_INTERVAL	(HZ)	/* once per second */

/* Loop through all installed blades */
#define for_each_possible_blade(bid)		\
	for ((bid) = 0; (bid) < uv_num_possible_blades(); (bid)++)

/*
 * Macros for converting between kernel virtual addresses, socket local physical
 * addresses, and UV global physical addresses.
 *	Note: use the standard __pa() & __va() macros for converting
 *	      between socket virtual and socket physical addresses.
 */
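/*
 * Typical use (illustrative; "buf" is a hypothetical kernel-virtual
 * pointer): convert it to a global address usable by the GRU with
 *
 *	unsigned long gpa = uv_gpa(buf);
 */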

/* global bits offset - number of local address bits in gpa for this UV arch */
static inline unsigned int uv_gpa_shift(void)
{
	return uv_hub_info->gpa_shift;
}
#define	_uv_gpa_shift

/* Find node that has the address range that contains global address  */
static inline struct uv_gam_range_s *uv_gam_range(unsigned long pa)
{
	struct uv_gam_range_s *gr = uv_hub_info->gr_table;
	unsigned long pal = (pa & uv_hub_info->gpa_mask) >> UV_GAM_RANGE_SHFT;
	int i, num = uv_hub_info->gr_table_len;

	if (gr) {
		for (i = 0; i < num; i++, gr++) {
			if (pal < gr->limit)
				return gr;
		}
	}
	pr_crit("UV: GAM Range for 0x%lx not found at %p!\n", pa, gr);
	BUG();
}

/* Return base address of node that contains global address  */
static inline unsigned long uv_gam_range_base(unsigned long pa)
{
	struct uv_gam_range_s *gr = uv_gam_range(pa);
	int base = gr->base;

	if (base < 0)
		return 0UL;

	return uv_hub_info->gr_table[base].limit;
}

/* socket phys RAM --> UV global NASID (UV4+) */
static inline unsigned long uv_soc_phys_ram_to_nasid(unsigned long paddr)
{
	return uv_gam_range(paddr)->nasid;
}
#define	_uv_soc_phys_ram_to_nasid

/* socket virtual --> UV global NASID (UV4+) */
static inline unsigned long uv_gpa_nasid(void *v)
{
	return uv_soc_phys_ram_to_nasid(__pa(v));
}

/* socket phys RAM --> UV global physical address */
static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
{
	unsigned int m_val = uv_hub_info->m_val;

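	/* Un-alias low addresses by OR-ing in the partition's remap base */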
	if (paddr < uv_hub_info->lowmem_remap_top)
		paddr |= uv_hub_info->lowmem_remap_base;

	if (m_val) {
		paddr |= uv_hub_info->gnode_upper;
		paddr = ((paddr << uv_hub_info->m_shift)
						>> uv_hub_info->m_shift) |
			((paddr >> uv_hub_info->m_val)
						<< uv_hub_info->n_lshift);
	} else {
		paddr |= uv_soc_phys_ram_to_nasid(paddr)
						<< uv_hub_info->gpa_shift;
	}
	return paddr;
}

/* socket virtual --> UV global physical address */
static inline unsigned long uv_gpa(void *v)
{
	return uv_soc_phys_ram_to_gpa(__pa(v));
}

/* Top two bits indicate the requested address is in MMR space.  */
static inline int
uv_gpa_in_mmr_space(unsigned long gpa)
{
	return (gpa >> 62) == 0x3UL;
}

/* UV global physical address --> socket phys RAM */
static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
{
	unsigned long paddr;
	unsigned long remap_base = uv_hub_info->lowmem_remap_base;
	unsigned long remap_top =  uv_hub_info->lowmem_remap_top;
	unsigned int m_val = uv_hub_info->m_val;

	if (m_val)
		gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
			((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);

	paddr = gpa & uv_hub_info->gpa_mask;
	if (paddr >= remap_base && paddr < remap_base + remap_top)
		paddr -= remap_base;
	return paddr;
}

/* gpa -> gnode */
static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
{
	unsigned int n_lshift = uv_hub_info->n_lshift;

	if (n_lshift)
		return gpa >> n_lshift;

	return uv_gam_range(gpa)->nasid >> 1;
}

/* gpa -> pnode */
static inline int uv_gpa_to_pnode(unsigned long gpa)
{
	return uv_gpa_to_gnode(gpa) & uv_hub_info->pnode_mask;
}

/* gpa -> node offset */
static inline unsigned long uv_gpa_to_offset(unsigned long gpa)
{
	unsigned int m_shift = uv_hub_info->m_shift;

	if (m_shift)
		return (gpa << m_shift) >> m_shift;

	return (gpa & uv_hub_info->gpa_mask) - uv_gam_range_base(gpa);
}

/* Convert socket to node */
static inline int _uv_socket_to_node(int socket, unsigned short *s2nid)
{
	return s2nid ? s2nid[socket - uv_hub_info->min_socket] : socket;
}

static inline int uv_socket_to_node(int socket)
{
	return _uv_socket_to_node(socket, uv_hub_info->socket_to_node);
}

static inline int uv_pnode_to_socket(int pnode)
{
	unsigned short *p2s = uv_hub_info->pnode_to_socket;

	return p2s ? p2s[pnode - uv_hub_info->min_pnode] : pnode;
}

/* pnode, offset --> socket virtual */
static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
{
	unsigned int m_val = uv_hub_info->m_val;
	unsigned long base;
	unsigned short sockid;

	if (m_val)
		return __va(((unsigned long)pnode << m_val) | offset);

	sockid = uv_pnode_to_socket(pnode);

	/* limit address of previous socket is our base, except node 0 is 0 */
	if (sockid == 0)
		return __va((unsigned long)offset);

	base = (unsigned long)(uv_hub_info->gr_table[sockid - 1].limit);
	return __va(base << UV_GAM_RANGE_SHFT | offset);
}

/* Extract/Convert a PNODE from an APICID (full apicid, not processor subset) */
static inline int uv_apicid_to_pnode(int apicid)
{
	int pnode = apicid >> uv_hub_info->apic_pnode_shift;
	unsigned short *s2pn = uv_hub_info->socket_to_pnode;

	return s2pn ? s2pn[pnode - uv_hub_info->min_socket] : pnode;
}

/*
 * Access global MMRs using the low memory MMR32 space. This region supports
 * faster MMR access but not all MMRs are accessible in this space.
 */
static inline unsigned long *uv_global_mmr32_address(int pnode, unsigned long offset)
{
	return __va(UV_GLOBAL_MMR32_BASE |
		       UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
}

static inline void uv_write_global_mmr32(int pnode, unsigned long offset, unsigned long val)
{
	writeq(val, uv_global_mmr32_address(pnode, offset));
}

static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset)
{
	return readq(uv_global_mmr32_address(pnode, offset));
}

/*
 * Access Global MMR space using the MMR space located at the top of physical
 * memory.
 */
static inline volatile void __iomem *uv_global_mmr64_address(int pnode, unsigned long offset)
{
	return __va(UV_GLOBAL_MMR64_BASE |
		    UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
}

static inline void uv_write_global_mmr64(int pnode, unsigned long offset, unsigned long val)
{
	writeq(val, uv_global_mmr64_address(pnode, offset));
}

static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset)
{
	return readq(uv_global_mmr64_address(pnode, offset));
}

static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val)
{
	writeb(val, uv_global_mmr64_address(pnode, offset));
}

static inline unsigned char uv_read_global_mmr8(int pnode, unsigned long offset)
{
	return readb(uv_global_mmr64_address(pnode, offset));
}

/*
 * Access hub local MMRs. Faster than using global space but only local MMRs
 * are accessible.
 */
static inline unsigned long *uv_local_mmr_address(unsigned long offset)
{
	return __va(UV_LOCAL_MMR_BASE | offset);
}

static inline unsigned long uv_read_local_mmr(unsigned long offset)
{
	return readq(uv_local_mmr_address(offset));
}

static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
{
	writeq(val, uv_local_mmr_address(offset));
}

static inline unsigned char uv_read_local_mmr8(unsigned long offset)
{
	return readb(uv_local_mmr_address(offset));
}

static inline void uv_write_local_mmr8(unsigned long offset, unsigned char val)
{
	writeb(val, uv_local_mmr_address(offset));
}
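
/*
 * Example (illustrative): read this hub's APICID register through the
 * local MMR space:
 *
 *	union uvh_apicid apicid;
 *
 *	apicid.v = uv_read_local_mmr(UVH_APICID);
 */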

/* Blade-local cpu number of current cpu. Numbered 0 .. <# cpus on the blade> */
static inline int uv_blade_processor_id(void)
{
	return uv_cpu_info->blade_cpu_id;
}

/* Blade-local cpu number of cpu N. Numbered 0 .. <# cpus on the blade> */
static inline int uv_cpu_blade_processor_id(int cpu)
{
	return uv_cpu_info_per(cpu)->blade_cpu_id;
}

/* Blade number to Node number (UV2..UV4 is 1:1) */
static inline int uv_blade_to_node(int blade)
{
	return uv_socket_to_node(blade);
}

/* Blade number of current cpu. Numbered 0 .. <#blades -1> */
static inline int uv_numa_blade_id(void)
{
	return uv_hub_info->numa_blade_id;
}

/*
 * Convert linux node number to the UV blade number.
 * .. Currently for UV2 thru UV4 the node and the blade are identical.
 * .. UV5 needs conversion when sub-numa clustering is enabled.
 */
static inline int uv_node_to_blade_id(int nid)
{
	unsigned short *n2s = uv_hub_info->node_to_socket;

	return n2s ? n2s[nid] : nid;
}

/* Convert a CPU number to the UV blade number */
static inline int uv_cpu_to_blade_id(int cpu)
{
	return uv_cpu_hub_info(cpu)->numa_blade_id;
}

/* Convert a blade id to the PNODE of the blade */
static inline int uv_blade_to_pnode(int bid)
{
	unsigned short *s2p = uv_hub_info->socket_to_pnode;

	return s2p ? s2p[bid] : bid;
}

/* Nid of memory node on blade. -1 if no blade-local memory */
static inline int uv_blade_to_memory_nid(int bid)
{
	return uv_hub_info_list(uv_blade_to_node(bid))->memory_nid;
}

/* Determine the number of possible cpus on a blade */
static inline int uv_blade_nr_possible_cpus(int bid)
{
	return uv_hub_info_list(uv_blade_to_node(bid))->nr_possible_cpus;
}

/* Determine the number of online cpus on a blade */
static inline int uv_blade_nr_online_cpus(int bid)
{
	return uv_hub_info_list(uv_blade_to_node(bid))->nr_online_cpus;
}

/* Convert a cpu id to the PNODE of the blade containing the cpu */
static inline int uv_cpu_to_pnode(int cpu)
{
	return uv_cpu_hub_info(cpu)->pnode;
}

/* Convert a linux node number to the PNODE of the blade */
static inline int uv_node_to_pnode(int nid)
{
	return uv_hub_info_list(nid)->pnode;
}

/* Maximum possible number of blades */
extern short uv_possible_blades;
static inline int uv_num_possible_blades(void)
{
	return uv_possible_blades;
}

/* Per Hub NMI support */
extern void uv_nmi_setup(void);
extern void uv_nmi_setup_hubless(void);

/* BIOS/Kernel flags exchange MMR */
#define UVH_BIOS_KERNEL_MMR		UVH_SCRATCH5
#define UVH_BIOS_KERNEL_MMR_ALIAS	UVH_SCRATCH5_ALIAS
#define UVH_BIOS_KERNEL_MMR_ALIAS_2	UVH_SCRATCH5_ALIAS_2

/* TSC sync valid, set by BIOS */
#define UVH_TSC_SYNC_MMR	UVH_BIOS_KERNEL_MMR
#define UVH_TSC_SYNC_SHIFT	10
#define UVH_TSC_SYNC_SHIFT_UV2K	16	/* UV2/3k have different bits */
#define UVH_TSC_SYNC_MASK	3	/* 0011 */
#define UVH_TSC_SYNC_VALID	3	/* 0011 */
#define UVH_TSC_SYNC_UNKNOWN	0	/* 0000 */

/* The BMC sets a bit in this MMR (making it non-zero) before sending an NMI */
#define UVH_NMI_MMR		UVH_BIOS_KERNEL_MMR
#define UVH_NMI_MMR_CLEAR	UVH_BIOS_KERNEL_MMR_ALIAS
#define UVH_NMI_MMR_SHIFT	63
#define UVH_NMI_MMR_TYPE	"SCRATCH5"
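
/*
 * Illustrative only (the real NMI handling lives outside this header):
 *
 *	if ((uv_read_local_mmr(UVH_NMI_MMR) >> UVH_NMI_MMR_SHIFT) & 1)
 *		... BMC-initiated NMI pending ...
 */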

struct uv_hub_nmi_s {
	raw_spinlock_t	nmi_lock;
	atomic_t	in_nmi;		/* flag this node in UV NMI IRQ */
	atomic_t	cpu_owner;	/* last locker of this struct */
	atomic_t	read_mmr_count;	/* count of MMR reads */
	atomic_t	nmi_count;	/* count of true UV NMIs */
	unsigned long	nmi_value;	/* last value read from NMI MMR */
	bool		hub_present;	/* false means UV hubless system */
	bool		pch_owner;	/* indicates this hub owns PCH */
};

struct uv_cpu_nmi_s {
	struct uv_hub_nmi_s	*hub;
	int			state;
	int			pinging;
	int			queries;
	int			pings;
};

DECLARE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);

#define uv_hub_nmi			this_cpu_read(uv_cpu_nmi.hub)
#define uv_cpu_nmi_per(cpu)		(per_cpu(uv_cpu_nmi, cpu))
#define uv_hub_nmi_per(cpu)		(uv_cpu_nmi_per(cpu).hub)

/* uv_cpu_nmi_states */
#define	UV_NMI_STATE_OUT		0
#define	UV_NMI_STATE_IN			1
#define	UV_NMI_STATE_DUMP		2
#define	UV_NMI_STATE_DUMP_DONE		3

/*
 * Get the minimum revision number of the hub chips within the partition.
 * (See UVx_HUB_REVISION_BASE above for specific values.)
 */
static inline int uv_get_min_hub_revision_id(void)
{
	return uv_hub_info->hub_revision;
}

#endif /* CONFIG_X86_64 */
#endif /* _ASM_X86_UV_UV_HUB_H */