/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
 */
#ifndef _ASM_IA64_SN_PDA_H
#define _ASM_IA64_SN_PDA_H

#include <linux/config.h>
#include <linux/cache.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <linux/mmzone.h>
#include <asm/sn/bte.h>

/*
 * CPU-specific data structure.
 *
 * One of these structures is allocated for each cpu of a NUMA system.
 *
 * This structure provides a convenient way of keeping together
 * all SN per-cpu data structures.
 */

typedef struct pda_s {

	/* Having a pointer at the beginning of the PDA tends to increase
	 * the chance of this pointer being in cache. (Yes, something
	 * else gets pushed out.) Doing this reduces the number of memory
	 * accesses for all nodepda variables to one.
	 */
	struct nodepda_s *p_nodepda;		/* Pointer to per-node PDA */
	struct subnodepda_s *p_subnodepda;	/* Pointer to CPU  subnode PDA */

	/*
	 * Support for SN LEDs
	 */
	volatile short	*led_address;
	u8		led_state;
	u8		hb_state;	/* supports blinking heartbeat leds */
	u8		shub_1_1_found;
	unsigned int	hb_count;

	unsigned int	idle_flag;

	volatile unsigned long *bedrock_rev_id;
	volatile unsigned long *pio_write_status_addr;
	volatile unsigned long *pio_shub_war_cam_addr;
	volatile unsigned long *mem_write_status_addr;

	struct bteinfo_s *cpu_bte_if[BTES_PER_NODE];	/* cpu interface order */

	unsigned long	sn_soft_irr[4];
	unsigned long	sn_in_service_ivecs[4];
	short		cnodeid_to_nasid_table[NR_NODES];
	int		sn_lb_int_war_ticks;
	int		sn_last_irq;
	int		sn_first_irq;
} pda_t;
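
/*
 * Illustrative access pattern (a sketch, not part of this header): because
 * p_nodepda is the first member, reaching a per-node variable costs a single
 * pointer load once the PDA's first cache line is resident, e.g. (using the
 * pda accessor defined below):
 *
 *	struct nodepda_s *npda = pda.p_nodepda;
 */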


#define CACHE_ALIGN(x)	(((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
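
/*
 * Worked example (illustrative only, assuming SMP_CACHE_BYTES is a power of
 * two, e.g. 128): CACHE_ALIGN rounds its argument up to the next cache-line
 * multiple, so CACHE_ALIGN(1) == 128, CACHE_ALIGN(128) == 128 and
 * CACHE_ALIGN(129) == 256.
 */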

/*
 * PDA
 * Per-cpu private data area for each cpu. The PDA is located immediately after
 * the IA64 cpu_data area. A full page is allocated for the cpu_data area for
 * each cpu, but only a small amount of the page is actually used. We put the
 * SNIA PDA in the same page as the cpu_data area. Note that there is a check
 * in the setup code to verify that we don't overflow the page.
 *
 * It seems like we should cache-line align the PDA so that any changes in the
 * size of the cpu_data area don't change the cache layout. Should we align to
 * a 32-, 64-, 128-, or 512-byte boundary? Each has merits. For now, pick 128,
 * but this should be revisited later.
 */
#define CPU_DATA_END	CACHE_ALIGN((long)&(((struct cpuinfo_ia64*)0)->platform_specific))
#define PDAADDR		(PERCPU_ADDR+CPU_DATA_END)

#define pda		(*((pda_t *) PDAADDR))

#define pdacpu(cpu)	(*((pda_t *) ((long)cpu_data(cpu) + CPU_DATA_END)))
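
/*
 * Minimal usage sketch (illustrative only): "pda" names the running CPU's
 * private data area, while pdacpu(cpu) reaches another CPU's, e.g.:
 *
 *	pda.led_state = 0;			(this CPU)
 *	int last = pdacpu(1).sn_last_irq;	(CPU 1)
 */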

/*
 * Use this macro to test whether the SHUB 1.1 workarounds (wars) should be
 * enabled.
 */
#define enable_shub_wars_1_1()	(pda.shub_1_1_found)
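
/*
 * Typical (illustrative) use in a workaround code path:
 *
 *	if (enable_shub_wars_1_1()) {
 *		... apply the SHUB 1.1 workaround ...
 *	}
 */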


#endif /* _ASM_IA64_SN_PDA_H */