/*
 * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
 * Adapted for K8/x86-64 Jul 2002 by Andi Kleen.
 */
#ifndef _ASM_MMZONE_H_
#define _ASM_MMZONE_H_

#include <linux/config.h>

typedef struct plat_pglist_data {
	pg_data_t	gendata;
	unsigned long	start_pfn, end_pfn;
} plat_pg_data_t;

struct bootmem_data_t;

/*
 * Following are macros that are specific to this numa platform.
 *
 * XXX check what the compiler generates for all this
 */

extern plat_pg_data_t *plat_node_data[];

#define MAXNODE 8
#define MAX_NUMNODES MAXNODE
#define NODEMAPSIZE 0xff

/* Simple perfect hash to map physical addresses to node numbers */
extern int memnode_shift;
extern u8  memnodemap[NODEMAPSIZE];
extern int maxnode;

#if 0
#define VIRTUAL_BUG_ON(x) do { if (x) out_of_line_bug(); } while(0)
#else
#define VIRTUAL_BUG_ON(x) do {} while (0)
#endif

/* VALID_PAGE below hardcodes the same algorithm */
static inline int phys_to_nid(unsigned long addr)
{
	int nid;
	VIRTUAL_BUG_ON((addr >> memnode_shift) >= NODEMAPSIZE);
	nid = memnodemap[addr >> memnode_shift];
	VIRTUAL_BUG_ON(nid > maxnode);
	return nid;
}
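
/*
 * Example (illustrative only; the shift value below is hypothetical):
 * the "perfect hash" above is a direct-mapped byte table indexed by the
 * upper physical address bits.  If boot code had computed
 * memnode_shift == 24 and filled memnodemap[] from the K8 node address
 * ranges, a lookup would cost one shift plus one byte load:
 *
 *	int nid = phys_to_nid(0x04800000UL);	reads memnodemap[0x04]
 *
 * The real shift is chosen at boot so that no node boundary falls inside
 * a single table slot.
 */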

#define PLAT_NODE_DATA(n)		(plat_node_data[(n)])
#define PLAT_NODE_DATA_STARTNR(n)	\
	(PLAT_NODE_DATA(n)->gendata.node_start_mapnr)
#define PLAT_NODE_DATA_SIZE(n)		(PLAT_NODE_DATA(n)->gendata.node_size)

#define PLAT_NODE_DATA_LOCALNR(p, n) \
	(((p) - PLAT_NODE_DATA(n)->gendata.node_start_paddr) >> PAGE_SHIFT)

#ifdef CONFIG_DISCONTIGMEM

/*
 * Following are macros that each numa implementation must define.
 */

/*
 * Given a kernel address, find the home node of the underlying memory.
 */
#define KVADDR_TO_NID(kaddr)	phys_to_nid(__pa(kaddr))

/*
 * Return a pointer to the node data for node n.
 */
#define NODE_DATA(n)	(&((PLAT_NODE_DATA(n))->gendata))

/*
 * NODE_MEM_MAP gives the kaddr for the mem_map of the node.
 */
#define NODE_MEM_MAP(nid)	(NODE_DATA(nid)->node_mem_map)

/*
 * Given a kaddr, ADDR_TO_MAPBASE finds the owning node of the memory
 * and returns the mem_map of that node.
 */
#define ADDR_TO_MAPBASE(kaddr) \
	NODE_MEM_MAP(KVADDR_TO_NID((unsigned long)(kaddr)))

/*
 * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
 * and returns the kaddr corresponding to the first physical page in the
 * node's mem_map.
 */
#define LOCAL_BASE_ADDR(kaddr) \
	((unsigned long)__va(NODE_DATA(KVADDR_TO_NID(kaddr))->node_start_paddr))

#define LOCAL_MAP_NR(kvaddr) \
	(((unsigned long)(kvaddr)-LOCAL_BASE_ADDR(kvaddr)) >> PAGE_SHIFT)
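
/*
 * Example (illustrative only; the variable names are made up): with no
 * global mem_map, the page descriptor for a direct-mapped kernel address
 * is found by composing the macros above:
 *
 *	int nid = KVADDR_TO_NID(kva);			owning node
 *	struct page *map = ADDR_TO_MAPBASE(kva);	that node's mem_map
 *	struct page *pg  = map + LOCAL_MAP_NR(kva);	page descriptor
 *
 * i.e. the index into the node-local mem_map is the distance, in pages,
 * from the start of the owning node's memory.
 */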

#define BAD_PAGE 0xffffffffffff

/* this really should be optimized a bit */
static inline unsigned long
paddr_to_local_pfn(unsigned long phys_addr, struct page **mem_map, int check)
{
	unsigned long nid;
	if (check) { /* we rely on gcc optimizing this away for most cases */
		unsigned long index = phys_addr >> memnode_shift;
		if (index >= NODEMAPSIZE || memnodemap[index] == 0xff) {
			*mem_map = NULL;
			return BAD_PAGE;
		}
		nid = memnodemap[index];
	} else {
		nid = phys_to_nid(phys_addr);
	}
	plat_pg_data_t *plat_pgdat = plat_node_data[nid];
	unsigned long pfn = phys_addr >> PAGE_SHIFT;
	VIRTUAL_BUG_ON(pfn >= plat_pgdat->end_pfn);
	VIRTUAL_BUG_ON(pfn < plat_pgdat->start_pfn);
	*mem_map = plat_pgdat->gendata.node_mem_map;
	return pfn - plat_pgdat->start_pfn;
}
#define virt_to_page(kaddr) \
	({ struct page *lmemmap; \
	   unsigned long lpfn = paddr_to_local_pfn(__pa(kaddr), &lmemmap, 0); \
	   lmemmap + lpfn; })

/* needs to handle bad addresses too */
#define pte_page(pte) \
	({ struct page *lmemmap; \
	   unsigned long addr = pte_val(pte) & PHYSICAL_PAGE_MASK; \
	   unsigned long lpfn = paddr_to_local_pfn(addr, &lmemmap, 1); \
	   lmemmap + lpfn; })

#define pfn_to_page(pfn)	virt_to_page(__va((unsigned long)(pfn) << PAGE_SHIFT))
#define page_to_pfn(page) ({ \
	int nodeid = phys_to_nid(__pa(page)); \
	plat_pg_data_t *nd = PLAT_NODE_DATA(nodeid); \
	(page - nd->gendata.node_mem_map) + nd->start_pfn; \
})
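
/*
 * Note: page_to_pfn() above hashes the address of the struct page itself,
 * so it relies on each node's mem_map having been allocated from that
 * node's own memory; otherwise phys_to_nid(__pa(page)) would pick the
 * wrong pg_data_t.  Round trip, with a purely illustrative pfn:
 *
 *	struct page *pg  = pfn_to_page(0x1234UL);
 *	unsigned long pfn = page_to_pfn(pg);	pfn == 0x1234 again
 */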

#define VALID_PAGE(page_ptr) ({ \
	int ok = 0; \
	unsigned long phys = __pa(page_ptr); \
	unsigned long index = phys >> memnode_shift; \
	if (index < NODEMAPSIZE) { \
		unsigned nodeid = memnodemap[index]; \
		if (nodeid != 0xff) { \
			pg_data_t *nd = NODE_DATA(nodeid); \
			struct page *lmemmap = nd->node_mem_map; \
			ok = (page_ptr >= lmemmap && \
			      page_ptr < lmemmap + nd->node_size); \
		} \
	} \
	ok; \
})
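
/*
 * Example (illustrative only): callers use VALID_PAGE() as a guard before
 * touching a page descriptor that might not correspond to managed RAM;
 * it repeats the memnodemap lookup and then bounds-checks the pointer
 * against that node's mem_map:
 *
 *	if (!VALID_PAGE(pg))
 *		return;		pg does not point into any node's mem_map
 */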

#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)

extern void setup_node_bootmem(int nodeid, unsigned long start_, unsigned long end_);

#ifdef CONFIG_NUMA
extern int fake_node;
#define cputonode(cpu)	(fake_node ? 0 : (cpu))
#define numa_node_id()	cputonode(smp_processor_id())
#endif /* CONFIG_NUMA */
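
/*
 * Note: cputonode() encodes a 1:1 CPU-to-node mapping for this platform;
 * with NUMA emulation (fake_node set) every CPU reports node 0.  E.g.:
 *
 *	int nid = numa_node_id();	home node of the current CPU,
 *					always 0 when fake_node is nonzero
 */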

#define MAX_NR_NODES 8

#endif /* CONFIG_DISCONTIGMEM */

#endif /* _ASM_MMZONE_H_ */