1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * This file contains NUMA specific variables and functions which can
7  * be split away from DISCONTIGMEM and are used on NUMA machines with
8  * contiguous memory.
9  *
10  *                         2002/08/07 Erich Focht <efocht@ess.nec.de>
11  */
12 
13 #include <linux/config.h>
14 #include <linux/kernel.h>
15 #include <linux/mm.h>
16 #include <linux/init.h>
17 #include <linux/bootmem.h>
18 #include <linux/mmzone.h>
19 #include <linux/smp.h>
20 #include <asm/numa.h>
21 
/*
 * The following structures are usually initialized by ACPI or
 * similar mechanisms and describe the NUMA characteristics of the machine.
 */
int num_memblks = 0;		/* number of valid entries in node_memblk[] */
struct node_memblk_s node_memblk[NR_MEMBLKS];	/* physical memory ranges, each tagged with its owning node (nid) */
struct node_cpuid_s node_cpuid[NR_CPUS];	/* per-CPU (phys_id, nid) pairs; searched by build_cpu_to_node_map() */
/*
 * This is a matrix with "distances" between nodes, they should be
 * proportional to the memory access latency ratios.
 * Stored flattened: entry (i, j) lives at numa_slit[i * NR_NODES + j].
 */
u8 numa_slit[NR_NODES * NR_NODES];
34 
35 /* Identify which cnode a physical address resides on */
36 int
paddr_to_nid(unsigned long paddr)37 paddr_to_nid(unsigned long paddr)
38 {
39 	int	i;
40 
41 	for (i = 0; i < num_memblks; i++)
42 		if (paddr >= node_memblk[i].start_paddr &&
43 		    paddr < node_memblk[i].start_paddr + node_memblk[i].size)
44 			break;
45 
46 	return (i < num_memblks) ? node_memblk[i].nid : (num_memblks ? -1 : 0);
47 }
48 
49 /* return end addr of a memblk */
50 unsigned long
memblk_endpaddr(unsigned long paddr)51 memblk_endpaddr(unsigned long paddr)
52 {
53 	int	i;
54 
55 	for (i = 0; i < num_memblks; i++)
56 		if (paddr >= node_memblk[i].start_paddr &&
57 		    paddr < node_memblk[i].start_paddr + node_memblk[i].size)
58 			return node_memblk[i].start_paddr + node_memblk[i].size;
59 
60 	return 0;
61 }
62 
63 
/*
 * on which node is each logical CPU (one cacheline even for 64 CPUs);
 * -1 when the CPU has no known node (see build_cpu_to_node_map()).
 * NOTE(review): plain `char` signedness is implementation-defined; the
 * -1 sentinel assumes signed char here — confirm readers compare
 * accordingly.
 */
volatile char cpu_to_node_map[NR_CPUS] __cacheline_aligned;

/* which logical CPUs are on which nodes (bit per CPU, word per node) */
volatile unsigned long node_to_cpu_mask[NR_NODES]  __cacheline_aligned;
69 
/*
 * Build cpu to node mapping and initialize the per node cpu masks.
 *
 * Fills cpu_to_node_map[] (node id, or -1 if unknown) and sets the
 * per-node CPU bits in node_to_cpu_mask[], using the ACPI-provided
 * node_cpuid[] table to translate each CPU's SAPIC id to a node.
 */
void __init
build_cpu_to_node_map (void)
{
	int cpu, i, node;

	for(cpu = 0; cpu < NR_CPUS; ++cpu) {
		/*
		 * All Itanium NUMA platforms I know use ACPI, so maybe we
		 * can drop this ifdef completely.                    [EF]
		 */
#ifdef CONFIG_SMP
# ifdef CONFIG_ACPI_NUMA
		/*
		 * Linear search of node_cpuid[] for an entry whose
		 * physical (SAPIC) id matches this logical CPU's;
		 * node stays -1 when there is no match.
		 */
		node = -1;
		for (i = 0; i < NR_CPUS; ++i) {
			extern volatile int ia64_cpu_to_sapicid[];
			if (ia64_cpu_to_sapicid[cpu] == node_cpuid[i].phys_id) {
				node = node_cpuid[i].nid;
				break;
			}
		}
# else
#		error Fixme: Dunno how to build CPU-to-node map.
# endif
		cpu_to_node_map[cpu] = node;
		if (node >= 0)
			__set_bit(cpu, &node_to_cpu_mask[node]);
#else
		/*
		 * UP build: mark CPU 0 as belonging to node 0.
		 * NOTE(review): this executes on every loop iteration and
		 * never initializes cpu_to_node_map[] — harmless (the bit
		 * set is idempotent) but worth confirming intent.
		 */
			__set_bit(0, &node_to_cpu_mask[0]);
#endif
	}
}
104 
105