/*
 * Written by Kanoj Sarcar, SGI, Aug 1999
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/spinlock.h>

int numnodes = 1;	/* Initialized for UMA platforms */

static bootmem_data_t contig_bootmem_data;
pg_data_t contig_page_data = { bdata: &contig_bootmem_data };

#ifndef CONFIG_DISCONTIGMEM

/*
 * This is meant to be invoked by platforms whose physical memory starts
 * at a considerably higher value than 0. Examples are Super-H, ARM, m68k.
 * Should be invoked with parameters (0, 0, unsigned long *[], start_paddr).
 */
void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
	unsigned long *zones_size, unsigned long zone_start_paddr,
	unsigned long *zholes_size)
{
	free_area_init_core(0, &contig_page_data, &mem_map, zones_size,
		zone_start_paddr, zholes_size, pmap);
}
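
/*
 * Illustrative sketch only: a contiguous-memory platform whose RAM starts
 * at a nonzero physical address might call the function above from its
 * arch setup code roughly as follows.  START_PADDR, nr_ram_pages and the
 * zone layout are placeholders, not values taken from this file.
 *
 *	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
 *
 *	zones_size[ZONE_DMA] = nr_ram_pages;
 *	free_area_init_node(0, 0, 0, zones_size, START_PADDR, 0);
 */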

#endif /* !CONFIG_DISCONTIGMEM */

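/*
 * Allocate a 2^order block of pages, preferring node @nid.  With
 * CONFIG_NUMA the request is sent to that node's zonelist; on UMA
 * builds it simply falls back to alloc_pages().
 */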
struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int order)
{
#ifdef CONFIG_NUMA
	return __alloc_pages(gfp_mask, order, NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
#else
	return alloc_pages(gfp_mask, order);
#endif
}

#ifdef CONFIG_DISCONTIGMEM

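/* Round a byte count up to the next multiple of sizeof(long). */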
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

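/*
 * Protects show_free_areas_node() output and, on non-NUMA discontig
 * builds, the round-robin starting-node cursor in _alloc_pages().
 */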
static spinlock_t node_lock = SPIN_LOCK_UNLOCKED;

void show_free_areas_node(pg_data_t *pgdat)
{
	unsigned long flags;

	spin_lock_irqsave(&node_lock, flags);
	show_free_areas_core(pgdat);
	spin_unlock_irqrestore(&node_lock, flags);
}

/*
 * Nodes can be initialized in parallel, in no particular order.
 */
void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
	unsigned long *zones_size, unsigned long zone_start_paddr,
	unsigned long *zholes_size)
{
	int i, size = 0;
	struct page *discard;

	if (mem_map == (mem_map_t *)NULL)
		mem_map = (mem_map_t *)PAGE_OFFSET;

	free_area_init_core(nid, pgdat, &discard, zones_size, zone_start_paddr,
		zholes_size, pmap);
	pgdat->node_id = nid;

	/*
	 * Get space for the valid bitmap: one bit per page in the node,
	 * rounded up to a whole number of longs.
	 */
	for (i = 0; i < MAX_NR_ZONES; i++)
		size += zones_size[i];
	size = LONG_ALIGN((size + 7) >> 3);
	pgdat->valid_addr_bitmap = (unsigned long *)alloc_bootmem_node(pgdat, size);
	memset(pgdat->valid_addr_bitmap, 0, size);
}
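
/*
 * Illustrative sketch only: a DISCONTIGMEM platform usually walks its
 * nodes from arch setup code and initializes each one with the function
 * above.  get_node_zone_sizes() and node_start_paddr() are hypothetical
 * helpers standing in for whatever the platform actually uses.
 *
 *	for (nid = 0; nid < numnodes; nid++) {
 *		get_node_zone_sizes(nid, zones_size, zholes_size);
 *		free_area_init_node(nid, NODE_DATA(nid), NULL, zones_size,
 *			node_start_paddr(nid), zholes_size);
 *	}
 */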

static struct page * alloc_pages_pgdat(pg_data_t *pgdat, unsigned int gfp_mask,
	unsigned int order)
{
	return __alloc_pages(gfp_mask, order, pgdat->node_zonelists + (gfp_mask & GFP_ZONEMASK));
}

/*
 * This can be refined.  Currently it tries round robin; instead it
 * should do a concentric-circle search, starting from the current node.
 */
struct page * _alloc_pages(unsigned int gfp_mask, unsigned int order)
{
	struct page *ret = 0;
	pg_data_t *start, *temp;
#ifndef CONFIG_NUMA
	unsigned long flags;
	static pg_data_t *next = 0;
#endif

	if (order >= MAX_ORDER)
		return NULL;
#ifdef CONFIG_NUMA
	/* Start the search at the node the caller is running on. */
	temp = NODE_DATA(numa_node_id());
#else
	/* No locality information: rotate the starting node round robin. */
	spin_lock_irqsave(&node_lock, flags);
	if (!next) next = pgdat_list;
	temp = next;
	next = next->node_next;
	spin_unlock_irqrestore(&node_lock, flags);
#endif
	start = temp;
	/* Try the starting node, then the rest of the node list after it. */
	while (temp) {
		if ((ret = alloc_pages_pgdat(temp, gfp_mask, order)))
			return(ret);
		temp = temp->node_next;
	}
	/* Wrap around and try the nodes before the starting point. */
	temp = pgdat_list;
	while (temp != start) {
		if ((ret = alloc_pages_pgdat(temp, gfp_mask, order)))
			return(ret);
		temp = temp->node_next;
	}
	return(0);
}

#endif /* CONFIG_DISCONTIGMEM */