/*
 * sparse memory mappings.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/dma.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
        ____cacheline_maxaligned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_maxaligned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
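
/*
 * Layout note (illustrative, not tied to any particular config): with
 * SPARSEMEM_EXTREME the top level is just NR_SECTION_ROOTS pointers,
 * and a root's roughly page-sized block of mem_section entries is only
 * allocated once a section under it is actually used, so a huge but
 * sparsely populated physical address space pays one pointer per root
 * up front instead of a full mem_section per possible section.  In the
 * flat case SECTIONS_PER_ROOT is 1, so the two-dimensional array above
 * degenerates to a plain mem_section[NR_MEM_SECTIONS] and
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS, which the comment before
 * __section_nr() below relies on.
 */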

#ifdef CONFIG_SPARSEMEM_EXTREME
/* Allocate one root's worth of zeroed mem_section entries on @nid. */
static struct mem_section *sparse_index_alloc(int nid)
{
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);

        section = alloc_bootmem_node(NODE_DATA(nid), array_size);

        if (section)
                memset(section, 0, array_size);

        return section;
}

static int sparse_index_init(unsigned long section_nr, int nid)
{
        static spinlock_t index_init_lock = SPIN_LOCK_UNLOCKED;
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;
        int ret = 0;

        if (mem_section[root])
                return -EEXIST;

        section = sparse_index_alloc(nid);
        /*
         * This lock keeps two different callers from installing
         * a root for the same index at the same time.
         */
        spin_lock(&index_init_lock);

        if (mem_section[root]) {
                ret = -EEXIST;
                goto out;
        }

        mem_section[root] = section;
out:
        spin_unlock(&index_init_lock);
        return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif
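
/*
 * Locking note on the pattern above: the unlocked mem_section[root]
 * check in sparse_index_init() is only an optimistic fast path.  The
 * allocation happens before index_init_lock is taken, presumably to
 * keep the allocator call out of the critical section; the check is
 * then repeated under the lock so that exactly one caller installs
 * the root.  A caller that loses the race leaks its freshly allocated
 * section array, which is tolerable for early-boot bootmem.
 */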

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section* ms)
{
        unsigned long root_nr;
        struct mem_section* root;

        /* Walk the roots until we find the one that contains @ms. */
        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
                root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);

                if (!root)
                        continue;

                if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
                        break;
        }

        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
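
/*
 * Worked example (illustrative numbers only): if SECTIONS_PER_ROOT
 * were 256, the entry mem_section[2][5] would satisfy the range check
 * for root_nr == 2, and __section_nr() would return 2 * 256 + 5 == 517
 * -- the inverse of __nr_to_section(517), which looks up
 * mem_section[517 / 256][517 & 255].
 */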

/* Record a memory area against a node. */
void memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        start &= PAGE_SECTION_MASK;
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map)
                        ms->section_mem_map = SECTION_MARKED_PRESENT;
        }
}
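
/*
 * Typical use (sketch; for_each_node_memory_range is a hypothetical
 * helper, not a real kernel API): architecture code calls this once
 * per physical memory range while parsing the firmware memory map at
 * boot, before sparse_init() runs, e.g.:
 *
 *	for_each_node_memory_range(nid, start_pfn, end_pfn)
 *		memory_present(nid, start_pfn, end_pfn);
 *
 * start and end are page frame numbers; start is rounded down to a
 * section boundary by the PAGE_SECTION_MASK operation above.
 */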

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                                     unsigned long end_pfn)
{
        unsigned long pfn;
        unsigned long nr_pages = 0;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                if (nid != early_pfn_to_nid(pfn))
                        continue;

                if (pfn_valid(pfn))
                        nr_pages += PAGES_PER_SECTION;
        }

        return nr_pages * sizeof(struct page);
}
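
/*
 * Size example (illustrative, assuming 4K pages, 64MB sections and a
 * 32-byte struct page): PAGES_PER_SECTION would be 16384, so a node
 * with 16 valid sections would need 16 * 16384 * 32 bytes == 8MB of
 * mem_map.
 */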

/*
 * Subtle: we encode the real pfn into the mem_map such that the
 * identity "page - section_mem_map" yields that page's actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * We need this if we ever free the mem_maps.  While not implemented yet,
 * this function is included for parity with its sibling.
 */
static __attribute((unused))
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
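
/*
 * Encode/decode example (illustrative numbers): suppose section pnum
 * starts at pfn 0x10000 and its mem_map lives at virtual address M.
 * sparse_encode_mem_map() stores the pointer value M - 0x10000
 * (pointer arithmetic in struct page units), so for any pfn in the
 * section, coded_mem_map + pfn lands on that pfn's struct page;
 * sparse_decode_mem_map() recovers M by adding the section's first
 * pfn back.
 */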

/* Attach an encoded mem_map to a section already marked present. */
static int sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map)
{
        if (!valid_section(ms))
                return -EINVAL;

        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum);

        return 1;
}

static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
{
        struct page *map;
        int nid = early_pfn_to_nid(section_nr_to_pfn(pnum));
        struct mem_section *ms = __nr_to_section(pnum);

        map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        map = alloc_bootmem_node(NODE_DATA(nid),
                        sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
        ms->section_mem_map = 0;
        return NULL;
}
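
/*
 * Allocation order note: alloc_remap() gives architectures that keep a
 * remapped kernel mapping for the mem_map (e.g. i386 NUMA remapping)
 * first shot; everyone else falls back to node-local bootmem.  On
 * failure, zeroing section_mem_map also clears SECTION_MARKED_PRESENT,
 * so pfn_valid() reports the section invalid rather than leaving it
 * half-initialized.
 */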

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void sparse_init(void)
{
        unsigned long pnum;
        struct page *map;

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                if (!valid_section_nr(pnum))
                        continue;

                map = sparse_early_mem_map_alloc(pnum);
                if (!map)
                        continue;
                sparse_init_one_section(__nr_to_section(pnum), pnum, map);
        }
}
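
/*
 * Boot-time flow (summary): memory_present() marks sections present
 * while the firmware memory map is parsed, then sparse_init() walks
 * every possible section number, allocates a mem_map only for the
 * sections that were marked, and encodes each map into its
 * mem_section entry so that pfn_to_page()/page_to_pfn() work from
 * then on.
 */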

/*
 * returns the number of sections whose mem_maps were properly
 * set.  If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int sparse_add_one_section(unsigned long start_pfn, int nr_pages, struct page *map)
{
        struct mem_section *ms = __pfn_to_section(start_pfn);

        if (ms->section_mem_map & SECTION_MARKED_PRESENT)
                return -EEXIST;

        ms->section_mem_map |= SECTION_MARKED_PRESENT;

        return sparse_init_one_section(ms, pfn_to_section_nr(start_pfn), map);
}
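
/*
 * Usage sketch (hypothetical caller, illustrative only): a memory
 * hotplug path would allocate a mem_map for the new section itself
 * and hand it in, freeing it again if the section was not consumed:
 *
 *	map = vmalloc(sizeof(struct page) * PAGES_PER_SECTION);
 *	ret = sparse_add_one_section(start_pfn, PAGES_PER_SECTION, map);
 *	if (ret <= 0)
 *		vfree(map);
 *
 * which matches the "map was not consumed and must be freed" contract
 * in the comment above.
 */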