/*
 * mm/percpu-vm.c - vmalloc area based chunk allocation
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * Chunks are mapped into vmalloc areas and populated page by page.
 * This is the default chunk allocator.
 */

static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
                                    unsigned int cpu, int page_idx)
{
        /* must not be used on pre-mapped chunk */
        WARN_ON(chunk->immutable);

        return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}

/**
 * pcpu_get_pages - get temp pages array
 * @chunk_alloc: chunk of interest
 *
 * Returns pointer to array of pointers to struct page which can be indexed
 * with pcpu_page_idx().  Note that there is only one array and accesses
 * should be serialized by pcpu_alloc_mutex.
 *
 * RETURNS:
 * Pointer to temp pages array on success.
 */
static struct page **pcpu_get_pages(struct pcpu_chunk *chunk_alloc)
{
        static struct page **pages;
        size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);

        lockdep_assert_held(&pcpu_alloc_mutex);

        if (!pages)
                pages = pcpu_mem_zalloc(pages_size);
        return pages;
}

/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start, @page_end) in @pages for all units.
 * The pages were allocated for @chunk.
 */
static void pcpu_free_pages(struct pcpu_chunk *chunk,
                            struct page **pages, int page_start, int page_end)
{
        unsigned int cpu;
        int i;

        for_each_possible_cpu(cpu) {
                for (i = page_start; i < page_end; i++) {
                        struct page *page = pages[pcpu_page_idx(cpu, i)];

                        if (page)
                                __free_page(page);
                }
        }
}

/**
 * pcpu_alloc_pages - allocates pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
                            struct page **pages, int page_start, int page_end)
{
        const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
        unsigned int cpu, tcpu;
        int i;

        for_each_possible_cpu(cpu) {
                for (i = page_start; i < page_end; i++) {
                        struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

                        *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
                        if (!*pagep)
                                goto err;
                }
        }
        return 0;

err:
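        /*
         * Unwind the partial allocation: free the pages already
         * allocated for the failing cpu, then the full
         * [page_start, page_end) range of every cpu that preceded it.
         */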
        while (--i >= page_start)
                __free_page(pages[pcpu_page_idx(cpu, i)]);

        for_each_possible_cpu(tcpu) {
                if (tcpu == cpu)
                        break;
                for (i = page_start; i < page_end; i++)
                        __free_page(pages[pcpu_page_idx(tcpu, i)]);
        }
        return -ENOMEM;
}

/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start,@page_end) of @chunk are about to be
 * unmapped.  Flush cache.  As each flush can be very expensive,
 * the flush is issued on the whole region at once rather than
 * for each cpu.  This may be overkill but is more scalable.
 */
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
                                 int page_start, int page_end)
{
        flush_cache_vunmap(
                pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
                pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

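/*
 * Unmap @nr_pages pages starting at @addr without any flushing.  The
 * callers issue the cache flush beforehand (pcpu_pre_unmap_flush()) and
 * the TLB flush afterwards (pcpu_post_unmap_tlb_flush()).
 */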
static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
        unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
}

/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which can be used to pass information to free
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * Corresponding elements in @pages were cleared by the caller and can
 * be used to carry information to pcpu_free_pages() which will be
 * called after all unmaps are finished.  The caller should call
 * proper pre/post flush functions.
 */
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
                             struct page **pages, int page_start, int page_end)
{
        unsigned int cpu;
        int i;

        for_each_possible_cpu(cpu) {
                for (i = page_start; i < page_end; i++) {
                        struct page *page;

                        page = pcpu_chunk_page(chunk, cpu, i);
                        WARN_ON(!page);
                        pages[pcpu_page_idx(cpu, i)] = page;
                }
                __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
                                   page_end - page_start);
        }
}

/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
 * TLB for the regions.  This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing is also done at once
 * for the whole region.
 */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
                                      int page_start, int page_end)
{
        flush_tlb_kernel_range(
                pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
                pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

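/*
 * Map @nr_pages pages at @addr with PAGE_KERNEL protections.  As with
 * __pcpu_unmap_pages(), no flushing is done here; the caller follows up
 * with pcpu_post_map_flush().
 */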
static int __pcpu_map_pages(unsigned long addr, struct page **pages,
                            int nr_pages)
{
        return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
                                        PAGE_KERNEL, pages);
}

/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing pages to be mapped
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.  The
 * caller is responsible for calling pcpu_post_map_flush() after all
 * mappings are complete.
 *
 * This function is responsible for setting up whatever is necessary for
 * reverse lookup (addr -> chunk).
 */
static int pcpu_map_pages(struct pcpu_chunk *chunk,
                          struct page **pages, int page_start, int page_end)
{
        unsigned int cpu, tcpu;
        int i, err;

        for_each_possible_cpu(cpu) {
                err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
                                       &pages[pcpu_page_idx(cpu, page_start)],
                                       page_end - page_start);
                if (err < 0)
                        goto err;

                for (i = page_start; i < page_end; i++)
                        pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
                                            chunk);
        }
        return 0;
err:
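        /*
         * Mapping failed for @cpu; undo the mappings already established
         * for the cpus that came before it.
         */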
        for_each_possible_cpu(tcpu) {
                if (tcpu == cpu)
                        break;
                __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
                                   page_end - page_start);
        }
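        /*
         * The ranges just unmapped may still have live TLB entries and no
         * later flush will cover them, so flush before the pages go back
         * to the allocator.
         */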
        pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
        return err;
}

/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
 * cache.
 *
 * As with pcpu_pre_unmap_flush(), the cache flush is done at once
 * for the whole region.
 */
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
                                int page_start, int page_end)
{
        flush_cache_vmap(
                pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
                pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map the pages covering [@off, @off + @size)
 * into @chunk.  The area is cleared on return.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
        int page_start = PFN_DOWN(off);
        int page_end = PFN_UP(off + size);
        int free_end = page_start, unmap_end = page_start;
        struct page **pages;
        unsigned int cpu;
        int rs, re, rc;

        /* quick path, check whether all pages are already there */
        rs = page_start;
        pcpu_next_pop(chunk, &rs, &re, page_end);
        if (rs == page_start && re == page_end)
                goto clear;

        /* need to allocate and map pages, this chunk can't be immutable */
        WARN_ON(chunk->immutable);

        pages = pcpu_get_pages(chunk);
        if (!pages)
                return -ENOMEM;

        /* alloc and map */
        pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
                rc = pcpu_alloc_pages(chunk, pages, rs, re);
                if (rc)
                        goto err_free;
                free_end = re;
        }

        pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
                rc = pcpu_map_pages(chunk, pages, rs, re);
                if (rc)
                        goto err_unmap;
                unmap_end = re;
        }
        pcpu_post_map_flush(chunk, page_start, page_end);

        bitmap_set(chunk->populated, page_start, page_end - page_start);
clear:
        for_each_possible_cpu(cpu)
                memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
        return 0;

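        /*
         * Roll back in reverse order: unmap only the regions that were
         * successfully mapped (up to unmap_end), bracketed by the required
         * cache and TLB flushes, then free every page allocated so far
         * (up to free_end).
         */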
err_unmap:
        pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
        pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
                pcpu_unmap_pages(chunk, pages, rs, re);
        pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
err_free:
        pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
                pcpu_free_pages(chunk, pages, rs, re);
        return rc;
}

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 *
 * For each cpu, depopulate and unmap the pages covering
 * [@off, @off + @size) from @chunk.  The vcache is flushed before
 * unmapping; the TLB flush is left to vmalloc's lazy flushing.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
        int page_start = PFN_DOWN(off);
        int page_end = PFN_UP(off + size);
        struct page **pages;
        int rs, re;

        /* quick path, check whether it's empty already */
        rs = page_start;
        pcpu_next_unpop(chunk, &rs, &re, page_end);
        if (rs == page_start && re == page_end)
                return;

        /* immutable chunks can't be depopulated */
        WARN_ON(chunk->immutable);

        /*
         * If control reaches here, there must have been at least one
         * successful population attempt so the temp pages array must
         * be available now.
         */
        pages = pcpu_get_pages(chunk);
        BUG_ON(!pages);

        /* unmap and free */
        pcpu_pre_unmap_flush(chunk, page_start, page_end);

        pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
                pcpu_unmap_pages(chunk, pages, rs, re);

        /* no need to flush tlb, vmalloc will handle it lazily */

        pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
                pcpu_free_pages(chunk, pages, rs, re);

        bitmap_clear(chunk->populated, page_start, page_end - page_start);
}

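/*
 * Create a new chunk backed by congruent vmalloc areas, one per group.
 * pcpu_get_vm_areas() only reserves the address space; the pages are
 * allocated and mapped later, on demand, by pcpu_populate_chunk().
 * base_addr is biased by the first group's offset so that unit offsets
 * can be added to it directly.
 */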
static struct pcpu_chunk *pcpu_create_chunk(void)
{
        struct pcpu_chunk *chunk;
        struct vm_struct **vms;

        chunk = pcpu_alloc_chunk();
        if (!chunk)
                return NULL;

        vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
                                pcpu_nr_groups, pcpu_atom_size);
        if (!vms) {
                pcpu_free_chunk(chunk);
                return NULL;
        }

        chunk->data = vms;
        chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
        return chunk;
}

static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
        if (chunk && chunk->data)
                pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
        pcpu_free_chunk(chunk);
}

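/*
 * For vmalloc-backed chunks the reverse lookup from an address to its
 * struct page (and from there, via the pointer installed by
 * pcpu_set_page_chunk(), to the owning chunk) goes through
 * vmalloc_to_page().
 */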
static struct page *pcpu_addr_to_page(void *addr)
{
        return vmalloc_to_page(addr);
}

static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
        /* no extra restriction */
        return 0;
}