/*
 * mm/percpu-vm.c - vmalloc area based chunk allocation
 *
 * Copyright (C) 2010 SUSE Linux Products GmbH
 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * Chunks are mapped into vmalloc areas and populated page by page.
 * This is the default chunk allocator.
 */

static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
                                    unsigned int cpu, int page_idx)
{
        /* must not be used on pre-mapped chunk */
        WARN_ON(chunk->immutable);

        return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}

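/*
 * Sanity note: this lookup works only because dynamically populated
 * chunks live in vmalloc space and are mapped page by page.  The
 * first, immutable chunk may be served from the kernel linear mapping
 * instead, where this page-table walk is not meaningful - hence the
 * WARN_ON() above.
 */
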
/**
 * pcpu_get_pages - get temp pages array
 * @chunk: chunk of interest
 * @may_alloc: may allocate the array
 *
 * Returns pointer to array of pointers to struct page which can be indexed
 * with pcpu_page_idx().  Note that there is only one array and access
 * exclusion is the caller's responsibility.
 *
 * RETURNS:
 * Pointer to temp pages array on success.
 */
static struct page **pcpu_get_pages(struct pcpu_chunk *chunk, bool may_alloc)
{
        static struct page **pages;
        size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);

        if (!pages && may_alloc)
                pages = pcpu_mem_zalloc(pages_size);
        return pages;
}
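
/*
 * Illustrative only (not an additional API): populate paths request the
 * array with may_alloc = true and must handle allocation failure, while
 * depopulate paths pass false because a prior successful population
 * guarantees the array already exists:
 *
 *      pages = pcpu_get_pages(chunk, true);    (may allocate)
 *      pages = pcpu_get_pages(chunk, false);   (must already exist)
 */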

/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start, @page_end) in @pages for all units.
 * The pages were allocated for @chunk.
 */
static void pcpu_free_pages(struct pcpu_chunk *chunk,
                            struct page **pages, int page_start, int page_end)
{
        unsigned int cpu;
        int i;

        for_each_possible_cpu(cpu) {
                for (i = page_start; i < page_end; i++) {
                        struct page *page = pages[pcpu_page_idx(cpu, i)];

                        if (page)
                                __free_page(page);
                }
        }
}

/**
 * pcpu_alloc_pages - allocate pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
                            struct page **pages, int page_start, int page_end)
{
        const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
        unsigned int cpu, tcpu;
        int i;

        for_each_possible_cpu(cpu) {
                for (i = page_start; i < page_end; i++) {
                        struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

                        *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
                        if (!*pagep)
                                goto err;
                }
        }
        return 0;

err:
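        /*
         * Unwind in two steps: first drop the pages already allocated
         * for the cpu that failed mid-range, then free the full
         * [page_start, page_end) range for every cpu completed before it.
         */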
        while (--i >= page_start)
                __free_page(pages[pcpu_page_idx(cpu, i)]);

        for_each_possible_cpu(tcpu) {
                if (tcpu == cpu)
                        break;
                for (i = page_start; i < page_end; i++)
                        __free_page(pages[pcpu_page_idx(tcpu, i)]);
        }
        return -ENOMEM;
}

/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start,@page_end) of @chunk are about to be
 * unmapped.  Flush cache.  As each flushing trial can be very
 * expensive, issue flush on the whole region at once rather than
 * doing it for each cpu.  This could be overkill but is more
 * scalable.
 */
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
                                 int page_start, int page_end)
{
        flush_cache_vunmap(
                pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
                pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

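/*
 * A single flush from the lowest to the highest unit address covers
 * every unit in the chunk (including any unused holes between units)
 * in one call, instead of one flush per possible cpu.
 */
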
static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
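        /* no flushing here; callers issue the cache/TLB flushes around this */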
        unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
}

/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which can be used to pass information to free
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * Corresponding elements in @pages were cleared by the caller and can
 * be used to carry information to pcpu_free_pages() which will be
 * called after all unmaps are finished.  The caller should call
 * proper pre/post flush functions.
 */
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
                             struct page **pages, int page_start, int page_end)
{
        unsigned int cpu;
        int i;

        for_each_possible_cpu(cpu) {
                for (i = page_start; i < page_end; i++) {
                        struct page *page;

                        page = pcpu_chunk_page(chunk, cpu, i);
                        WARN_ON(!page);
                        pages[pcpu_page_idx(cpu, i)] = page;
                }
                __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
                                   page_end - page_start);
        }
}

/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
 * TLB for the regions.  This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing is also done at once
 * for the whole region.
 */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
                                      int page_start, int page_end)
{
        flush_tlb_kernel_range(
                pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
                pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

static int __pcpu_map_pages(unsigned long addr, struct page **pages,
                            int nr_pages)
{
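        /* flushing is deferred to pcpu_post_map_flush() by the callers */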
        return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
                                        PAGE_KERNEL, pages);
}

/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing pages to be mapped
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.  The
 * caller is responsible for calling pcpu_post_map_flush() after all
 * mappings are complete.
 *
 * This function is responsible for setting up whatever is necessary for
 * reverse lookup (addr -> chunk).
 */
static int pcpu_map_pages(struct pcpu_chunk *chunk,
                          struct page **pages, int page_start, int page_end)
{
        unsigned int cpu, tcpu;
        int i, err;

        for_each_possible_cpu(cpu) {
                err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
                                       &pages[pcpu_page_idx(cpu, page_start)],
                                       page_end - page_start);
                if (err < 0)
                        goto err;

                for (i = page_start; i < page_end; i++)
                        pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
                                            chunk);
        }
        return 0;
Tejun Heo9f645532010-04-09 18:57:01 +0900229err:
230 for_each_possible_cpu(tcpu) {
231 if (tcpu == cpu)
232 break;
233 __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
234 page_end - page_start);
235 }
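        /*
         * The partial mappings torn down above may already have been
         * visible to other cpus, so the TLB must be flushed even on
         * this failure path before the pages are reused.
         */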
        pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
        return err;
}

/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
 * cache.
 *
 * As with pcpu_pre_unmap_flush(), the cache flush is also done at once
 * for the whole region.
 */
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
                                int page_start, int page_end)
{
        flush_cache_vmap(
                pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
                pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
        int page_start = PFN_DOWN(off);
        int page_end = PFN_UP(off + size);
        int free_end = page_start, unmap_end = page_start;
        struct page **pages;
        unsigned int cpu;
        int rs, re, rc;

        /* quick path, check whether all pages are already there */
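        /*
         * pcpu_next_pop() below advances rs/re to the first populated
         * region at or after page_start; if that region spans exactly
         * [page_start, page_end), nothing needs allocating or mapping.
         */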
        rs = page_start;
        pcpu_next_pop(chunk, &rs, &re, page_end);
        if (rs == page_start && re == page_end)
                goto clear;

        /* need to allocate and map pages, this chunk can't be immutable */
        WARN_ON(chunk->immutable);

        pages = pcpu_get_pages(chunk, true);
        if (!pages)
                return -ENOMEM;

        /* alloc and map */
        pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
                rc = pcpu_alloc_pages(chunk, pages, rs, re);
                if (rc)
                        goto err_free;
                free_end = re;
        }

        pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
                rc = pcpu_map_pages(chunk, pages, rs, re);
                if (rc)
                        goto err_unmap;
                unmap_end = re;
        }
        pcpu_post_map_flush(chunk, page_start, page_end);

        bitmap_set(chunk->populated, page_start, page_end - page_start);
clear:
        for_each_possible_cpu(cpu)
                memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
        return 0;

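        /*
         * Error unwinding mirrors the forward order: flush and unmap
         * whatever was mapped up to unmap_end, then free everything
         * allocated up to free_end.
         */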
err_unmap:
        pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
        pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
                pcpu_unmap_pages(chunk, pages, rs, re);
        pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
err_free:
        pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
                pcpu_free_pages(chunk, pages, rs, re);
        return rc;
}

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  The cache is flushed before unmapping; the TLB flush
 * is left to vmalloc's lazy flushing.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
        int page_start = PFN_DOWN(off);
        int page_end = PFN_UP(off + size);
        struct page **pages;
        int rs, re;

        /* quick path, check whether it's empty already */
        rs = page_start;
        pcpu_next_unpop(chunk, &rs, &re, page_end);
        if (rs == page_start && re == page_end)
                return;

        /* immutable chunks can't be depopulated */
        WARN_ON(chunk->immutable);

        /*
         * If control reaches here, there must have been at least one
         * successful population attempt so the temp pages array must
         * be available now.
         */
        pages = pcpu_get_pages(chunk, false);
        BUG_ON(!pages);

        /* unmap and free */
        pcpu_pre_unmap_flush(chunk, page_start, page_end);

        pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
                pcpu_unmap_pages(chunk, pages, rs, re);

        /* no need to flush tlb, vmalloc will handle it lazily */

        pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
                pcpu_free_pages(chunk, pages, rs, re);

        bitmap_clear(chunk->populated, page_start, page_end - page_start);
}

static struct pcpu_chunk *pcpu_create_chunk(void)
{
        struct pcpu_chunk *chunk;
        struct vm_struct **vms;

        chunk = pcpu_alloc_chunk();
        if (!chunk)
                return NULL;

        vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
                                pcpu_nr_groups, pcpu_atom_size);
        if (!vms) {
                pcpu_free_chunk(chunk);
                return NULL;
        }

        chunk->data = vms;
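        /*
         * pcpu_get_vm_areas() returns areas laid out at the configured
         * group offsets from a common base; subtracting the first
         * group's offset from its area start recovers that base.
         */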
        chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
        return chunk;
}

static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
        if (chunk && chunk->data)
                pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
        pcpu_free_chunk(chunk);
}

static struct page *pcpu_addr_to_page(void *addr)
{
        return vmalloc_to_page(addr);
}

static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
        /* no extra restriction */
        return 0;
}