/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks in the vmalloc
 * area.  Each chunk consists of a boot-time determined number of
 * units and the first chunk is used for static percpu variables in
 * the kernel image (special boot time alloc/init handling is
 * necessary as these areas need to be brought up before allocation
 * services are running).  Units grow as necessary and all units grow
 * or shrink in unison.  When a chunk is filled up, another chunk is
 * allocated in the vmalloc area, ie.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them being
 * as small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative value an allocated one.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.
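 * For example, a map of { 64, -128, 320 } describes a chunk which
 * starts with a 64 byte free area, followed by a 128 byte allocated
 * area and a 320 byte free area.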
 *
 * The chunk containing a given address can be determined through the
 * index field in the page struct, which contains a pointer to the
 * chunk.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   a regular address to a percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   set up the first chunk containing the kernel static percpu area
 */

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
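/*
 * With the default mapping above, a static percpu variable's percpu
 * pointer is simply its link-time address inside
 * [__per_cpu_start, __per_cpu_end); the two macros just shift between
 * that range and the first chunk's unit 0 at pcpu_base_addr.
 */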

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	struct vm_struct	**vms;		/* mapped vmalloc regions */
	bool			immutable;	/* no [de]population allowed */
	unsigned long		populated[];	/* populated bitmap */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit numbers */
static unsigned int pcpu_first_unit_cpu __read_mostly;
static unsigned int pcpu_last_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  Addresses below
 * pcpu_reserved_chunk_limit in the first chunk belong to the reserved
 * chunk.  When the reserved area doesn't exist, the following
 * variables contain NULL and 0 respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks, the populated bitmap and
 * vmalloc mapping.  The latter is a spinlock and protects the index
 * data structures - chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.  In
 * general, percpu memory can't be allocated with irqs off, but
 * irqsave/restore are still used in the alloc path so that it can be
 * used from the early init path - sched_init() specifically.
 *
 * The free path accesses and alters only the index data structures,
 * so it can be safely called from atomic context.  When memory needs
 * to be returned to the system, the free path schedules reclaim_work
 * which grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to
 * be reclaimed, releases both locks and frees the chunks.  Note that
 * it's necessary to grab both locks to remove a chunk from
 * circulation as the allocation path might be referencing the chunk
 * with only pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}
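
/*
 * e.g. with PCPU_SLOT_BASE_SHIFT == 5, a 1024 byte size (fls() == 11)
 * lands in slot 11 - 5 + 2 == 8.
 */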

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

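/* index of @cpu's @page_idx'th page in a chunk-wide pages array / bitmap */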
static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

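/* kernel virtual address of @cpu's @page_idx'th page in @chunk */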
static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
				    unsigned int cpu, int page_idx)
{
	/* must not be used on pre-mapped chunk */
	WARN_ON(chunk->immutable);

	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

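/*
 * Helpers for the region iterators below: advance *@rs to the start
 * of the next (un)populated page region in [*@rs, @end) of @chunk and
 * set *@re to its end.
 */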
static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))

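/*
 * Example usage (cf. pcpu_populate_chunk() below):
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
 *		rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
 *		...
 *	}
 */
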
/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	/* is it in the first chunk? */
	if (addr >= first_start && addr < first_start + pcpu_unit_size) {
		/* is it in the reserved area? */
		if (addr < first_start + pcpu_reserved_chunk_limit)
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(vmalloc_to_page(addr));
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 *
 * Determine whether the area map of @chunk needs to be extended to
 * accommodate a new allocation.
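 *
 * The map must have room for at least one split (two extra entries);
 * the target length starts at PCPU_DFL_MAP_ALLOC and doubles until it
 * fits, e.g. a chunk with 40 map entries in use asks for a 64 entry
 * map.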
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{
	int new_alloc;

	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	return new_alloc;
}

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	new = pcpu_mem_alloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	memcpy(new, chunk->map, old_size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using static map.
	 */
	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
		old = chunk->map;

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old, old_size);
	pcpu_mem_free(new, new_size);

	return 0;
}

/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, a @head bytes block is inserted before block @i, moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and a @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
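 *
 * For example, with @head = 64 and @tail = 128, a free 512 byte block
 * { 512 } is split into the three blocks { 64, 320, 128 }.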
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free the area starting at offset @freeme in @chunk.  Note that this
 * function only modifies the allocation map.  It doesn't depopulate
 * or unmap the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}

/**
 * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
 * @chunk: chunk of interest
 * @bitmapp: output parameter for bitmap
 * @may_alloc: may allocate the array
 *
 * Returns pointer to array of pointers to struct page and bitmap,
 * both of which can be indexed with pcpu_page_idx().  The returned
 * array is cleared to zero and *@bitmapp is copied from
 * @chunk->populated.  Note that there is only one array and bitmap
 * and access exclusion is the caller's responsibility.
 *
 * CONTEXT:
 * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc.
 * Otherwise, don't care.
 *
 * RETURNS:
 * Pointer to temp pages array on success, NULL on failure.
 */
static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
					       unsigned long **bitmapp,
					       bool may_alloc)
{
	static struct page **pages;
	static unsigned long *bitmap;
	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
	size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
			     sizeof(unsigned long);

	if (!pages || !bitmap) {
		if (may_alloc && !pages)
			pages = pcpu_mem_alloc(pages_size);
		if (may_alloc && !bitmap)
			bitmap = pcpu_mem_alloc(bitmap_size);
		if (!pages || !bitmap)
			return NULL;
	}

	memset(pages, 0, pages_size);
	bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);

	*bitmapp = bitmap;
	return pages;
}

/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start,@page_end) in @pages for all units.
 * The pages were allocated for @chunk.
 */
static void pcpu_free_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page = pages[pcpu_page_idx(cpu, i)];

			if (page)
				__free_page(page);
		}
	}
}

/**
 * pcpu_alloc_pages - allocates pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep) {
				pcpu_free_pages(chunk, pages, populated,
						page_start, page_end);
				return -ENOMEM;
			}
		}
	}
	return 0;
}

/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start,@page_end) of @chunk are about to be
 * unmapped.  Flush cache.  As each flushing trial can be very
 * expensive, issue flush on the whole region at once rather than
 * doing it for each cpu.  This could be an overkill but is more
 * scalable.
 */
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	flush_cache_vunmap(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
	unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
}

/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which can be used to pass information to free
 * @populated: populated bitmap
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * Corresponding elements in @pages were cleared by the caller and can
 * be used to carry information to pcpu_free_pages() which will be
 * called after all unmaps are finished.  The caller should call
 * proper pre/post flush functions.
 */
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
			     struct page **pages, unsigned long *populated,
			     int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page;

			page = pcpu_chunk_page(chunk, cpu, i);
			WARN_ON(!page);
			pages[pcpu_page_idx(cpu, i)] = page;
		}
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				   page_end - page_start);
	}

	for (i = page_start; i < page_end; i++)
		__clear_bit(i, populated);
}

/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
 * TLB for the regions.  This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	flush_tlb_kernel_range(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
					PAGE_KERNEL, pages);
}

/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing pages to be mapped
 * @populated: populated bitmap
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.  The
 * caller is responsible for calling pcpu_post_map_flush() after all
 * mappings are complete.
 *
 * This function is responsible for setting corresponding bits in
 * @chunk->populated bitmap and whatever is necessary for reverse
 * lookup (addr -> chunk).
 */
static int pcpu_map_pages(struct pcpu_chunk *chunk,
			  struct page **pages, unsigned long *populated,
			  int page_start, int page_end)
{
	unsigned int cpu, tcpu;
	int i, err;

	for_each_possible_cpu(cpu) {
		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				       &pages[pcpu_page_idx(cpu, page_start)],
				       page_end - page_start);
		if (err < 0)
			goto err;
	}

	/* mapping successful, link chunk and mark populated */
	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu)
			pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
					    chunk);
		__set_bit(i, populated);
	}

	return 0;

err:
	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
				   page_end - page_start);
	}
	return err;
}

/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
 * cache.
 *
 * As with pcpu_pre_unmap_flush(), cache flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
				int page_start, int page_end)
{
	flush_cache_vmap(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  The cache is flushed before unmapping; TLB flushing
 * is left to vmalloc, which handles it lazily.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	struct page **pages;
	unsigned long *populated;
	int rs, re;

	/* quick path, check whether it's empty already */
	rs = page_start;
	pcpu_next_unpop(chunk, &rs, &re, page_end);
	if (rs == page_start && re == page_end)
		return;

	/* immutable chunks can't be depopulated */
	WARN_ON(chunk->immutable);

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);

	/* no need to flush tlb, vmalloc will handle it lazily */

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int free_end = page_start, unmap_end = page_start;
	struct page **pages;
	unsigned long *populated;
	unsigned int cpu;
	int rs, re, rc;

	/* quick path, check whether all pages are already there */
	rs = page_start;
	pcpu_next_pop(chunk, &rs, &re, page_end);
	if (rs == page_start && re == page_end)
		goto clear;

	/* need to allocate and map pages, this chunk can't be immutable */
	WARN_ON(chunk->immutable);

	pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
	if (!pages)
		return -ENOMEM;

	/* alloc and map */
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
		if (rc)
			goto err_free;
		free_end = re;
	}

	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		rc = pcpu_map_pages(chunk, pages, populated, rs, re);
		if (rc)
			goto err_unmap;
		unmap_end = re;
	}
	pcpu_post_map_flush(chunk, page_start, page_end);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
clear:
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
	return 0;

err_unmap:
	pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);
	pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
err_free:
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);
	return rc;
}

static void free_pcpu_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	if (chunk->vms)
		pcpu_free_vm_areas(chunk->vms, pcpu_nr_groups);
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	kfree(chunk);
}

static struct pcpu_chunk *alloc_pcpu_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;

	chunk->vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
				       pcpu_nr_groups, pcpu_atom_size,
				       GFP_KERNEL);
	if (!chunk->vms) {
		free_pcpu_chunk(chunk);
		return NULL;
	}

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;
	chunk->base_addr = chunk->vms[0]->addr - pcpu_group_offsets[0];

	return chunk;
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	int slot, off, new_alloc;
	unsigned long flags;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		while ((new_alloc = pcpu_need_to_extend(chunk))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail_unlock_mutex;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk);
			if (new_alloc) {
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail_unlock_mutex;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart cpu_slot list walking.
				 */
				goto restart;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irqrestore(&pcpu_lock, flags);

	chunk = alloc_pcpu_chunk();
	if (!chunk) {
		err = "failed to allocate new chunk";
		goto fail_unlock_mutex;
	}

	spin_lock_irqsave(&pcpu_lock, flags);
	pcpu_chunk_relocate(chunk, -1);
	goto restart;

area_found:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_free_area(chunk, off);
		err = "failed to populate";
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	/* return address relative to base address */
	return __addr_to_pcpu_ptr(chunk->base_addr + off);

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	if (warn_limit) {
		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
			   "%s\n", size, align, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("PERCPU: limit reached, disable warning\n");
	}
	return NULL;
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
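 *
 * Most callers use the alloc_percpu() wrapper from linux/percpu.h
 * rather than calling this directly, e.g.
 *
 *	struct foo __percpu *p = alloc_percpu(struct foo);
 *	struct foo *this = per_cpu_ptr(p, smp_processor_id());
 *	...
 *	free_percpu(p);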
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align from reserved
 * percpu area if arch has set it up; otherwise, allocation is served
 * from the same dynamic area.  Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
		free_pcpu_chunk(chunk);
	}

	mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off);

	/* if there is more than one fully free chunk, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);
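
/*
 * Minimal usage sketch (illustrative only): dynamic percpu memory is
 * obtained with alloc_percpu(), accessed through per_cpu_ptr() and
 * released with free_percpu().
 */
#if 0
	int __percpu *cnt = alloc_percpu(int);	/* may return NULL */
	unsigned int cpu;

	if (cnt) {
		for_each_possible_cpu(cpu)
			*per_cpu_ptr(cnt, cpu) = 0;
		free_percpu(cnt);	/* NULL would also be tolerated */
	}
#endif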

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to in-kernel static percpu area.  Module
 * static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);

		if ((void *)addr >= start && (void *)addr < start + static_size)
			return true;
	}
	return false;
}
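
/*
 * Illustrative sketch: this test exists for callers like lockdep's
 * static object check, which needs to know whether an address points
 * into the kernel's static percpu region.  The helper below is a
 * hypothetical example, not an API of this file.
 */
#if 0
static bool example_is_static_percpu_obj(void *obj)
{
	return is_kernel_percpu_address((unsigned long)obj);
}
#endif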

/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is a dereferenceable address obtained via one of
 * the percpu access macros, this function translates it into its
 * physical address.  The caller is responsible for ensuring @addr
 * stays valid until this function finishes.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	if ((unsigned long)addr < VMALLOC_START ||
	    (unsigned long)addr >= VMALLOC_END)
		return __pa(addr);
	else
		return page_to_phys(vmalloc_to_page(addr));
}
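
/*
 * Illustrative sketch: obtaining the physical address of one cpu's
 * copy of a dynamically allocated percpu buffer, e.g. for a
 * hypothetical device descriptor that wants a physical address.
 */
#if 0
	void __percpu *buf = __alloc_percpu(64, __alignof__(u64));
	phys_addr_t pa = per_cpu_ptr_to_phys(per_cpu_ptr(buf, cpu));
#endif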

static inline size_t pcpu_calc_fc_sizes(size_t static_size,
					size_t reserved_size,
					ssize_t *dyn_sizep)
{
	size_t size_sum;

	size_sum = PFN_ALIGN(static_size + reserved_size +
			     (*dyn_sizep >= 0 ? *dyn_sizep : 0));
	if (*dyn_sizep != 0)
		*dyn_sizep = size_sum - static_size - reserved_size;

	return size_sum;
}
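
/*
 * Worked example for the helper above (hypothetical numbers, 4k
 * pages): static_size = 45000, reserved_size = 8192 and *dyn_sizep =
 * 20480 sum to 73672 bytes; PFN_ALIGN() rounds that up to 18 pages =
 * 73728 and *dyn_sizep is rewritten to 73728 - 45000 - 8192 = 20536,
 * handing the page-alignment padding to the dynamic area.  With
 * *dyn_sizep == -1 (auto), only static + reserved are summed and the
 * entire rounding slack becomes the dynamic area.
 */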

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}
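
/*
 * Caller-side sketch (hypothetical nr0/nr1 unit counts): groups other
 * than the first must have their cpu_map pointers carved out of the
 * flat array by hand, exactly as pcpu_build_alloc_info() does below.
 */
#if 0
	ai = pcpu_alloc_alloc_info(2, nr0 + nr1);
	if (ai)
		ai->groups[1].cpu_map = ai->groups[0].cpu_map + nr0;
#endif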

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	free_bootmem(__pa(ai), ai->__ai_size);
}

/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
 */
struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int group_cnt_max = 0, nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/* this function may be called multiple times */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
		upa--;
	max_upa = upa;

	/* group cpus according to their proximity */
	for_each_possible_cpu(cpu) {
		group = 0;
	next_group:
		for_each_possible_cpu(tcpu) {
			if (cpu == tcpu)
				break;
			if (group_map[tcpu] == group && cpu_distance_fn &&
			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
				group++;
				nr_groups = max(nr_groups, group + 1);
				goto next_group;
			}
		}
		group_map[cpu] = group;
		group_cnt[group]++;
		group_cnt_max = max(group_cnt_max, group_cnt[group]);
	}

	/*
	 * Expand unit size until address space usage goes over 75%
	 * and then as much as possible without using more address
	 * space.
	 */
	last_allocs = INT_MAX;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
			continue;

		for (group = 0; group < nr_groups; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 25%.  The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
		if (wasted > num_possible_cpus() / 3)
			continue;

		/* and then don't consume more memory */
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	upa = best_upa;

	/* allocate and fill alloc_info */
	for (group = 0; group < nr_groups; group++)
		nr_units += roundup(group_cnt[group], upa);

	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
	if (!ai)
		return ERR_PTR(-ENOMEM);
	cpu_map = ai->groups[0].cpu_map;

	for (group = 0; group < nr_groups; group++) {
		ai->groups[group].cpu_map = cpu_map;
		cpu_map += roundup(group_cnt[group], upa);
	}

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = alloc_size / upa;
	ai->atom_size = atom_size;
	ai->alloc_size = alloc_size;

	for (group = 0, unit = 0; group_cnt[group]; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];

		/*
		 * Initialize base_offset as if all groups are located
		 * back-to-back.  The caller should update this to
		 * reflect actual allocation.
		 */
		gi->base_offset = unit * ai->unit_size;

		for_each_possible_cpu(cpu)
			if (group_map[cpu] == group)
				gi->cpu_map[gi->nr_units++] = cpu;
		gi->nr_units = roundup(gi->nr_units, upa);
		unit += gi->nr_units;
	}
	BUG_ON(unit != nr_units);

	return ai;
}
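
/*
 * Worked example for the function above (hypothetical machine): 4
 * possible cpus on 2 nodes, size_sum = 72k, atom_size = 2M.  Then
 * min_unit_size = 72k, alloc_size = roundup(72k, 2M) = 2M and
 * max_upa = 16, the largest upa <= 2M/72k = 29 which divides 2M into
 * page-aligned units.  With group_cnt = { 2, 2 }, non-divisors of 2M
 * are skipped, every remaining upa > 2 fails the 25% wastage check,
 * upa == 2 wastes nothing with 2 allocations, and upa == 1 would need
 * 4 allocations, so best_upa = 2 and unit_size = 1M.
 */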

/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
				 const struct pcpu_alloc_info *ai)
{
	int group_width = 1, cpu_width = 1, width;
	char empty_str[] = "--------";
	int alloc = 0, alloc_end = 0;
	int group, v;
	int upa, apl;	/* units per alloc, allocs per line */

	v = ai->nr_groups;
	while (v /= 10)
		group_width++;

	v = num_possible_cpus();
	while (v /= 10)
		cpu_width++;
	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

	upa = ai->alloc_size / ai->unit_size;
	width = upa * (cpu_width + 1) + group_width + 3;
	apl = rounddown_pow_of_two(max(60 / width, 1));

	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

	for (group = 0; group < ai->nr_groups; group++) {
		const struct pcpu_group_info *gi = &ai->groups[group];
		int unit = 0, unit_end = 0;

		BUG_ON(gi->nr_units % upa);
		for (alloc_end += gi->nr_units / upa;
		     alloc < alloc_end; alloc++) {
			if (!(alloc % apl)) {
				printk("\n");
				printk("%spcpu-alloc: ", lvl);
			}
			printk("[%0*d] ", group_width, group);

			for (unit_end += upa; unit < unit_end; unit++)
				if (gi->cpu_map[unit] != NR_CPUS)
					printk("%0*d ", cpu_width,
					       gi->cpu_map[unit]);
				else
					printk("%s ", empty_str);
		}
	}
	printk("\n");
}
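
/*
 * For a hypothetical configuration like the worked example above (4
 * cpus in 2 groups, upa == 2, 2M allocations), the dump would look
 * roughly like:
 *
 *   pcpu-alloc: s45056 r8192 d20480 u1048576 alloc=1*2097152
 *   pcpu-alloc: [0] 0 1 [1] 2 3
 */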

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk.  The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
 *
 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
 * and equal to or larger than @ai->static_size + @ai->reserved_size +
 * @ai->dyn_size.
 *
 * @ai->atom_size is the allocation atom size and used as alignment
 * for vm areas.
 *
 * @ai->alloc_size is the allocation size and always multiple of
 * @ai->atom_size.  This is larger than @ai->atom_size if
 * @ai->unit_size is larger than @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe virtual memory layout of
 * percpu areas.  Units which should be colocated are put into the
 * same group.  Dynamic VM areas will be allocated according to these
 * groupings.  If @ai->nr_groups is zero, a single group containing
 * all units is assumed.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
				  void *base_addr)
{
	static char cpus_buf[4096] __initdata;
	static int smap[2], dmap[2];
	size_t dyn_size = ai->dyn_size;
	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned long *group_offsets;
	size_t *group_sizes;
	unsigned long *unit_off;
	unsigned int cpu;
	int *unit_map;
	int group, unit, i;

	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);

#define PCPU_SETUP_BUG_ON(cond)	do {					\
	if (unlikely(cond)) {						\
		pr_emerg("PERCPU: failed to initialize, %s", #cond);	\
		pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);	\
		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
		BUG();							\
	}								\
} while (0)

	/* sanity checks */
	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
	PCPU_SETUP_BUG_ON(!ai->static_size);
	PCPU_SETUP_BUG_ON(!base_addr);
	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);

	/* process group information and build config tables accordingly */
	group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
	group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
	unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		unit_map[cpu] = UINT_MAX;
	pcpu_first_unit_cpu = NR_CPUS;

	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
		const struct pcpu_group_info *gi = &ai->groups[group];

		group_offsets[group] = gi->base_offset;
		group_sizes[group] = gi->nr_units * ai->unit_size;

		for (i = 0; i < gi->nr_units; i++) {
			cpu = gi->cpu_map[i];
			if (cpu == NR_CPUS)
				continue;

			PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);

			unit_map[cpu] = unit + i;
			unit_off[cpu] = gi->base_offset + i * ai->unit_size;

			if (pcpu_first_unit_cpu == NR_CPUS)
				pcpu_first_unit_cpu = cpu;
		}
	}
	pcpu_last_unit_cpu = cpu;
	pcpu_nr_units = unit;

	for_each_possible_cpu(cpu)
		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

	/* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
	pcpu_dump_alloc_info(KERN_INFO, ai);

	pcpu_nr_groups = ai->nr_groups;
	pcpu_group_offsets = group_offsets;
	pcpu_group_sizes = group_sizes;
	pcpu_unit_map = unit_map;
	pcpu_unit_offsets = unit_off;

	/* determine basic parameters */
	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_atom_size = ai->atom_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
	schunk = alloc_bootmem(pcpu_chunk_struct_size);
	INIT_LIST_HEAD(&schunk->list);
	schunk->base_addr = base_addr;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->immutable = true;
	bitmap_fill(schunk->populated, pcpu_unit_pages);

	if (ai->reserved_size) {
		schunk->free_size = ai->reserved_size;
		pcpu_reserved_chunk = schunk;
		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
	schunk->contig_hint = schunk->free_size;

	schunk->map[schunk->map_used++] = -ai->static_size;
	if (schunk->free_size)
		schunk->map[schunk->map_used++] = schunk->free_size;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = alloc_bootmem(pcpu_chunk_struct_size);
		INIT_LIST_HEAD(&dchunk->list);
		dchunk->base_addr = base_addr;
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
		dchunk->immutable = true;
		bitmap_fill(dchunk->populated, pcpu_unit_pages);

		dchunk->contig_hint = dchunk->free_size = dyn_size;
		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
		dchunk->map[dchunk->map_used++] = dchunk->free_size;
	}

	/* link the first chunk in */
	pcpu_first_chunk = dchunk ?: schunk;
	pcpu_chunk_relocate(pcpu_first_chunk, -1);

	/* we're done */
	pcpu_base_addr = base_addr;
	return 0;
}
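
/*
 * Worked example of the resulting first chunk (hypothetical sizes:
 * static 45056, reserved 8192, dynamic 20480): the static chunk's
 * area map becomes { -45056, 8192 }, i.e. the static area allocated
 * and the reserved area free but reachable only through
 * pcpu_reserved_chunk, while the dynamic chunk's map becomes
 * { -53248, 20480 }, treating static + reserved as one allocated
 * block in front of the dynamic area.
 */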

const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
	[PCPU_FC_AUTO]	= "auto",
	[PCPU_FC_EMBED]	= "embed",
	[PCPU_FC_PAGE]	= "page",
};

enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

static int __init percpu_alloc_setup(char *str)
{
	if (0)
		/* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
	else if (!strcmp(str, "embed"))
		pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
	else if (!strcmp(str, "page"))
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	else
		pr_warning("PERCPU: unknown allocator %s specified\n", str);

	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);
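
/*
 * Usage: the first chunk allocator may be selected on the kernel
 * command line, e.g. "percpu_alloc=embed" or "percpu_alloc=page",
 * provided the corresponding CONFIG_NEED_PER_CPU_*_FIRST_CHUNK
 * option is available; unknown strings only warn and leave the
 * default auto selection in place.
 */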

#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu page
 * @free_fn: function to free percpu page
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to setup the first chunk, it is allocated
 * by calling @alloc_fn and used as-is without being mapped into
 * vmalloc area.  Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses larger page size.  Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines thus
 * requiring large vmalloc address space.  Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than distances
 * between node memory addresses (ie. 32bit NUMA machines).
 *
 * When @dyn_size is positive, dynamic area might be larger than
 * specified to fill page alignment.  When @dyn_size is auto,
 * @dyn_size is just big enough to fill page alignment after static
 * and reserved areas.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned using @free_fn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_alloc_fn_t alloc_fn,
				  pcpu_fc_free_fn_t free_fn)
{
	void *base = (void *)ULONG_MAX;
	void **areas = NULL;
	struct pcpu_alloc_info *ai;
	size_t size_sum, areas_size, max_distance;
	int group, i, rc;

	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
				   cpu_distance_fn);
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

	areas = alloc_bootmem_nopanic(areas_size);
	if (!areas) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* allocate, copy and determine base address */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		unsigned int cpu = NR_CPUS;
		void *ptr;

		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
			cpu = gi->cpu_map[i];
		BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
		if (!ptr) {
			rc = -ENOMEM;
			goto out_free_areas;
		}
		areas[group] = ptr;

		base = min(ptr, base);

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
				free_fn(ptr, ai->unit_size);
				continue;
			}
			/* copy and return the unused part */
			memcpy(ptr, __per_cpu_load, ai->static_size);
			free_fn(ptr + size_sum, ai->unit_size - size_sum);
		}
	}

	/* base address is now known, determine group base offsets */
	max_distance = 0;
	for (group = 0; group < ai->nr_groups; group++) {
		ai->groups[group].base_offset = areas[group] - base;
		max_distance = max_t(size_t, max_distance,
				     ai->groups[group].base_offset);
	}
	max_distance += ai->unit_size;

	/* warn if maximum distance is further than 75% of vmalloc space */
	if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
			   "space 0x%lx\n",
			   max_distance, VMALLOC_END - VMALLOC_START);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
		/* and fail if we have fallback */
		rc = -EINVAL;
		goto out_free;
#endif
	}

	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
		ai->dyn_size, ai->unit_size);

	rc = pcpu_setup_first_chunk(ai, base);
	goto out_free;

out_free_areas:
	for (group = 0; group < ai->nr_groups; group++)
		free_fn(areas[group],
			ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		free_bootmem(__pa(areas), areas_size);
	return rc;
}
#endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
	  !CONFIG_HAVE_SETUP_PER_CPU_AREA */

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
				 pcpu_fc_alloc_fn_t alloc_fn,
				 pcpu_fc_free_fn_t free_fn,
				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
	char psize_str[16];
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	int unit, i, j, rc;

	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

	ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
		return PTR_ERR(ai);
	BUG_ON(ai->nr_groups != 1);
	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());

	unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
			       sizeof(pages[0]));
	pages = alloc_bootmem(pages_size);

	/* allocate pages */
	j = 0;
	for (unit = 0; unit < num_possible_cpus(); unit++)
		for (i = 0; i < unit_pages; i++) {
			unsigned int cpu = ai->groups[0].cpu_map[unit];
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr) {
				pr_warning("PERCPU: failed to allocate %s page "
					   "for cpu%u\n", psize_str, cpu);
				goto enomem;
			}
			pages[j++] = virt_to_page(ptr);
		}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * ai->unit_size;
	vm_area_register_early(&vm, PAGE_SIZE);

	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned long unit_addr =
			(unsigned long)vm.addr + unit * ai->unit_size;

		for (i = 0; i < unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
				      unit_pages);
		if (rc < 0)
			panic("failed to map percpu area, err=%d\n", rc);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
	}

	/* we're ready, commit */
	pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
		unit_pages, psize_str, vm.addr, ai->static_size,
		ai->reserved_size, ai->dyn_size);

	rc = pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pages[j]), PAGE_SIZE);
	rc = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pages), pages_size);
	pcpu_free_alloc_info(ai);
	return rc;
}
#endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */
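
/*
 * Illustrative call pattern for the page allocator (the arch_*
 * callbacks are hypothetical names for what an architecture supplies;
 * see arch/x86/kernel/setup_percpu.c for a real example):
 */
#if 0
	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
				   arch_pcpu_alloc, arch_pcpu_free,
				   arch_populate_pte);
	if (rc < 0)
		panic("percpu: page first chunk setup failed");
#endif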

/*
 * Generic percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because percpu area can piggy back
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
				       size_t align)
{
	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
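
/*
 * With __per_cpu_offset[] initialized above, generic percpu access is
 * plain pointer arithmetic: per_cpu(x, cpu) resolves, roughly, to the
 * static address of x shifted by __per_cpu_offset[cpu].  A simplified
 * sketch, not the exact macro text:
 */
#if 0
	ptr = (typeof(ptr))((unsigned long)&x + __per_cpu_offset[cpu]);
#endif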