/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks in the vmalloc
 * area.  Each chunk consists of a boot-time determined number of
 * units and the first chunk is used for static percpu variables in
 * the kernel image (special boot time alloc/init handling is
 * necessary as these areas need to be brought up before allocation
 * services are running).  Units grow as necessary and all units grow
 * or shrink in unison.  When a chunk is filled up, another chunk is
 * allocated, again in the vmalloc area.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of a single unit space.
 * That is, an area of 512 bytes at offset 6k in c1 occupies 512 bytes
 * at offset 6k of c1:u0, c1:u1, c1:u2 and c1:u3.  On UMA, units
 * correspond directly to cpus.  On NUMA, the mapping can be
 * non-linear and even sparse.  Percpu access can be done by
 * configuring percpu base registers according to the cpu to unit
 * mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them being
 * as small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator avoid iterating the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * in chunk->map.  A positive value in the map represents a free
 * region and a negative one an allocated region.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  The chunk containing an address can
 * be determined through the index field in the page struct, which
 * contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
 *
 * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
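
/*
 * A worked example of the map encoding above (illustrative values,
 * not from a live system): a 4k unit holding a single 512 byte
 * allocation at offset 1024 would be described by
 *
 *	chunk->map[] = { 1024, -512, 2560 };	with map_used == 3
 *
 * i.e. 1024 free bytes, then 512 allocated bytes, then 2560 free
 * bytes.  The offset of any entry is the sum of the absolute values
 * of the entries preceding it.
 */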

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
		 + (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
		 - (unsigned long)__per_cpu_start)
#endif
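
/*
 * Example of the default translation (hypothetical addresses): with
 * pcpu_base_addr == 0xf0000000 and __per_cpu_start == 0xc1000000, an
 * area at chunk address 0xf0001000 yields the percpu pointer
 * 0xc1001000, and __pcpu_ptr_to_addr() inverts the mapping.  Archs
 * override these macros when percpu pointers need a translation other
 * than this constant offset.
 */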

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	struct vm_struct	**vms;		/* mapped vmalloc regions */
	bool			immutable;	/* no [de]population allowed */
	unsigned long		populated[];	/* populated bitmap */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit numbers */
static unsigned int pcpu_first_unit_cpu __read_mostly;
static unsigned int pcpu_last_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  It covers the first
 * pcpu_reserved_chunk_limit bytes of the first chunk.  When the
 * reserved area doesn't exist, the following variables contain NULL
 * and 0 respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks, populated bitmap and
 * vmalloc mapping.  The latter is a spinlock and protects the index
 * data structures - chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.
 *
 * The free path accesses and alters only the index data structures,
 * so it can be safely called from atomic context.  When memory needs
 * to be returned to the system, the free path schedules reclaim_work
 * which grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to
 * be reclaimed, releases both locks and frees the chunks.  Note that
 * it's necessary to grab both locks to remove a chunk from
 * circulation as the allocation path might be referencing the chunk
 * with only pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}
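
/*
 * Example (illustrative): the slot index grows roughly with the log2
 * of the free size - max(fls(size) - PCPU_SLOT_BASE_SHIFT + 2, 1) -
 * so doubling a chunk's free size moves it up about one slot, and a
 * fully free chunk (free_size == pcpu_unit_size) always lands in the
 * last slot, which is where the reclaimer looks for it.
 */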

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
				    unsigned int cpu, int page_idx)
{
	/* must not be used on pre-mapped chunk */
	WARN_ON(chunk->immutable);

	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
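
/*
 * Usage sketch (illustrative, mirrors the callers further down):
 * walking the unpopulated regions among the first four pages of a
 * chunk.
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, 0, 4)
 *		pr_debug("pages [%d,%d) need population\n", rs, re);
 */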

/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * The new slot according to the changed state is determined and
 * @chunk is moved to the slot.  Note that the reserved chunk is never
 * put on chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	/* is it in the first chunk? */
	if (addr >= first_start && addr < first_start + pcpu_unit_size) {
		/* is it in the reserved area? */
		if (addr < first_start + pcpu_reserved_chunk_limit)
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(vmalloc_to_page(addr));
}

/**
 * pcpu_extend_area_map - extend area map for allocation
 * @chunk: target chunk
 *
 * Extend the area map of @chunk so that it can accommodate an
 * allocation.  A single allocation can split an area into three
 * areas, so this function makes sure that @chunk->map has at least
 * two extra slots.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, pcpu_lock.  pcpu_lock is released and reacquired
 * if the area map is extended.
 *
 * RETURNS:
 * 0 if noop, 1 if successfully extended, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
{
	int new_alloc;
	int *new;
	size_t size;

	/* has enough? */
	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	spin_unlock_irq(&pcpu_lock);

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
	if (!new) {
		spin_lock_irq(&pcpu_lock);
		return -ENOMEM;
	}

	/*
	 * Acquire pcpu_lock and switch to the new area map.  Only a
	 * free could have happened in between, so map_used couldn't
	 * have grown.
	 */
	spin_lock_irq(&pcpu_lock);
	BUG_ON(new_alloc < chunk->map_used + 2);

	size = chunk->map_alloc * sizeof(chunk->map[0]);
	memcpy(new, chunk->map, size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using static map.
	 */
	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
		pcpu_mem_free(chunk->map, size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;

	/* pcpu_lock was dropped and reacquired above, tell the caller */
	return 1;
}

/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, a block of @head bytes is inserted before block @i,
 * moving it to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and a block of @tail
 * bytes is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}
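
/*
 * Worked example (illustrative): carving a 1024 byte area out of a
 * 2048 byte free block with @head == 512 and @tail == 512 turns
 *
 *	{ ..., 2048, ... }	into	{ ..., 512, 1024, 512, ... }
 *
 * where the middle entry is the block the caller then marks
 * allocated by negating it.
 */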

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge them.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free the area starting at @freeme in @chunk.  Note that this
 * function only modifies the allocation map.  It doesn't depopulate
 * or unmap the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}
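
/*
 * Worked example (illustrative): freeing the middle area of
 *
 *	{ 512, -1024, 512 }
 *
 * first flips the sign to 1024, then the two merge passes above
 * collapse the map back into a single { 2048 } free block.
 */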

/**
 * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
 * @chunk: chunk of interest
 * @bitmapp: output parameter for bitmap
 * @may_alloc: may allocate the array
 *
 * Returns pointer to array of pointers to struct page and bitmap,
 * both of which can be indexed with pcpu_page_idx().  The returned
 * array is cleared to zero and *@bitmapp is copied from
 * @chunk->populated.  Note that there is only one array and bitmap
 * and access exclusion is the caller's responsibility.
 *
 * CONTEXT:
 * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc.
 * Otherwise, don't care.
 *
 * RETURNS:
 * Pointer to temp pages array on success, NULL on failure.
 */
static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
					       unsigned long **bitmapp,
					       bool may_alloc)
{
	static struct page **pages;
	static unsigned long *bitmap;
	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
	size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
			     sizeof(unsigned long);

	if (!pages || !bitmap) {
		if (may_alloc && !pages)
			pages = pcpu_mem_alloc(pages_size);
		if (may_alloc && !bitmap)
			bitmap = pcpu_mem_alloc(bitmap_size);
		if (!pages || !bitmap)
			return NULL;
	}

	memset(pages, 0, pages_size);
	bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);

	*bitmapp = bitmap;
	return pages;
}

/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start,@page_end) in @pages for all units.  The
 * pages were allocated for @chunk.
 */
static void pcpu_free_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page = pages[pcpu_page_idx(cpu, i)];

			if (page)
				__free_page(page);
		}
	}
}

/**
 * pcpu_alloc_pages - allocate pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep) {
				pcpu_free_pages(chunk, pages, populated,
						page_start, page_end);
				return -ENOMEM;
			}
		}
	}
	return 0;
}

/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start,@page_end) of @chunk are about to be
 * unmapped.  Flush cache.  As each flushing trial can be very
 * expensive, issue flush on the whole region at once rather than
 * doing it for each cpu.  This could be overkill but is more
 * scalable.
 */
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	flush_cache_vunmap(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
	unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
}

/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which can be used to pass information to free
 * @populated: populated bitmap
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * Corresponding elements in @pages were cleared by the caller and can
 * be used to carry information to pcpu_free_pages() which will be
 * called after all unmaps are finished.  The caller should call
 * proper pre/post flush functions.
 */
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
			     struct page **pages, unsigned long *populated,
			     int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page;

			page = pcpu_chunk_page(chunk, cpu, i);
			WARN_ON(!page);
			pages[pcpu_page_idx(cpu, i)] = page;
		}
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				   page_end - page_start);
	}

	for (i = page_start; i < page_end; i++)
		__clear_bit(i, populated);
}

/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
 * TLB for the regions.  This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	flush_tlb_kernel_range(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
					PAGE_KERNEL, pages);
}

/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing pages to be mapped
 * @populated: populated bitmap
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.  The
 * caller is responsible for calling pcpu_post_map_flush() after all
 * mappings are complete.
 *
 * This function is responsible for setting corresponding bits in
 * @chunk->populated bitmap and whatever is necessary for reverse
 * lookup (addr -> chunk).
 */
static int pcpu_map_pages(struct pcpu_chunk *chunk,
			  struct page **pages, unsigned long *populated,
			  int page_start, int page_end)
{
	unsigned int cpu, tcpu;
	int i, err;

	for_each_possible_cpu(cpu) {
		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				       &pages[pcpu_page_idx(cpu, page_start)],
				       page_end - page_start);
		if (err < 0)
			goto err;
	}

	/* mapping successful, link chunk and mark populated */
	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu)
			pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
					    chunk);
		__set_bit(i, populated);
	}

	return 0;

err:
	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
				   page_end - page_start);
	}
	return err;
}

/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
 * cache.
 *
 * As with pcpu_pre_unmap_flush(), cache flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
				int page_start, int page_end)
{
	flush_cache_vmap(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  The cache is flushed before unmapping; the TLB flush
 * after unmapping is left to vmalloc's lazy handling.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	struct page **pages;
	unsigned long *populated;
	int rs, re;

	/* quick path, check whether it's empty already */
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		if (rs == page_start && re == page_end)
			return;
		break;
	}

	/* immutable chunks can't be depopulated */
	WARN_ON(chunk->immutable);

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);

	/* no need to flush tlb, vmalloc will handle it lazily */

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int free_end = page_start, unmap_end = page_start;
	struct page **pages;
	unsigned long *populated;
	unsigned int cpu;
	int rs, re, rc;

	/* quick path, check whether all pages are already there */
	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) {
		if (rs == page_start && re == page_end)
			goto clear;
		break;
	}

	/* need to allocate and map pages, this chunk can't be immutable */
	WARN_ON(chunk->immutable);

	pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
	if (!pages)
		return -ENOMEM;

	/* alloc and map */
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
		if (rc)
			goto err_free;
		free_end = re;
	}

	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		rc = pcpu_map_pages(chunk, pages, populated, rs, re);
		if (rc)
			goto err_unmap;
		unmap_end = re;
	}
	pcpu_post_map_flush(chunk, page_start, page_end);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
clear:
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
	return 0;

err_unmap:
	pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);
	pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
err_free:
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);
	return rc;
}

static void free_pcpu_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	if (chunk->vms)
		pcpu_free_vm_areas(chunk->vms, pcpu_nr_groups);
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	kfree(chunk);
}

static struct pcpu_chunk *alloc_pcpu_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	if (!chunk->map) {
		kfree(chunk);
		return NULL;
	}
	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;

	chunk->vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
				       pcpu_nr_groups, pcpu_atom_size,
				       GFP_KERNEL);
	if (!chunk->vms) {
		free_pcpu_chunk(chunk);
		return NULL;
	}

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;
	chunk->base_addr = chunk->vms[0]->addr - pcpu_group_offsets[0];

	return chunk;
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	struct pcpu_chunk *chunk;
	int slot, off;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;
		if (size > chunk->contig_hint ||
		    pcpu_extend_area_map(chunk) < 0)
			goto fail_unlock;
		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			switch (pcpu_extend_area_map(chunk)) {
			case 0:
				break;
			case 1:
				goto restart;	/* pcpu_lock dropped, restart */
			default:
				goto fail_unlock;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irq(&pcpu_lock);

	chunk = alloc_pcpu_chunk();
	if (!chunk)
		goto fail_unlock_mutex;

	spin_lock_irq(&pcpu_lock);
	pcpu_chunk_relocate(chunk, -1);
	goto restart;

area_found:
	spin_unlock_irq(&pcpu_lock);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irq(&pcpu_lock);
		pcpu_free_area(chunk, off);
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	/* return address relative to base address */
	return __addr_to_pcpu_ptr(chunk->base_addr + off);

fail_unlock:
	spin_unlock_irq(&pcpu_lock);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	return NULL;
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
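
/*
 * Usage sketch (illustrative, not part of this file): allocating,
 * updating and freeing a dynamic percpu counter.  Only the public
 * API above and the accessors from <linux/percpu.h> are assumed.
 *
 *	unsigned long *cnt;
 *	int cpu;
 *
 *	cnt = __alloc_percpu(sizeof(*cnt), __alignof__(*cnt));
 *	if (!cnt)
 *		return -ENOMEM;
 *	cpu = get_cpu();
 *	(*per_cpu_ptr(cnt, cpu))++;
 *	put_cpu();
 *	...
 *	free_percpu(cnt);
 */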

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align from reserved
 * percpu area if arch has set it up; otherwise, allocation is served
 * from the same dynamic area.  Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
		free_pcpu_chunk(chunk);
	}

	mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void *ptr)
{
	void *addr = __pcpu_ptr_to_addr(ptr);
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off);

	/* if there is more than one fully free chunk, wake the grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

static inline size_t pcpu_calc_fc_sizes(size_t static_size,
					size_t reserved_size,
					ssize_t *dyn_sizep)
{
	size_t size_sum;

	size_sum = PFN_ALIGN(static_size + reserved_size +
			     (*dyn_sizep >= 0 ? *dyn_sizep : 0));
	if (*dyn_sizep != 0)
		*dyn_sizep = size_sum - static_size - reserved_size;

	return size_sum;
}
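
/*
 * Example (illustrative numbers, 4k pages): a 30000 byte static area,
 * an 8192 byte reserved area and *@dyn_sizep == -1 (auto) give
 *
 *	size_sum   = PFN_ALIGN(30000 + 8192 + 0) = 40960
 *	*dyn_sizep = 40960 - 30000 - 8192       = 2768
 *
 * i.e. the slack gained from page alignment becomes the dynamic area.
 */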

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	free_bootmem(__pa(ai), ai->__ai_size);
}
1312
/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group. The returned configuration is guaranteed
 * to have CPUs on different nodes in different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new pcpu_alloc_info is returned. On
 * failure, ERR_PTR value is returned.
 */
struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int group_cnt_max = 0, nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is a multiple of atom_size and is the smallest
	 * which can accommodate page aligned segments which are equal
	 * to or larger than min_unit_size.
	 */
	size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
		upa--;
	max_upa = upa;
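
	/*
	 * A worked example with hypothetical numbers: with 4k pages,
	 * atom_size = 2M and min_unit_size = 45k, alloc_size is
	 * rounded up to 2M and upa starts at 2M / 45k = 45. The loop
	 * above then walks down to 32, the largest value that both
	 * divides 2M evenly and yields a page aligned unit size
	 * (64k), so max_upa ends up as 32.
	 */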

	/* group cpus according to their proximity */
	for_each_possible_cpu(cpu) {
		group = 0;
	next_group:
		for_each_possible_cpu(tcpu) {
			if (cpu == tcpu)
				break;
			if (group_map[tcpu] == group && cpu_distance_fn &&
			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
				group++;
				nr_groups = max(nr_groups, group + 1);
				goto next_group;
			}
		}
		group_map[cpu] = group;
		group_cnt[group]++;
		group_cnt_max = max(group_cnt_max, group_cnt[group]);
	}

	/*
	 * Expand unit size until address space usage goes over 75%
	 * and then as much as possible without using more address
	 * space.
	 */
	last_allocs = INT_MAX;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
			continue;

		for (group = 0; group < nr_groups; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 25%, ie. if the
		 * number of wasted units exceeds a third of the used
		 * ones (wasted / (used + wasted) > 1/4). The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
		if (wasted > num_possible_cpus() / 3)
			continue;

		/* and then don't consume more memory */
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	upa = best_upa;

	/* allocate and fill alloc_info */
	for (group = 0; group < nr_groups; group++)
		nr_units += roundup(group_cnt[group], upa);

	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
	if (!ai)
		return ERR_PTR(-ENOMEM);
	cpu_map = ai->groups[0].cpu_map;

	for (group = 0; group < nr_groups; group++) {
		ai->groups[group].cpu_map = cpu_map;
		cpu_map += roundup(group_cnt[group], upa);
	}

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = alloc_size / upa;
	ai->atom_size = atom_size;
	ai->alloc_size = alloc_size;

	for (group = 0, unit = 0; group_cnt[group]; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];

		/*
		 * Initialize base_offset as if all groups are located
		 * back-to-back. The caller should update this to
		 * reflect actual allocation.
		 */
		gi->base_offset = unit * ai->unit_size;

		for_each_possible_cpu(cpu)
			if (group_map[cpu] == group)
				gi->cpu_map[gi->nr_units++] = cpu;
		gi->nr_units = roundup(gi->nr_units, upa);
		unit += gi->nr_units;
	}
	BUG_ON(unit != nr_units);

	return ai;
}
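
/*
 * An illustrative caller (a sketch only: the distance callback name
 * is hypothetical and a real arch would pick its own atom size, e.g.
 * a large page size such as PMD_SIZE):
 *
 *	struct pcpu_alloc_info *ai;
 *
 *	ai = pcpu_build_alloc_info(PERCPU_MODULE_RESERVE,
 *				   PERCPU_DYNAMIC_RESERVE,
 *				   PMD_SIZE, my_cpu_distance_fn);
 *	if (IS_ERR(ai))
 *		return PTR_ERR(ai);
 *	...
 *	pcpu_free_alloc_info(ai);
 */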

/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
				 const struct pcpu_alloc_info *ai)
{
	int group_width = 1, cpu_width = 1, width;
	char empty_str[] = "--------";
	int alloc = 0, alloc_end = 0;
	int group, v;
	int upa, apl;	/* units per alloc, allocs per line */

	v = ai->nr_groups;
	while (v /= 10)
		group_width++;

	v = num_possible_cpus();
	while (v /= 10)
		cpu_width++;
	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

	upa = ai->alloc_size / ai->unit_size;
	width = upa * (cpu_width + 1) + group_width + 3;
	apl = rounddown_pow_of_two(max(60 / width, 1));

	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

	for (group = 0; group < ai->nr_groups; group++) {
		const struct pcpu_group_info *gi = &ai->groups[group];
		int unit = 0, unit_end = 0;

		BUG_ON(gi->nr_units % upa);
		for (alloc_end += gi->nr_units / upa;
		     alloc < alloc_end; alloc++) {
			if (!(alloc % apl)) {
				printk("\n");
				printk("%spcpu-alloc: ", lvl);
			}
			printk("[%0*d] ", group_width, group);

			for (unit_end += upa; unit < unit_end; unit++)
				if (gi->cpu_map[unit] != NR_CPUS)
					printk("%0*d ", cpu_width,
					       gi->cpu_map[unit]);
				else
					printk("%s ", empty_str);
		}
	}
	printk("\n");
}
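
/*
 * A sketch of the resulting dmesg output (all numbers hypothetical:
 * 16 cpus in one group, 128k units packed 16 to a 2M allocation):
 *
 *	pcpu-alloc: s45056 r8192 d77824 u131072 alloc=1*2097152
 *	pcpu-alloc: [0] 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15
 */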

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area. This function is to be called from arch percpu area
 * setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the number of bytes to
 * reserve after the static area in the first chunk. This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation. This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk. The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
 *
 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
 * and equal to or larger than @ai->static_size + @ai->reserved_size +
 * @ai->dyn_size.
 *
 * @ai->atom_size is the allocation atom size and used as alignment
 * for vm areas.
 *
 * @ai->alloc_size is the allocation size and always a multiple of
 * @ai->atom_size. This is larger than @ai->atom_size if
 * @ai->unit_size is larger than @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe virtual memory layout of
 * percpu areas. Units which should be colocated are put into the
 * same group. Dynamic VM areas will be allocated according to these
 * groupings. If @ai->nr_groups is zero, a single group containing
 * all units is assumed.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
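 * An illustrative per-unit layout of the first chunk (sizes are
 * hypothetical):
 *
 *	base_addr
 *	|
 *	v
 *	+--------+----------+---------+--------+
 *	| static | reserved | dynamic | unused |
 *	+--------+----------+---------+--------+
 *	|<------------- unit_size ------------>|
 *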
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area. They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other. The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunk.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
				  void *base_addr)
{
	static int smap[2], dmap[2];
	size_t dyn_size = ai->dyn_size;
	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned long *group_offsets;
	size_t *group_sizes;
	unsigned long *unit_off;
	unsigned int cpu;
	int *unit_map;
	int group, unit, i;

	/* sanity checks */
	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
	BUG_ON(ai->nr_groups <= 0);
	BUG_ON(!ai->static_size);
	BUG_ON(!base_addr);
	BUG_ON(ai->unit_size < size_sum);
	BUG_ON(ai->unit_size & ~PAGE_MASK);
	BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);

	pcpu_dump_alloc_info(KERN_DEBUG, ai);

	/* process group information and build config tables accordingly */
	group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
	group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
	unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		unit_map[cpu] = NR_CPUS;
	pcpu_first_unit_cpu = NR_CPUS;

	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
		const struct pcpu_group_info *gi = &ai->groups[group];

		group_offsets[group] = gi->base_offset;
		group_sizes[group] = gi->nr_units * ai->unit_size;

		for (i = 0; i < gi->nr_units; i++) {
			cpu = gi->cpu_map[i];
			if (cpu == NR_CPUS)
				continue;

			BUG_ON(cpu > nr_cpu_ids || !cpu_possible(cpu));
			BUG_ON(unit_map[cpu] != NR_CPUS);

			unit_map[cpu] = unit + i;
			unit_off[cpu] = gi->base_offset + i * ai->unit_size;

			if (pcpu_first_unit_cpu == NR_CPUS)
				pcpu_first_unit_cpu = cpu;
		}
	}
	pcpu_last_unit_cpu = cpu;
	pcpu_nr_units = unit;

	for_each_possible_cpu(cpu)
		BUG_ON(unit_map[cpu] == NR_CPUS);

	pcpu_nr_groups = ai->nr_groups;
	pcpu_group_offsets = group_offsets;
	pcpu_group_sizes = group_sizes;
	pcpu_unit_map = unit_map;
	pcpu_unit_offsets = unit_off;

	/* determine basic parameters */
	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_atom_size = ai->atom_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);

	/*
	 * Allocate chunk slots. The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk. If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk. If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
	schunk = alloc_bootmem(pcpu_chunk_struct_size);
	INIT_LIST_HEAD(&schunk->list);
	schunk->base_addr = base_addr;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->immutable = true;
	bitmap_fill(schunk->populated, pcpu_unit_pages);

	if (ai->reserved_size) {
		schunk->free_size = ai->reserved_size;
		pcpu_reserved_chunk = schunk;
		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
	schunk->contig_hint = schunk->free_size;

	schunk->map[schunk->map_used++] = -ai->static_size;
	if (schunk->free_size)
		schunk->map[schunk->map_used++] = schunk->free_size;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = alloc_bootmem(pcpu_chunk_struct_size);
		INIT_LIST_HEAD(&dchunk->list);
		dchunk->base_addr = base_addr;
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
		dchunk->immutable = true;
		bitmap_fill(dchunk->populated, pcpu_unit_pages);

		dchunk->contig_hint = dchunk->free_size = dyn_size;
		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
		dchunk->map[dchunk->map_used++] = dchunk->free_size;
	}

	/* link the first chunk in */
	pcpu_first_chunk = dchunk ?: schunk;
	pcpu_chunk_relocate(pcpu_first_chunk, -1);

	/* we're done */
	pcpu_base_addr = base_addr;
	return 0;
}

const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
	[PCPU_FC_AUTO]	= "auto",
	[PCPU_FC_EMBED]	= "embed",
	[PCPU_FC_PAGE]	= "page",
};

enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

static int __init percpu_alloc_setup(char *str)
{
	if (0)
		/* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
	else if (!strcmp(str, "embed"))
		pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
	else if (!strcmp(str, "page"))
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	else
		pr_warning("PERCPU: unknown allocator %s specified\n", str);

	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);
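
/*
 * For example, booting with "percpu_alloc=page" on the kernel command
 * line forces the page-based first chunk allocator on configurations
 * that support it; unknown values just trigger the warning above.
 */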

#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu page
 * @free_fn: function to free percpu page
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to set up the first chunk, it is allocated
 * by calling @alloc_fn and used as-is without being mapped into
 * vmalloc area. Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses larger page size. Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines thus
 * requiring large vmalloc address space. Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than distances
 * between node memory addresses (ie. 32bit NUMA machines).
 *
 * When @dyn_size is positive, dynamic area might be larger than
 * specified to fill page alignment. When @dyn_size is auto,
 * @dyn_size is just big enough to fill page alignment after static
 * and reserved areas.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned using @free_fn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_alloc_fn_t alloc_fn,
				  pcpu_fc_free_fn_t free_fn)
{
	void *base = (void *)ULONG_MAX;
	void **areas = NULL;
	struct pcpu_alloc_info *ai;
	size_t size_sum, areas_size;
	int group, i, rc;

	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
				   cpu_distance_fn);
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

	areas = alloc_bootmem_nopanic(areas_size);
	if (!areas) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* allocate, copy and determine base address */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		unsigned int cpu = NR_CPUS;
		void *ptr;

		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
			cpu = gi->cpu_map[i];
		BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
		if (!ptr) {
			rc = -ENOMEM;
			goto out_free_areas;
		}
		areas[group] = ptr;

		base = min(ptr, base);

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
				free_fn(ptr, ai->unit_size);
				continue;
			}
			/* copy and return the unused part */
			memcpy(ptr, __per_cpu_load, ai->static_size);
			free_fn(ptr + size_sum, ai->unit_size - size_sum);
		}
	}

	/* base address is now known, determine group base offsets */
	for (group = 0; group < ai->nr_groups; group++)
		ai->groups[group].base_offset = areas[group] - base;

	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
		ai->dyn_size, ai->unit_size);

	rc = pcpu_setup_first_chunk(ai, base);
	goto out_free;

out_free_areas:
	for (group = 0; group < ai->nr_groups; group++)
		free_fn(areas[group],
			ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		free_bootmem(__pa(areas), areas_size);
	return rc;
}
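
/*
 * A minimal usage sketch, mirroring what the default
 * setup_per_cpu_areas() at the bottom of this file does: pass
 * bootmem backed callbacks and let the allocator compute the
 * geometry.
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
 *				    NULL, pcpu_dfl_fc_alloc,
 *				    pcpu_dfl_fc_free);
 *	if (rc < 0)
 *		panic(...);
 */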
#endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
	  !CONFIG_HAVE_SETUP_PER_CPU_AREA */

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator. Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
				 pcpu_fc_alloc_fn_t alloc_fn,
				 pcpu_fc_free_fn_t free_fn,
				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
	char psize_str[16];
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	int unit, i, j, rc;

	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

	ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
		return PTR_ERR(ai);
	BUG_ON(ai->nr_groups != 1);
	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());

	unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
			       sizeof(pages[0]));
	pages = alloc_bootmem(pages_size);

	/* allocate pages */
	j = 0;
	for (unit = 0; unit < num_possible_cpus(); unit++)
		for (i = 0; i < unit_pages; i++) {
			unsigned int cpu = ai->groups[0].cpu_map[unit];
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr) {
				pr_warning("PERCPU: failed to allocate %s page "
					   "for cpu%u\n", psize_str, cpu);
				goto enomem;
			}
			pages[j++] = virt_to_page(ptr);
		}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * ai->unit_size;
	vm_area_register_early(&vm, PAGE_SIZE);

	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned long unit_addr =
			(unsigned long)vm.addr + unit * ai->unit_size;

		for (i = 0; i < unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
				      unit_pages);
		if (rc < 0)
			panic("failed to map percpu area, err=%d\n", rc);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
	}

	/* we're ready, commit */
	pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
		unit_pages, psize_str, vm.addr, ai->static_size,
		ai->reserved_size, ai->dyn_size);

	rc = pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pages[j]), PAGE_SIZE);
	rc = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pages), pages_size);
	pcpu_free_alloc_info(ai);
	return rc;
}
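
/*
 * A sketch of an arch-side caller (the callback names are
 * hypothetical; the pte population step is inherently arch specific):
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *				   my_pcpu_alloc, my_pcpu_free,
 *				   my_populate_pte);
 *	if (rc < 0)
 *		panic("cannot initialize percpu area (err=%d)", rc);
 *
 * where my_pcpu_alloc()/my_pcpu_free() hand out single node-local
 * pages and my_populate_pte() pre-allocates the page tables covering
 * the given address.
 */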
#endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */

/*
 * Generic percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup. This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location. As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because percpu area can piggy back
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
				       size_t align)
{
	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables. That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
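
/*
 * With the offsets in place, the generic per_cpu() accessor reduces
 * to pointer arithmetic - conceptually (a simplification of the
 * SHIFT_PERCPU_PTR() based definition in asm-generic/percpu.h):
 *
 *	per_cpu(var, cpu) ==
 *		*(typeof(var) *)((char *)&per_cpu__##var +
 *				 __per_cpu_offset[cpu])
 */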
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */