/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is a percpu allocator which can handle both static and dynamic
 * areas.  Percpu areas are allocated in chunks in the vmalloc area.
 * Each chunk consists of a boot-time determined number of units and
 * the first chunk is used for static percpu variables in the kernel
 * image (special boot time alloc/init handling is necessary as these
 * areas need to be brought up before allocation services are
 * running).  Units grow as necessary and all units grow or shrink in
 * unison.  When a chunk is filled up, another chunk is allocated,
 * again in the vmalloc area.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  I.e.,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them being
 * as small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative value an allocated one.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks can be determined from the
 * address using the index field in the page struct.  The index field
 * contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   set up the first chunk containing the kernel static percpu area
 */
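
/*
 * To make the map encoding above concrete, an illustrative
 * walk-through with hypothetical values (editor's sketch, not part of
 * the allocator proper).  Consider a chunk whose map holds three
 * entries:
 *
 *	chunk->map = { 64, -128, 320 };		chunk->map_used = 3;
 *
 * Reading left to right with a running byte offset: [0, 64) is free,
 * [64, 192) is allocated and [192, 512) is free.  free_size is
 * 64 + 320 == 384 and contig_hint is guaranteed to be at least 320.
 * Allocating from or freeing into this chunk only rewrites these
 * integers; the pages themselves are populated and mapped separately.
 */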

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
		 + (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
		 - (unsigned long)__per_cpu_start)
#endif

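/*
 * A sketch of the default translation above (illustrative, assuming
 * the default macros are in use): an allocation at offset @off in the
 * first chunk has addr == pcpu_base_addr + off, so
 *
 *	__addr_to_pcpu_ptr(addr) == __per_cpu_start + off
 *
 * i.e. percpu pointers look like offsets into the kernel's static
 * percpu section, and __pcpu_ptr_to_addr() inverts the mapping
 * exactly.  Percpu accessors then add per-cpu unit offsets on top.
 */
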
struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	struct vm_struct	**vms;		/* mapped vmalloc regions */
	bool			immutable;	/* no [de]population allowed */
	unsigned long		populated[];	/* populated bitmap */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit numbers */
static unsigned int pcpu_first_unit_cpu __read_mostly;
static unsigned int pcpu_last_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The size of the
 * reserved region is kept in pcpu_reserved_chunk_limit.  When the
 * reserved area doesn't exist, the following variables contain NULL
 * and 0 respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks, populated bitmap and
 * vmalloc mapping.  The latter is a spinlock and protects the index
 * data structures - chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.
 *
 * Free path accesses and alters only the index data structures, so it
 * can be safely called from atomic context.  When memory needs to be
 * returned to the system, the free path schedules reclaim_work which
 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
 * reclaimed, releases both locks and frees the chunks.  Note that it's
 * necessary to grab both locks to remove a chunk from circulation as
 * the allocation path might be referencing the chunk with only
 * pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

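/*
 * Worked example with illustrative numbers: with PCPU_SLOT_BASE_SHIFT
 * == 5, a chunk with 1024 free bytes maps to slot
 * max(fls(1024) - 5 + 2, 1) == 8, while anything under 16 bytes falls
 * into slot 1.  A fully free chunk (free_size == pcpu_unit_size) is
 * special-cased into the last slot so the reclaim path can find
 * completely empty chunks cheaply.
 */
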
static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
				    unsigned int cpu, int page_idx)
{
	/* must not be used on pre-mapped chunk */
	WARN_ON(chunk->immutable);

	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))

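/*
 * For example (hypothetical bitmap): with pcpu_unit_pages == 8 and
 * chunk->populated == 0b00111100, pcpu_for_each_unpop_region(chunk,
 * rs, re, 0, 8) visits [0, 2) and [6, 8), while
 * pcpu_for_each_pop_region() visits only [2, 6).  The population path
 * uses this to touch just the holes it actually needs to fill.
 */
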
/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * The new slot according to the changed state is determined and
 * @chunk is moved to the slot.  Note that the reserved chunk is never
 * put on chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	/* is it in the first chunk? */
	if (addr >= first_start && addr < first_start + pcpu_unit_size) {
		/* is it in the reserved area? */
		if (addr < first_start + pcpu_reserved_chunk_limit)
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(vmalloc_to_page(addr));
}

/**
 * pcpu_extend_area_map - extend area map for allocation
 * @chunk: target chunk
 *
 * Extend the area map of @chunk so that it can accommodate an
 * allocation.  A single allocation can split an area into three
 * areas, so this function makes sure that @chunk->map has at least
 * two extra slots.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, pcpu_lock.  pcpu_lock is released and reacquired
 * if the area map is extended.
 *
 * RETURNS:
 * 0 if noop, 1 if successfully extended, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
	__releases(lock) __acquires(lock)
{
	int new_alloc;
	int *new;
	size_t size;

	/* has enough? */
	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	spin_unlock_irq(&pcpu_lock);

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
	if (!new) {
		spin_lock_irq(&pcpu_lock);
		return -ENOMEM;
	}

	/*
	 * Acquire pcpu_lock and switch to new area map.  Only free
	 * could have happened in between, so map_used couldn't have
	 * grown.
	 */
	spin_lock_irq(&pcpu_lock);
	BUG_ON(new_alloc < chunk->map_used + 2);

	size = chunk->map_alloc * sizeof(chunk->map[0]);
	memcpy(new, chunk->map, size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using static map.
	 */
	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
		pcpu_mem_free(chunk->map, size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	/* pcpu_lock was dropped; tell the caller to restart its scan */
	return 1;
}

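/*
 * For instance (hypothetical state): a chunk that has grown to
 * map_used == 31 with map_alloc == 32 no longer satisfies the "two
 * extra slots" invariant, so the next allocation attempt doubles the
 * map from PCPU_DFL_MAP_ALLOC upwards until new_alloc >= 33, i.e. to
 * 64 entries, with pcpu_lock dropped around the GFP_KERNEL
 * allocation.
 */
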
/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, a @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and a @tail byte
 * block is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}

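/*
 * A concrete example with made-up sizes: splitting a 512 byte free
 * block at index @i with head == 64 and tail == 192 (i.e. a 256 byte
 * allocation in the middle) turns
 *
 *	{ ..., 512, ... }   into   { ..., 64, 256, 192, ... }
 *
 * with map_used grown by two.  The caller then negates the middle
 * entry to mark it allocated.
 */
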
/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free the area starting at @freeme back to @chunk.  Note that this
 * function only modifies the allocation map.  It doesn't depopulate
 * or unmap the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}

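/*
 * Merging sketch (hypothetical map): freeing offset 128 in
 *
 *	{ -128, -256, 64 }
 *
 * first flips the middle entry to 256, then merges it with the free
 * 64 byte neighbor on the right, leaving { -128, 320 } and raising
 * contig_hint to at least 320.  The pages themselves stay populated
 * until pcpu_reclaim() depopulates a fully free chunk.
 */
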
/**
 * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
 * @chunk: chunk of interest
 * @bitmapp: output parameter for bitmap
 * @may_alloc: may allocate the array
 *
 * Returns pointer to array of pointers to struct page and bitmap,
 * both of which can be indexed with pcpu_page_idx().  The returned
 * array is cleared to zero and *@bitmapp is copied from
 * @chunk->populated.  Note that there is only one array and bitmap
 * and access exclusion is the caller's responsibility.
 *
 * CONTEXT:
 * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc.
 * Otherwise, don't care.
 *
 * RETURNS:
 * Pointer to temp pages array on success, NULL on failure.
 */
static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
					       unsigned long **bitmapp,
					       bool may_alloc)
{
	static struct page **pages;
	static unsigned long *bitmap;
	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
	size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
			     sizeof(unsigned long);

	if (!pages || !bitmap) {
		if (may_alloc && !pages)
			pages = pcpu_mem_alloc(pages_size);
		if (may_alloc && !bitmap)
			bitmap = pcpu_mem_alloc(bitmap_size);
		if (!pages || !bitmap)
			return NULL;
	}

	memset(pages, 0, pages_size);
	bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);

	*bitmapp = bitmap;
	return pages;
}

/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start, @page_end) in @pages for all units.
 * The pages were allocated for @chunk.
 */
static void pcpu_free_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page = pages[pcpu_page_idx(cpu, i)];

			if (page)
				__free_page(page);
		}
	}
}

/**
 * pcpu_alloc_pages - allocate pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 *
 * Allocate pages [@page_start, @page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep) {
				pcpu_free_pages(chunk, pages, populated,
						page_start, page_end);
				return -ENOMEM;
			}
		}
	}
	return 0;
}

/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start,@page_end) of @chunk are about to be
 * unmapped.  Flush cache.  As each flushing trial can be very
 * expensive, issue flush on the whole region at once rather than
 * doing it for each cpu.  This could be overkill but is more
 * scalable.
 */
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	flush_cache_vunmap(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
	unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
}

/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which can be used to pass information to free
 * @populated: populated bitmap
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * Corresponding elements in @pages were cleared by the caller and can
 * be used to carry information to pcpu_free_pages() which will be
 * called after all unmaps are finished.  The caller should call
 * proper pre/post flush functions.
 */
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
			     struct page **pages, unsigned long *populated,
			     int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page;

			page = pcpu_chunk_page(chunk, cpu, i);
			WARN_ON(!page);
			pages[pcpu_page_idx(cpu, i)] = page;
		}
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				   page_end - page_start);
	}

	for (i = page_start; i < page_end; i++)
		__clear_bit(i, populated);
}

/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
 * TLB for the regions.  This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	flush_tlb_kernel_range(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
					PAGE_KERNEL, pages);
}

/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing pages to be mapped
 * @populated: populated bitmap
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.  The
 * caller is responsible for calling pcpu_post_map_flush() after all
 * mappings are complete.
 *
 * This function is responsible for setting corresponding bits in
 * @chunk->populated bitmap and whatever is necessary for reverse
 * lookup (addr -> chunk).
 */
static int pcpu_map_pages(struct pcpu_chunk *chunk,
			  struct page **pages, unsigned long *populated,
			  int page_start, int page_end)
{
	unsigned int cpu, tcpu;
	int i, err;

	for_each_possible_cpu(cpu) {
		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				       &pages[pcpu_page_idx(cpu, page_start)],
				       page_end - page_start);
		if (err < 0)
			goto err;
	}

	/* mapping successful, link chunk and mark populated */
	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu)
			pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
					    chunk);
		__set_bit(i, populated);
	}

	return 0;

err:
	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
				   page_end - page_start);
	}
	return err;
}

/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
 * cache.
 *
 * As with pcpu_pre_unmap_flush(), cache flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
				int page_start, int page_end)
{
	flush_cache_vmap(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  The vcache is flushed before unmapping; the TLB
 * flush is left to vmalloc's lazy handling.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	struct page **pages;
	unsigned long *populated;
	int rs, re;

	/* quick path, check whether it's empty already */
	rs = page_start;
	pcpu_next_unpop(chunk, &rs, &re, page_end);
	if (rs == page_start && re == page_end)
		return;

	/* immutable chunks can't be depopulated */
	WARN_ON(chunk->immutable);

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);

	/* no need to flush tlb, vmalloc will handle it lazily */

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int free_end = page_start, unmap_end = page_start;
	struct page **pages;
	unsigned long *populated;
	unsigned int cpu;
	int rs, re, rc;

	/* quick path, check whether all pages are already there */
	rs = page_start;
	pcpu_next_pop(chunk, &rs, &re, page_end);
	if (rs == page_start && re == page_end)
		goto clear;

	/* need to allocate and map pages, this chunk can't be immutable */
	WARN_ON(chunk->immutable);

	pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
	if (!pages)
		return -ENOMEM;

	/* alloc and map */
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
		if (rc)
			goto err_free;
		free_end = re;
	}

	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		rc = pcpu_map_pages(chunk, pages, populated, rs, re);
		if (rc)
			goto err_unmap;
		unmap_end = re;
	}
	pcpu_post_map_flush(chunk, page_start, page_end);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
clear:
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
	return 0;

err_unmap:
	pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);
	pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
err_free:
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);
	return rc;
}

static void free_pcpu_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	if (chunk->vms)
		pcpu_free_vm_areas(chunk->vms, pcpu_nr_groups);
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	kfree(chunk);
}

static struct pcpu_chunk *alloc_pcpu_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	if (!chunk->map) {
		kfree(chunk);
		return NULL;
	}
	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;

	chunk->vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
				       pcpu_nr_groups, pcpu_atom_size,
				       GFP_KERNEL);
	if (!chunk->vms) {
		free_pcpu_chunk(chunk);
		return NULL;
	}

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;
	chunk->base_addr = chunk->vms[0]->addr - pcpu_group_offsets[0];

	return chunk;
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	int slot, off;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;
		if (size > chunk->contig_hint ||
		    pcpu_extend_area_map(chunk) < 0) {
			err = "failed to extend area map of reserved chunk";
			goto fail_unlock;
		}
		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;
		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			switch (pcpu_extend_area_map(chunk)) {
			case 0:
				break;
			case 1:
				goto restart;	/* pcpu_lock dropped, restart */
			default:
				err = "failed to extend area map";
				goto fail_unlock;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irq(&pcpu_lock);

	chunk = alloc_pcpu_chunk();
	if (!chunk) {
		err = "failed to allocate new chunk";
		goto fail_unlock_mutex;
	}

	spin_lock_irq(&pcpu_lock);
	pcpu_chunk_relocate(chunk, -1);
	goto restart;

area_found:
	spin_unlock_irq(&pcpu_lock);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irq(&pcpu_lock);
		pcpu_free_area(chunk, off);
		err = "failed to populate";
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	/* return address relative to base address */
	return __addr_to_pcpu_ptr(chunk->base_addr + off);

fail_unlock:
	spin_unlock_irq(&pcpu_lock);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	if (warn_limit) {
		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
			   "%s\n", size, align, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("PERCPU: limit reached, disable warning\n");
	}
	return NULL;
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

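/*
 * Illustrative usage sketch (editor's example, not part of this
 * file): a hypothetical caller allocating a dynamic percpu counter
 * and updating this cpu's instance with the accessors from
 * <linux/percpu.h>:
 *
 *	unsigned long *cnt;
 *	int cpu;
 *
 *	cnt = __alloc_percpu(sizeof(*cnt), __alignof__(*cnt));
 *	if (!cnt)
 *		return -ENOMEM;
 *
 *	cpu = get_cpu();
 *	(*per_cpu_ptr(cnt, cpu))++;
 *	put_cpu();
 *	...
 *	free_percpu(cnt);
 *
 * The returned value is a percpu pointer, not directly
 * dereferenceable; it must always go through per_cpu_ptr() or a
 * similar accessor.
 */
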
/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align from reserved
 * percpu area if arch has set it up; otherwise, allocation is served
 * from the same dynamic area.  Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
		free_pcpu_chunk(chunk);
	}

	mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void *ptr)
{
	void *addr = __pcpu_ptr_to_addr(ptr);
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off);

	/* if there is more than one fully free chunk, wake up the grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

static inline size_t pcpu_calc_fc_sizes(size_t static_size,
					size_t reserved_size,
					ssize_t *dyn_sizep)
{
	size_t size_sum;

	size_sum = PFN_ALIGN(static_size + reserved_size +
			     (*dyn_sizep >= 0 ? *dyn_sizep : 0));
	if (*dyn_sizep != 0)
		*dyn_sizep = size_sum - static_size - reserved_size;

	return size_sum;
}

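/*
 * Numerical sketch with assumed inputs: static_size == 45000,
 * reserved_size == 8192 and *dyn_sizep == 20480 give
 * PFN_ALIGN(73672) == 73728 on 4k pages, and dyn_size is bumped to
 * 73728 - 45000 - 8192 == 20536 so the page-rounding slack is handed
 * to the dynamic area.  With *dyn_sizep == -1, the dynamic area
 * simply becomes whatever the page rounding leaves over.
 */
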
/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	free_bootmem(__pa(ai), ai->__ai_size);
}

1326/**
1327 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
Tejun Heoedcb4632009-03-06 14:33:59 +09001328 * @reserved_size: the size of reserved percpu area in bytes
Tejun Heocafe8812009-03-06 14:33:59 +09001329 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
Tejun Heofd1e8a12009-08-14 15:00:51 +09001330 * @atom_size: allocation atom size
1331 * @cpu_distance_fn: callback to determine distance between cpus, optional
1332 *
1333 * This function determines the grouping of units, their mappings to
1334 * cpus and other parameters, considering the needed percpu size, the
1335 * allocation atom size and the distances between CPUs.
1336 *
1337 * Groups are always multiples of atom size, and CPUs which are within
1338 * LOCAL_DISTANCE of each other both ways are grouped together and
1339 * share space for units in the same group.  The returned configuration
1340 * is guaranteed to have CPUs on different nodes in different groups and
1341 * >=75% usage of the allocated virtual address space.
1342 *
1343 * RETURNS:
1344 * On success, pointer to the new allocation_info is returned. On
1345 * failure, ERR_PTR value is returned.
1346 */
1347struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1348 size_t reserved_size, ssize_t dyn_size,
1349 size_t atom_size,
1350 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
Tejun Heo033e48f2009-08-14 15:00:51 +09001351{
1352 static int group_map[NR_CPUS] __initdata;
1353 static int group_cnt[NR_CPUS] __initdata;
1354 const size_t static_size = __per_cpu_end - __per_cpu_start;
Tejun Heofd1e8a12009-08-14 15:00:51 +09001355 int group_cnt_max = 0, nr_groups = 1, nr_units = 0;
Tejun Heo033e48f2009-08-14 15:00:51 +09001356 size_t size_sum, min_unit_size, alloc_size;
1357 int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
Tejun Heofd1e8a12009-08-14 15:00:51 +09001358 int last_allocs, group, unit;
Tejun Heo033e48f2009-08-14 15:00:51 +09001359 unsigned int cpu, tcpu;
Tejun Heofd1e8a12009-08-14 15:00:51 +09001360 struct pcpu_alloc_info *ai;
1361 unsigned int *cpu_map;
Tejun Heo033e48f2009-08-14 15:00:51 +09001362
Tejun Heofb59e722009-09-24 18:50:34 +09001363 /* this function may be called multiple times */
1364 memset(group_map, 0, sizeof(group_map));
1365	memset(group_cnt, 0, sizeof(group_cnt));
1366
Tejun Heo033e48f2009-08-14 15:00:51 +09001367 /*
1368 * Determine min_unit_size, alloc_size and max_upa such that
Tejun Heofd1e8a12009-08-14 15:00:51 +09001369	 * alloc_size is a multiple of atom_size and is the smallest size
Tejun Heo033e48f2009-08-14 15:00:51 +09001370	 * which can accommodate 4k aligned segments which are equal to
1371	 * or larger than min_unit_size.
1372 */
Tejun Heofd1e8a12009-08-14 15:00:51 +09001373 size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
Tejun Heo033e48f2009-08-14 15:00:51 +09001374 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1375
Tejun Heofd1e8a12009-08-14 15:00:51 +09001376 alloc_size = roundup(min_unit_size, atom_size);
Tejun Heo033e48f2009-08-14 15:00:51 +09001377 upa = alloc_size / min_unit_size;
1378 while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1379 upa--;
1380 max_upa = upa;
1381
1382 /* group cpus according to their proximity */
1383 for_each_possible_cpu(cpu) {
1384 group = 0;
1385 next_group:
1386 for_each_possible_cpu(tcpu) {
1387 if (cpu == tcpu)
1388 break;
Tejun Heofd1e8a12009-08-14 15:00:51 +09001389 if (group_map[tcpu] == group && cpu_distance_fn &&
Tejun Heo033e48f2009-08-14 15:00:51 +09001390 (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1391 cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1392 group++;
Tejun Heofd1e8a12009-08-14 15:00:51 +09001393 nr_groups = max(nr_groups, group + 1);
Tejun Heo033e48f2009-08-14 15:00:51 +09001394 goto next_group;
1395 }
1396 }
1397 group_map[cpu] = group;
1398 group_cnt[group]++;
1399 group_cnt_max = max(group_cnt_max, group_cnt[group]);
1400 }
1401
1402 /*
1403 * Expand unit size until address space usage goes over 75%
1404 * and then as much as possible without using more address
1405 * space.
1406 */
1407 last_allocs = INT_MAX;
1408 for (upa = max_upa; upa; upa--) {
1409 int allocs = 0, wasted = 0;
1410
1411 if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1412 continue;
1413
Tejun Heofd1e8a12009-08-14 15:00:51 +09001414 for (group = 0; group < nr_groups; group++) {
Tejun Heo033e48f2009-08-14 15:00:51 +09001415 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1416 allocs += this_allocs;
1417 wasted += this_allocs * upa - group_cnt[group];
1418 }
1419
1420 /*
1421 * Don't accept if wastage is over 25%. The
1422 * greater-than comparison ensures upa==1 always
1423 * passes the following check.
1424 */
1425 if (wasted > num_possible_cpus() / 3)
1426 continue;
1427
1428 /* and then don't consume more memory */
1429 if (allocs > last_allocs)
1430 break;
1431 last_allocs = allocs;
1432 best_upa = upa;
1433 }
Tejun Heofd1e8a12009-08-14 15:00:51 +09001434 upa = best_upa;
Tejun Heo033e48f2009-08-14 15:00:51 +09001435
Tejun Heofd1e8a12009-08-14 15:00:51 +09001436 /* allocate and fill alloc_info */
1437 for (group = 0; group < nr_groups; group++)
1438 nr_units += roundup(group_cnt[group], upa);
1439
1440 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1441 if (!ai)
1442 return ERR_PTR(-ENOMEM);
1443 cpu_map = ai->groups[0].cpu_map;
1444
1445 for (group = 0; group < nr_groups; group++) {
1446 ai->groups[group].cpu_map = cpu_map;
1447 cpu_map += roundup(group_cnt[group], upa);
Tejun Heo033e48f2009-08-14 15:00:51 +09001448 }
1449
Tejun Heofd1e8a12009-08-14 15:00:51 +09001450 ai->static_size = static_size;
1451 ai->reserved_size = reserved_size;
1452 ai->dyn_size = dyn_size;
1453 ai->unit_size = alloc_size / upa;
1454 ai->atom_size = atom_size;
1455 ai->alloc_size = alloc_size;
1456
1457 for (group = 0, unit = 0; group_cnt[group]; group++) {
1458 struct pcpu_group_info *gi = &ai->groups[group];
1459
1460 /*
1461 * Initialize base_offset as if all groups are located
1462 * back-to-back. The caller should update this to
1463 * reflect actual allocation.
1464 */
1465 gi->base_offset = unit * ai->unit_size;
1466
1467 for_each_possible_cpu(cpu)
1468 if (group_map[cpu] == group)
1469 gi->cpu_map[gi->nr_units++] = cpu;
1470 gi->nr_units = roundup(gi->nr_units, upa);
1471 unit += gi->nr_units;
1472 }
1473 BUG_ON(unit != nr_units);
1474
1475 return ai;
Tejun Heo033e48f2009-08-14 15:00:51 +09001476}
1477
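/*
 * Worked example (made-up numbers, continuing the one above): with
 * size_sum = 73728 and atom_size = 2M, min_unit_size = 73728 and
 * alloc_size = roundup(73728, 2M) = 2M, so upa starts at
 * 2097152 / 73728 = 28.  28 doesn't divide 2M evenly, so upa is
 * walked down to 16, the largest value which both divides alloc_size
 * and yields a page-aligned unit, giving max_upa = 16 and a unit
 * size of 2M / 16 = 131072.
 */
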
Tejun Heofd1e8a12009-08-14 15:00:51 +09001478/**
1479 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1480 * @lvl: loglevel
1481 * @ai: allocation info to dump
1482 *
1483 * Print out information about @ai using loglevel @lvl.
1484 */
1485static void pcpu_dump_alloc_info(const char *lvl,
1486 const struct pcpu_alloc_info *ai)
Tejun Heo033e48f2009-08-14 15:00:51 +09001487{
Tejun Heofd1e8a12009-08-14 15:00:51 +09001488 int group_width = 1, cpu_width = 1, width;
Tejun Heo033e48f2009-08-14 15:00:51 +09001489 char empty_str[] = "--------";
Tejun Heofd1e8a12009-08-14 15:00:51 +09001490 int alloc = 0, alloc_end = 0;
1491 int group, v;
1492 int upa, apl; /* units per alloc, allocs per line */
Tejun Heo033e48f2009-08-14 15:00:51 +09001493
Tejun Heofd1e8a12009-08-14 15:00:51 +09001494 v = ai->nr_groups;
Tejun Heo033e48f2009-08-14 15:00:51 +09001495 while (v /= 10)
Tejun Heofd1e8a12009-08-14 15:00:51 +09001496 group_width++;
Tejun Heo033e48f2009-08-14 15:00:51 +09001497
Tejun Heofd1e8a12009-08-14 15:00:51 +09001498 v = num_possible_cpus();
1499 while (v /= 10)
1500 cpu_width++;
1501 empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
Tejun Heo033e48f2009-08-14 15:00:51 +09001502
Tejun Heofd1e8a12009-08-14 15:00:51 +09001503 upa = ai->alloc_size / ai->unit_size;
1504 width = upa * (cpu_width + 1) + group_width + 3;
1505 apl = rounddown_pow_of_two(max(60 / width, 1));
Tejun Heo033e48f2009-08-14 15:00:51 +09001506
Tejun Heofd1e8a12009-08-14 15:00:51 +09001507 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1508 lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1509 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1510
1511 for (group = 0; group < ai->nr_groups; group++) {
1512 const struct pcpu_group_info *gi = &ai->groups[group];
1513 int unit = 0, unit_end = 0;
1514
1515 BUG_ON(gi->nr_units % upa);
1516 for (alloc_end += gi->nr_units / upa;
1517 alloc < alloc_end; alloc++) {
1518 if (!(alloc % apl)) {
Tejun Heo033e48f2009-08-14 15:00:51 +09001519 printk("\n");
Tejun Heofd1e8a12009-08-14 15:00:51 +09001520 printk("%spcpu-alloc: ", lvl);
1521 }
1522 printk("[%0*d] ", group_width, group);
1523
1524 for (unit_end += upa; unit < unit_end; unit++)
1525 if (gi->cpu_map[unit] != NR_CPUS)
1526 printk("%0*d ", cpu_width,
1527 gi->cpu_map[unit]);
1528 else
1529 printk("%s ", empty_str);
Tejun Heo033e48f2009-08-14 15:00:51 +09001530 }
Tejun Heo033e48f2009-08-14 15:00:51 +09001531 }
1532 printk("\n");
1533}
Tejun Heo033e48f2009-08-14 15:00:51 +09001534
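/*
 * Illustrative output (made-up 4-cpu UMA configuration with
 * atom_size == PAGE_SIZE, hence upa == 1):
 *
 *	pcpu-alloc: s45000 r8192 d20536 u73728 alloc=18*4096
 *	pcpu-alloc: [0] 0 [0] 1 [0] 2 [0] 3
 *
 * i.e. static/reserved/dynamic/unit sizes followed by one [group]
 * tag per allocation with one column per unit; units not backing any
 * possible CPU are printed as dashes.
 */
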
Tejun Heofbf59bc2009-02-20 16:29:08 +09001535/**
Tejun Heo8d408b42009-02-24 11:57:21 +09001536 * pcpu_setup_first_chunk - initialize the first percpu chunk
Tejun Heofd1e8a12009-08-14 15:00:51 +09001537 * @ai: pcpu_alloc_info describing how the percpu area is shaped
Tejun Heo38a6be52009-07-04 08:10:59 +09001538 * @base_addr: mapped address
Tejun Heofbf59bc2009-02-20 16:29:08 +09001539 *
Tejun Heo8d408b42009-02-24 11:57:21 +09001540 * Initialize the first percpu chunk which contains the kernel static
1541 * percpu area.  This function is to be called from the arch percpu area
Tejun Heo38a6be52009-07-04 08:10:59 +09001542 * setup path.
Tejun Heo8d408b42009-02-24 11:57:21 +09001543 *
Tejun Heofd1e8a12009-08-14 15:00:51 +09001544 * @ai contains all information necessary to initialize the first
1545 * chunk and prime the dynamic percpu allocator.
Tejun Heo8d408b42009-02-24 11:57:21 +09001546 *
Tejun Heofd1e8a12009-08-14 15:00:51 +09001547 * @ai->static_size is the size of static percpu area.
1548 *
1549 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
Tejun Heoedcb4632009-03-06 14:33:59 +09001550 * reserve after the static area in the first chunk. This reserves
1551 * the first chunk such that it's available only through reserved
1552 * percpu allocation. This is primarily used to serve module percpu
1553 * static areas on architectures where the addressing model has
1554 * limited offset range for symbol relocations to guarantee module
1555 * percpu symbols fall inside the relocatable range.
1556 *
Tejun Heofd1e8a12009-08-14 15:00:51 +09001557 * @ai->dyn_size determines the number of bytes available for dynamic
1558 * allocation in the first chunk. The area between @ai->static_size +
1559 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
Tejun Heo6074d5b2009-03-10 16:27:48 +09001560 *
Tejun Heofd1e8a12009-08-14 15:00:51 +09001561 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1562 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1563 * @ai->dyn_size.
Tejun Heo8d408b42009-02-24 11:57:21 +09001564 *
Tejun Heofd1e8a12009-08-14 15:00:51 +09001565 * @ai->atom_size is the allocation atom size and used as alignment
1566 * for vm areas.
Tejun Heo8d408b42009-02-24 11:57:21 +09001567 *
Tejun Heofd1e8a12009-08-14 15:00:51 +09001568 * @ai->alloc_size is the allocation size and is always a multiple
1569 * of @ai->atom_size.  This is larger than @ai->atom_size if
1570 * @ai->unit_size is larger than @ai->atom_size.
1571 *
1572 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1573 * percpu areas. Units which should be colocated are put into the
1574 * same group. Dynamic VM areas will be allocated according to these
1575 * groupings. If @ai->nr_groups is zero, a single group containing
1576 * all units is assumed.
Tejun Heo8d408b42009-02-24 11:57:21 +09001577 *
Tejun Heo38a6be52009-07-04 08:10:59 +09001578 * The caller should have mapped the first chunk at @base_addr and
1579 * copied static data to each unit.
Tejun Heofbf59bc2009-02-20 16:29:08 +09001580 *
Tejun Heoedcb4632009-03-06 14:33:59 +09001581 * If the first chunk ends up with both reserved and dynamic areas, it
1582 * is served by two chunks - one to serve the core static and reserved
1583 * areas and the other for the dynamic area. They share the same vm
1584 * and page map but use different area allocation maps to stay away
1585 * from each other.  The latter chunk is circulated in the chunk slots
1586 * and is available for dynamic allocation like any other chunk.
1587 *
Tejun Heofbf59bc2009-02-20 16:29:08 +09001588 * RETURNS:
Tejun Heofb435d52009-08-14 15:00:51 +09001589 * 0 on success, -errno on failure.
Tejun Heofbf59bc2009-02-20 16:29:08 +09001590 */
Tejun Heofb435d52009-08-14 15:00:51 +09001591int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1592 void *base_addr)
Tejun Heofbf59bc2009-02-20 16:29:08 +09001593{
Tejun Heo635b75f2009-09-24 09:43:11 +09001594 static char cpus_buf[4096] __initdata;
Tejun Heoedcb4632009-03-06 14:33:59 +09001595 static int smap[2], dmap[2];
Tejun Heofd1e8a12009-08-14 15:00:51 +09001596 size_t dyn_size = ai->dyn_size;
1597 size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
Tejun Heoedcb4632009-03-06 14:33:59 +09001598 struct pcpu_chunk *schunk, *dchunk = NULL;
Tejun Heo65632972009-08-14 15:00:52 +09001599 unsigned long *group_offsets;
1600 size_t *group_sizes;
Tejun Heofb435d52009-08-14 15:00:51 +09001601 unsigned long *unit_off;
Tejun Heofbf59bc2009-02-20 16:29:08 +09001602 unsigned int cpu;
Tejun Heofd1e8a12009-08-14 15:00:51 +09001603 int *unit_map;
1604 int group, unit, i;
Tejun Heofbf59bc2009-02-20 16:29:08 +09001605
Tejun Heo635b75f2009-09-24 09:43:11 +09001606 cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1607
1608#define PCPU_SETUP_BUG_ON(cond) do { \
1609 if (unlikely(cond)) { \
1610		pr_emerg("PERCPU: failed to initialize, %s\n", #cond);	\
1611 pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \
1612 pcpu_dump_alloc_info(KERN_EMERG, ai); \
1613 BUG(); \
1614 } \
1615} while (0)
1616
Tejun Heo2f39e632009-07-04 08:11:00 +09001617 /* sanity checks */
Tejun Heoedcb4632009-03-06 14:33:59 +09001618 BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
1619 ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
Tejun Heo635b75f2009-09-24 09:43:11 +09001620 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1621 PCPU_SETUP_BUG_ON(!ai->static_size);
1622 PCPU_SETUP_BUG_ON(!base_addr);
1623 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1624 PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1625 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
Tejun Heo8d408b42009-02-24 11:57:21 +09001626
Tejun Heo65632972009-08-14 15:00:52 +09001627 /* process group information and build config tables accordingly */
1628 group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
1629 group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
Tejun Heofd1e8a12009-08-14 15:00:51 +09001630 unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
Tejun Heofb435d52009-08-14 15:00:51 +09001631 unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
Tejun Heo2f39e632009-07-04 08:11:00 +09001632
Tejun Heofd1e8a12009-08-14 15:00:51 +09001633 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
Tejun Heoffe0d5a2009-09-29 09:17:56 +09001634 unit_map[cpu] = UINT_MAX;
Tejun Heofd1e8a12009-08-14 15:00:51 +09001635 pcpu_first_unit_cpu = NR_CPUS;
Tejun Heo2f39e632009-07-04 08:11:00 +09001636
Tejun Heofd1e8a12009-08-14 15:00:51 +09001637 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1638 const struct pcpu_group_info *gi = &ai->groups[group];
Tejun Heo2f39e632009-07-04 08:11:00 +09001639
Tejun Heo65632972009-08-14 15:00:52 +09001640 group_offsets[group] = gi->base_offset;
1641 group_sizes[group] = gi->nr_units * ai->unit_size;
1642
Tejun Heofd1e8a12009-08-14 15:00:51 +09001643 for (i = 0; i < gi->nr_units; i++) {
1644 cpu = gi->cpu_map[i];
1645 if (cpu == NR_CPUS)
1646 continue;
1647
Tejun Heo635b75f2009-09-24 09:43:11 +09001648			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
1649 PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1650 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
Tejun Heofd1e8a12009-08-14 15:00:51 +09001651
1652 unit_map[cpu] = unit + i;
Tejun Heofb435d52009-08-14 15:00:51 +09001653 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1654
Tejun Heofd1e8a12009-08-14 15:00:51 +09001655 if (pcpu_first_unit_cpu == NR_CPUS)
Tejun Heo2f39e632009-07-04 08:11:00 +09001656 pcpu_first_unit_cpu = cpu;
Tejun Heo2f39e632009-07-04 08:11:00 +09001657 }
Tejun Heo2f39e632009-07-04 08:11:00 +09001658 }
Tejun Heofd1e8a12009-08-14 15:00:51 +09001659 pcpu_last_unit_cpu = cpu;
1660 pcpu_nr_units = unit;
1661
1662 for_each_possible_cpu(cpu)
Tejun Heo635b75f2009-09-24 09:43:11 +09001663 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1664
1665 /* we're done parsing the input, undefine BUG macro and dump config */
1666#undef PCPU_SETUP_BUG_ON
1667 pcpu_dump_alloc_info(KERN_INFO, ai);
Tejun Heofd1e8a12009-08-14 15:00:51 +09001668
Tejun Heo65632972009-08-14 15:00:52 +09001669 pcpu_nr_groups = ai->nr_groups;
1670 pcpu_group_offsets = group_offsets;
1671 pcpu_group_sizes = group_sizes;
Tejun Heofd1e8a12009-08-14 15:00:51 +09001672 pcpu_unit_map = unit_map;
Tejun Heofb435d52009-08-14 15:00:51 +09001673 pcpu_unit_offsets = unit_off;
Tejun Heo2f39e632009-07-04 08:11:00 +09001674
1675 /* determine basic parameters */
Tejun Heofd1e8a12009-08-14 15:00:51 +09001676 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
Tejun Heod9b55ee2009-02-24 11:57:21 +09001677 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
Tejun Heo65632972009-08-14 15:00:52 +09001678 pcpu_atom_size = ai->atom_size;
Tejun Heoce3141a2009-07-04 08:11:00 +09001679 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1680 BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
Tejun Heocafe8812009-03-06 14:33:59 +09001681
Tejun Heod9b55ee2009-02-24 11:57:21 +09001682 /*
1683 * Allocate chunk slots. The additional last slot is for
1684 * empty chunks.
1685 */
1686 pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
Tejun Heofbf59bc2009-02-20 16:29:08 +09001687 pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
1688 for (i = 0; i < pcpu_nr_slots; i++)
1689 INIT_LIST_HEAD(&pcpu_slot[i]);
1690
Tejun Heoedcb4632009-03-06 14:33:59 +09001691 /*
1692 * Initialize static chunk. If reserved_size is zero, the
1693 * static chunk covers static area + dynamic allocation area
1694 * in the first chunk. If reserved_size is not zero, it
1695 * covers static area + reserved area (mostly used for module
1696 * static percpu allocation).
1697 */
Tejun Heo2441d152009-03-06 14:33:59 +09001698 schunk = alloc_bootmem(pcpu_chunk_struct_size);
1699 INIT_LIST_HEAD(&schunk->list);
Tejun Heobba174f2009-08-14 15:00:51 +09001700 schunk->base_addr = base_addr;
Tejun Heo61ace7f2009-03-06 14:33:59 +09001701 schunk->map = smap;
1702 schunk->map_alloc = ARRAY_SIZE(smap);
Tejun Heo38a6be52009-07-04 08:10:59 +09001703 schunk->immutable = true;
Tejun Heoce3141a2009-07-04 08:11:00 +09001704 bitmap_fill(schunk->populated, pcpu_unit_pages);
Tejun Heoedcb4632009-03-06 14:33:59 +09001705
Tejun Heofd1e8a12009-08-14 15:00:51 +09001706 if (ai->reserved_size) {
1707 schunk->free_size = ai->reserved_size;
Tejun Heoae9e6bc92009-04-02 13:19:54 +09001708 pcpu_reserved_chunk = schunk;
Tejun Heofd1e8a12009-08-14 15:00:51 +09001709 pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
Tejun Heoedcb4632009-03-06 14:33:59 +09001710 } else {
1711 schunk->free_size = dyn_size;
1712 dyn_size = 0; /* dynamic area covered */
1713 }
Tejun Heo2441d152009-03-06 14:33:59 +09001714 schunk->contig_hint = schunk->free_size;
Tejun Heofbf59bc2009-02-20 16:29:08 +09001715
Tejun Heofd1e8a12009-08-14 15:00:51 +09001716 schunk->map[schunk->map_used++] = -ai->static_size;
Tejun Heo61ace7f2009-03-06 14:33:59 +09001717 if (schunk->free_size)
1718 schunk->map[schunk->map_used++] = schunk->free_size;
1719
Tejun Heoedcb4632009-03-06 14:33:59 +09001720 /* init dynamic chunk if necessary */
1721 if (dyn_size) {
Tejun Heoce3141a2009-07-04 08:11:00 +09001722 dchunk = alloc_bootmem(pcpu_chunk_struct_size);
Tejun Heoedcb4632009-03-06 14:33:59 +09001723 INIT_LIST_HEAD(&dchunk->list);
Tejun Heobba174f2009-08-14 15:00:51 +09001724 dchunk->base_addr = base_addr;
Tejun Heoedcb4632009-03-06 14:33:59 +09001725 dchunk->map = dmap;
1726 dchunk->map_alloc = ARRAY_SIZE(dmap);
Tejun Heo38a6be52009-07-04 08:10:59 +09001727 dchunk->immutable = true;
Tejun Heoce3141a2009-07-04 08:11:00 +09001728 bitmap_fill(dchunk->populated, pcpu_unit_pages);
Tejun Heoedcb4632009-03-06 14:33:59 +09001729
1730 dchunk->contig_hint = dchunk->free_size = dyn_size;
1731 dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1732 dchunk->map[dchunk->map_used++] = dchunk->free_size;
1733 }
1734
Tejun Heo2441d152009-03-06 14:33:59 +09001735 /* link the first chunk in */
Tejun Heoae9e6bc92009-04-02 13:19:54 +09001736 pcpu_first_chunk = dchunk ?: schunk;
1737 pcpu_chunk_relocate(pcpu_first_chunk, -1);
Tejun Heofbf59bc2009-02-20 16:29:08 +09001738
1739 /* we're done */
Tejun Heobba174f2009-08-14 15:00:51 +09001740 pcpu_base_addr = base_addr;
Tejun Heofb435d52009-08-14 15:00:51 +09001741 return 0;
Tejun Heofbf59bc2009-02-20 16:29:08 +09001742}
Tejun Heo66c3a752009-03-10 16:27:48 +09001743
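/*
 * Resulting first chunk layout for one unit when @ai->reserved_size
 * is non-zero (recap of the kernel-doc above):
 *
 *	base_addr
 *	|<- static ->|<- reserved ->|<- dynamic ->|<--- unused --->|
 *	|<-------- schunk --------->|             |<-- unit_size --|
 *	|<--- pre-allocated in dchunk's map ----->|
 *
 * schunk (pcpu_reserved_chunk) serves only reserved allocations;
 * dchunk covers the same unit but has everything below
 * pcpu_reserved_chunk_limit pre-marked allocated, so only the
 * dynamic region is ever handed out from the normal chunk slots.
 */
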
Tejun Heof58dc012009-08-14 15:00:50 +09001744const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
1745 [PCPU_FC_AUTO] = "auto",
1746 [PCPU_FC_EMBED] = "embed",
1747 [PCPU_FC_PAGE] = "page",
Tejun Heof58dc012009-08-14 15:00:50 +09001748};
Tejun Heo66c3a752009-03-10 16:27:48 +09001749
Tejun Heof58dc012009-08-14 15:00:50 +09001750enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1751
1752static int __init percpu_alloc_setup(char *str)
Tejun Heo66c3a752009-03-10 16:27:48 +09001753{
Tejun Heof58dc012009-08-14 15:00:50 +09001754 if (0)
1755 /* nada */;
1756#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1757 else if (!strcmp(str, "embed"))
1758 pcpu_chosen_fc = PCPU_FC_EMBED;
1759#endif
1760#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1761 else if (!strcmp(str, "page"))
1762 pcpu_chosen_fc = PCPU_FC_PAGE;
1763#endif
Tejun Heof58dc012009-08-14 15:00:50 +09001764 else
1765 pr_warning("PERCPU: unknown allocator %s specified\n", str);
Tejun Heo66c3a752009-03-10 16:27:48 +09001766
Tejun Heof58dc012009-08-14 15:00:50 +09001767 return 0;
Tejun Heo66c3a752009-03-10 16:27:48 +09001768}
Tejun Heof58dc012009-08-14 15:00:50 +09001769early_param("percpu_alloc", percpu_alloc_setup);
Tejun Heo66c3a752009-03-10 16:27:48 +09001770
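/*
 * e.g. booting with "percpu_alloc=page" forces the page-based first
 * chunk allocator on kernels built with
 * CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK; unknown values fall through
 * to the warning above and leave the default auto selection in place.
 */
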
Tejun Heo08fc4582009-08-14 15:00:49 +09001771#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1772 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
Tejun Heo66c3a752009-03-10 16:27:48 +09001773/**
1774 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
Tejun Heo66c3a752009-03-10 16:27:48 +09001775 * @reserved_size: the size of reserved percpu area in bytes
1776 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
Tejun Heoc8826dd2009-08-14 15:00:52 +09001777 * @atom_size: allocation atom size
1778 * @cpu_distance_fn: callback to determine distance between cpus, optional
1779 * @alloc_fn: function to allocate percpu page
1780 * @free_fn: function to free percpu page
Tejun Heo66c3a752009-03-10 16:27:48 +09001781 *
1782 * This is a helper to ease setting up an embedded first percpu chunk and
1783 * can be called where pcpu_setup_first_chunk() is expected.
1784 *
1785 * If this function is used to setup the first chunk, it is allocated
Tejun Heoc8826dd2009-08-14 15:00:52 +09001786 * by calling @alloc_fn and used as-is without being mapped into
1787 * vmalloc area. Allocations are always whole multiples of @atom_size
1788 * aligned to @atom_size.
1789 *
1790 * This enables the first chunk to piggy back on the linear physical
1791 * mapping which often uses larger page size. Please note that this
1792 * can result in a very sparse cpu->unit mapping on NUMA machines,
1793 * thus requiring a large vmalloc address space.  Don't use this
1794 * allocator if vmalloc space is not orders of magnitude larger than
1795 * distances between node memory addresses (i.e. 32-bit NUMA machines).
Tejun Heo66c3a752009-03-10 16:27:48 +09001796 *
1797 * When @dyn_size is positive, the dynamic area might be larger than
Tejun Heo788e5ab2009-07-04 08:10:58 +09001798 * specified in order to fill page alignment.  When @dyn_size is auto
1799 * (-1), @dyn_size is just big enough to fill page alignment after the
1800 * static and reserved areas.
Tejun Heo66c3a752009-03-10 16:27:48 +09001801 *
1802 * If the needed size is smaller than the minimum or specified unit
Tejun Heoc8826dd2009-08-14 15:00:52 +09001803 * size, the leftover is returned using @free_fn.
Tejun Heo66c3a752009-03-10 16:27:48 +09001804 *
1805 * RETURNS:
Tejun Heofb435d52009-08-14 15:00:51 +09001806 * 0 on success, -errno on failure.
Tejun Heo66c3a752009-03-10 16:27:48 +09001807 */
Tejun Heoc8826dd2009-08-14 15:00:52 +09001808int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
1809 size_t atom_size,
1810 pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1811 pcpu_fc_alloc_fn_t alloc_fn,
1812 pcpu_fc_free_fn_t free_fn)
Tejun Heo66c3a752009-03-10 16:27:48 +09001813{
Tejun Heoc8826dd2009-08-14 15:00:52 +09001814 void *base = (void *)ULONG_MAX;
1815 void **areas = NULL;
Tejun Heofd1e8a12009-08-14 15:00:51 +09001816 struct pcpu_alloc_info *ai;
Tejun Heo6ea529a2009-09-24 18:46:01 +09001817 size_t size_sum, areas_size, max_distance;
Tejun Heoc8826dd2009-08-14 15:00:52 +09001818 int group, i, rc;
Tejun Heo66c3a752009-03-10 16:27:48 +09001819
Tejun Heoc8826dd2009-08-14 15:00:52 +09001820 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1821 cpu_distance_fn);
Tejun Heofd1e8a12009-08-14 15:00:51 +09001822 if (IS_ERR(ai))
1823 return PTR_ERR(ai);
Tejun Heo66c3a752009-03-10 16:27:48 +09001824
Tejun Heofd1e8a12009-08-14 15:00:51 +09001825 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
Tejun Heoc8826dd2009-08-14 15:00:52 +09001826 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
Tejun Heo66c3a752009-03-10 16:27:48 +09001827
Tejun Heoc8826dd2009-08-14 15:00:52 +09001828 areas = alloc_bootmem_nopanic(areas_size);
1829 if (!areas) {
Tejun Heofb435d52009-08-14 15:00:51 +09001830 rc = -ENOMEM;
Tejun Heoc8826dd2009-08-14 15:00:52 +09001831 goto out_free;
Tejun Heofa8a7092009-06-22 11:56:24 +09001832 }
Tejun Heo66c3a752009-03-10 16:27:48 +09001833
Tejun Heoc8826dd2009-08-14 15:00:52 +09001834 /* allocate, copy and determine base address */
1835 for (group = 0; group < ai->nr_groups; group++) {
1836 struct pcpu_group_info *gi = &ai->groups[group];
1837 unsigned int cpu = NR_CPUS;
1838 void *ptr;
Tejun Heo66c3a752009-03-10 16:27:48 +09001839
Tejun Heoc8826dd2009-08-14 15:00:52 +09001840 for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1841 cpu = gi->cpu_map[i];
1842 BUG_ON(cpu == NR_CPUS);
1843
1844 /* allocate space for the whole group */
1845 ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1846 if (!ptr) {
1847 rc = -ENOMEM;
1848 goto out_free_areas;
1849 }
1850 areas[group] = ptr;
1851
1852 base = min(ptr, base);
1853
1854 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1855 if (gi->cpu_map[i] == NR_CPUS) {
1856 /* unused unit, free whole */
1857 free_fn(ptr, ai->unit_size);
1858 continue;
1859 }
1860 /* copy and return the unused part */
1861 memcpy(ptr, __per_cpu_load, ai->static_size);
1862 free_fn(ptr + size_sum, ai->unit_size - size_sum);
1863 }
Tejun Heo66c3a752009-03-10 16:27:48 +09001864 }
1865
Tejun Heoc8826dd2009-08-14 15:00:52 +09001866 /* base address is now known, determine group base offsets */
Tejun Heo6ea529a2009-09-24 18:46:01 +09001867 max_distance = 0;
1868 for (group = 0; group < ai->nr_groups; group++) {
Tejun Heoc8826dd2009-08-14 15:00:52 +09001869 ai->groups[group].base_offset = areas[group] - base;
Tejun Heo1a0c3292009-10-04 09:31:05 +09001870 max_distance = max_t(size_t, max_distance,
1871 ai->groups[group].base_offset);
Tejun Heo6ea529a2009-09-24 18:46:01 +09001872 }
1873 max_distance += ai->unit_size;
1874
1875 /* warn if maximum distance is further than 75% of vmalloc space */
1876 if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
Tejun Heo1a0c3292009-10-04 09:31:05 +09001877 pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
Tejun Heo6ea529a2009-09-24 18:46:01 +09001878 "space 0x%lx\n",
1879 max_distance, VMALLOC_END - VMALLOC_START);
1880#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1881 /* and fail if we have fallback */
1882 rc = -EINVAL;
1883 goto out_free;
1884#endif
1885 }
Tejun Heoc8826dd2009-08-14 15:00:52 +09001886
Tejun Heo004018e2009-08-14 15:00:49 +09001887 pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
Tejun Heofd1e8a12009-08-14 15:00:51 +09001888 PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
1889 ai->dyn_size, ai->unit_size);
Tejun Heo66c3a752009-03-10 16:27:48 +09001890
Tejun Heofb435d52009-08-14 15:00:51 +09001891 rc = pcpu_setup_first_chunk(ai, base);
Tejun Heoc8826dd2009-08-14 15:00:52 +09001892 goto out_free;
1893
1894out_free_areas:
1895 for (group = 0; group < ai->nr_groups; group++)
1896 free_fn(areas[group],
1897 ai->groups[group].nr_units * ai->unit_size);
1898out_free:
Tejun Heofd1e8a12009-08-14 15:00:51 +09001899 pcpu_free_alloc_info(ai);
Tejun Heoc8826dd2009-08-14 15:00:52 +09001900 if (areas)
1901 free_bootmem(__pa(areas), areas_size);
Tejun Heofb435d52009-08-14 15:00:51 +09001902 return rc;
Tejun Heod4b95f82009-07-04 08:10:59 +09001903}
Tejun Heo08fc4582009-08-14 15:00:49 +09001904#endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
1905 !CONFIG_HAVE_SETUP_PER_CPU_AREA */
Tejun Heod4b95f82009-07-04 08:10:59 +09001906
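/*
 * Call sketch (hypothetical arch code, kept out of the build): how an
 * arch might use the embed helper with 2M allocation atoms and a
 * node-distance callback.  my_cpu_distance(), my_fc_alloc() and
 * my_fc_free() are stand-ins for arch helpers, cf.
 * pcpu_dfl_fc_alloc()/pcpu_dfl_fc_free() near the end of this file.
 */
#if 0
void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, 1 << 21,
				    my_cpu_distance, my_fc_alloc, my_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas (err=%d).", rc);

	/* offsets let per_cpu() relocate static symbols into each unit */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif
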
Tejun Heo08fc4582009-08-14 15:00:49 +09001907#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
Tejun Heod4b95f82009-07-04 08:10:59 +09001908/**
Tejun Heo00ae4062009-08-14 15:00:49 +09001909 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
Tejun Heod4b95f82009-07-04 08:10:59 +09001910 * @reserved_size: the size of reserved percpu area in bytes
1911 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
1912 * @free_fn: function to free percpu page, always called with PAGE_SIZE
1913 * @populate_pte_fn: function to populate pte
1914 *
Tejun Heo00ae4062009-08-14 15:00:49 +09001915 * This is a helper to ease setting up a page-remapped first percpu
1916 * chunk and can be called where pcpu_setup_first_chunk() is expected.
Tejun Heod4b95f82009-07-04 08:10:59 +09001917 *
1918 * This is the basic allocator.  The static percpu area is allocated
1919 * page-by-page into the vmalloc area.
1920 *
1921 * RETURNS:
Tejun Heofb435d52009-08-14 15:00:51 +09001922 * 0 on success, -errno on failure.
Tejun Heod4b95f82009-07-04 08:10:59 +09001923 */
Tejun Heofb435d52009-08-14 15:00:51 +09001924int __init pcpu_page_first_chunk(size_t reserved_size,
1925 pcpu_fc_alloc_fn_t alloc_fn,
1926 pcpu_fc_free_fn_t free_fn,
1927 pcpu_fc_populate_pte_fn_t populate_pte_fn)
Tejun Heod4b95f82009-07-04 08:10:59 +09001928{
Tejun Heo8f05a6a2009-07-04 08:10:59 +09001929 static struct vm_struct vm;
Tejun Heofd1e8a12009-08-14 15:00:51 +09001930 struct pcpu_alloc_info *ai;
Tejun Heo00ae4062009-08-14 15:00:49 +09001931 char psize_str[16];
Tejun Heoce3141a2009-07-04 08:11:00 +09001932 int unit_pages;
Tejun Heod4b95f82009-07-04 08:10:59 +09001933 size_t pages_size;
Tejun Heoce3141a2009-07-04 08:11:00 +09001934 struct page **pages;
Tejun Heofb435d52009-08-14 15:00:51 +09001935 int unit, i, j, rc;
Tejun Heod4b95f82009-07-04 08:10:59 +09001936
Tejun Heo00ae4062009-08-14 15:00:49 +09001937 snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
1938
Tejun Heofd1e8a12009-08-14 15:00:51 +09001939 ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL);
1940 if (IS_ERR(ai))
1941 return PTR_ERR(ai);
1942 BUG_ON(ai->nr_groups != 1);
1943 BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
1944
1945 unit_pages = ai->unit_size >> PAGE_SHIFT;
Tejun Heod4b95f82009-07-04 08:10:59 +09001946
1947 /* unaligned allocations can't be freed, round up to page size */
Tejun Heofd1e8a12009-08-14 15:00:51 +09001948 pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
1949 sizeof(pages[0]));
Tejun Heoce3141a2009-07-04 08:11:00 +09001950 pages = alloc_bootmem(pages_size);
Tejun Heod4b95f82009-07-04 08:10:59 +09001951
Tejun Heo8f05a6a2009-07-04 08:10:59 +09001952 /* allocate pages */
Tejun Heod4b95f82009-07-04 08:10:59 +09001953 j = 0;
Tejun Heofd1e8a12009-08-14 15:00:51 +09001954 for (unit = 0; unit < num_possible_cpus(); unit++)
Tejun Heoce3141a2009-07-04 08:11:00 +09001955 for (i = 0; i < unit_pages; i++) {
Tejun Heofd1e8a12009-08-14 15:00:51 +09001956 unsigned int cpu = ai->groups[0].cpu_map[unit];
Tejun Heod4b95f82009-07-04 08:10:59 +09001957 void *ptr;
1958
Tejun Heo3cbc8562009-08-14 15:00:50 +09001959 ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
Tejun Heod4b95f82009-07-04 08:10:59 +09001960 if (!ptr) {
Tejun Heo00ae4062009-08-14 15:00:49 +09001961 pr_warning("PERCPU: failed to allocate %s page "
1962 "for cpu%u\n", psize_str, cpu);
Tejun Heod4b95f82009-07-04 08:10:59 +09001963 goto enomem;
1964 }
Tejun Heoce3141a2009-07-04 08:11:00 +09001965 pages[j++] = virt_to_page(ptr);
Tejun Heod4b95f82009-07-04 08:10:59 +09001966 }
1967
Tejun Heo8f05a6a2009-07-04 08:10:59 +09001968 /* allocate vm area, map the pages and copy static data */
1969 vm.flags = VM_ALLOC;
Tejun Heofd1e8a12009-08-14 15:00:51 +09001970 vm.size = num_possible_cpus() * ai->unit_size;
Tejun Heo8f05a6a2009-07-04 08:10:59 +09001971 vm_area_register_early(&vm, PAGE_SIZE);
1972
Tejun Heofd1e8a12009-08-14 15:00:51 +09001973 for (unit = 0; unit < num_possible_cpus(); unit++) {
Tejun Heo1d9d3252009-08-14 15:00:50 +09001974 unsigned long unit_addr =
Tejun Heofd1e8a12009-08-14 15:00:51 +09001975 (unsigned long)vm.addr + unit * ai->unit_size;
Tejun Heo8f05a6a2009-07-04 08:10:59 +09001976
Tejun Heoce3141a2009-07-04 08:11:00 +09001977 for (i = 0; i < unit_pages; i++)
Tejun Heo8f05a6a2009-07-04 08:10:59 +09001978 populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
1979
1980 /* pte already populated, the following shouldn't fail */
Tejun Heofb435d52009-08-14 15:00:51 +09001981 rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
1982 unit_pages);
1983 if (rc < 0)
1984 panic("failed to map percpu area, err=%d\n", rc);
Tejun Heo8f05a6a2009-07-04 08:10:59 +09001985
1986 /*
1987 * FIXME: Archs with virtual cache should flush local
1988 * cache for the linear mapping here - something
1989 * equivalent to flush_cache_vmap() on the local cpu.
1990 * flush_cache_vmap() can't be used as most supporting
1991 * data structures are not set up yet.
1992 */
1993
1994 /* copy static data */
Tejun Heofd1e8a12009-08-14 15:00:51 +09001995 memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
Tejun Heo66c3a752009-03-10 16:27:48 +09001996 }
1997
1998 /* we're ready, commit */
Tejun Heo1d9d3252009-08-14 15:00:50 +09001999 pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
Tejun Heofd1e8a12009-08-14 15:00:51 +09002000 unit_pages, psize_str, vm.addr, ai->static_size,
2001 ai->reserved_size, ai->dyn_size);
Tejun Heo66c3a752009-03-10 16:27:48 +09002002
Tejun Heofb435d52009-08-14 15:00:51 +09002003 rc = pcpu_setup_first_chunk(ai, vm.addr);
Tejun Heod4b95f82009-07-04 08:10:59 +09002004 goto out_free_ar;
2005
2006enomem:
2007 while (--j >= 0)
Tejun Heoce3141a2009-07-04 08:11:00 +09002008 free_fn(page_address(pages[j]), PAGE_SIZE);
Tejun Heofb435d52009-08-14 15:00:51 +09002009 rc = -ENOMEM;
Tejun Heod4b95f82009-07-04 08:10:59 +09002010out_free_ar:
Tejun Heoce3141a2009-07-04 08:11:00 +09002011 free_bootmem(__pa(pages), pages_size);
Tejun Heofd1e8a12009-08-14 15:00:51 +09002012 pcpu_free_alloc_info(ai);
Tejun Heofb435d52009-08-14 15:00:51 +09002013 return rc;
Tejun Heo66c3a752009-03-10 16:27:48 +09002014}
Tejun Heo08fc4582009-08-14 15:00:49 +09002015#endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */
Tejun Heod4b95f82009-07-04 08:10:59 +09002016
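/*
 * Call sketch (hypothetical, kept out of the build): the page-based
 * helper additionally needs a pte populator so the vmalloc-space
 * mapping can be wired up this early in boot.  my_populate_pte(),
 * my_fc_alloc() and my_fc_free() are stand-ins for arch helpers.
 */
#if 0
static void __init my_populate_pte(unsigned long addr)
{
	/* populate pgd/pud/pmd so __pcpu_map_pages() can set the pte */
}

void __init setup_per_cpu_areas(void)
{
	if (pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
				  my_fc_alloc, my_fc_free,
				  my_populate_pte) < 0)
		panic("Failed to initialize percpu areas.");
	/* ... set up __per_cpu_offset[] as in the embed sketch above ... */
}
#endif
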
Tejun Heo8c4bfc62009-07-04 08:10:59 +09002017/*
Tejun Heoe74e3962009-03-30 19:07:44 +09002018 * Generic percpu area setup.
2019 *
2020 * The embedding helper is used because its behavior closely resembles
2021 * the original non-dynamic generic percpu area setup. This is
2022 * important because many archs have addressing restrictions and might
2023 * fail if the percpu area is located far away from the previous
2024 * location. As an added bonus, in non-NUMA cases, embedding is
2025 * generally a good idea TLB-wise because the percpu area can piggyback
2026 * on the physical linear memory mapping which uses large page
2027 * mappings on applicable archs.
2028 */
2029#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
2030unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
2031EXPORT_SYMBOL(__per_cpu_offset);
2032
Tejun Heoc8826dd2009-08-14 15:00:52 +09002033static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
2034 size_t align)
2035{
2036 return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
2037}
2038
2039static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
2040{
2041 free_bootmem(__pa(ptr), size);
2042}
2043
Tejun Heoe74e3962009-03-30 19:07:44 +09002044void __init setup_per_cpu_areas(void)
2045{
Tejun Heoe74e3962009-03-30 19:07:44 +09002046 unsigned long delta;
2047 unsigned int cpu;
Tejun Heofb435d52009-08-14 15:00:51 +09002048 int rc;
Tejun Heoe74e3962009-03-30 19:07:44 +09002049
2050 /*
2051 * Always reserve area for module percpu variables. That's
2052 * what the legacy allocator did.
2053 */
Tejun Heofb435d52009-08-14 15:00:51 +09002054 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
Tejun Heoc8826dd2009-08-14 15:00:52 +09002055 PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
2056 pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
Tejun Heofb435d52009-08-14 15:00:51 +09002057 if (rc < 0)
Tejun Heoe74e3962009-03-30 19:07:44 +09002058		panic("Failed to initialize percpu areas.");
2059
2060 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
2061 for_each_possible_cpu(cpu)
Tejun Heofb435d52009-08-14 15:00:51 +09002062 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
Tejun Heoe74e3962009-03-30 19:07:44 +09002063}
2064#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
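
/*
 * Accessor recap (illustrative): once __per_cpu_offset[] is set up,
 * per_cpu(var, cpu) essentially computes
 *
 *	*(typeof(var) *)((unsigned long)&var + __per_cpu_offset[cpu])
 *
 * The real SHIFT_PERCPU_PTR() goes through RELOC_HIDE() so the
 * compiler can't make assumptions about the out-of-bounds pointer
 * arithmetic.
 */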