/*
 * linux/mm/allocpercpu.c
 *
 * Separated from slab.c August 11, 2006 Christoph Lameter
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <asm/sections.h>

#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif
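
/*
 * Background note (not a new mechanism, just context for the code below):
 * the void *__pdata handle handed out to users is a disguised pointer to
 * a struct percpu_data, whose ptrs[] array holds one kmalloc'ed object
 * per cpu.  __percpu_disguise() (from <linux/percpu.h>) recovers the
 * real structure; the disguise is there so users cannot dereference the
 * handle directly by mistake.
 */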

/**
 * percpu_depopulate - depopulate per-cpu data for given cpu
 * @__pdata: per-cpu data to depopulate
 * @cpu: depopulate per-cpu data for this cpu
 *
 * Depopulating per-cpu data for a cpu going offline would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 */
static void percpu_depopulate(void *__pdata, int cpu)
{
	struct percpu_data *pdata = __percpu_disguise(__pdata);

	kfree(pdata->ptrs[cpu]);
	pdata->ptrs[cpu] = NULL;
}

/**
 * percpu_depopulate_mask - depopulate per-cpu data for some cpus
 * @__pdata: per-cpu data to depopulate
 * @mask: depopulate per-cpu data for cpus selected through mask bits
 */
static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask)
{
	int cpu;

	for_each_cpu_mask_nr(cpu, *mask)
		percpu_depopulate(__pdata, cpu);
}

#define percpu_depopulate_mask(__pdata, mask) \
	__percpu_depopulate_mask((__pdata), &(mask))

/**
 * percpu_populate - populate per-cpu data for given cpu
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @cpu: populate per-cpu data for this cpu
 *
 * Populating per-cpu data for a cpu coming online would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 * The per-cpu object is populated with a zeroed buffer.
 */
static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
{
	struct percpu_data *pdata = __percpu_disguise(__pdata);
	int node = cpu_to_node(cpu);

	/*
	 * Round the size up to whole cache lines so each CPU's object
	 * gets private cache lines and false sharing is avoided.
	 */
	size = roundup(size, cache_line_size());

	BUG_ON(pdata->ptrs[cpu]);
	if (node_online(node))
		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
	else
		pdata->ptrs[cpu] = kzalloc(size, gfp);
	return pdata->ptrs[cpu];
}
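
/*
 * A minimal sketch of the hotplug handler the kernel-doc above asks
 * callers to register (illustration only, hence #if 0): populate on
 * CPU_UP_PREPARE, undo on CPU_UP_CANCELED/CPU_DEAD.  example_pdata and
 * example_cpu_callback are hypothetical names, and the sketch assumes
 * the notifier definitions from <linux/cpu.h>; a real user would
 * register the callback with hotcpu_notifier() from its init path.
 */
#if 0
static void *example_pdata;	/* handle returned by __alloc_percpu() */

static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		if (!percpu_populate(example_pdata, sizeof(long),
				     GFP_KERNEL, cpu))
			return NOTIFY_BAD;
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		percpu_depopulate(example_pdata, cpu);
		break;
	}
	return NOTIFY_OK;
}
#endif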

/**
 * percpu_populate_mask - populate per-cpu data for more cpus
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @mask: populate per-cpu data for cpus selected through mask bits
 *
 * Per-cpu objects are populated with zeroed buffers.
 */
static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
				  cpumask_t *mask)
{
	cpumask_t populated;
	int cpu;

	cpus_clear(populated);
	for_each_cpu_mask_nr(cpu, *mask)
		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
			__percpu_depopulate_mask(__pdata, &populated);
			return -ENOMEM;
		} else
			cpu_set(cpu, populated);
	return 0;
}

#define percpu_populate_mask(__pdata, size, gfp, mask) \
	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))

/**
 * __alloc_percpu - initial setup of per-cpu data
 * @size: size of per-cpu object
 * @align: alignment
 *
 * Allocate dynamic percpu area.  Percpu objects are populated with
 * zeroed buffers.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	/*
	 * We allocate whole cache lines to avoid false sharing
	 */
	size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
	void *pdata = kzalloc(sz, GFP_KERNEL);
	void *__pdata = __percpu_disguise(pdata);

	/*
	 * Can't easily make larger alignment work with kmalloc.  WARN
	 * on it.  Larger alignment should only be used for module
	 * percpu sections on SMP for which this path isn't used.
	 */
	WARN_ON_ONCE(align > SMP_CACHE_BYTES);

	if (unlikely(!pdata))
		return NULL;
	if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
					   &cpu_possible_map)))
		return __pdata;
	kfree(pdata);
	return NULL;
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

/**
 * free_percpu - final cleanup of per-cpu data
 * @__pdata: object to clean up
 *
 * We simply clean up any per-cpu object left.  No need for the client to
 * track and specify through a bitmask which per-cpu objects are to be
 * freed.
 */
void free_percpu(void *__pdata)
{
	if (unlikely(!__pdata))
		return;
	__percpu_depopulate_mask(__pdata, cpu_possible_mask);
	kfree(__percpu_disguise(__pdata));
}
EXPORT_SYMBOL_GPL(free_percpu);
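
/*
 * A minimal usage sketch (illustration only, hence #if 0): allocate a
 * zeroed long for every possible cpu via the alloc_percpu() wrapper
 * around __alloc_percpu(), read each cpu's copy with per_cpu_ptr(), and
 * release everything with free_percpu().  example_sum() is a
 * hypothetical caller, not part of this file.
 */
#if 0
static long example_sum(void)
{
	long *counters = alloc_percpu(long);	/* zeroed on each cpu */
	long sum = 0;
	int cpu;

	if (!counters)
		return -ENOMEM;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(counters, cpu);
	free_percpu(counters);
	return sum;
}
#endif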

/*
 * Generic percpu area setup.
 */
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	unsigned long size, i;
	char *ptr;
	unsigned long nr_possible_cpus = num_possible_cpus();

	/* Copy section for each CPU (we discard the original) */
	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
	ptr = alloc_bootmem_pages(size * nr_possible_cpus);

	for_each_possible_cpu(i) {
		__per_cpu_offset[i] = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
		ptr += size;
	}
}
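
/*
 * A hedged note on how the offsets above are consumed: the per_cpu()
 * family of accessors adds __per_cpu_offset[cpu] to the address of a
 * variable's .data.percpu copy, conceptually
 *
 *	*(typeof(var) *)((char *)&var + __per_cpu_offset[cpu])
 *
 * The exact accessor macros live in asm-generic/percpu.h and may be
 * overridden per architecture.
 */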
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */