/*
 * Fast batching percpu counters.
 */
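
/*
 * Each counter keeps a global s64 count plus a per-cpu s32 delta.
 * Updates go to the local delta; once a delta reaches +/- batch it
 * is folded into the global count under fbc->lock.  A plain read of
 * fbc->count is therefore cheap but may be off by nearly
 * batch * num_online_cpus() in the worst case.
 */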

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static struct debug_obj_descr percpu_counter_debug_descr;

static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;

	raw_spin_lock(&fbc->lock);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	raw_spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);

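/*
 * Add "amount" to the counter.  The per-cpu delta absorbs small
 * updates; only when it reaches +/- batch is it folded into
 * fbc->count under the lock.  Callers normally reach this through
 * the percpu_counter_add()/_inc()/_dec() wrappers in
 * <linux/percpu_counter.h>, which pass percpu_counter_batch.
 */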
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		raw_spin_lock(&fbc->lock);
		fbc->count += count;
		__this_cpu_write(*fbc->counters, 0);
		raw_spin_unlock(&fbc->lock);
	} else {
		__this_cpu_write(*fbc->counters, count);
	}
	preempt_enable();
}
EXPORT_SYMBOL(__percpu_counter_add);

/*
 * Add up all the per-cpu counts, return the result.  This is a more
 * accurate but much slower version of percpu_counter_read_positive().
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;

	raw_spin_lock(&fbc->lock);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock(&fbc->lock);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);

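/*
 * Note: most callers use the percpu_counter_init() wrapper from
 * <linux/percpu_counter.h>, which supplies a static lock_class_key
 * so each counter class gets its own lockdep class.
 */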
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key)
{
	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu(s32);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	mutex_lock(&percpu_counters_lock);
	list_add(&fbc->list, &percpu_counters);
	mutex_unlock(&percpu_counters_lock);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);

void percpu_counter_destroy(struct percpu_counter *fbc)
{
	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&percpu_counters_lock);
	list_del(&fbc->list);
	mutex_unlock(&percpu_counters_lock);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

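/*
 * Default fold threshold.  compute_batch_value() rescales it as cpus
 * come and go: a larger batch on bigger machines keeps lock-taking
 * folds rare, at the cost of a larger worst-case read error.
 */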
int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

static void compute_batch_value(void)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
}

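/*
 * On CPU_DEAD, fold the dead cpu's delta into each counter's global
 * count so it is not lost: __percpu_counter_sum() visits only online
 * cpus.  The batch size is also recomputed on every hotplug event.
 */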
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int cpu;
	struct percpu_counter *fbc;

	compute_batch_value();
	if (action != CPU_DEAD)
		return NOTIFY_OK;

	cpu = (unsigned long)hcpu;
	mutex_lock(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;
		unsigned long flags;

		raw_spin_lock_irqsave(&fbc->lock, flags);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	}
	mutex_unlock(&percpu_counters_lock);
#endif
	return NOTIFY_OK;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less.
 */
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	s64 count;

	count = percpu_counter_read(fbc);
	/*
	 * The rough count is decisive if it differs from rhs by more
	 * than the maximum possible per-cpu drift.
	 */
	if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(percpu_counter_compare);

static int __init percpu_counter_startup(void)
{
	compute_batch_value();
	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
	return 0;
}
module_init(percpu_counter_startup);
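
/*
 * Usage sketch (illustrative only; nr_objects and example_usage are
 * hypothetical names, not part of this file).  Callers use the
 * wrappers from <linux/percpu_counter.h>:
 */
#if 0
static struct percpu_counter nr_objects;

static int example_usage(void)
{
	int err = percpu_counter_init(&nr_objects, 0);

	if (err)
		return err;
	percpu_counter_inc(&nr_objects);	/* cheap per-cpu delta */
	percpu_counter_add(&nr_objects, 10);
	printk(KERN_INFO "approx %lld, exact %lld\n",
	       (long long)percpu_counter_read(&nr_objects),
	       (long long)percpu_counter_sum(&nr_objects));
	percpu_counter_destroy(&nr_objects);
	return 0;
}
#endif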