/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

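/*
 * With CPU hotplug enabled, every live counter is kept on a global list so
 * that the notifier below can fold a dying CPU's partial counts back into
 * the shared count.
 */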
#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

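/*
 * Optional debugobjects support: catches counters that are freed while
 * still active, and destroys them properly instead of leaking the percpu
 * allocation.
 */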
#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static struct debug_obj_descr percpu_counter_debug_descr;

static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

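/*
 * Set the counter to @amount: zero every CPU's partial count and install
 * @amount as the shared count, all under fbc->lock.  Note that the add
 * fast path updates the per-cpu slots without taking this lock, so the
 * result is only exact if the caller excludes concurrent updaters.
 */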
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);

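/*
 * Add @amount to the counter.  The delta is accumulated in a per-cpu s32
 * and only folded into the shared fbc->count (under the lock) once its
 * magnitude reaches @batch, so the common case is a cheap per-cpu update
 * with no shared-cacheline traffic.
 *
 * Typical callers use the percpu_counter_add()/percpu_counter_inc()
 * wrappers from <linux/percpu_counter.h>, which pass percpu_counter_batch
 * for @batch, e.g. (illustrative sketch, not from this file):
 *
 *	percpu_counter_inc(&nr_files);	// fast path, usually lock-free
 *	...
 *	percpu_counter_dec(&nr_files);
 */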
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		unsigned long flags;
		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
EXPORT_SYMBOL(__percpu_counter_add);

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 *
 * Only online CPUs are walked: counts from CPUs that went offline have
 * already been folded into fbc->count by the hotplug callback below.
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);

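/*
 * Initialise @fbc: @amount becomes the starting value, @gfp is used for the
 * percpu allocation, and @key sets the lockdep class of fbc->lock (the
 * percpu_counter_init() wrapper in the header supplies a static key).  The
 * counter is also registered on the global hotplug list.
 */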
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key)
{
	unsigned long flags __maybe_unused;

	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu_gfp(s32, gfp);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);

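/*
 * Tear down a counter: unregister it from the hotplug list and free the
 * percpu storage.  Safe to call on a counter whose init failed, or twice,
 * since a NULL fbc->counters is checked first.
 */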
void percpu_counter_destroy(struct percpu_counter *fbc)
{
	unsigned long flags __maybe_unused;

	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_del(&fbc->list);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

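/*
 * Global default batch size.  Each CPU may hold a partial count of up to
 * +/-(batch - 1) before folding, so percpu_counter_read() can be off by
 * roughly batch * num_online_cpus(); scaling the batch with the CPU count
 * trades read accuracy for less lock contention on large machines.
 */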
int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

static void compute_batch_value(void)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
}

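/*
 * CPU hotplug callback: recompute the batch for the new CPU count and, when
 * a CPU dies, fold its partial count from every registered counter into the
 * shared count so that nothing is lost while the CPU is offline.
 */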
static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int cpu;
	struct percpu_counter *fbc;

	compute_batch_value();
	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	cpu = (unsigned long)hcpu;
	spin_lock_irq(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;
		unsigned long flags;

		raw_spin_lock_irqsave(&fbc->lock, flags);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	return NOTIFY_OK;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less.
 *
 * Since each CPU accumulates at most +/-batch locally before folding, the
 * rough (unsummed) count can deviate from the true value by at most
 * batch * num_online_cpus().  If the difference from @rhs exceeds that
 * bound, the rough count already decides the comparison; otherwise fall
 * back to the exact (slow) sum.
 */
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	s64 count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (batch * num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);

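/*
 * Pick an initial batch for the boot-time CPU count and register the
 * hotplug notifier.
 */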
static int __init percpu_counter_startup(void)
{
	compute_batch_value();
	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
	return 0;
}
module_init(percpu_counter_startup);