/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static struct debug_obj_descr percpu_counter_debug_descr;

static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return true;
	default:
		return false;
	}
}

static struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

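/*
 * Set the counter to @amount and reset every CPU's local delta to zero, so
 * that a subsequent sum returns @amount.
 */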
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);

/*
 * This function is both preempt and irq safe. The former is due to explicit
 * preemption disable. The latter is guaranteed by the fact that the slow path
 * is explicitly protected by an irq-safe spinlock whereas the fast path uses
 * this_cpu_add() which is irq-safe by definition. Hence there is no need to
 * muck with irq state before calling this one.
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		unsigned long flags;
		raw_spin_lock_irqsave(&fbc->lock, flags);
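		/*
		 * Fold the whole locally observed value into the global
		 * count, then subtract what was observed (count - amount)
		 * from this CPU's counter rather than writing zero: this
		 * preserves any update an interrupt may have made to the
		 * local counter between the read above and taking the lock.
		 */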
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
EXPORT_SYMBOL(percpu_counter_add_batch);
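
/*
 * Illustrative usage sketch (hypothetical caller and counter name, not part
 * of this file): a subsystem counting, say, in-flight requests batches its
 * updates per CPU and only takes fbc->lock once every 'batch' local events:
 *
 *	static struct percpu_counter nr_requests;
 *
 *	percpu_counter_init(&nr_requests, 0, GFP_KERNEL);
 *	percpu_counter_add_batch(&nr_requests, 1, percpu_counter_batch);
 *	...
 *	percpu_counter_add_batch(&nr_requests, -1, percpu_counter_batch);
 */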

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
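	/*
	 * Only online CPUs need to be visited: percpu_counter_cpu_dead()
	 * below folds a dying CPU's count into fbc->count, so offline CPUs
	 * are already accounted for in the value read above.
	 */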
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key)
{
	unsigned long flags __maybe_unused;

	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu_gfp(s32, gfp);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);
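
/*
 * Typical lifecycle, as seen from a caller (illustrative sketch; the counter
 * name is hypothetical). percpu_counter_init() is the wrapper from
 * <linux/percpu_counter.h> that supplies the lock_class_key:
 *
 *	struct percpu_counter nr_objs;
 *
 *	if (percpu_counter_init(&nr_objs, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	percpu_counter_inc(&nr_objs);
 *	...
 *	percpu_counter_destroy(&nr_objs);
 */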

void percpu_counter_destroy(struct percpu_counter *fbc)
{
	unsigned long flags __maybe_unused;

	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_del(&fbc->list);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

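/*
 * Scale the default batch with the number of online CPUs, with a floor of
 * 32. A larger batch means fewer fbc->lock acquisitions, at the cost of
 * percpu_counter_read() drifting from the precise sum by up to
 * batch * num_online_cpus() - the slack __percpu_counter_compare() below
 * tolerates before resorting to a full sum.
 */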
static int compute_batch_value(unsigned int cpu)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
	return 0;
}

static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct percpu_counter *fbc;

	compute_batch_value(cpu);

	spin_lock_irq(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;

		raw_spin_lock(&fbc->lock);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock(&fbc->lock);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	return 0;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less.
 */
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	s64 count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (batch * num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);
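
/*
 * Illustrative sketch of the cheap-check-first pattern this enables
 * (hypothetical caller, counter and limit): callers that only need to know
 * which side of a threshold the counter is on can usually avoid the
 * O(nr_cpus) precise sum:
 *
 *	if (__percpu_counter_compare(&nr_files, max_files,
 *				     percpu_counter_batch) >= 0)
 *		return -ENFILE;
 */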

static int __init percpu_counter_startup(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
				compute_batch_value, NULL);
	WARN_ON(ret < 0);
	ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
					"lib/percpu_cnt:dead", NULL,
					percpu_counter_cpu_dead);
	WARN_ON(ret < 0);
	return 0;
}
module_init(percpu_counter_startup);