// SPDX-License-Identifier: GPL-2.0
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static struct debug_obj_descr percpu_counter_debug_descr;

static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return true;
	default:
		return false;
	}
}

static struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

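/*
 * Zero every CPU's partial count and install @amount as the new value.
 * This runs under the counter's spinlock but does not stop concurrent
 * percpu_counter_add() callers, so it is only reliable while the counter
 * is otherwise quiescent (e.g. during initialization or teardown).
 */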
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);

/*
 * This function is both preempt and irq safe. The former is due to explicit
 * preemption disable. The latter is guaranteed by the fact that the slow path
 * is explicitly protected by an irq-safe spinlock, whereas the fast path uses
 * this_cpu_add(), which is irq-safe by definition. Hence there is no need to
 * muck with the irq state before calling this one.
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		unsigned long flags;

		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
EXPORT_SYMBOL(percpu_counter_add_batch);
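
/*
 * Usage sketch: most callers go through the percpu_counter_add() wrapper
 * from <linux/percpu_counter.h>, which passes the global percpu_counter_batch
 * computed below. The counter name and the explicit batch of 64 here are
 * hypothetical, chosen purely for illustration.
 */
static void __maybe_unused percpu_counter_add_example(struct percpu_counter *nr_widgets)
{
	/* Fast path from any context: per-cpu delta only, no lock taken. */
	percpu_counter_add(nr_widgets, 1);

	/* A caller that tolerates more read drift may pass a larger batch. */
	percpu_counter_add_batch(nr_widgets, -1, 64);
}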

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
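
/*
 * Usage sketch contrasting the approximate and the precise readers; the
 * helper is hypothetical. percpu_counter_read() just loads fbc->count and
 * may be off by up to roughly percpu_counter_batch * num_online_cpus(),
 * while percpu_counter_sum() (the wrapper around __percpu_counter_sum()
 * above) takes the lock and folds in every online CPU's partial count.
 */
static s64 __maybe_unused percpu_counter_drift(struct percpu_counter *fbc)
{
	/* Cheap, possibly stale: fine for statistics and heuristics. */
	s64 approx = percpu_counter_read(fbc);

	/* Exact but serializing: reserve for slow paths. */
	s64 exact = percpu_counter_sum(fbc);

	return exact - approx;	/* current drift, for illustration */
}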

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key)
{
	unsigned long flags __maybe_unused;

	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu_gfp(s32, gfp);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);

void percpu_counter_destroy(struct percpu_counter *fbc)
{
	unsigned long flags __maybe_unused;

	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_del(&fbc->list);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);
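
/*
 * Lifecycle sketch: callers normally use the percpu_counter_init() macro
 * from <linux/percpu_counter.h>, which supplies the lock_class_key that
 * __percpu_counter_init() above expects. The "nr_widgets" counter is
 * hypothetical.
 */
static int __maybe_unused percpu_counter_lifecycle_example(void)
{
	static struct percpu_counter nr_widgets;
	int err;

	/* Allocates the per-cpu array; this can fail, so check the result. */
	err = percpu_counter_init(&nr_widgets, 0, GFP_KERNEL);
	if (err)
		return err;

	percpu_counter_add(&nr_widgets, 1);

	/* Frees the per-cpu array and unhooks from the hotplug list. */
	percpu_counter_destroy(&nr_widgets);
	return 0;
}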

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

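/*
 * Each CPU may hold up to batch - 1 counts that are not yet folded into
 * fbc->count, so percpu_counter_read() can drift from the true value by
 * roughly percpu_counter_batch * num_online_cpus(). Scaling the batch with
 * the number of online CPUs trades read accuracy for fewer lock
 * acquisitions on the add path.
 */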
static int compute_batch_value(unsigned int cpu)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
	return 0;
}

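/*
 * CPU hotplug callback: rescale the batch for the now-smaller set of online
 * CPUs, then fold the dead CPU's partial count back into the global count
 * of every counter on the list so that nothing is lost while it is offline.
 */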
static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct percpu_counter *fbc;

	compute_batch_value(cpu);

	spin_lock_irq(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;

		raw_spin_lock(&fbc->lock);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock(&fbc->lock);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	return 0;
}

/*
 * Compare the counter against the given value.
 * Return 1 if greater, 0 if equal, and -1 if less.
 */
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	s64 count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (batch * num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);
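
/*
 * Usage sketch: a hypothetical limit check. percpu_counter_compare() (the
 * <linux/percpu_counter.h> wrapper that passes percpu_counter_batch) only
 * falls back to the expensive percpu_counter_sum() when the approximate
 * count is within batch * num_online_cpus() of @limit, so the common case
 * stays lock-free. The helper and its limit are illustrative.
 */
static bool __maybe_unused percpu_counter_over_limit(struct percpu_counter *fbc,
						     s64 limit)
{
	return percpu_counter_compare(fbc, limit) > 0;
}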

static int __init percpu_counter_startup(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
				compute_batch_value, NULL);
	WARN_ON(ret < 0);
	ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
					"lib/percpu_cnt:dead", NULL,
					percpu_counter_cpu_dead);
	WARN_ON(ret < 0);
	return 0;
}
module_init(percpu_counter_startup);