/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/module.h>

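/*
 * For reference: the structure this file operates on is declared in
 * <linux/percpu_counter.h>.  In kernels of this era the SMP variant
 * looks roughly like the sketch below (a paraphrase for orientation,
 * not a definition to copy from):
 *
 *	struct percpu_counter {
 *		spinlock_t lock;	(protects count)
 *		s64 count;		(global, approximate value)
 *		s32 *counters;		(per-cpu batching slots)
 *	};
 *
 * Each cpu accumulates small deltas in its own s32 slot and folds them
 * into the locked s64 only once they reach +/-FBC_BATCH.
 */
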
void percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
{
	long count;
	s32 *pcount;
	int cpu = get_cpu();

	pcount = per_cpu_ptr(fbc->counters, cpu);
	count = *pcount + amount;
	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
		/* Batch limit reached: fold this cpu's delta into the
		 * global count under the lock and reset the local slot. */
		spin_lock(&fbc->lock);
		fbc->count += count;
		*pcount = 0;
		spin_unlock(&fbc->lock);
	} else {
		/* Still below the batch limit: no lock needed. */
		*pcount = count;
	}
	put_cpu();
}
EXPORT_SYMBOL(percpu_counter_mod);

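/*
 * Usage sketch (illustrative only; "nr_files" is a hypothetical counter,
 * not something defined in this file).  A hot-path caller would do
 *
 *	percpu_counter_mod(&nr_files, 1);
 *
 * while a cheap, approximate reader would use percpu_counter_read() or
 * percpu_counter_read_positive() from <linux/percpu_counter.h>.  Since
 * each cpu may be holding up to FBC_BATCH - 1 unflushed units, such a
 * read can be off by as much as num_possible_cpus() * (FBC_BATCH - 1);
 * callers needing the precise value pay for percpu_counter_sum() below.
 */
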
/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;

	spin_lock(&fbc->lock);
	ret = fbc->count;
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	spin_unlock(&fbc->lock);
	/* Clamp transient negatives, matching read_positive() semantics. */
	return ret < 0 ? 0 : ret;
}
EXPORT_SYMBOL(percpu_counter_sum);
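
/*
 * Usage sketch (illustrative; "nr_files" is again a hypothetical counter):
 * a slow path that must not act on a stale approximation, e.g. a limit
 * check, would prefer the exact sum over the cheap read:
 *
 *	if (percpu_counter_sum(&nr_files) >= limit)
 *		return -ENFILE;
 *
 * Note the walk covers all *possible* cpus, so the cost grows with the
 * configured cpu count, and the spinlock is held for the whole walk.
 */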