/*
 * Floating proportions with flexible aging period
 *
 * Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 */

#ifndef _LINUX_FLEX_PROPORTIONS_H
#define _LINUX_FLEX_PROPORTIONS_H

#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/seqlock.h>
#include <linux/gfp.h>

/*
 * When a maximum proportion of some event type is specified, this is the
 * precision with which we allow limiting. Note that this creates an upper
 * bound on the number of events per period of roughly
 * ULLONG_MAX >> FPROP_FRAC_SHIFT.
 */
#define FPROP_FRAC_SHIFT 10
#define FPROP_FRAC_BASE (1UL << FPROP_FRAC_SHIFT)
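
/*
 * Illustrative note (an inference from the comment above, not text from the
 * original header): the maximum fraction handed to __fprop_inc_percpu_max()
 * below is expressed in units of 1/FPROP_FRAC_BASE, so a limit of one
 * quarter of all events would correspond to max_frac = FPROP_FRAC_BASE / 4.
 */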

/*
 * ---- Global proportion definitions ----
 */
struct fprop_global {
	/* Number of events in the current period */
	struct percpu_counter events;
	/* Current period */
	unsigned int period;
	/* Synchronization with period transitions */
	seqcount_t sequence;
};

int fprop_global_init(struct fprop_global *p, gfp_t gfp);
void fprop_global_destroy(struct fprop_global *p);
bool fprop_new_period(struct fprop_global *p, int periods);
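
/*
 * Illustrative usage sketch (not part of the original header; variable names
 * are placeholders): a caller owning a proportion domain initializes it once,
 * declares new periods as time passes so that old events age out, and tears
 * it down on exit. Roughly:
 *
 *	struct fprop_global fg;
 *	int err;
 *
 *	err = fprop_global_init(&fg, GFP_KERNEL);
 *	if (err)
 *		return err;
 *	...
 *	fprop_new_period(&fg, 1);
 *	...
 *	fprop_global_destroy(&fg);
 *
 * The second argument of fprop_new_period() is the number of new periods to
 * declare; the caller is expected to serialize period transitions.
 */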

/*
 * ---- SINGLE ----
 */
struct fprop_local_single {
	/* the local events counter */
	unsigned long events;
	/* Period in which we last updated events */
	unsigned int period;
	raw_spinlock_t lock;	/* Protect period and numerator */
};

#define INIT_FPROP_LOCAL_SINGLE(name)	\
{	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
}

int fprop_local_init_single(struct fprop_local_single *pl);
void fprop_local_destroy_single(struct fprop_local_single *pl);
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl);
void fprop_fraction_single(struct fprop_global *p,
	struct fprop_local_single *pl, unsigned long *numerator,
	unsigned long *denominator);

static inline
void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__fprop_inc_single(p, pl);
	local_irq_restore(flags);
}
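
/*
 * Illustrative usage sketch (not part of the original header; fg is an
 * initialized struct fprop_global as in the sketch above): tracking what
 * share of the domain's events one source has generated recently:
 *
 *	struct fprop_local_single fl;
 *	unsigned long num, den;
 *
 *	fprop_local_init_single(&fl);
 *	fprop_inc_single(&fg, &fl);
 *	fprop_fraction_single(&fg, &fl, &num, &den);
 *	fprop_local_destroy_single(&fl);
 *
 * After fprop_fraction_single(), num / den approximates the fraction of
 * recent events attributed to this source. For statically defined counters,
 * INIT_FPROP_LOCAL_SINGLE() above provides an equivalent initializer.
 */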

/*
 * ---- PERCPU ----
 */
struct fprop_local_percpu {
	/* the local events counter */
	struct percpu_counter events;
	/* Period in which we last updated events */
	unsigned int period;
	raw_spinlock_t lock;	/* Protect period and numerator */
};

int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
			    int max_frac);
void fprop_fraction_percpu(struct fprop_global *p,
	struct fprop_local_percpu *pl, unsigned long *numerator,
	unsigned long *denominator);

static inline
void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__fprop_inc_percpu(p, pl);
	local_irq_restore(flags);
}
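
/*
 * Illustrative usage sketch (not part of the original header; fg is an
 * initialized struct fprop_global): the per-CPU variant suits frequently
 * updated counters. __fprop_inc_percpu_max() additionally caps the share
 * attributed to this source (see the note after FPROP_FRAC_BASE); it is
 * called here with interrupts disabled, mirroring the fprop_inc_percpu()
 * wrapper above. Roughly:
 *
 *	struct fprop_local_percpu fp;
 *	unsigned long flags, num, den;
 *	int err;
 *
 *	err = fprop_local_init_percpu(&fp, GFP_KERNEL);
 *	if (err)
 *		return err;
 *	fprop_inc_percpu(&fg, &fp);
 *	local_irq_save(flags);
 *	__fprop_inc_percpu_max(&fg, &fp, FPROP_FRAC_BASE / 4);
 *	local_irq_restore(flags);
 *	fprop_fraction_percpu(&fg, &fp, &num, &den);
 *	fprop_local_destroy_percpu(&fp);
 */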

#endif