/*
 * Floating proportions
 *
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This file contains the public data structure and API definitions.
 */

#ifndef _LINUX_PROPORTIONS_H
#define _LINUX_PROPORTIONS_H

#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

struct prop_global {
	/*
	 * The period over which we differentiate
	 *
	 * period = 2^shift
	 */
	int shift;
	/*
	 * The total event counter aka 'time'.
	 *
	 * Treated as an unsigned long; the lower 'shift - 1' bits are the
	 * counter bits, the remaining upper bits the period counter.
	 */
	struct percpu_counter events;
};

/*
 * global proportion descriptor
 *
 * this is needed to consistently flip prop_global structures.
 */
struct prop_descriptor {
	int index;
	struct prop_global pg[2];
	struct mutex mutex;		/* serialize the prop_global switch */
};

int prop_descriptor_init(struct prop_descriptor *pd, int shift);
void prop_change_shift(struct prop_descriptor *pd, int new_shift);

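/*
 * Illustrative sketch only (not part of this header): a user typically
 * embeds one prop_descriptor in some global state and initialises it once
 * at boot.  The name 'fprop_example' and the shift value below are invented
 * for the example; the shift only picks the averaging period
 * (period = 2^shift events).
 *
 *	static struct prop_descriptor fprop_example;
 *
 *	static int __init fprop_example_init(void)
 *	{
 *		return prop_descriptor_init(&fprop_example, 10);
 *	}
 */
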
/*
 * ----- PERCPU ------
 */

struct prop_local_percpu {
	/*
	 * the local events counter
	 */
	struct percpu_counter events;

	/*
	 * snapshot of the last seen global state
	 */
	int shift;
	unsigned long period;
	raw_spinlock_t lock;		/* protect the snapshot state */
};

int prop_local_init_percpu(struct prop_local_percpu *pl);
void prop_local_destroy_percpu(struct prop_local_percpu *pl);
void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
		long *numerator, long *denominator);

static inline
void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__prop_inc_percpu(pd, pl);
	local_irq_restore(flags);
}

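/*
 * Illustrative sketch only, with invented names: each tracked object
 * carries a prop_local_percpu, events are accounted with prop_inc_percpu(),
 * and prop_fraction_percpu() later reports that object's share of the
 * recent global events as num/den.
 *
 *	struct fprop_example_obj {
 *		struct prop_local_percpu completions;
 *	};
 *
 *	static void fprop_example_event(struct fprop_example_obj *obj)
 *	{
 *		prop_inc_percpu(&fprop_example, &obj->completions);
 *	}
 *
 *	static void fprop_example_share(struct fprop_example_obj *obj,
 *					long *num, long *den)
 *	{
 *		prop_fraction_percpu(&fprop_example, &obj->completions,
 *				     num, den);
 *	}
 */
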
/*
 * Limit the time part in order to ensure there are some bits left for the
 * cycle counter and fraction multiply.
 */
#define PROP_MAX_SHIFT	(3*BITS_PER_LONG/4)

#define PROP_FRAC_SHIFT		(BITS_PER_LONG - PROP_MAX_SHIFT - 1)
#define PROP_FRAC_BASE		(1UL << PROP_FRAC_SHIFT)

void __prop_inc_percpu_max(struct prop_descriptor *pd,
			   struct prop_local_percpu *pl, long frac);

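/*
 * Illustrative sketch only, reusing the invented 'obj' from the sketch
 * above and assuming @frac is a fixed-point fraction relative to
 * PROP_FRAC_BASE: capping an object at roughly one quarter of the total
 * event rate could then look like the following (like the other __ helpers,
 * the caller is assumed to keep interrupts disabled around the call).
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	__prop_inc_percpu_max(&fprop_example, &obj->completions,
 *			      PROP_FRAC_BASE / 4);
 *	local_irq_restore(flags);
 */
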
/*
 * ----- SINGLE ------
 */

struct prop_local_single {
	/*
	 * the local events counter
	 */
	unsigned long events;

	/*
	 * snapshot of the last seen global state
	 * and a lock protecting this state
	 */
	unsigned long period;
	int shift;
	raw_spinlock_t lock;		/* protect the snapshot state */
};

#define INIT_PROP_LOCAL_SINGLE(name)	\
{	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
}

int prop_local_init_single(struct prop_local_single *pl);
void prop_local_destroy_single(struct prop_local_single *pl);
void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
		long *numerator, long *denominator);

static inline
void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__prop_inc_single(pd, pl);
	local_irq_restore(flags);
}
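
/*
 * Illustrative sketch only, with invented names: the single-counter variant
 * mirrors the percpu one but keeps its event count in a plain unsigned long
 * rather than a percpu_counter.  Its local state can be initialised
 * statically with INIT_PROP_LOCAL_SINGLE() or at run time with
 * prop_local_init_single().
 *
 *	static struct prop_local_single fprop_example_local =
 *		INIT_PROP_LOCAL_SINGLE(fprop_example_local);
 *
 *	static void fprop_example_single_event(void)
 *	{
 *		long num, den;
 *
 *		prop_inc_single(&fprop_example, &fprop_example_local);
 *		prop_fraction_single(&fprop_example, &fprop_example_local,
 *				     &num, &den);
 *	}
 */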

#endif /* _LINUX_PROPORTIONS_H */