#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * To properly implement 64bit network statistics on 32bit and 64bit hosts,
 * we provide a synchronization point, which is a no-op on 64bit or UP kernels.
 *
 * Key points :
 * 1) Use a seqcount on SMP 32bit arches, with low overhead.
 * 2) The whole thing is a no-op on 64bit arches or UP kernels.
 * 3) The write side must ensure mutual exclusion, or one seqcount update could
 *    be lost, thus blocking readers forever.
 *    If this synchronization point is not a mutex, but a spinlock or
 *    spinlock_bh() or disable_bh() :
 *    3.1) The write side should not sleep.
 *    3.2) The write side should not allow preemption.
 *    3.3) If applicable, interrupts should be disabled.
 *
 * 4) If a reader fetches several counters, there is no guarantee that the
 *    whole set is consistent (remember point 1 : this is a no-op on 64bit
 *    arches anyway).
 *
 * 5) Readers are allowed to sleep or be preempted/interrupted : they perform
 *    pure reads. But if they have to fetch many values, it is better to not
 *    allow preemption/interruption, to avoid many retries.
 *
 * 6) If a counter might be written by an interrupt, readers should block
 *    interrupts. (On UP, there is no seqcount_t protection, so a reader
 *    allowing interrupts could read partial values.)
 *
 * 7) For irq and softirq uses, readers can use the u64_stats_fetch_begin_irq()
 *    and u64_stats_fetch_retry_irq() helpers.
 *
 * Usage :
 *
 * A stats producer (writer) should use the following template, provided it
 * already has exclusive access to the counters (a lock is already taken, or
 * per cpu data is used [in a non-preemptible context]) :
 *
 *	spin_lock_bh(...) or other synchronization to get exclusive access
 *	...
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->bytes64 += len;	// non atomic operation
 *	stats->packets64++;	// non atomic operation
 *	u64_stats_update_end(&stats->syncp);
 *
 * While a consumer (reader) should use the following template to get a
 * consistent snapshot of each variable (but no guarantee across several of
 * them) :
 *
 *	u64 tbytes, tpackets;
 *	unsigned int start;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&stats->syncp);
 *		tbytes = stats->bytes64;	// non atomic operation
 *		tpackets = stats->packets64;	// non atomic operation
 *	} while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 * Example of use in drivers/net/loopback.c, using per cpu containers,
 * in a BH disabled context.
 */
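/*
 * A minimal sketch of the per cpu writer pattern referenced above (drivers
 * such as loopback follow this shape). "struct pcpu_stats" and "dev_stats"
 * are hypothetical names; BH is assumed already disabled, so this cpu has
 * exclusive access to its own counters :
 *
 *	struct pcpu_stats *stats = this_cpu_ptr(dev_stats);
 *
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->bytes64 += len;
 *	stats->packets64++;
 *	u64_stats_update_end(&stats->syncp);
 */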
#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_t	seq;
#endif
};

static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_init(&syncp->seq);
#endif
}
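/*
 * Each u64_stats_sync instance must be initialized once before first use.
 * A minimal sketch for per cpu containers, assuming hypothetical names
 * "struct pcpu_stats" and "dev_stats" (e.g. at allocation time) :
 *
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu) {
 *		struct pcpu_stats *stats = per_cpu_ptr(dev_stats, cpu);
 *
 *		u64_stats_init(&stats->syncp);
 *	}
 */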

static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
#endif
}

static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	raw_write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	raw_write_seqcount_end(&syncp->seq);
#endif
}
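/*
 * The _raw variants map to the raw seqcount write helpers, which do not carry
 * the lockdep annotations of plain write_seqcount_begin()/end(). They follow
 * the same mutual exclusion rules as the plain variants. A minimal sketch,
 * with the same hypothetical "stats" container as above :
 *
 *	u64_stats_update_begin_raw(&stats->syncp);
 *	stats->bytes64 += len;
 *	u64_stats_update_end_raw(&stats->syncp);
 */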

static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
	return 0;
#endif
}

static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	preempt_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					   unsigned int start)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
	return false;
#endif
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	preempt_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}
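/*
 * A minimal sketch of the per cpu folding pattern used by readers such as
 * loopback's get_stats64() : take a retry-consistent snapshot of each cpu's
 * counters, then accumulate. "struct pcpu_stats" and "dev_stats" are
 * hypothetical names :
 *
 *	u64 bytes = 0, packets = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu) {
 *		const struct pcpu_stats *stats = per_cpu_ptr(dev_stats, cpu);
 *		u64 tbytes, tpackets;
 *		unsigned int start;
 *
 *		do {
 *			start = u64_stats_fetch_begin(&stats->syncp);
 *			tbytes = stats->bytes64;
 *			tpackets = stats->packets64;
 *		} while (u64_stats_fetch_retry(&stats->syncp, start));
 *		bytes += tbytes;
 *		packets += tpackets;
 *	}
 */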

/*
 * If irq handlers can update the u64 counters, readers can use the following
 * helpers :
 * - SMP 32bit arches use seqcount protection, irq safe.
 * - UP 32bit must disable irqs.
 * - 64bit arches can atomically read u64 values, irq safe.
 */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
					     unsigned int start)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}
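/*
 * A minimal sketch of the irq-safe reader loop (hypothetical "stats" name) :
 * reading from process context counters that an irq handler may update :
 *
 *	u64 tbytes;
 *	unsigned int start;
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&stats->syncp);
 *		tbytes = stats->bytes64;
 *	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 */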

#endif /* _LINUX_U64_STATS_SYNC_H */