/*
 * sched_clock.h: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef ASM_SCHED_CLOCK
#define ASM_SCHED_CLOCK

#include <linux/kernel.h>
#include <linux/types.h>

struct clock_data {
	u64 epoch_ns;		/* ns value at the last epoch update */
	u32 epoch_cyc;		/* counter value at the last epoch update */
	u32 epoch_cyc_copy;	/* written last, to detect in-progress updates */
	u32 mult;		/* cycle to ns multiplier */
	u32 shift;		/* cycle to ns shift */
};

#define DEFINE_CLOCK_DATA(name)	struct clock_data name

static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}
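
/*
 * For example (illustrative numbers only): a 32768 Hz counter with
 * shift = 15 needs mult = 1000000000, since ns = cyc * 10^9 / rate =
 * (cyc * mult) >> shift. With at most a 32-bit counter delta, the
 * 64-bit multiply above cannot overflow for a mult of this size.
 */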

/*
 * Atomically update the sched_clock epoch. Your update callback will
 * be called from a timer before the counter wraps - read the current
 * counter value, and call this function to safely move the epochs
 * forward. Only use this from the update callback.
 */
static inline void update_sched_clock(struct clock_data *cd, u32 cyc, u32 mask)
{
	unsigned long flags;
	u64 ns = cd->epoch_ns +
		cyc_to_ns((cyc - cd->epoch_cyc) & mask, cd->mult, cd->shift);

	/*
	 * Write epoch_cyc and epoch_ns in a way that the update is
	 * detectable in cyc_to_fixed_sched_clock().
	 */
	raw_local_irq_save(flags);
	cd->epoch_cyc = cyc;
	smp_wmb();
	cd->epoch_ns = ns;
	smp_wmb();
	cd->epoch_cyc_copy = cyc;
	raw_local_irq_restore(flags);
}
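
/*
 * For example, a platform with a free-running 32-bit counter might use
 * an update callback along these lines (a sketch only; my_read_counter()
 * and the 32-bit mask are assumptions, not part of this header):
 *
 *	static DEFINE_CLOCK_DATA(cd);
 *
 *	static void my_update_sched_clock(void)
 *	{
 *		update_sched_clock(&cd, my_read_counter(), (u32)~0);
 *	}
 */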

/*
 * If your clock rate is known at compile time, using this will allow
 * you to optimize the mult/shift loads away. This is paired with
 * init_fixed_sched_clock() to ensure that your mult/shift are correct.
 */
static inline unsigned long long cyc_to_fixed_sched_clock(struct clock_data *cd,
	u32 cyc, u32 mask, u32 mult, u32 shift)
{
	u64 epoch_ns;
	u32 epoch_cyc;

	/*
	 * Load the epoch_cyc and epoch_ns atomically. We do this by
	 * ensuring that we always write epoch_cyc, epoch_ns and
	 * epoch_cyc_copy in strict order, and read them in strict order.
	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
	 * the middle of an update, and we should repeat the load.
	 */
	do {
		epoch_cyc = cd->epoch_cyc;
		smp_rmb();
		epoch_ns = cd->epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd->epoch_cyc_copy);

	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, mult, shift);
}
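
/*
 * For example, when the counter rate is fixed at build time (a sketch;
 * MY_SC_MULT, MY_SC_SHIFT and my_read_counter() are assumed constants
 * and helpers, not part of this header):
 *
 *	unsigned long long notrace sched_clock(void)
 *	{
 *		return cyc_to_fixed_sched_clock(&cd, my_read_counter(),
 *						(u32)~0, MY_SC_MULT,
 *						MY_SC_SHIFT);
 *	}
 */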

/*
 * Otherwise, you need to use this, which will obtain the mult/shift
 * from the clock_data structure. Use init_sched_clock() with this.
 */
static inline unsigned long long cyc_to_sched_clock(struct clock_data *cd,
	u32 cyc, u32 mask)
{
	return cyc_to_fixed_sched_clock(cd, cyc, mask, cd->mult, cd->shift);
}
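
/*
 * For example (a sketch, reusing the assumed my_read_counter() helper
 * from above), when the rate is only known at runtime:
 *
 *	unsigned long long notrace sched_clock(void)
 *	{
 *		return cyc_to_sched_clock(&cd, my_read_counter(), (u32)~0);
 *	}
 */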

/*
 * Initialize the clock data - calculate the appropriate multiplier
 * and shift. Also set up a timer that refreshes the epoch at the
 * appropriate interval by calling your update handler.
 */
void init_sched_clock(struct clock_data *cd, void (*update)(void),
	unsigned int clock_bits, unsigned long rate);
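
/*
 * For example, from a platform's timer initialization (a sketch; the
 * 32-bit counter width and 1 MHz rate are assumptions):
 *
 *	static void __init my_timer_init(void)
 *	{
 *		init_sched_clock(&cd, my_update_sched_clock, 32, 1000000);
 *	}
 */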

/*
 * Use this initialization function rather than init_sched_clock() if
 * you're using cyc_to_fixed_sched_clock(); it will warn if the mult
 * and shift constants you pass are incorrect.
 */
static inline void init_fixed_sched_clock(struct clock_data *cd,
	void (*update)(void), unsigned int bits, unsigned long rate,
	u32 mult, u32 shift)
{
	init_sched_clock(cd, update, bits, rate);
	if (cd->mult != mult || cd->shift != shift) {
		pr_crit("sched_clock: wrong multiply/shift: %u>>%u vs calculated %u>>%u\n"
			"sched_clock: fix multiply/shift to avoid scheduler hiccups\n",
			mult, shift, cd->mult, cd->shift);
	}
}
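
/*
 * Note: init_sched_clock() derives cd->mult and cd->shift from the
 * counter rate (via the kernel's clocks_calc_mult_shift() helper), so
 * the constants passed above should match what that calculation
 * produces for your rate.
 */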

extern void sched_clock_postinit(void);

#endif