#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/once.h>
#include <linux/random.h>
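
/*
 * Work item used to defer disabling a DO_ONCE() static key to process
 * context: it carries the key that should be patched out once the init
 * function has run.
 */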
struct once_work {
	struct work_struct work;
	struct static_key *key;
};
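
/*
 * Workqueue callback: drop the reference on the static key so the
 * DO_ONCE() fast path gets patched out, then free the work item.
 */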
static void once_deferred(struct work_struct *w)
{
	struct once_work *work;

	work = container_of(w, struct once_work, work);
	BUG_ON(!static_key_enabled(work->key));
	static_key_slow_dec(work->key);
	kfree(work);
}
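
/*
 * static_key_slow_dec() can sleep, while DO_ONCE() may be used from atomic
 * context, so the key is not disabled directly at the call site; instead a
 * work item is allocated (GFP_ATOMIC) and the patching is deferred to the
 * system workqueue.
 */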
static void once_disable_jump(struct static_key *key)
{
	struct once_work *w;

	w = kmalloc(sizeof(*w), GFP_ATOMIC);
	if (!w)
		return;

	INIT_WORK(&w->work, once_deferred);
	w->key = key;
	schedule_work(&w->work);
}
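
/* Serializes the DO_ONCE() slow path and protects the per-site "done" flags. */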
static DEFINE_SPINLOCK(once_lock);
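
/*
 * Called by the DO_ONCE() macro on the slow path. Returns true with
 * once_lock held (and interrupts disabled) if the init function still
 * needs to run; returns false, with the lock already dropped, if this
 * call site has already been marked done.
 */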
bool __do_once_start(bool *done, unsigned long *flags)
	__acquires(once_lock)
{
	spin_lock_irqsave(&once_lock, *flags);
	if (*done) {
		spin_unlock_irqrestore(&once_lock, *flags);
		/* Keep sparse happy by restoring an even lock count on
		 * this lock. In case we return here, we don't call into
		 * __do_once_done but return early in the DO_ONCE() macro.
		 */
		__acquire(once_lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(__do_once_start);
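
/*
 * Called by DO_ONCE() after the init function has run: mark the site as
 * done, drop once_lock and kick off the deferred static-key disable so
 * subsequent calls take the patched-out fast path.
 */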
void __do_once_done(bool *done, struct static_key *once_key,
		    unsigned long *flags)
	__releases(once_lock)
{
	*done = true;
	spin_unlock_irqrestore(&once_lock, *flags);
	once_disable_jump(once_key);
}
EXPORT_SYMBOL(__do_once_done);
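
/*
 * Illustrative usage (not part of this file): callers go through the
 * DO_ONCE() macro from <linux/once.h> rather than using the helpers
 * above directly, e.g. to seed a value exactly once on first use:
 *
 *	static u32 seed;		(hypothetical caller state)
 *
 *	void some_fast_path(void)	(hypothetical caller)
 *	{
 *		DO_ONCE(get_random_bytes, &seed, sizeof(seed));
 *		... use seed ...
 *	}
 *
 * get_random_once() in <linux/once.h> is a convenience wrapper around
 * exactly this pattern.
 */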