// SPDX-License-Identifier: GPL-2.0
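/*
 * Backing code for the DO_ONCE() macro in <linux/once.h>.
 *
 * DO_ONCE() runs an init function exactly once at runtime, no matter
 * how many CPUs race on the first call. The fast path is a static key
 * (jump label): after the init function has run, the key is disabled
 * so later callers branch straight past the whole block. Disabling a
 * static key via static_key_slow_dec() may sleep, while DO_ONCE()
 * callers can be in atomic context, so the disable is deferred to a
 * workqueue (see once_disable_jump() below).
 */
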
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/once.h>
#include <linux/random.h>

/* Deferred request to disable the static branch behind a DO_ONCE() site. */
struct once_work {
	struct work_struct work;
	struct static_key *key;
};

/* Runs from the workqueue, in process context, where disabling the
 * static branch via static_key_slow_dec() is allowed to sleep.
 */
static void once_deferred(struct work_struct *w)
{
	struct once_work *work;

	work = container_of(w, struct once_work, work);
	BUG_ON(!static_key_enabled(work->key));
	static_key_slow_dec(work->key);
	kfree(work);
}

static void once_disable_jump(struct static_key *key)
{
	struct once_work *w;

	/* GFP_ATOMIC: callers of DO_ONCE() may hold spinlocks or run
	 * with interrupts disabled. If the allocation fails, the key
	 * simply stays enabled and the cheap *done check remains.
	 */
	w = kmalloc(sizeof(*w), GFP_ATOMIC);
	if (!w)
		return;

	INIT_WORK(&w->work, once_deferred);
	w->key = key;
	schedule_work(&w->work);
}

static DEFINE_SPINLOCK(once_lock);

bool __do_once_start(bool *done, unsigned long *flags)
	__acquires(once_lock)
{
	spin_lock_irqsave(&once_lock, *flags);
	if (*done) {
		spin_unlock_irqrestore(&once_lock, *flags);
		/* Keep sparse happy by restoring an even lock count on
		 * this lock. In case we return here, we don't call into
		 * __do_once_done but return early in the DO_ONCE() macro.
		 */
		__acquire(once_lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(__do_once_start);
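
/*
 * For reference: DO_ONCE() in <linux/once.h> pairs the two helpers in
 * this file roughly as sketched below (paraphrased, see the header for
 * the exact macro). Once once_deferred() has disabled the key, the
 * whole block is patched out at run time via the static branch.
 *
 *	if (static_key_true(&___once_key)) {
 *		if (__do_once_start(&___done, &___flags)) {
 *			func(__VA_ARGS__);
 *			__do_once_done(&___done, &___once_key, &___flags);
 *		}
 *	}
 */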

/* Mark this DO_ONCE() site done, drop the lock, and kick off the
 * deferred disabling of the site's static branch.
 */
void __do_once_done(bool *done, struct static_key *once_key,
		    unsigned long *flags)
	__releases(once_lock)
{
	*done = true;
	spin_unlock_irqrestore(&once_lock, *flags);
	once_disable_jump(once_key);
}
EXPORT_SYMBOL(__do_once_done);
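
/*
 * Usage sketch (illustrative only, not part of this file). The common
 * entry point is get_random_once() from <linux/once.h>, which expands
 * to DO_ONCE(get_random_bytes, ...); "demo_secret" and "demo_use" are
 * made-up names for the example:
 *
 *	static u8 demo_secret[16];
 *
 *	static void demo_use(void)
 *	{
 *		get_random_once(demo_secret, sizeof(demo_secret));
 *		... demo_secret is now seeded; later calls skip the
 *		... init entirely through the patched static branch.
 *	}
 */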