blob: bfb7420d0de33b118f4dbf1aacbd624f60b45cc5 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Hannes Frederic Sowa46234252015-10-08 01:20:35 +02002#include <linux/slab.h>
3#include <linux/spinlock.h>
4#include <linux/once.h>
5#include <linux/random.h>
6
/* Deferred work item used to disable a DO_ONCE() static key from
 * process context; allocated in once_disable_jump() and freed by the
 * work handler once_deferred() after the key has been decremented.
 */
struct once_work {
	struct work_struct work;	/* queued via schedule_work() */
	struct static_key *key;		/* key to static_key_slow_dec() */
};
11
Hannes Frederic Sowac90aeb92015-10-08 01:20:36 +020012static void once_deferred(struct work_struct *w)
Hannes Frederic Sowa46234252015-10-08 01:20:35 +020013{
Hannes Frederic Sowac90aeb92015-10-08 01:20:36 +020014 struct once_work *work;
Hannes Frederic Sowa46234252015-10-08 01:20:35 +020015
Hannes Frederic Sowac90aeb92015-10-08 01:20:36 +020016 work = container_of(w, struct once_work, work);
Hannes Frederic Sowa46234252015-10-08 01:20:35 +020017 BUG_ON(!static_key_enabled(work->key));
18 static_key_slow_dec(work->key);
19 kfree(work);
20}
21
Hannes Frederic Sowac90aeb92015-10-08 01:20:36 +020022static void once_disable_jump(struct static_key *key)
Hannes Frederic Sowa46234252015-10-08 01:20:35 +020023{
Hannes Frederic Sowac90aeb92015-10-08 01:20:36 +020024 struct once_work *w;
Hannes Frederic Sowa46234252015-10-08 01:20:35 +020025
26 w = kmalloc(sizeof(*w), GFP_ATOMIC);
27 if (!w)
28 return;
29
Hannes Frederic Sowac90aeb92015-10-08 01:20:36 +020030 INIT_WORK(&w->work, once_deferred);
Hannes Frederic Sowa46234252015-10-08 01:20:35 +020031 w->key = key;
32 schedule_work(&w->work);
33}
34
/* Serializes every DO_ONCE() instance system-wide; taken with IRQs
 * saved/disabled in __do_once_start()/__do_once_done().
 */
static DEFINE_SPINLOCK(once_lock);
Hannes Frederic Sowac90aeb92015-10-08 01:20:36 +020037bool __do_once_start(bool *done, unsigned long *flags)
38 __acquires(once_lock)
39{
40 spin_lock_irqsave(&once_lock, *flags);
Hannes Frederic Sowa46234252015-10-08 01:20:35 +020041 if (*done) {
Hannes Frederic Sowac90aeb92015-10-08 01:20:36 +020042 spin_unlock_irqrestore(&once_lock, *flags);
43 /* Keep sparse happy by restoring an even lock count on
44 * this lock. In case we return here, we don't call into
45 * __do_once_done but return early in the DO_ONCE() macro.
46 */
47 __acquire(once_lock);
Hannes Frederic Sowa46234252015-10-08 01:20:35 +020048 return false;
49 }
50
Hannes Frederic Sowa46234252015-10-08 01:20:35 +020051 return true;
52}
Hannes Frederic Sowac90aeb92015-10-08 01:20:36 +020053EXPORT_SYMBOL(__do_once_start);
54
/* Leave the DO_ONCE() critical section after the once-guarded code ran.
 *
 * Marks the region done while still holding once_lock, drops the lock,
 * then hands the static key to a workqueue for disabling (see
 * once_disable_jump()) rather than patching it here — this path may be
 * called in atomic context.
 */
void __do_once_done(bool *done, struct static_key *once_key,
		    unsigned long *flags)
	__releases(once_lock)
{
	*done = true;	/* must be set before the lock is released */
	spin_unlock_irqrestore(&once_lock, *flags);
	once_disable_jump(once_key);
}
EXPORT_SYMBOL(__do_once_done);