blob: 5a5a1941ca156d36270b37095bf7ef9f7c2247f1 [file] [log] [blame]
Paul E. McKenney8c366db2019-01-17 10:39:22 -08001/* SPDX-License-Identifier: GPL-2.0+ */
Paul E. McKenneyd8be8172017-03-25 09:59:38 -07002/*
3 * Sleepable Read-Copy Update mechanism for mutual exclusion,
4 * tiny variant.
5 *
Paul E. McKenneyd8be8172017-03-25 09:59:38 -07006 * Copyright (C) IBM Corporation, 2017
7 *
Paul E. McKenney8c366db2019-01-17 10:39:22 -08008 * Author: Paul McKenney <paulmck@linux.ibm.com>
Paul E. McKenneyd8be8172017-03-25 09:59:38 -07009 */
10
11#ifndef _LINUX_SRCU_TINY_H
12#define _LINUX_SRCU_TINY_H
13
14#include <linux/swait.h>
15
/*
 * Per-instance state for Tiny SRCU: two reader-nesting counters (one
 * per reader index), grace-period bookkeeping flags, a wait queue that
 * the last srcu_read_unlock() uses to wake the grace-period handler,
 * and a singly linked list of pending callbacks.
 */
struct srcu_struct {
	short srcu_lock_nesting[2];	/* srcu_read_lock() nesting depth. */
	short srcu_idx;			/* Current reader array element. */
	u8 srcu_gp_running;		/* GP workqueue running? */
	u8 srcu_gp_waiting;		/* GP waiting for readers? */
	struct swait_queue_head srcu_wq;
					/* Last srcu_read_unlock() wakes GP. */
	struct rcu_head *srcu_cb_head;	/* Pending callbacks: Head. */
	struct rcu_head **srcu_cb_tail;	/* Pending callbacks: Tail. */
	struct work_struct srcu_work;	/* For driving grace periods. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
};
30
31void srcu_drive_gp(struct work_struct *wp);
32
/*
 * Static initializer for a Tiny SRCU srcu_struct.  The second argument
 * is ignored; it exists only so this macro's signature matches Tree
 * SRCU's __SRCU_STRUCT_INIT(), which needs the name of per-CPU data.
 * Fields not listed (counters, flags, callback head) zero-initialize.
 */
#define __SRCU_STRUCT_INIT(name, __ignored)				\
{									\
	.srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq),	\
	.srcu_cb_tail = &name.srcu_cb_head,				\
	.srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp),	\
	__SRCU_DEP_MAP_INIT(name)					\
}
40
/*
 * This odd _STATIC_ arrangement is needed for API compatibility with
 * Tree SRCU, which needs some per-CPU data.
 */
#define DEFINE_SRCU(name) \
	struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
#define DEFINE_STATIC_SRCU(name) \
	static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
Paul E. McKenneyd8be8172017-03-25 09:59:38 -070049
Paul E. McKenneyaacb5d92018-10-28 10:32:51 -070050void synchronize_srcu(struct srcu_struct *ssp);
Paul E. McKenneyd8be8172017-03-25 09:59:38 -070051
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct. Can be invoked from irq/bh handlers, but the matching
 * __srcu_read_unlock() must be in the same handler instance. Returns an
 * index that must be passed to the matching srcu_read_unlock().
 */
static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	/*
	 * READ_ONCE()/WRITE_ONCE() keep the compiler from tearing or
	 * refetching these accesses, which can race with an interrupt
	 * handler's reader on the same CPU and with the GP workqueue's
	 * flipping of ->srcu_idx.
	 */
	idx = READ_ONCE(ssp->srcu_idx);
	WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1);
	return idx;
}
66
/*
 * Tiny SRCU provides no separate expedited path: an expedited grace
 * period simply maps onto an ordinary synchronize_srcu().
 */
static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	synchronize_srcu(ssp);
}
71
/*
 * Wait for pending SRCU callbacks.  Implemented here as a full
 * synchronize_srcu(); presumably a complete grace period also drains
 * previously queued callbacks in this tiny variant -- see
 * srcu_drive_gp() for confirmation.
 */
static inline void srcu_barrier(struct srcu_struct *ssp)
{
	synchronize_srcu(ssp);
}
76
Paul E. McKenney115a1a52017-05-22 13:31:03 -070077/* Defined here to avoid size increase for non-torture kernels. */
Paul E. McKenneyaacb5d92018-10-28 10:32:51 -070078static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
Paul E. McKenney115a1a52017-05-22 13:31:03 -070079 char *tt, char *tf)
80{
81 int idx;
82
Paul E. McKenneyaacb5d92018-10-28 10:32:51 -070083 idx = READ_ONCE(ssp->srcu_idx) & 0x1;
Paul E. McKenney115a1a52017-05-22 13:31:03 -070084 pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
85 tt, tf, idx,
Paul E. McKenneyaacb5d92018-10-28 10:32:51 -070086 READ_ONCE(ssp->srcu_lock_nesting[!idx]),
87 READ_ONCE(ssp->srcu_lock_nesting[idx]));
Paul E. McKenney115a1a52017-05-22 13:31:03 -070088}
89
Paul E. McKenneyd8be8172017-03-25 09:59:38 -070090#endif