rcu: Eliminate unused expedited_normal counter
Expedited grace periods no longer fall back to normal grace periods
in response to lock contention, given that expedited grace periods
now use the rcu_node tree so as to avoid contention. This commit
therefore removes the expedited_normal counter.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
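---
For reference, the only code that ever incremented expedited_normal was the
fallback taken when a concurrent CPU-hotplug operation (or persistent lock
contention) blocked the old expedited path.  A rough sketch of that path,
reconstructed from memory of the pre-rcu_node-tree implementation and shown
here only as an approximation, not a quote of any particular release:

	if (!try_get_online_cpus()) {
		/* CPU-hotplug operation in flight, fall back to normal GP. */
		wait_rcu_gp(call_rcu_sched);
		atomic_long_inc(&rsp->expedited_normal);
		return;
	}

With the current rcu_node-tree-based expedited machinery no such fallback
remains, so the counter can only ever read zero, and both it and its "n="
trace field can go.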
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index 00a3a38..6549012 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -237,7 +237,7 @@
The output of "cat rcu/rcu_preempt/rcuexp" looks as follows:
-s=21872 wd1=0 wd2=0 wd3=5 n=0 enq=0 sc=21872
+s=21872 wd1=0 wd2=0 wd3=5 enq=0 sc=21872
These fields are as follows:
@@ -249,9 +249,6 @@
completed an expedited grace period that satisfies the attempted
request. "Our work is done."
-o "n" is number of times that a concurrent CPU-hotplug operation
- forced a fallback to a normal grace period.
-
o "enq" is the number of quiescent states still outstanding.
o "sc" is the number of times that the attempt to start a
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index fe98dd2..8f750df 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -521,7 +521,6 @@ struct rcu_state {
struct mutex exp_mutex; /* Serialize expedited GP. */
struct mutex exp_wake_mutex; /* Serialize wakeup. */
unsigned long expedited_sequence; /* Take a ticket. */
- atomic_long_t expedited_normal; /* # fallbacks to normal. */
atomic_t expedited_need_qs; /* # CPUs left to check in. */
struct swait_queue_head expedited_wq; /* Wait for check-ins. */
int ncpus_snap; /* # CPUs seen last time. */
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
index b1f2897..2e932cd 100644
--- a/kernel/rcu/tree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -194,9 +194,8 @@ static int show_rcuexp(struct seq_file *m, void *v)
s2 += atomic_long_read(&rdp->exp_workdone2);
s3 += atomic_long_read(&rdp->exp_workdone3);
}
- seq_printf(m, "s=%lu wd0=%lu wd1=%lu wd2=%lu wd3=%lu n=%lu enq=%d sc=%lu\n",
+ seq_printf(m, "s=%lu wd0=%lu wd1=%lu wd2=%lu wd3=%lu enq=%d sc=%lu\n",
rsp->expedited_sequence, s0, s1, s2, s3,
- atomic_long_read(&rsp->expedited_normal),
atomic_read(&rsp->expedited_need_qs),
rsp->expedited_sequence / 2);
return 0;
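A minimal userspace sketch of how a consumer of the rcuexp file might parse
the updated line format (the format string mirrors the seq_printf() above;
the sample line and its values are made up for illustration, and the actual
debugfs file is only present when RCU tracing is configured in):

#include <stdio.h>

int main(void)
{
	/* Hypothetical sample line in the post-patch format. */
	const char *line = "s=42 wd0=1 wd1=2 wd2=3 wd3=4 enq=0 sc=21";
	unsigned long s, wd0, wd1, wd2, wd3, sc;
	int enq;

	if (sscanf(line, "s=%lu wd0=%lu wd1=%lu wd2=%lu wd3=%lu enq=%d sc=%lu",
		   &s, &wd0, &wd1, &wd2, &wd3, &enq, &sc) != 7) {
		fprintf(stderr, "unexpected rcuexp format\n");
		return 1;
	}
	printf("parsed: s=%lu wd0=%lu wd1=%lu wd2=%lu wd3=%lu enq=%d sc=%lu\n",
	       s, wd0, wd1, wd2, wd3, enq, sc);
	return 0;
}

A line that still contains an "n=" field indicates a kernel predating this
change.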