perf_counter: Remove perf_counter_context::nr_enabled
Now that prctl() no longer disables other people's counters,
remove the PMU cache code that deals with that.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090523163013.032998331@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 4159ee5..2ddf5e3 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -516,7 +516,6 @@
struct list_head event_list;
int nr_counters;
int nr_active;
- int nr_enabled;
int is_active;
atomic_t refcount;
struct task_struct *task;
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 4c86a63..cb40625 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -134,8 +134,6 @@
list_add_rcu(&counter->event_entry, &ctx->event_list);
ctx->nr_counters++;
- if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
- ctx->nr_enabled++;
}
/*
@@ -150,8 +148,6 @@
if (list_empty(&counter->list_entry))
return;
ctx->nr_counters--;
- if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
- ctx->nr_enabled--;
list_del_init(&counter->list_entry);
list_del_rcu(&counter->event_entry);
@@ -406,7 +402,6 @@
else
counter_sched_out(counter, cpuctx, ctx);
counter->state = PERF_COUNTER_STATE_OFF;
- ctx->nr_enabled--;
}
spin_unlock_irqrestore(&ctx->lock, flags);
@@ -448,7 +443,6 @@
if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
update_counter_times(counter);
counter->state = PERF_COUNTER_STATE_OFF;
- ctx->nr_enabled--;
}
spin_unlock_irq(&ctx->lock);
@@ -759,7 +753,6 @@
goto unlock;
counter->state = PERF_COUNTER_STATE_INACTIVE;
counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
- ctx->nr_enabled++;
/*
* If the counter is in a group and isn't the group leader,
@@ -850,7 +843,6 @@
counter->state = PERF_COUNTER_STATE_INACTIVE;
counter->tstamp_enabled =
ctx->time - counter->total_time_enabled;
- ctx->nr_enabled++;
}
out:
spin_unlock_irq(&ctx->lock);
@@ -910,8 +902,7 @@
struct perf_counter_context *ctx2)
{
return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
- && ctx1->parent_gen == ctx2->parent_gen
- && ctx1->nr_enabled == ctx2->nr_enabled;
+ && ctx1->parent_gen == ctx2->parent_gen;
}
/*