ftrace: fix locking

We can legitimately end up holding all of the per-cpu trace buffer locks
at once, and since they currently share a single lock class, lockdep
flags the nested acquisitions as possible recursive locking. Put each
buffer's lock into its own lock class (via a per-instance
lock_class_key) to avoid the false positive.
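
This is the usual lockdep per-instance key pattern: embed a
struct lock_class_key next to the spinlock and register it right after
spin_lock_init(), so every instance gets its own lock class. A minimal
sketch of that pattern (demo_cpu_buf and demo_buf_init are made-up
names for illustration, not the trace code):

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	struct demo_cpu_buf {
		spinlock_t		lock;
		struct lock_class_key	lock_key;	/* one class per instance */
	};

	static void demo_buf_init(struct demo_cpu_buf *buf)
	{
		spin_lock_init(&buf->lock);
		/*
		 * Without this, all demo_cpu_buf locks share one lock class
		 * and lockdep treats holding two of them at once as
		 * possible recursive locking.
		 */
		lockdep_set_class(&buf->lock, &buf->lock_key);
	}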

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a40687a..b3811ca 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1865,11 +1865,8 @@
 
 	}
 
-	for_each_possible_cpu(cpu) {
+	for_each_cpu_mask(cpu, mask) {
 		data = iter->tr->data[cpu];
-
-		if (!cpu_isset(cpu, mask))
-			continue;
 		spin_unlock(&data->lock);
 		atomic_dec(&data->disabled);
 	}
@@ -2076,6 +2073,7 @@
 	for_each_possible_cpu(i) {
 		data = global_trace.data[i];
 		spin_lock_init(&data->lock);
+		lockdep_set_class(&data->lock, &data->lock_key);
 		page = list_entry(pages.next, struct page, lru);
 		list_del_init(&page->lru);
 		list_add_tail(&page->lru, &data->trace_pages);
@@ -2084,6 +2082,7 @@
 #ifdef CONFIG_TRACER_MAX_TRACE
 		data = max_tr.data[i];
 		spin_lock_init(&data->lock);
+		lockdep_set_class(&data->lock, &data->lock_key);
 		page = list_entry(pages.next, struct page, lru);
 		list_del_init(&page->lru);
 		list_add_tail(&page->lru, &data->trace_pages);
@@ -2203,5 +2202,4 @@
 	}
 	return ret;
 }
-
 fs_initcall(tracer_alloc_buffers);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 29a7ea5..b040835 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -56,6 +56,7 @@
 	struct list_head	trace_pages;
 	atomic_t		disabled;
 	spinlock_t		lock;
+	struct lock_class_key	lock_key;
 	cycle_t			time_offset;
 
 	/* these fields get copied into max-trace: */