[S390] lockless idle time accounting

Replace the spinlock used in the idle time accounting with a sequence
counter mechanism analogous to seqlock.
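
The writer makes the counter odd before touching the per-cpu data and
even again afterwards; a reader that observes an odd value, or a value
that changed while it was reading, simply retries. A minimal user-space
sketch of that protocol follows (the struct and function names here are
illustrative only, with C11 fences standing in for smp_wmb()/smp_rmb()):

	#include <stdatomic.h>

	struct idle_demo {
		unsigned int sequence;		/* even: stable, odd: update in flight */
		unsigned long long idle_count;
		unsigned long long idle_time;
	};

	/* Writer side, mirroring the update in vtime.c: odd, write, even. */
	static void demo_update(struct idle_demo *p, unsigned long long delta)
	{
		p->sequence++;				/* now odd */
		atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
		p->idle_time += delta;
		p->idle_count++;
		atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
		p->sequence++;				/* even again */
	}

	/* Reader side, mirroring show_idle_count()/show_idle_time():
	 * retry while an update is in flight or raced with the read. */
	static unsigned long long demo_read(const struct idle_demo *p)
	{
		unsigned int seq;
		unsigned long long val;

		do {
			seq = p->sequence;
			atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
			val = p->idle_time;
			atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
		} while ((seq & 1) || p->sequence != seq);
		return val;
	}

Readers never block the writer, so the idle path no longer needs to
take a lock just to let the sysfs attributes report consistent values.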

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index ec917d4..7a3817a 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -178,7 +178,7 @@
 }
 
 struct s390_idle_data {
-	spinlock_t lock;
+	unsigned int sequence;
 	unsigned long long idle_count;
 	unsigned long long idle_enter;
 	unsigned long long idle_time;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index fd8e311..2270730 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -856,13 +856,20 @@
 {
 	struct s390_idle_data *idle;
 	unsigned long long idle_count;
+	unsigned int sequence;
 
 	idle = &per_cpu(s390_idle, dev->id);
-	spin_lock(&idle->lock);
+repeat:
+	sequence = idle->sequence;
+	smp_rmb();
+	if (sequence & 1)
+		goto repeat;
 	idle_count = idle->idle_count;
 	if (idle->idle_enter)
 		idle_count++;
-	spin_unlock(&idle->lock);
+	smp_rmb();
+	if (idle->sequence != sequence)
+		goto repeat;
 	return sprintf(buf, "%llu\n", idle_count);
 }
 static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
@@ -872,15 +879,22 @@
 {
 	struct s390_idle_data *idle;
 	unsigned long long now, idle_time, idle_enter;
+	unsigned int sequence;
 
 	idle = &per_cpu(s390_idle, dev->id);
-	spin_lock(&idle->lock);
 	now = get_clock();
+repeat:
+	sequence = idle->sequence;
+	smp_rmb();
+	if (sequence & 1)
+		goto repeat;
 	idle_time = idle->idle_time;
 	idle_enter = idle->idle_enter;
 	if (idle_enter != 0ULL && idle_enter < now)
 		idle_time += now - idle_enter;
-	spin_unlock(&idle->lock);
+	smp_rmb();
+	if (idle->sequence != sequence)
+		goto repeat;
 	return sprintf(buf, "%llu\n", idle_time >> 12);
 }
 static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
@@ -908,11 +922,7 @@
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		idle = &per_cpu(s390_idle, cpu);
-		spin_lock_irq(&idle->lock);
-		idle->idle_enter = 0;
-		idle->idle_time = 0;
-		idle->idle_count = 0;
-		spin_unlock_irq(&idle->lock);
+		memset(idle, 0, sizeof(struct s390_idle_data));
 		if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
 			return NOTIFY_BAD;
 		break;
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index ade17e7..c41bb0d 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -27,9 +27,7 @@
 
 static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
 
-DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = {
-	.lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
-};
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
 static inline __u64 get_vtimer(void)
 {
@@ -151,11 +149,13 @@
 		vq->elapsed -= vq->idle - S390_lowcore.async_enter_timer;
 	}
 
-	spin_lock(&idle->lock);
+	idle->sequence++;
+	smp_wmb();
 	idle->idle_time += idle_time;
 	idle->idle_enter = 0ULL;
 	idle->idle_count++;
-	spin_unlock(&idle->lock);
+	smp_wmb();
+	idle->sequence++;
 }
 
 void vtime_stop_cpu(void)
@@ -242,15 +242,23 @@
 {
 	struct s390_idle_data *idle;
 	unsigned long long now, idle_time, idle_enter;
+	unsigned int sequence;
 
 	idle = &per_cpu(s390_idle, cpu);
-	spin_lock(&idle->lock);
+
 	now = get_clock();
+repeat:
+	sequence = idle->sequence;
+	smp_rmb();
+	if (sequence & 1)
+		goto repeat;
 	idle_time = 0;
 	idle_enter = idle->idle_enter;
 	if (idle_enter != 0ULL && idle_enter < now)
 		idle_time = now - idle_enter;
-	spin_unlock(&idle->lock);
+	smp_rmb();
+	if (idle->sequence != sequence)
+		goto repeat;
 	return idle_time;
 }