[S390] topology: increase poll frequency if change is anticipated

Increase the cpu topology change poll frequency if a change is
anticipated. Otherwise a user might be confused at having to wait up
to a minute to see a change that should be visible immediately.
However, there is no guarantee that the change will happen within the
time frame for which the poll frequency is increased.
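To make the mechanism concrete: the heuristic is an atomic "fast poll
budget". topology_expect_change() grants up to 60 fast polls, and each
timer rearm in set_topology_timer() consumes one. Below is a minimal,
self-contained userspace sketch of that pattern using C11 atomics; the
intervals are simulated and the helpers (next_poll_interval(),
expect_change()) are illustrative only, not kernel API:

#include <stdatomic.h>
#include <stdio.h>

#define FAST_POLL_MS	100	/* jiffies + HZ / 10 in the patch */
#define SLOW_POLL_MS	60000	/* jiffies + HZ * 60 in the patch */

static atomic_int topology_poll;

/*
 * Pick the next poll interval, consuming one unit of the fast poll
 * budget if any is left.  This mirrors the kernel's
 * atomic_add_unless(&topology_poll, -1, 0) in set_topology_timer().
 */
static int next_poll_interval(void)
{
	int old = atomic_load(&topology_poll);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&topology_poll, &old, old - 1))
			return FAST_POLL_MS;
	}
	return SLOW_POLL_MS;
}

/*
 * Grant up to 60 fast polls.  Racy on purpose: the worst case is a
 * few extra fast polls, which is harmless for a heuristic.
 */
static void expect_change(void)
{
	if (atomic_load(&topology_poll) > 60)
		return;
	atomic_fetch_add(&topology_poll, 60);
}

int main(void)
{
	printf("idle interval: %d ms\n", next_poll_interval());
	expect_change();
	printf("after expect_change: %d ms\n", next_poll_interval());
	return 0;
}

With a fresh budget the timer fires every 100ms (HZ / 10) for roughly
the next six seconds, then falls back to the usual 60 second interval.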

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 7016dd7..0837de8 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -35,11 +35,13 @@
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
 void store_topology(struct sysinfo_15_1_x *info);
+void topology_expect_change(void);
 
 #else /* CONFIG_SCHED_BOOK */
 
 static inline void topology_schedule_update(void) { }
 static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
+static inline void topology_expect_change(void) { }
 
 #endif /* CONFIG_SCHED_BOOK */
 
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 109e742..8aba77d 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -867,6 +867,7 @@
 			if (!rc) {
 				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
 				cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+				topology_expect_change();
 			}
 		}
 		break;
@@ -876,6 +877,7 @@
 			if (!rc) {
 				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
 				cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+				topology_expect_change();
 			}
 		}
 		break;
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 2abad30..e06fb85 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -31,7 +31,6 @@
 static int topology_enabled = 1;
 static void topology_work_fn(struct work_struct *work);
 static struct sysinfo_15_1_x *tl_info;
-static struct timer_list topology_timer;
 static void set_topology_timer(void);
 static DECLARE_WORK(topology_work, topology_work_fn);
 /* topology_lock protects the core linked list */
@@ -297,12 +296,30 @@
 	set_topology_timer();
 }
 
+static struct timer_list topology_timer =
+	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);
+
+static atomic_t topology_poll = ATOMIC_INIT(0);
+
 static void set_topology_timer(void)
 {
-	topology_timer.function = topology_timer_fn;
-	topology_timer.data = 0;
-	topology_timer.expires = jiffies + 60 * HZ;
-	add_timer(&topology_timer);
+	if (atomic_add_unless(&topology_poll, -1, 0))
+		mod_timer(&topology_timer, jiffies + HZ / 10);
+	else
+		mod_timer(&topology_timer, jiffies + HZ * 60);
+}
+
+void topology_expect_change(void)
+{
+	if (!MACHINE_HAS_TOPOLOGY)
+		return;
+	/* This is racy, but it doesn't matter since it is just a heuristic.
+	 * Worst case is that we poll in a higher frequency for a bit longer.
+	 */
+	if (atomic_read(&topology_poll) > 60)
+		return;
+	atomic_add(60, &topology_poll);
+	set_topology_timer();
 }
 
 static int __init early_parse_topology(char *p)
@@ -379,8 +396,10 @@
 	if (cpu_management == val)
 		goto out;
 	rc = topology_set_cpu_management(val);
-	if (!rc)
-		cpu_management = val;
+	if (rc)
+		goto out;
+	cpu_management = val;
+	topology_expect_change();
 out:
 	mutex_unlock(&smp_cpu_state_mutex);
 	put_online_cpus();
@@ -438,7 +457,6 @@
 		topology_update_polarization_simple();
 		goto out;
 	}
-	init_timer_deferrable(&topology_timer);
 	set_topology_timer();
 out:
 	update_cpu_core_map();