/*
 * Copyright (C) 2014 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/idle.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/pm-cps.h>
#include <asm/smp-cps.h>
#include <asm/uasm.h>

/*
 * cps_nc_entry_fn - type of a generated non-coherent state entry function
 * @online: the count of online coupled VPEs
 * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
 *
 * The code entering & exiting non-coherent states is generated at runtime
 * using uasm, in order to ensure that the compiler cannot insert a stray
 * memory access at an unfortunate time and to allow the generation of
 * optimal core-specific code, particularly for cache routines. If
 * coupled_coherence is non-zero and this is the entry function for the
 * CPS_PM_NC_WAIT state, it returns the number of VPEs that were in the wait
 * state at the point this VPE left it. It returns garbage if
 * coupled_coherence is zero or this is not the entry function for
 * CPS_PM_NC_WAIT.
 */
typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);

/*
 * The entry point of the generated non-coherent idle state entry/exit
 * functions. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
				  nc_asm_enter);

/* Bitmap indicating which states are supported by the system */
DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);

/*
 * Indicates the number of coupled VPEs ready to operate in a non-coherent
 * state. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);

/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);

/*
 * Used to synchronize entry to deep idle states. Actually per-core rather
 * than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);

/* Saved CPU state across the CPS_PM_POWER_GATED state */
DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);

/* A somewhat arbitrary number of labels & relocs for uasm */
static struct uasm_label labels[32];
static struct uasm_reloc relocs[32];

enum mips_reg {
	zero, at, v0, v1, a0, a1, a2, a3,
	t0, t1, t2, t3, t4, t5, t6, t7,
	s0, s1, s2, s3, s4, s5, s6, s7,
	t8, t9, k0, k1, gp, sp, fp, ra,
};

bool cps_pm_support_state(enum cps_pm_state state)
{
	return test_bit(state, state_support);
}

static void coupled_barrier(atomic_t *a, unsigned online)
{
	/*
	 * This function is effectively the same as
	 * cpuidle_coupled_parallel_barrier, which can't be used here since
	 * there's no cpuidle device.
	 */

	if (!coupled_coherence)
		return;

	smp_mb__before_atomic();
	atomic_inc(a);

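	/* First rendezvous: wait for all online coupled VPEs to arrive */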
	while (atomic_read(a) < online)
		cpu_relax();

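	/*
	 * Second rendezvous: the last VPE through resets the counter so the
	 * barrier can be reused; the others spin until that reset happens.
	 */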
	if (atomic_inc_return(a) == online * 2) {
		atomic_set(a, 0);
		return;
	}

	while (atomic_read(a) > online)
		cpu_relax();
}

int cps_pm_enter_state(enum cps_pm_state state)
{
	unsigned cpu = smp_processor_id();
	unsigned core = current_cpu_data.core;
	unsigned online, left;
	cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
	u32 *core_ready_count, *nc_core_ready_count;
	void *nc_addr;
	cps_nc_entry_fn entry;
	struct core_boot_config *core_cfg;
	struct vpe_boot_config *vpe_cfg;

	/* Check that there is an entry function for this state */
	entry = per_cpu(nc_asm_enter, core)[state];
	if (!entry)
		return -EINVAL;

	/* Calculate which coupled CPUs (VPEs) are online */
#if defined(CONFIG_MIPS_MT) || defined(CONFIG_CPU_MIPSR6)
	if (cpu_online(cpu)) {
		cpumask_and(coupled_mask, cpu_online_mask,
			    &cpu_sibling_map[cpu]);
		online = cpumask_weight(coupled_mask);
		cpumask_clear_cpu(cpu, coupled_mask);
	} else
#endif
	{
		cpumask_clear(coupled_mask);
		online = 1;
	}

	/* Setup the VPE to run mips_cps_pm_restore when started again */
	if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			return -EINVAL;

		core_cfg = &mips_cps_core_bootcfg[core];
		vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(&current_cpu_data)];
		vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
		vpe_cfg->gp = (unsigned long)current_thread_info();
		vpe_cfg->sp = 0;
	}

	/* Indicate that this CPU might not be coherent */
	cpumask_clear_cpu(cpu, &cpu_coherent_mask);
	smp_mb__after_atomic();

	/* Create a non-coherent mapping of the core ready_count */
	core_ready_count = per_cpu(ready_count, core);
	nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
				   (unsigned long)core_ready_count);
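	/* kmap_noncoherent() maps the whole page; add ready_count's offset within it */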
	nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
	nc_core_ready_count = nc_addr;

	/* Ensure ready_count is zero-initialised before the assembly runs */
	ACCESS_ONCE(*nc_core_ready_count) = 0;
	coupled_barrier(&per_cpu(pm_barrier, core), online);

	/* Run the generated entry code */
	left = entry(online, nc_core_ready_count);

	/* Remove the non-coherent mapping of ready_count */
	kunmap_noncoherent();

	/* Indicate that this CPU is definitely coherent */
	cpumask_set_cpu(cpu, &cpu_coherent_mask);

	/*
	 * If this VPE is the first to leave the non-coherent wait state then
	 * it needs to wake up any coupled VPEs still running their wait
	 * instruction so that they return to cpuidle, which can then complete
	 * coordination between the coupled VPEs & provide the governor with
	 * a chance to reflect on the length of time the VPEs were in the
	 * idle state.
	 */
	if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
		arch_send_call_function_ipi_mask(coupled_mask);

	return 0;
}

static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
				  struct uasm_reloc **pr,
				  const struct cache_desc *cache,
				  unsigned op, int lbl)
{
	unsigned cache_size = cache->ways << cache->waybit;
	unsigned i;
	const unsigned unroll_lines = 32;

	/* If the cache isn't present this function has it easy */
	if (cache->flags & MIPS_CACHE_NOT_PRESENT)
		return;

	/* Load base address */
	UASM_i_LA(pp, t0, (long)CKSEG0);

	/* Calculate end address */
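	/* addiu's immediate is a signed 16-bit value, so larger sizes need a full LA */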
	if (cache_size < 0x8000)
		uasm_i_addiu(pp, t1, t0, cache_size);
	else
		UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));

	/* Start of cache op loop */
	uasm_build_label(pl, *pp, lbl);

	/* Generate the cache ops */
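	/*
	 * MIPSr6 reduced the range of the cache op offset field, so bump the
	 * base address every iteration rather than using immediate offsets.
	 */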
	for (i = 0; i < unroll_lines; i++) {
		if (cpu_has_mips_r6) {
			uasm_i_cache(pp, op, 0, t0);
			uasm_i_addiu(pp, t0, t0, cache->linesz);
		} else {
			uasm_i_cache(pp, op, i * cache->linesz, t0);
		}
	}

	if (!cpu_has_mips_r6)
		/* Update the base address */
		uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);

	/* Loop if we haven't reached the end address yet */
	uasm_il_bne(pp, pr, t0, t1, lbl);
	uasm_i_nop(pp);
}

static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
			     struct uasm_reloc **pr,
			     const struct cpuinfo_mips *cpu_info,
			     int lbl)
{
	unsigned i, fsb_size = 8;
	unsigned num_loads = (fsb_size * 3) / 2;
	unsigned line_stride = 2;
	unsigned line_size = cpu_info->dcache.linesz;
	unsigned perf_counter, perf_event;
	unsigned revision = cpu_info->processor_id & PRID_REV_MASK;

	/*
	 * Determine whether this CPU requires an FSB flush, and if so which
	 * performance counter/event reflect stalls due to a full FSB.
	 */
	switch (__get_cpu_type(cpu_info->cputype)) {
	case CPU_INTERAPTIV:
		perf_counter = 1;
		perf_event = 51;
		break;

	case CPU_PROAPTIV:
		/* Newer proAptiv cores don't require this workaround */
		if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
			return 0;

		/* On older ones it's unavailable */
		return -1;

	default:
		/* Assume that the CPU does not need this workaround */
		return 0;
	}

	/*
	 * Ensure that the fill/store buffer (FSB) is not holding the results
	 * of a prefetch, since if it is then the CPC sequencer may become
	 * stuck in the D3 (ClrBus) state whilst entering a low power state.
	 */

	/* Preserve perf counter setup */
	uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0);	/* PerfCtlN */
	uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1);	/* PerfCntN */

	/* Setup perf counter to count FSB full pipeline stalls */
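	/*
	 * The event number occupies the PerfCtl event field (bit 5 upwards);
	 * 0xf sets the EXL/K/S/U bits so stalls are counted in every mode.
	 */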
	uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
	uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0);	/* PerfCtlN */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1);	/* PerfCntN */
	uasm_i_ehb(pp);

	/* Base address for loads */
	UASM_i_LA(pp, t0, (long)CKSEG0);

	/* Start of clear loop */
	uasm_build_label(pl, *pp, lbl);

	/* Perform some loads to fill the FSB */
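	/*
	 * Each load targets a distinct cache line (line_stride lines apart)
	 * and num_loads exceeds fsb_size, so the refills they trigger should
	 * be enough to fill the FSB.
	 */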
	for (i = 0; i < num_loads; i++)
		uasm_i_lw(pp, zero, i * line_size * line_stride, t0);

	/*
	 * Invalidate the new D-cache entries so that the cache will need
	 * refilling (via the FSB) if the loop is executed again.
	 */
	for (i = 0; i < num_loads; i++) {
		uasm_i_cache(pp, Hit_Invalidate_D,
			     i * line_size * line_stride, t0);
		uasm_i_cache(pp, Hit_Writeback_Inv_SD,
			     i * line_size * line_stride, t0);
	}

	/* Barrier ensuring previous cache invalidates are complete */
	uasm_i_sync(pp, STYPE_SYNC);
	uasm_i_ehb(pp);

	/* Check whether the pipeline stalled due to the FSB being full */
	uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1);	/* PerfCntN */

	/* Loop if it didn't */
	uasm_il_beqz(pp, pr, t1, lbl);
	uasm_i_nop(pp);

	/* Restore perf counter 1. The count may well now be wrong... */
	uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0);	/* PerfCtlN */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1);	/* PerfCntN */
	uasm_i_ehb(pp);

	return 0;
}

static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
				struct uasm_reloc **pr,
				unsigned r_addr, int lbl)
{
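	/* Atomically set bit 31 of the word at r_addr, retrying if the SC fails */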
	uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
	uasm_build_label(pl, *pp, lbl);
	uasm_i_ll(pp, t1, 0, r_addr);
	uasm_i_or(pp, t1, t1, t0);
	uasm_i_sc(pp, t1, 0, r_addr);
	uasm_il_beqz(pp, pr, t1, lbl);
	uasm_i_nop(pp);
}

static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *buf, *p;
	const unsigned r_online = a0;
	const unsigned r_nc_count = a1;
	const unsigned r_pcohctl = t7;
	const unsigned max_instrs = 256;
	unsigned cpc_cmd;
	int err;
	enum {
		lbl_incready = 1,
		lbl_poll_cont,
		lbl_secondary_hang,
		lbl_disable_coherence,
		lbl_flush_fsb,
		lbl_invicache,
		lbl_flushdcache,
		lbl_hang,
		lbl_set_cont,
		lbl_secondary_cont,
		lbl_decready,
	};

	/* Allocate a buffer to hold the generated code */
	p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
	if (!buf)
		return NULL;

	/* Clear labels & relocs ready for (re)use */
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			goto out_err;

		/*
		 * Save CPU state. Note the non-standard calling convention
		 * with the return address placed in v0 to avoid clobbering
		 * the ra register before it is saved.
		 */
		UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
		uasm_i_jalr(&p, v0, t0);
		uasm_i_nop(&p);
	}

	/*
	 * Load addresses of required CM & CPC registers. This is done early
	 * because they're needed in both the enable & disable coherence steps
	 * but in the coupled case the enable step will only run on one VPE.
	 */
	UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());

	if (coupled_coherence) {
		/* Increment ready_count */
		uasm_i_sync(&p, STYPE_SYNC_MB);
		uasm_build_label(&l, p, lbl_incready);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, 1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_incready);
		uasm_i_addiu(&p, t1, t1, 1);
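		/* Delay slot: leave the incremented count in t1 for the test below */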

		/* Barrier ensuring all CPUs see the updated r_nc_count value */
		uasm_i_sync(&p, STYPE_SYNC_MB);

		/*
		 * If this is the last VPE to become ready for non-coherence
		 * then it should branch below.
		 */
		uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
		uasm_i_nop(&p);

		if (state < CPS_PM_POWER_GATED) {
			/*
			 * Otherwise this is not the last VPE to become ready
			 * for non-coherence. It needs to wait until coherence
			 * has been disabled before proceeding, which it will do
			 * by polling for the top bit of ready_count being set.
			 */
			uasm_i_addiu(&p, t1, zero, -1);
			uasm_build_label(&l, p, lbl_poll_cont);
			uasm_i_lw(&p, t0, 0, r_nc_count);
			uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
			uasm_i_ehb(&p);
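			/* Be polite: yield to other TCs on the core while polling */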
			if (cpu_has_mipsmt)
				uasm_i_yield(&p, zero, t1);
			uasm_il_b(&p, &r, lbl_poll_cont);
			uasm_i_nop(&p);
		} else {
			/*
			 * The core will lose power & this VPE will not continue
			 * so it can simply halt here.
			 */
			if (cpu_has_mipsmt) {
				/* Halt the VPE via C0 tchalt register */
				uasm_i_addiu(&p, t0, zero, TCHALT_H);
				uasm_i_mtc0(&p, t0, 2, 4);
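				/* (TCHalt is CP0 register 2, select 4) */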
			} else if (cpu_has_vp) {
				/* Halt the VP via the CPC VP_STOP register */
				unsigned int vpe_id;

				vpe_id = cpu_vpe_id(&cpu_data[cpu]);
				uasm_i_addiu(&p, t0, zero, 1 << vpe_id);
				UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop());
				uasm_i_sw(&p, t0, 0, t1);
			} else {
				BUG();
			}
			uasm_build_label(&l, p, lbl_secondary_hang);
			uasm_il_b(&p, &r, lbl_secondary_hang);
			uasm_i_nop(&p);
		}
	}

	/*
	 * This is the point of no return - this VPE will now proceed to
	 * disable coherence. At this point we *must* be sure that no other
	 * VPE within the core will interfere with the L1 dcache.
	 */
	uasm_build_label(&l, p, lbl_disable_coherence);

	/* Invalidate the L1 icache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
			      Index_Invalidate_I, lbl_invicache);

	/* Writeback & invalidate the L1 dcache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
			      Index_Writeback_Inv_D, lbl_flushdcache);

	/* Barrier ensuring previous cache invalidates are complete */
	uasm_i_sync(&p, STYPE_SYNC);
	uasm_i_ehb(&p);

	if (mips_cm_revision() < CM_REV_CM3) {
		/*
		 * Disable all but self interventions. The load from COHCTL is
		 * defined by the interAptiv & proAptiv SUMs as ensuring that the
		 * operation resulting from the preceding store is complete.
		 */
		uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
		uasm_i_sw(&p, t0, 0, r_pcohctl);
		uasm_i_lw(&p, t0, 0, r_pcohctl);

		/* Barrier to ensure write to coherence control is complete */
		uasm_i_sync(&p, STYPE_SYNC);
		uasm_i_ehb(&p);
	}

	/* Disable coherence */
	uasm_i_sw(&p, zero, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	if (state >= CPS_PM_CLOCK_GATED) {
		err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
					lbl_flush_fsb);
		if (err)
			goto out_err;

		/* Determine the CPC command to issue */
		switch (state) {
		case CPS_PM_CLOCK_GATED:
			cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
			break;
		case CPS_PM_POWER_GATED:
			cpc_cmd = CPC_Cx_CMD_PWRDOWN;
			break;
		default:
			BUG();
			goto out_err;
		}

		/* Issue the CPC command */
		UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
		uasm_i_addiu(&p, t1, zero, cpc_cmd);
		uasm_i_sw(&p, t1, 0, t0);

		if (state == CPS_PM_POWER_GATED) {
			/* If anything goes wrong just hang */
			uasm_build_label(&l, p, lbl_hang);
			uasm_il_b(&p, &r, lbl_hang);
			uasm_i_nop(&p);

			/*
			 * There's no point generating more code, the core is
			 * powered down & if powered back up will run from the
			 * reset vector not from here.
			 */
			goto gen_done;
		}

		/* Barrier to ensure write to CPC command is complete */
		uasm_i_sync(&p, STYPE_SYNC);
		uasm_i_ehb(&p);
	}

	if (state == CPS_PM_NC_WAIT) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		if (coupled_coherence)
			cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
					    lbl_set_cont);

		/*
		 * VPEs which did not disable coherence will continue
		 * executing, after coherence has been disabled, from this
		 * point.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Now perform our wait */
		uasm_i_wait(&p, 0);
	}

	/*
	 * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
	 * will run this. The first will actually re-enable coherence & the
	 * rest will just be performing a rather unusual nop.
	 */
	uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3
				? CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK
				: CM3_GCR_Cx_COHERENCE_COHEN_MSK);

	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Barrier to ensure write to coherence control is complete */
	uasm_i_sync(&p, STYPE_SYNC);
	uasm_i_ehb(&p);

	if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
		/* Decrement ready_count */
		uasm_build_label(&l, p, lbl_decready);
		uasm_i_sync(&p, STYPE_SYNC_MB);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, -1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_decready);
		uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);
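		/*
		 * Delay slot: mask the pre-decrement count into v0, which
		 * becomes the generated function's return value.
		 */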

		/* Barrier ensuring all CPUs see the updated r_nc_count value */
		uasm_i_sync(&p, STYPE_SYNC_MB);
	}

	if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);

		/*
		 * This core will be reliant upon another core sending a
		 * power-up command to the CPC in order to resume operation.
		 * Thus an arbitrary VPE can't trigger the core leaving the
		 * idle state and the one that disables coherence might as well
		 * be the one to re-enable it. The rest will continue from here
		 * after that has been done.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Barrier ensuring all CPUs see the updated r_nc_count value */
		uasm_i_sync(&p, STYPE_SYNC_MB);
	}

	/* The core is coherent, time to return to C code */
	uasm_i_jr(&p, ra);
	uasm_i_nop(&p);

gen_done:
	/* Ensure the code didn't exceed the resources allocated for it */
	BUG_ON((p - buf) > max_instrs);
	BUG_ON((l - labels) > ARRAY_SIZE(labels));
	BUG_ON((r - relocs) > ARRAY_SIZE(relocs));

	/* Patch branch offsets */
	uasm_resolve_relocs(relocs, labels);

	/* Flush the icache */
	local_flush_icache_range((unsigned long)buf, (unsigned long)p);

	return buf;
out_err:
	kfree(buf);
	return NULL;
}

static int cps_pm_online_cpu(unsigned int cpu)
{
	enum cps_pm_state state;
	unsigned core = cpu_data[cpu].core;
	void *entry_fn, *core_rc;

	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
		if (per_cpu(nc_asm_enter, core)[state])
			continue;
		if (!test_bit(state, state_support))
			continue;

		entry_fn = cps_gen_entry_code(cpu, state);
		if (!entry_fn) {
			pr_err("Failed to generate core %u state %u entry\n",
			       core, state);
			clear_bit(state, state_support);
		}

		per_cpu(nc_asm_enter, core)[state] = entry_fn;
	}

	if (!per_cpu(ready_count, core)) {
		core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
		if (!core_rc) {
			pr_err("Failed to allocate core %u ready_count\n", core);
			return -ENOMEM;
		}
		per_cpu(ready_count, core) = core_rc;
	}

	return 0;
}

static int __init cps_pm_init(void)
{
	/* A CM is required for all non-coherent states */
	if (!mips_cm_present()) {
		pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
		return 0;
	}

	/*
	 * If interrupts were enabled whilst running a wait instruction on a
	 * non-coherent core then the VPE may end up processing interrupts
	 * whilst non-coherent. That would be bad.
	 */
	if (cpu_wait == r4k_wait_irqoff)
		set_bit(CPS_PM_NC_WAIT, state_support);
	else
		pr_warn("pm-cps: non-coherent wait unavailable\n");

	/* Detect whether a CPC is present */
	if (mips_cpc_present()) {
		/* Detect whether clock gating is implemented */
		if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK)
			set_bit(CPS_PM_CLOCK_GATED, state_support);
		else
			pr_warn("pm-cps: CPC does not support clock gating\n");

		/* Power gating is available with CPS SMP & any CPC */
		if (mips_cps_smp_in_use())
			set_bit(CPS_PM_POWER_GATED, state_support);
		else
			pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
	} else {
		pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
	}

	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PM_CPS_CPU_ONLINE",
				 cps_pm_online_cpu, NULL);
}
arch_initcall(cps_pm_init);